diff --git a/.gitlab-ci.d/base.yml b/.gitlab-ci.d/base.yml index ef173a34e63..bf3d8efab6a 100644 --- a/.gitlab-ci.d/base.yml +++ b/.gitlab-ci.d/base.yml @@ -24,6 +24,10 @@ variables: # Each script line from will be in a collapsible section in the job output # and show the duration of each line. FF_SCRIPT_SECTIONS: 1 + # The project has a fairly fat GIT repo so we try and avoid bringing in things + # we don't need. The --filter options avoid blobs and tree references we aren't going to use + # and we also avoid fetching tags. + GIT_FETCH_EXTRA_FLAGS: --filter=blob:none --filter=tree:0 --no-tags --prune --quiet interruptible: true @@ -41,6 +45,10 @@ variables: - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_TAG' when: never + # Scheduled runs on mainline don't get pipelines except for the special Coverity job + - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"' + when: never + # Cirrus jobs can't run unless the creds / target repo are set - if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)' when: never diff --git a/.gitlab-ci.d/buildtest-template.yml b/.gitlab-ci.d/buildtest-template.yml index 4fbfeb66674..22045add806 100644 --- a/.gitlab-ci.d/buildtest-template.yml +++ b/.gitlab-ci.d/buildtest-template.yml @@ -14,6 +14,7 @@ - export CCACHE_DIR="$CCACHE_BASEDIR/ccache" - export CCACHE_MAXSIZE="500M" - export PATH="$CCACHE_WRAPPERSDIR:$PATH" + - du -sh .git - mkdir build - cd build - ccache --zero-stats diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml index 0a01746cea9..cfdff175c38 100644 --- a/.gitlab-ci.d/buildtest.yml +++ b/.gitlab-ci.d/buildtest.yml @@ -41,7 +41,7 @@ build-system-ubuntu: variables: IMAGE: ubuntu2204 CONFIGURE_ARGS: --enable-docs - TARGETS: alpha-softmmu microblaze-softmmu mips64el-softmmu + TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu MAKE_CHECK_ARGS: check-build check-system-ubuntu: @@ -61,7 +61,7 @@ avocado-system-ubuntu: variables: IMAGE: ubuntu2204 MAKE_CHECK_ARGS: check-avocado - AVOCADO_TAGS: arch:alpha arch:microblaze arch:mips64el + AVOCADO_TAGS: arch:alpha arch:microblazeel arch:mips64el build-system-debian: extends: @@ -167,6 +167,75 @@ build-system-centos: x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu MAKE_CHECK_ARGS: check-build +# Previous QEMU release. Used for cross-version migration tests. +build-previous-qemu: + extends: .native_build_job_template + artifacts: + when: on_success + expire_in: 2 days + paths: + - build-previous + exclude: + - build-previous/**/*.p + - build-previous/**/*.a.p + - build-previous/**/*.fa.p + - build-previous/**/*.c.o + - build-previous/**/*.c.o.d + - build-previous/**/*.fa + needs: + job: amd64-opensuse-leap-container + variables: + IMAGE: opensuse-leap + TARGETS: x86_64-softmmu aarch64-softmmu + # Override the default flags as we need more to grab the old version + GIT_FETCH_EXTRA_FLAGS: --prune --quiet + before_script: + - export QEMU_PREV_VERSION="$(sed 's/\([0-9.]*\)\.[0-9]*/v\1.0/' VERSION)" + - git remote add upstream https://gitlab.com/qemu-project/qemu + - git fetch upstream refs/tags/$QEMU_PREV_VERSION:refs/tags/$QEMU_PREV_VERSION + - git checkout $QEMU_PREV_VERSION + after_script: + - mv build build-previous + +.migration-compat-common: + extends: .common_test_job_template + needs: + - job: build-previous-qemu + - job: build-system-opensuse + # The old QEMU could have bugs unrelated to migration that are + # already fixed in the current development branch, so this test + # might fail. 
+ allow_failure: true + variables: + IMAGE: opensuse-leap + MAKE_CHECK_ARGS: check-build + script: + # Use the migration-tests from the older QEMU tree. This avoids + # testing an old QEMU against new features/tests that it is not + # compatible with. + - cd build-previous + # old to new + - QTEST_QEMU_BINARY_SRC=./qemu-system-${TARGET} + QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test + # new to old + - QTEST_QEMU_BINARY_DST=./qemu-system-${TARGET} + QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test + +# This job needs to be disabled until we can have an aarch64 CPU model that +# will both (1) support both KVM and TCG, and (2) provide a stable ABI. +# Currently only "-cpu max" can provide (1), however it doesn't guarantee +# (2). Mark this test skipped until later. +migration-compat-aarch64: + extends: .migration-compat-common + variables: + TARGET: aarch64 + QEMU_JOB_SKIPPED: 1 + +migration-compat-x86_64: + extends: .migration-compat-common + variables: + TARGET: x86_64 + check-system-centos: extends: .native_test_job_template needs: @@ -184,7 +253,7 @@ avocado-system-centos: variables: IMAGE: centos8 MAKE_CHECK_ARGS: check-avocado - AVOCADO_TAGS: arch:ppc64 arch:or1k arch:390x arch:x86_64 arch:rx + AVOCADO_TAGS: arch:ppc64 arch:or1k arch:s390x arch:x86_64 arch:rx arch:sh4 arch:nios2 build-system-opensuse: @@ -592,7 +661,7 @@ build-without-defaults: --disable-pie --disable-qom-cast-debug --disable-strip - TARGETS: avr-softmmu mips64-softmmu s390x-softmmu sh4-softmmu + TARGETS: avr-softmmu s390x-softmmu sh4-softmmu sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user MAKE_CHECK_ARGS: check @@ -662,3 +731,40 @@ pages: - public variables: QEMU_JOB_PUBLISH: 1 + +coverity: + image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG + stage: build + allow_failure: true + timeout: 3h + needs: + - job: amd64-fedora-container + optional: true + before_script: + - dnf install -y curl wget + script: + # would be nice to cancel the job if over quota (https://gitlab.com/gitlab-org/gitlab/-/issues/256089) + # for example: + # curl --request POST --header "PRIVATE-TOKEN: $CI_JOB_TOKEN" "${CI_SERVER_URL}/api/v4/projects/${CI_PROJECT_ID}/jobs/${CI_JOB_ID}/cancel + - 'scripts/coverity-scan/run-coverity-scan --check-upload-only || { exitcode=$?; if test $exitcode = 1; then + exit 0; + else + exit $exitcode; + fi; }; + scripts/coverity-scan/run-coverity-scan --update-tools-only > update-tools.log 2>&1 || { cat update-tools.log; exit 1; }; + scripts/coverity-scan/run-coverity-scan --no-update-tools' + rules: + - if: '$COVERITY_TOKEN == null' + when: never + - if: '$COVERITY_EMAIL == null' + when: never + # Never included on upstream pipelines, except for schedules + - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"' + when: on_success + - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM' + when: never + # Forks don't get any pipeline unless QEMU_CI=1 or QEMU_CI=2 is set + - if: '$QEMU_CI != "1" && $QEMU_CI != "2"' + when: never + # Always manual on forks even if $QEMU_CI == "2" + - when: manual diff --git a/.gitlab-ci.d/cirrus.yml b/.gitlab-ci.d/cirrus.yml index 64f2e25afab..4671f069c35 100644 --- a/.gitlab-ci.d/cirrus.yml +++ b/.gitlab-ci.d/cirrus.yml @@ -13,7 +13,7 @@ .cirrus_build_job: extends: .base_job_template stage: build - image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master + image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:latest needs: [] # 20 mins larger than 
"timeout_in" in cirrus/build.yml # as there's often a 5-10 minute delay before Cirrus CI @@ -52,7 +52,7 @@ x64-freebsd-13-build: NAME: freebsd-13 CIRRUS_VM_INSTANCE_TYPE: freebsd_instance CIRRUS_VM_IMAGE_SELECTOR: image_family - CIRRUS_VM_IMAGE_NAME: freebsd-13-2 + CIRRUS_VM_IMAGE_NAME: freebsd-13-3 CIRRUS_VM_CPUS: 8 CIRRUS_VM_RAM: 8G UPDATE_COMMAND: pkg update; pkg upgrade -y diff --git a/.gitlab-ci.d/container-cross.yml b/.gitlab-ci.d/container-cross.yml index 8d235cbea01..e3103940a0e 100644 --- a/.gitlab-ci.d/container-cross.yml +++ b/.gitlab-ci.d/container-cross.yml @@ -101,11 +101,6 @@ cris-fedora-cross-container: variables: NAME: fedora-cris-cross -win32-fedora-cross-container: - extends: .container_job_template - variables: - NAME: fedora-win32-cross - win64-fedora-cross-container: extends: .container_job_template variables: diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml index d19d98cde05..987ba9694ba 100644 --- a/.gitlab-ci.d/crossbuilds.yml +++ b/.gitlab-ci.d/crossbuilds.yml @@ -159,20 +159,6 @@ cross-mips64el-kvm-only: IMAGE: debian-mips64el-cross EXTRA_CONFIGURE_OPTS: --disable-tcg --target-list=mips64el-softmmu -cross-win32-system: - extends: .cross_system_build_job - needs: - job: win32-fedora-cross-container - variables: - IMAGE: fedora-win32-cross - EXTRA_CONFIGURE_OPTS: --enable-fdt=internal - CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu - microblazeel-softmmu mips64el-softmmu nios2-softmmu - artifacts: - when: on_success - paths: - - build/qemu-setup*.exe - cross-win64-system: extends: .cross_system_build_job needs: diff --git a/.gitlab-ci.d/custom-runners.yml b/.gitlab-ci.d/custom-runners.yml index 8e5b9500f40..a0e79acd39b 100644 --- a/.gitlab-ci.d/custom-runners.yml +++ b/.gitlab-ci.d/custom-runners.yml @@ -10,13 +10,14 @@ # gitlab-runner. To avoid problems that gitlab-runner can cause while # reusing the GIT repository, let's enable the clone strategy, which # guarantees a fresh repository on each job run. 
-variables: - GIT_STRATEGY: clone # All custom runners can extend this template to upload the testlog # data as an artifact and also feed the junit report .custom_runner_template: extends: .base_job_template + variables: + GIT_STRATEGY: clone + GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet artifacts: name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" expire_in: 7 days diff --git a/.gitlab-ci.d/opensbi.yml b/.gitlab-ci.d/opensbi.yml index fd293e6c317..42f137d624e 100644 --- a/.gitlab-ci.d/opensbi.yml +++ b/.gitlab-ci.d/opensbi.yml @@ -24,6 +24,10 @@ - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /opensbi/i' when: manual + # Scheduled runs on mainline don't get pipelines except for the special Coverity job + - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"' + when: never + # Run if any files affecting the build output are touched - changes: - .gitlab-ci.d/opensbi.yml diff --git a/.gitlab-ci.d/windows.yml b/.gitlab-ci.d/windows.yml index 8fc08218d28..94834269ec7 100644 --- a/.gitlab-ci.d/windows.yml +++ b/.gitlab-ci.d/windows.yml @@ -1,4 +1,4 @@ -.shared_msys2_builder: +msys2-64bit: extends: .base_job_template tags: - shared-windows @@ -14,9 +14,22 @@ stage: build timeout: 100m variables: + # Select the "64 bit, gcc and MSVCRT" MSYS2 environment + MSYSTEM: MINGW64 # This feature doesn't (currently) work with PowerShell, it stops # the echo'ing of commands being run and doesn't show any timing FF_SCRIPT_SECTIONS: 0 + # do not remove "--without-default-devices"! + # commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices" + # changed to compile QEMU with the --without-default-devices switch + # for this job, because otherwise the build could not complete within + # the project timeout. + CONFIGURE_ARGS: --target-list=x86_64-softmmu --without-default-devices -Ddebug=false -Doptimization=0 + # qTests don't run successfully with "--without-default-devices", + # so let's exclude the qtests from CI for now. 
+ TEST_ARGS: --no-suite qtest + # The Windows git is a bit older so override the default + GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet artifacts: name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" expire_in: 7 days @@ -72,33 +85,35 @@ - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed bison diffutils flex git grep make sed - $MINGW_TARGET-binutils - $MINGW_TARGET-capstone - $MINGW_TARGET-ccache - $MINGW_TARGET-curl - $MINGW_TARGET-cyrus-sasl - $MINGW_TARGET-dtc - $MINGW_TARGET-gcc - $MINGW_TARGET-glib2 - $MINGW_TARGET-gnutls - $MINGW_TARGET-gtk3 - $MINGW_TARGET-libgcrypt - $MINGW_TARGET-libjpeg-turbo - $MINGW_TARGET-libnfs - $MINGW_TARGET-libpng - $MINGW_TARGET-libssh - $MINGW_TARGET-libtasn1 - $MINGW_TARGET-lzo2 - $MINGW_TARGET-nettle - $MINGW_TARGET-ninja - $MINGW_TARGET-pixman - $MINGW_TARGET-pkgconf - $MINGW_TARGET-python - $MINGW_TARGET-SDL2 - $MINGW_TARGET-SDL2_image - $MINGW_TARGET-snappy - $MINGW_TARGET-zstd - $EXTRA_PACKAGES " + mingw-w64-x86_64-binutils + mingw-w64-x86_64-capstone + mingw-w64-x86_64-ccache + mingw-w64-x86_64-curl + mingw-w64-x86_64-cyrus-sasl + mingw-w64-x86_64-dtc + mingw-w64-x86_64-gcc + mingw-w64-x86_64-glib2 + mingw-w64-x86_64-gnutls + mingw-w64-x86_64-gtk3 + mingw-w64-x86_64-libgcrypt + mingw-w64-x86_64-libjpeg-turbo + mingw-w64-x86_64-libnfs + mingw-w64-x86_64-libpng + mingw-w64-x86_64-libssh + mingw-w64-x86_64-libtasn1 + mingw-w64-x86_64-libusb + mingw-w64-x86_64-lzo2 + mingw-w64-x86_64-nettle + mingw-w64-x86_64-ninja + mingw-w64-x86_64-pixman + mingw-w64-x86_64-pkgconf + mingw-w64-x86_64-python + mingw-w64-x86_64-SDL2 + mingw-w64-x86_64-SDL2_image + mingw-w64-x86_64-snappy + mingw-w64-x86_64-spice + mingw-w64-x86_64-usbredir + mingw-w64-x86_64-zstd" - Write-Output "Running build at $(Get-Date -Format u)" - $env:CHERE_INVOKING = 'yes' # Preserve the current working directory - $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink @@ -115,19 +130,3 @@ - ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;" - ..\msys64\usr\bin\bash -lc "ccache --show-stats" - Write-Output "Finished build at $(Get-Date -Format u)" - -msys2-64bit: - extends: .shared_msys2_builder - variables: - MINGW_TARGET: mingw-w64-x86_64 - MSYSTEM: MINGW64 - # msys2 only ship these packages for 64-bit, not 32-bit - EXTRA_PACKAGES: $MINGW_TARGET-libusb $MINGW_TARGET-usbredir $MINGW_TARGET-spice - # do not remove "--without-default-devices"! - # commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices" - # changed to compile QEMU with the --without-default-devices switch - # for the msys2 64-bit job, due to the build could not complete within - CONFIGURE_ARGS: --target-list=x86_64-softmmu --without-default-devices -Ddebug=false -Doptimization=0 - # qTests don't run successfully with "--without-default-devices", - # so let's exclude the qtests from CI for now. 
- TEST_ARGS: --no-suite qtest diff --git a/.mailmap b/.mailmap index e12e19f6917..88fb68143e2 100644 --- a/.mailmap +++ b/.mailmap @@ -36,6 +36,8 @@ Marek Dolata mkdolata@us.ibm.com Michael Ellerman michael@ozlabs.org Nick Hudson hnick@vmware.com Timothée Cocault timothee.cocault@gmail.com +Stefan Weil +Stefan Weil Stefan Weil # There is also a: # (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162> @@ -60,6 +62,7 @@ Ian McKellar Ian McKellar via Qemu-devel Julia Suvorova via Qemu-devel Justin Terry (VM) Justin Terry (VM) via Qemu-devel Stefan Weil Stefan Weil via +Stefan Weil Stefan Weil via Andrey Drobyshev Andrey Drobyshev via BALATON Zoltan BALATON Zoltan via @@ -81,6 +84,7 @@ Greg Kurz Huacai Chen Huacai Chen James Hogan +Juan Quintela Leif Lindholm Leif Lindholm Luc Michel @@ -97,6 +101,7 @@ Philippe Mathieu-Daudé Philippe Mathieu-Daudé Roman Bolshakov Stefan Brankovic +Stefan Weil Stefan Weil Taylor Simpson Yongbok Kim diff --git a/.travis.yml b/.travis.yml index 76859d48da5..8a3ae76a7c3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,7 +35,7 @@ env: - TEST_BUILD_CMD="" - TEST_CMD="make check V=1" # This is broadly a list of "mainline" system targets which have support across the major distros - - MAIN_SOFTMMU_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu" + - MAIN_SYSTEM_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu" - CCACHE_SLOPPINESS="include_file_ctime,include_file_mtime" - CCACHE_MAXSIZE=1G - G_MESSAGES_DEBUG=error @@ -114,7 +114,7 @@ jobs: env: - TEST_CMD="make check check-tcg V=1" - CONFIG="--disable-containers --enable-fdt=system - --target-list=${MAIN_SOFTMMU_TARGETS} --cxx=/bin/false" + --target-list=${MAIN_SYSTEM_TARGETS} --cxx=/bin/false" - UNRELIABLE=true - name: "[ppc64] GCC check-tcg" @@ -184,8 +184,8 @@ jobs: - genisoimage env: - TEST_CMD="make check check-tcg V=1" - - CONFIG="--disable-containers --enable-fdt=system - --target-list=${MAIN_SOFTMMU_TARGETS},s390x-linux-user" + - CONFIG="--disable-containers + --target-list=hppa-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu" - UNRELIABLE=true script: - BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$? @@ -220,13 +220,12 @@ jobs: - libsnappy-dev - libzstd-dev - nettle-dev - - xfslibs-dev - ninja-build # Tests dependencies - genisoimage env: - - CONFIG="--disable-containers --enable-fdt=system --audio-drv-list=sdl - --disable-user --target-list-exclude=${MAIN_SOFTMMU_TARGETS}" + - CONFIG="--disable-containers --audio-drv-list=sdl --disable-user + --target-list=arm-softmmu,avr-softmmu,microblaze-softmmu,sh4eb-softmmu,sparc64-softmmu,xtensaeb-softmmu" - name: "[s390x] GCC (user)" arch: s390x @@ -240,6 +239,7 @@ jobs: - flex - bison env: + - TEST_CMD="make check check-tcg V=1" - CONFIG="--disable-containers --disable-system" - name: "[s390x] Clang (disable-tcg)" diff --git a/MAINTAINERS b/MAINTAINERS index 695e0bd34fb..f1f69220251 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -70,7 +70,6 @@ R: Daniel P. 
Berrangé R: Thomas Huth R: Markus Armbruster R: Philippe Mathieu-Daudé -R: Juan Quintela W: https://www.qemu.org/docs/master/devel/index.html S: Odd Fixes F: docs/devel/style.rst @@ -317,7 +316,6 @@ F: tests/tcg/openrisc/ PowerPC TCG CPUs M: Nicholas Piggin M: Daniel Henrique Barboza -R: Cédric Le Goater L: qemu-ppc@nongnu.org S: Odd Fixes F: target/ppc/ @@ -469,7 +467,6 @@ F: target/mips/sysemu/ PPC KVM CPUs M: Nicholas Piggin R: Daniel Henrique Barboza -R: Cédric Le Goater S: Odd Fixes F: target/ppc/kvm.c @@ -643,6 +640,7 @@ R: Strahinja Jankovic L: qemu-arm@nongnu.org S: Odd Fixes F: hw/*/allwinner* +F: hw/ide/ahci-allwinner.c F: include/hw/*/allwinner* F: hw/arm/cubieboard.c F: docs/system/arm/cubieboard.rst @@ -820,12 +818,13 @@ F: include/hw/misc/imx7_*.h F: hw/pci-host/designware.c F: include/hw/pci-host/designware.h -MPS2 +MPS2 / MPS3 M: Peter Maydell L: qemu-arm@nongnu.org S: Maintained F: hw/arm/mps2.c F: hw/arm/mps2-tz.c +F: hw/arm/mps3r.c F: hw/misc/mps2-*.c F: include/hw/misc/mps2-*.h F: hw/arm/armsse.c @@ -1123,6 +1122,25 @@ L: qemu-arm@nongnu.org S: Maintained F: hw/arm/olimex-stm32-h405.c +STM32L4x5 SoC Family +M: Arnaud Minier +M: Inès Varhol +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/stm32l4x5_soc.c +F: hw/misc/stm32l4x5_exti.c +F: hw/misc/stm32l4x5_syscfg.c +F: hw/misc/stm32l4x5_rcc.c +F: hw/gpio/stm32l4x5_gpio.c +F: include/hw/*/stm32l4x5_*.h + +B-L475E-IOT01A IoT Node +M: Arnaud Minier +M: Inès Varhol +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/b-l475e-iot01a.c + SmartFusion2 M: Subbaraya Sundeep M: Peter Maydell @@ -1155,9 +1173,7 @@ R: Joel Stanley L: qemu-arm@nongnu.org S: Maintained F: hw/*/*aspeed* -F: hw/misc/pca9552.c F: include/hw/*/*aspeed* -F: include/hw/misc/pca9552*.h F: hw/net/ftgmac100.c F: include/hw/net/ftgmac100.h F: docs/system/arm/aspeed.rst @@ -1343,6 +1359,7 @@ M: Philippe Mathieu-Daudé R: Aurelien Jarno S: Odd Fixes F: hw/isa/piix.c +F: hw/isa/fdc37m81x-superio.c F: hw/acpi/piix4.c F: hw/mips/malta.c F: hw/pci-host/gt64120.c @@ -1407,6 +1424,7 @@ Bamboo L: qemu-ppc@nongnu.org S: Orphan F: hw/ppc/ppc440_bamboo.c +F: hw/pci-host/ppc4xx_pci.c F: tests/avocado/ppc_bamboo.py e500 @@ -1488,7 +1506,6 @@ F: tests/avocado/ppc_prep_40p.py sPAPR (pseries) M: Nicholas Piggin R: Daniel Henrique Barboza -R: Cédric Le Goater R: David Gibson R: Harsh Prateek Bora L: qemu-ppc@nongnu.org @@ -1509,6 +1526,7 @@ F: tests/qtest/libqos/*spapr* F: tests/qtest/rtas* F: tests/qtest/libqos/rtas* F: tests/avocado/ppc_pseries.py +F: tests/avocado/ppc_hv_tests.py PowerNV (Non-Virtualized) M: Cédric Le Goater @@ -1526,6 +1544,14 @@ F: include/hw/pci-host/pnv* F: pc-bios/skiboot.lid F: tests/qtest/pnv* +pca955x +M: Glenn Miles +L: qemu-ppc@nongnu.org +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/gpio/pca955*.c +F: include/hw/gpio/pca955*.h + virtex_ml507 M: Edgar E. 
Iglesias L: qemu-ppc@nongnu.org @@ -1539,13 +1565,14 @@ L: qemu-ppc@nongnu.org S: Maintained F: hw/ppc/sam460ex.c F: hw/ppc/ppc440_uc.c -F: hw/ppc/ppc440_pcix.c +F: hw/pci-host/ppc440_pcix.c F: hw/display/sm501* F: hw/ide/sii3112.c F: hw/rtc/m41t80.c F: pc-bios/canyonlands.dt[sb] F: pc-bios/u-boot-sam460ex-20100605.bin F: roms/u-boot-sam460ex +F: docs/system/ppc/amigang.rst pegasos2 M: BALATON Zoltan @@ -1694,7 +1721,7 @@ F: hw/rtc/sun4v-rtc.c F: include/hw/rtc/sun4v-rtc.h Leon3 -M: Fabien Chouteau +M: Clément Chigot M: Frederic Konrad S: Maintained F: hw/sparc/leon3.c @@ -1853,7 +1880,8 @@ M: Marcel Apfelbaum R: Philippe Mathieu-Daudé R: Yanan Wang S: Supported -F: hw/core/cpu.c +F: hw/core/cpu-common.c +F: hw/core/cpu-sysemu.c F: hw/core/machine-qmp-cmds.c F: hw/core/machine.c F: hw/core/machine-smp.c @@ -1919,7 +1947,6 @@ IDE M: John Snow L: qemu-block@nongnu.org S: Odd Fixes -F: include/hw/ide.h F: include/hw/ide/ F: hw/ide/ F: hw/block/block.c @@ -2053,6 +2080,7 @@ F: hw/ppc/ppc4xx*.c F: hw/ppc/ppc440_uc.c F: hw/ppc/ppc440.h F: hw/i2c/ppc4xx_i2c.c +F: include/hw/pci-host/ppc4xx.h F: include/hw/ppc/ppc4xx.h F: include/hw/i2c/ppc4xx_i2c.h F: hw/intc/ppc-uic.c @@ -2142,7 +2170,7 @@ S: Supported F: hw/vfio/* F: include/hw/vfio/ F: docs/igd-assign.txt -F: docs/devel/vfio-migration.rst +F: docs/devel/migration/vfio.rst vfio-ccw M: Eric Farman @@ -2167,6 +2195,17 @@ F: hw/vfio/ap.c F: docs/system/s390x/vfio-ap.rst L: qemu-s390x@nongnu.org +iommufd +M: Yi Liu +M: Eric Auger +M: Zhenzhong Duan +S: Supported +F: backends/iommufd.c +F: include/sysemu/iommufd.h +F: include/qemu/chardev_open.h +F: util/chardev_open.c +F: docs/devel/vfio-iommufd.rst + vhost M: Michael S. Tsirkin S: Supported @@ -2192,6 +2231,7 @@ F: qapi/virtio.json F: net/vhost-user.c F: include/hw/virtio/ F: docs/devel/virtio* +F: docs/devel/migration/virtio.rst virtio-balloon M: Michael S. 
Tsirkin @@ -2263,8 +2303,9 @@ L: virtio-fs@lists.linux.dev virtio-input M: Gerd Hoffmann S: Odd Fixes -F: hw/input/vhost-user-input.c +F: docs/system/devices/vhost-user-input.rst F: hw/input/virtio-input*.c +F: hw/virtio/vhost-user-input.c F: include/hw/virtio/virtio-input.h F: contrib/vhost-user-input/* @@ -2293,6 +2334,12 @@ F: include/sysemu/rng*.h F: backends/rng*.c F: tests/qtest/virtio-rng-test.c +vhost-user-stubs +M: Alex Bennée +S: Maintained +F: hw/virtio/vhost-user-base.c +F: hw/virtio/vhost-user-device* + vhost-user-rng M: Mathieu Poirier S: Supported @@ -2310,6 +2357,13 @@ F: hw/virtio/vhost-user-gpio* F: include/hw/virtio/vhost-user-gpio.h F: tests/qtest/libqos/virtio-gpio.* +vhost-user-snd +M: Alex Bennée +R: Manos Pitsidianakis +S: Maintained +F: hw/virtio/vhost-user-snd* +F: include/hw/virtio/vhost-user-snd.h + vhost-user-scmi R: mzamazal@redhat.com S: Supported @@ -2352,6 +2406,7 @@ F: docs/system/devices/virtio-snd.rst nvme M: Keith Busch M: Klaus Jensen +R: Jesper Devantier L: qemu-block@nongnu.org S: Supported F: hw/nvme/* @@ -2388,8 +2443,13 @@ F: hw/net/net_tx_pkt* Vmware M: Dmitry Fleytman S: Maintained +F: docs/specs/vmw_pvscsi-spec.txt +F: hw/display/vmware_vga.c F: hw/net/vmxnet* F: hw/scsi/vmw_pvscsi* +F: pc-bios/efi-vmxnet3.rom +F: pc-bios/vgabios-vmware.bin +F: roms/config.vga-vmware F: tests/qtest/vmxnet3-test.c F: docs/specs/vwm_pvscsi-spec.rst @@ -2443,6 +2503,12 @@ S: Maintained F: hw/i2c/i2c_mux_pca954x.c F: include/hw/i2c/i2c_mux_pca954x.h +pcf8574 +M: Dmitrii Sharikhin +S: Maintained +F: hw/gpio/pcf8574.c +F: include/gpio/pcf8574.h + Generic Loader M: Alistair Francis S: Maintained @@ -2525,7 +2591,7 @@ F: include/hw/virtio/virtio-gpu.h F: docs/system/devices/virtio-gpu.rst vhost-user-blk -M: Raphael Norwitz +M: Raphael Norwitz S: Maintained F: contrib/vhost-user-blk/ F: contrib/vhost-user-scsi/ @@ -2863,6 +2929,7 @@ S: Supported F: hw/cxl/ F: hw/mem/cxl_type3.c F: include/hw/cxl/ +F: qapi/cxl.json Dirty Bitmaps M: Eric Blake @@ -2942,7 +3009,7 @@ F: include/qapi/error.h F: include/qemu/error-report.h F: qapi/error.json F: util/error.c -F: util/qemu-error.c +F: util/error-report.c F: scripts/coccinelle/err-bad-newline.cocci F: scripts/coccinelle/error-use-after-free.cocci F: scripts/coccinelle/error_propagate_null.cocci @@ -3289,6 +3356,7 @@ Stats S: Orphan F: include/sysemu/stats.h F: stats/ +F: qapi/stats.json Streams M: Edgar E. 
Iglesias @@ -3339,10 +3407,8 @@ S: Odd Fixes F: scripts/checkpatch.pl Migration -M: Juan Quintela M: Peter Xu M: Fabiano Rosas -R: Leonardo Bras S: Maintained F: hw/core/vmstate-if.c F: include/hw/vmstate-if.h @@ -3352,17 +3418,15 @@ F: migration/ F: scripts/vmstate-static-checker.py F: tests/vmstate-static-checker-data/ F: tests/qtest/migration-test.c -F: docs/devel/migration.rst +F: docs/devel/migration/ F: qapi/migration.json F: tests/migration/ F: util/userfaultfd.c X: migration/rdma* RDMA Migration -M: Juan Quintela R: Li Zhijian R: Peter Xu -R: Leonardo Bras S: Odd Fixes F: migration/rdma* @@ -3374,6 +3438,12 @@ F: include/sysemu/dirtylimit.h F: migration/dirtyrate.c F: migration/dirtyrate.h F: include/sysemu/dirtyrate.h +F: docs/devel/migration/dirty-limit.rst + +Detached LUKS header +M: Hyman Huang +S: Maintained +F: tests/qemu-iotests/tests/luks-detached-header D-Bus M: Marc-André Lureau @@ -3524,6 +3594,7 @@ F: util/iova-tree.c elf2dmp M: Viktor Prutyanov +R: Akihiko Odaki S: Maintained F: contrib/elf2dmp/ @@ -3558,6 +3629,15 @@ F: tests/qtest/adm1272-test.c F: tests/qtest/max34451-test.c F: tests/qtest/isl_pmbus_vr-test.c +FSI +M: Ninad Palsule +R: Cédric Le Goater +S: Maintained +F: hw/fsi/* +F: include/hw/fsi/* +F: docs/specs/fsi.rst +F: tests/qtest/aspeed_fsi-test.c + Firmware schema specifications M: Philippe Mathieu-Daudé R: Daniel P. Berrange @@ -3580,7 +3660,6 @@ F: tests/uefi-test-tools/ VT-d Emulation M: Michael S. Tsirkin -M: Peter Xu R: Jason Wang S: Supported F: hw/i386/intel_iommu.c @@ -3609,6 +3688,16 @@ F: hw/core/clock-vmstate.c F: hw/core/qdev-clock.c F: docs/devel/clocks.rst +Reset framework +M: Peter Maydell +S: Maintained +F: include/hw/resettable.h +F: include/hw/core/resetcontainer.h +F: include/sysemu/reset.h +F: hw/core/reset.c +F: hw/core/resettable.c +F: hw/core/resetcontainer.c + Usermode Emulation ------------------ Overall usermode emulation @@ -3649,6 +3738,7 @@ TCG Plugins M: Alex Bennée R: Alexandre Iooss R: Mahmoud Mandour +R: Pierrick Bouvier S: Maintained F: docs/devel/tcg-plugins.rst F: plugins/ @@ -4149,6 +4239,7 @@ F: docs/conf.py F: docs/*/conf.py F: docs/sphinx/ F: docs/_templates/ +F: docs/devel/docs.rst Miscellaneous ------------- diff --git a/Makefile b/Makefile index 676a4a54f48..02a257584ba 100644 --- a/Makefile +++ b/Makefile @@ -141,8 +141,13 @@ MAKE.n = $(findstring n,$(firstword $(filter-out --%,$(MAKEFLAGS)))) MAKE.k = $(findstring k,$(firstword $(filter-out --%,$(MAKEFLAGS)))) MAKE.q = $(findstring q,$(firstword $(filter-out --%,$(MAKEFLAGS)))) MAKE.nq = $(if $(word 2, $(MAKE.n) $(MAKE.q)),nq) -NINJAFLAGS = $(if $V,-v) $(if $(MAKE.n), -n) $(if $(MAKE.k), -k0) \ - $(filter-out -j, $(lastword -j1 $(filter -l% -j%, $(MAKEFLAGS)))) \ +NINJAFLAGS = \ + $(if $V,-v) \ + $(if $(MAKE.n), -n) \ + $(if $(MAKE.k), -k0) \ + $(filter-out -j, \ + $(or $(filter -l% -j%, $(MAKEFLAGS)), \ + $(if $(filter --jobserver-auth=%, $(MAKEFLAGS)),, -j1))) \ -d keepdepfile ninja-cmd-goals = $(or $(MAKECMDGOALS), all) ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g)) @@ -202,6 +207,7 @@ clean: recurse-clean ! 
-path ./roms/edk2/ArmPkg/Library/GccLto/liblto-arm.a \ -exec rm {} + rm -f TAGS cscope.* *~ */*~ + @$(MAKE) -Ctests/qemu-iotests clean VERSION = $(shell cat $(SRC_PATH)/VERSION) diff --git a/VERSION b/VERSION index 308c0cb541a..f7ee06693c1 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -8.2.2 +9.0.0 diff --git a/accel/Kconfig b/accel/Kconfig index a30cf2eb483..794e0d18d21 100644 --- a/accel/Kconfig +++ b/accel/Kconfig @@ -16,3 +16,4 @@ config KVM config XEN bool select FSDEV_9P if VIRTFS + select XEN_BUS diff --git a/accel/accel-blocker.c b/accel/accel-blocker.c index 1e7f423462d..e083f24aa80 100644 --- a/accel/accel-blocker.c +++ b/accel/accel-blocker.c @@ -41,7 +41,7 @@ void accel_blocker_init(void) void accel_ioctl_begin(void) { - if (likely(qemu_mutex_iothread_locked())) { + if (likely(bql_locked())) { return; } @@ -51,7 +51,7 @@ void accel_ioctl_begin(void) void accel_ioctl_end(void) { - if (likely(qemu_mutex_iothread_locked())) { + if (likely(bql_locked())) { return; } @@ -62,7 +62,7 @@ void accel_ioctl_end(void) void accel_cpu_ioctl_begin(CPUState *cpu) { - if (unlikely(qemu_mutex_iothread_locked())) { + if (unlikely(bql_locked())) { return; } @@ -72,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu) void accel_cpu_ioctl_end(CPUState *cpu) { - if (unlikely(qemu_mutex_iothread_locked())) { + if (unlikely(bql_locked())) { return; } @@ -105,7 +105,7 @@ void accel_ioctl_inhibit_begin(void) * We allow to inhibit only when holding the BQL, so we can identify * when an inhibitor wants to issue an ioctl easily. */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); /* Block further invocations of the ioctls outside the BQL. */ CPU_FOREACH(cpu) { diff --git a/accel/accel-system.c b/accel/accel-system.c index fa8f43757ce..f6c947dd821 100644 --- a/accel/accel-system.c +++ b/accel/accel-system.c @@ -62,7 +62,7 @@ void accel_setup_post(MachineState *ms) } /* initialize the arch-independent accel operation interfaces */ -void accel_init_ops_interfaces(AccelClass *ac) +void accel_system_init_ops_interfaces(AccelClass *ac) { const char *ac_name; char *ops_name; diff --git a/accel/accel-system.h b/accel/accel-system.h index d41c62f21b1..2d37c73c97b 100644 --- a/accel/accel-system.h +++ b/accel/accel-system.h @@ -10,6 +10,6 @@ #ifndef ACCEL_SYSTEM_H #define ACCEL_SYSTEM_H -void accel_init_ops_interfaces(AccelClass *ac); +void accel_system_init_ops_interfaces(AccelClass *ac); #endif /* ACCEL_SYSTEM_H */ diff --git a/accel/accel-target.c b/accel/accel-target.c index 7e3cbde5dfe..08626c00c2d 100644 --- a/accel/accel-target.c +++ b/accel/accel-target.c @@ -104,7 +104,7 @@ static void accel_init_cpu_interfaces(AccelClass *ac) void accel_init_interfaces(AccelClass *ac) { #ifndef CONFIG_USER_ONLY - accel_init_ops_interfaces(ac); + accel_system_init_ops_interfaces(ac); #endif /* !CONFIG_USER_ONLY */ accel_init_cpu_interfaces(ac); diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c index b75c919ac35..20519f1ea46 100644 --- a/accel/dummy-cpus.c +++ b/accel/dummy-cpus.c @@ -24,10 +24,9 @@ static void *dummy_cpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); - cpu->neg.can_do_io = true; current_cpu = cpu; #ifndef _WIN32 @@ -43,7 +42,7 @@ static void *dummy_cpu_thread_fn(void *arg) qemu_guest_random_seed_thread_part2(cpu->random_seed); do { - qemu_mutex_unlock_iothread(); + bql_unlock(); #ifndef _WIN32 do { int sig; @@ -56,11 +55,11 @@ static void *dummy_cpu_thread_fn(void *arg) 
#else qemu_sem_wait(&cpu->sem); #endif - qemu_mutex_lock_iothread(); + bql_lock(); qemu_wait_io_event(cpu); } while (!cpu->unplug); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c index abe7adf7ee8..d94d41ab6d0 100644 --- a/accel/hvf/hvf-accel-ops.c +++ b/accel/hvf/hvf-accel-ops.c @@ -424,11 +424,10 @@ static void *hvf_cpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); - cpu->neg.can_do_io = true; current_cpu = cpu; hvf_init_vcpu(cpu); @@ -449,7 +448,7 @@ static void *hvf_cpu_thread_fn(void *arg) hvf_vcpu_destroy(cpu); cpu_thread_signal_destroyed(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c index 6195150a0b4..b3c946dc4b4 100644 --- a/accel/kvm/kvm-accel-ops.c +++ b/accel/kvm/kvm-accel-ops.c @@ -33,10 +33,9 @@ static void *kvm_vcpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); - cpu->neg.can_do_io = true; current_cpu = cpu; r = kvm_init_vcpu(cpu, &error_fatal); @@ -58,7 +57,7 @@ static void *kvm_vcpu_thread_fn(void *arg) kvm_destroy_vcpu(cpu); cpu_thread_signal_destroyed(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index e39a810a4e9..931f74256e8 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -69,16 +69,6 @@ #define KVM_GUESTDBG_BLOCKIRQ 0 #endif -//#define DEBUG_KVM - -#ifdef DEBUG_KVM -#define DPRINTF(fmt, ...) \ - do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) -#else -#define DPRINTF(fmt, ...) \ - do { } while (0) -#endif - struct KVMParkedVcpu { unsigned long vcpu_id; int kvm_fd; @@ -98,7 +88,7 @@ bool kvm_allowed; bool kvm_readonly_mem_allowed; bool kvm_vm_attributes_allowed; bool kvm_msi_use_devid; -bool kvm_has_guest_debug; +static bool kvm_has_guest_debug; static int kvm_sstep_flags; static bool kvm_immediate_exit; static hwaddr kvm_max_slot_size = ~0; @@ -331,7 +321,7 @@ static int do_kvm_destroy_vcpu(CPUState *cpu) struct KVMParkedVcpu *vcpu = NULL; int ret = 0; - DPRINTF("kvm_destroy_vcpu\n"); + trace_kvm_destroy_vcpu(); ret = kvm_arch_destroy_vcpu(cpu); if (ret < 0) { @@ -341,7 +331,7 @@ static int do_kvm_destroy_vcpu(CPUState *cpu) mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); if (mmap_size < 0) { ret = mmap_size; - DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n"); + trace_kvm_failed_get_vcpu_mmap_size(); goto err; } @@ -443,7 +433,6 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp) PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET); if (cpu->kvm_dirty_gfns == MAP_FAILED) { ret = -errno; - DPRINTF("mmap'ing vcpu dirty gfns failed: %d\n", ret); goto err; } } @@ -817,7 +806,7 @@ static void kvm_dirty_ring_flush(void) * should always be with BQL held, serialization is guaranteed. * However, let's be sure of it. */ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); /* * First make sure to flush the hardware buffers by kicking all * vcpus out in a synchronous way. 
@@ -1130,6 +1119,11 @@ int kvm_vm_check_extension(KVMState *s, unsigned int extension) return ret; } +/* + * We track the poisoned pages to be able to: + * - replace them on VM reset + * - block a migration for a VM with a poisoned page + */ typedef struct HWPoisonPage { ram_addr_t ram_addr; QLIST_ENTRY(HWPoisonPage) list; @@ -1163,6 +1157,11 @@ void kvm_hwpoison_page_add(ram_addr_t ram_addr) QLIST_INSERT_HEAD(&hwpoison_page_list, page, list); } +bool kvm_hwpoisoned_mem(void) +{ + return !QLIST_EMPTY(&hwpoison_page_list); +} + static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size) { #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN @@ -1402,9 +1401,9 @@ static void *kvm_dirty_ring_reaper_thread(void *data) trace_kvm_dirty_ring_reaper("wakeup"); r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING; - qemu_mutex_lock_iothread(); + bql_lock(); kvm_dirty_ring_reap(s, NULL); - qemu_mutex_unlock_iothread(); + bql_unlock(); r->reaper_iteration++; } @@ -2000,12 +1999,17 @@ int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev) return -EINVAL; } - trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A", - vector, virq); + if (s->irq_routes->nr < s->gsi_count) { + trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A", + vector, virq); - kvm_add_routing_entry(s, &kroute); - kvm_arch_add_msi_route_post(&kroute, vector, dev); - c->changes++; + kvm_add_routing_entry(s, &kroute); + kvm_arch_add_msi_route_post(&kroute, vector, dev); + c->changes++; + } else { + kvm_irqchip_release_virq(s, virq); + return -ENOSPC; + } return virq; } @@ -2360,7 +2364,7 @@ static int kvm_init(MachineState *ms) QTAILQ_INIT(&s->kvm_sw_breakpoints); #endif QLIST_INIT(&s->kvm_parked_vcpus); - s->fd = qemu_open_old("/dev/kvm", O_RDWR); + s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR); if (s->fd == -1) { fprintf(stderr, "Could not access KVM kernel module: %m\n"); ret = -errno; @@ -2821,14 +2825,14 @@ int kvm_cpu_exec(CPUState *cpu) struct kvm_run *run = cpu->kvm_run; int ret, run_ret; - DPRINTF("kvm_cpu_exec()\n"); + trace_kvm_cpu_exec(); if (kvm_arch_process_async_events(cpu)) { qatomic_set(&cpu->exit_request, 0); return EXCP_HLT; } - qemu_mutex_unlock_iothread(); + bql_unlock(); cpu_exec_start(cpu); do { @@ -2848,7 +2852,7 @@ int kvm_cpu_exec(CPUState *cpu) kvm_arch_pre_run(cpu, run); if (qatomic_read(&cpu->exit_request)) { - DPRINTF("interrupt exit requested\n"); + trace_kvm_interrupt_exit_request(); /* * KVM requires us to reenter the kernel after IO exits to complete * instruction emulation. 
This self-signal will ensure that we @@ -2868,17 +2872,17 @@ int kvm_cpu_exec(CPUState *cpu) #ifdef KVM_HAVE_MCE_INJECTION if (unlikely(have_sigbus_pending)) { - qemu_mutex_lock_iothread(); + bql_lock(); kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code, pending_sigbus_addr); have_sigbus_pending = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif if (run_ret < 0) { if (run_ret == -EINTR || run_ret == -EAGAIN) { - DPRINTF("io window exit\n"); + trace_kvm_io_window_exit(); kvm_eat_signals(cpu); ret = EXCP_INTERRUPT; break; @@ -2900,7 +2904,6 @@ int kvm_cpu_exec(CPUState *cpu) trace_kvm_run_exit(cpu->cpu_index, run->exit_reason); switch (run->exit_reason) { case KVM_EXIT_IO: - DPRINTF("handle_io\n"); /* Called outside BQL */ kvm_handle_io(run->io.port, attrs, (uint8_t *)run + run->io.data_offset, @@ -2910,7 +2913,6 @@ int kvm_cpu_exec(CPUState *cpu) ret = 0; break; case KVM_EXIT_MMIO: - DPRINTF("handle_mmio\n"); /* Called outside BQL */ address_space_rw(&address_space_memory, run->mmio.phys_addr, attrs, @@ -2920,11 +2922,9 @@ int kvm_cpu_exec(CPUState *cpu) ret = 0; break; case KVM_EXIT_IRQ_WINDOW_OPEN: - DPRINTF("irq_window_open\n"); ret = EXCP_INTERRUPT; break; case KVM_EXIT_SHUTDOWN: - DPRINTF("shutdown\n"); qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); ret = EXCP_INTERRUPT; break; @@ -2942,7 +2942,7 @@ int kvm_cpu_exec(CPUState *cpu) * still full. Got kicked by KVM_RESET_DIRTY_RINGS. */ trace_kvm_dirty_ring_full(cpu->cpu_index); - qemu_mutex_lock_iothread(); + bql_lock(); /* * We throttle vCPU by making it sleep once it exit from kernel * due to dirty ring full. In the dirtylimit scenario, reaping @@ -2954,11 +2954,12 @@ int kvm_cpu_exec(CPUState *cpu) } else { kvm_dirty_ring_reap(kvm_state, NULL); } - qemu_mutex_unlock_iothread(); + bql_unlock(); dirtylimit_vcpu_execute(cpu); ret = 0; break; case KVM_EXIT_SYSTEM_EVENT: + trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type); switch (run->system_event.type) { case KVM_SYSTEM_EVENT_SHUTDOWN: qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); @@ -2970,26 +2971,24 @@ int kvm_cpu_exec(CPUState *cpu) break; case KVM_SYSTEM_EVENT_CRASH: kvm_cpu_synchronize_state(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_system_guest_panicked(cpu_get_crash_info(cpu)); - qemu_mutex_unlock_iothread(); + bql_unlock(); ret = 0; break; default: - DPRINTF("kvm_arch_handle_exit\n"); ret = kvm_arch_handle_exit(cpu, run); break; } break; default: - DPRINTF("kvm_arch_handle_exit\n"); ret = kvm_arch_handle_exit(cpu, run); break; } } while (ret == 0); cpu_exec_end(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); if (ret < 0) { cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); @@ -3601,6 +3600,24 @@ static void kvm_set_dirty_ring_size(Object *obj, Visitor *v, s->kvm_dirty_ring_size = value; } +static char *kvm_get_device(Object *obj, + Error **errp G_GNUC_UNUSED) +{ + KVMState *s = KVM_STATE(obj); + + return g_strdup(s->device); +} + +static void kvm_set_device(Object *obj, + const char *value, + Error **errp G_GNUC_UNUSED) +{ + KVMState *s = KVM_STATE(obj); + + g_free(s->device); + s->device = g_strdup(value); +} + static void kvm_accel_instance_init(Object *obj) { KVMState *s = KVM_STATE(obj); @@ -3619,6 +3636,7 @@ static void kvm_accel_instance_init(Object *obj) s->xen_version = 0; s->xen_gnttab_max_frames = 64; s->xen_evtchn_max_pirq = 256; + s->device = NULL; } /** @@ -3659,6 +3677,10 @@ static void kvm_accel_class_init(ObjectClass *oc, void *data) object_class_property_set_description(oc, "dirty-ring-size", 
"Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)"); + object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device); + object_class_property_set_description(oc, "device", + "Path to the device node to use (default: /dev/kvm)"); + kvm_arch_accel_class_init(oc); } diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events index 399aaeb0ec7..a25902597b1 100644 --- a/accel/kvm/trace-events +++ b/accel/kvm/trace-events @@ -25,4 +25,9 @@ kvm_dirty_ring_reaper(const char *s) "%s" kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)" kvm_dirty_ring_reaper_kick(const char *reason) "%s" kvm_dirty_ring_flush(int finished) "%d" - +kvm_destroy_vcpu(void) "" +kvm_failed_get_vcpu_mmap_size(void) "" +kvm_cpu_exec(void) "" +kvm_interrupt_exit_request(void) "" +kvm_io_window_exit(void) "" +kvm_run_exit_system_event(int cpu_index, uint32_t event_type) "cpu_index %d, system_even_type %"PRIu32 diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c index 1b37d9a302c..ca381728840 100644 --- a/accel/stubs/kvm-stub.c +++ b/accel/stubs/kvm-stub.c @@ -124,3 +124,8 @@ uint32_t kvm_dirty_ring_size(void) { return 0; } + +bool kvm_hwpoisoned_mem(void) +{ + return false; +} diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index 6a2412b496e..90dd6adfe22 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -30,9 +30,6 @@ #include "qemu/rcu.h" #include "exec/log.h" #include "qemu/main-loop.h" -#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY) -#include "hw/i386/apic.h" -#endif #include "sysemu/cpus.h" #include "exec/cpu-all.h" #include "sysemu/cpu-timers.h" @@ -253,43 +250,29 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc, hash = tb_jmp_cache_hash_func(pc); jc = cpu->tb_jmp_cache; - if (cflags & CF_PCREL) { - /* Use acquire to ensure current load of pc from jc. */ - tb = qatomic_load_acquire(&jc->array[hash].tb); + tb = qatomic_read(&jc->array[hash].tb); + if (likely(tb && + jc->array[hash].pc == pc && + tb->cs_base == cs_base && + tb->flags == flags && + tb_cflags(tb) == cflags)) { + goto hit; + } - if (likely(tb && - jc->array[hash].pc == pc && - tb->cs_base == cs_base && - tb->flags == flags && - tb_cflags(tb) == cflags)) { - return tb; - } - tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); - if (tb == NULL) { - return NULL; - } - jc->array[hash].pc = pc; - /* Ensure pc is written first. */ - qatomic_store_release(&jc->array[hash].tb, tb); - } else { - /* Use rcu_read to ensure current load of pc from *tb. */ - tb = qatomic_rcu_read(&jc->array[hash].tb); - - if (likely(tb && - tb->pc == pc && - tb->cs_base == cs_base && - tb->flags == flags && - tb_cflags(tb) == cflags)) { - return tb; - } - tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); - if (tb == NULL) { - return NULL; - } - /* Use the pc value already stored in tb->pc. */ - qatomic_set(&jc->array[hash].tb, tb); + tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); + if (tb == NULL) { + return NULL; } + jc->array[hash].pc = pc; + qatomic_set(&jc->array[hash].tb, tb); + +hit: + /* + * As long as tb is not NULL, the contents are consistent. Therefore, + * the virtual PC has to match for non-CF_PCREL translations. 
+ */ + assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc); return tb; } @@ -357,9 +340,9 @@ static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc, #ifdef CONFIG_USER_ONLY g_assert_not_reached(); #else - CPUClass *cc = CPU_GET_CLASS(cpu); - assert(cc->tcg_ops->debug_check_breakpoint); - match_bp = cc->tcg_ops->debug_check_breakpoint(cpu); + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; + assert(tcg_ops->debug_check_breakpoint); + match_bp = tcg_ops->debug_check_breakpoint(cpu); #endif } @@ -413,6 +396,14 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) uint64_t cs_base; uint32_t flags, cflags; + /* + * By definition we've just finished a TB, so I/O is OK. + * Avoid the possibility of calling cpu_io_recompile() if + * a page table walk triggered by tb_lookup() calling + * probe_access_internal() happens to touch an MMIO device. + * The next TB, if we chain to it, will clear the flag again. + */ + cpu->neg.can_do_io = true; cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); cflags = curr_cflags(cpu); @@ -445,7 +436,6 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) static inline TranslationBlock * QEMU_DISABLE_CFI cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) { - CPUArchState *env = cpu_env(cpu); uintptr_t ret; TranslationBlock *last_tb; const void *tb_ptr = itb->tc.ptr; @@ -455,7 +445,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) } qemu_thread_jit_execute(); - ret = tcg_qemu_tb_exec(env, tb_ptr); + ret = tcg_qemu_tb_exec(cpu_env(cpu), tb_ptr); cpu->neg.can_do_io = true; qemu_plugin_disable_mem_helpers(cpu); /* @@ -476,10 +466,11 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) * counter hit zero); we must restore the guest PC to the address * of the start of the TB. */ - CPUClass *cc = CPU_GET_CLASS(cpu); + CPUClass *cc = cpu->cc; + const TCGCPUOps *tcg_ops = cc->tcg_ops; - if (cc->tcg_ops->synchronize_from_tb) { - cc->tcg_ops->synchronize_from_tb(cpu, last_tb); + if (tcg_ops->synchronize_from_tb) { + tcg_ops->synchronize_from_tb(cpu, last_tb); } else { tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL)); assert(cc->set_pc); @@ -511,19 +502,19 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) static void cpu_exec_enter(CPUState *cpu) { - CPUClass *cc = CPU_GET_CLASS(cpu); + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; - if (cc->tcg_ops->cpu_exec_enter) { - cc->tcg_ops->cpu_exec_enter(cpu); + if (tcg_ops->cpu_exec_enter) { + tcg_ops->cpu_exec_enter(cpu); } } static void cpu_exec_exit(CPUState *cpu) { - CPUClass *cc = CPU_GET_CLASS(cpu); + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; - if (cc->tcg_ops->cpu_exec_exit) { - cc->tcg_ops->cpu_exec_exit(cpu); + if (tcg_ops->cpu_exec_exit) { + tcg_ops->cpu_exec_exit(cpu); } } @@ -558,8 +549,8 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu) tcg_ctx->gen_tb = NULL; } #endif - if (qemu_mutex_iothread_locked()) { - qemu_mutex_unlock_iothread(); + if (bql_locked()) { + bql_unlock(); } assert_no_pages_locked(); } @@ -677,15 +668,11 @@ static inline bool cpu_handle_halt(CPUState *cpu) { #ifndef CONFIG_USER_ONLY if (cpu->halted) { -#if defined(TARGET_I386) - if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { - X86CPU *x86_cpu = X86_CPU(cpu); - qemu_mutex_lock_iothread(); - apic_poll_irq(x86_cpu->apic_state); - cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); - qemu_mutex_unlock_iothread(); + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; + + if (tcg_ops->cpu_exec_halt) { + tcg_ops->cpu_exec_halt(cpu); } -#endif /* TARGET_I386 */ if (!cpu_has_work(cpu)) { return 
true; } @@ -699,7 +686,7 @@ static inline bool cpu_handle_halt(CPUState *cpu) static inline void cpu_handle_debug_exception(CPUState *cpu) { - CPUClass *cc = CPU_GET_CLASS(cpu); + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; CPUWatchpoint *wp; if (!cpu->watchpoint_hit) { @@ -708,8 +695,8 @@ static inline void cpu_handle_debug_exception(CPUState *cpu) } } - if (cc->tcg_ops->debug_excp_handler) { - cc->tcg_ops->debug_excp_handler(cpu); + if (tcg_ops->debug_excp_handler) { + tcg_ops->debug_excp_handler(cpu); } } @@ -726,6 +713,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) #endif return false; } + if (cpu->exception_index >= EXCP_INTERRUPT) { /* exit request from the cpu execution loop */ *ret = cpu->exception_index; @@ -734,62 +722,59 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) } cpu->exception_index = -1; return true; - } else { + } + #if defined(CONFIG_USER_ONLY) - /* if user mode only, we simulate a fake exception - which will be handled outside the cpu execution - loop */ + /* + * If user mode only, we simulate a fake exception which will be + * handled outside the cpu execution loop. + */ #if defined(TARGET_I386) - CPUClass *cc = CPU_GET_CLASS(cpu); - cc->tcg_ops->fake_user_interrupt(cpu); + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; + tcg_ops->fake_user_interrupt(cpu); #endif /* TARGET_I386 */ - *ret = cpu->exception_index; - cpu->exception_index = -1; - return true; + *ret = cpu->exception_index; + cpu->exception_index = -1; + return true; #else - if (replay_exception()) { - CPUClass *cc = CPU_GET_CLASS(cpu); - qemu_mutex_lock_iothread(); - cc->tcg_ops->do_interrupt(cpu); - qemu_mutex_unlock_iothread(); - cpu->exception_index = -1; - - if (unlikely(cpu->singlestep_enabled)) { - /* - * After processing the exception, ensure an EXCP_DEBUG is - * raised when single-stepping so that GDB doesn't miss the - * next instruction. - */ - *ret = EXCP_DEBUG; - cpu_handle_debug_exception(cpu); - return true; - } - } else if (!replay_has_interrupt()) { - /* give a chance to iothread in replay mode */ - *ret = EXCP_INTERRUPT; + if (replay_exception()) { + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; + + bql_lock(); + tcg_ops->do_interrupt(cpu); + bql_unlock(); + cpu->exception_index = -1; + + if (unlikely(cpu->singlestep_enabled)) { + /* + * After processing the exception, ensure an EXCP_DEBUG is + * raised when single-stepping so that GDB doesn't miss the + * next instruction. + */ + *ret = EXCP_DEBUG; + cpu_handle_debug_exception(cpu); return true; } -#endif + } else if (!replay_has_interrupt()) { + /* give a chance to iothread in replay mode */ + *ret = EXCP_INTERRUPT; + return true; } +#endif return false; } -#ifndef CONFIG_USER_ONLY -/* - * CPU_INTERRUPT_POLL is a virtual event which gets converted into a - * "real" interrupt event later. It does not need to be recorded for - * replay purposes. 
- */ -static inline bool need_replay_interrupt(int interrupt_request) +static inline bool icount_exit_request(CPUState *cpu) { -#if defined(TARGET_I386) - return !(interrupt_request & CPU_INTERRUPT_POLL); -#else - return true; -#endif + if (!icount_enabled()) { + return false; + } + if (cpu->cflags_next_tb != -1 && !(cpu->cflags_next_tb & CF_USE_ICOUNT)) { + return false; + } + return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0; } -#endif /* !CONFIG_USER_ONLY */ static inline bool cpu_handle_interrupt(CPUState *cpu, TranslationBlock **last_tb) @@ -812,7 +797,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, if (unlikely(qatomic_read(&cpu->interrupt_request))) { int interrupt_request; - qemu_mutex_lock_iothread(); + bql_lock(); interrupt_request = cpu->interrupt_request; if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { /* Mask out external interrupts for this step. */ @@ -821,7 +806,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, if (interrupt_request & CPU_INTERRUPT_DEBUG) { cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; cpu->exception_index = EXCP_DEBUG; - qemu_mutex_unlock_iothread(); + bql_unlock(); return true; } #if !defined(CONFIG_USER_ONLY) @@ -832,7 +817,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; cpu->halted = 1; cpu->exception_index = EXCP_HLT; - qemu_mutex_unlock_iothread(); + bql_unlock(); return true; } #if defined(TARGET_I386) @@ -843,14 +828,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0); do_cpu_init(x86_cpu); cpu->exception_index = EXCP_HALTED; - qemu_mutex_unlock_iothread(); + bql_unlock(); return true; } #else else if (interrupt_request & CPU_INTERRUPT_RESET) { replay_interrupt(); cpu_reset(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); return true; } #endif /* !TARGET_I386 */ @@ -859,11 +844,12 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, True when it is, and we should restart on a new TB, and via longjmp via cpu_loop_exit. */ else { - CPUClass *cc = CPU_GET_CLASS(cpu); + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; - if (cc->tcg_ops->cpu_exec_interrupt && - cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) { - if (need_replay_interrupt(interrupt_request)) { + if (tcg_ops->cpu_exec_interrupt && + tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) { + if (!tcg_ops->need_replay_interrupt || + tcg_ops->need_replay_interrupt(interrupt_request)) { replay_interrupt(); } /* @@ -873,7 +859,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, */ if (unlikely(cpu->singlestep_enabled)) { cpu->exception_index = EXCP_DEBUG; - qemu_mutex_unlock_iothread(); + bql_unlock(); return true; } cpu->exception_index = -1; @@ -892,14 +878,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, } /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */ - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* Finally, check if we need to exit to the main loop. 
*/ - if (unlikely(qatomic_read(&cpu->exit_request)) - || (icount_enabled() - && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT) - && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) { + if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) { qatomic_set(&cpu->exit_request, 0); if (cpu->exception_index == -1) { cpu->exception_index = EXCP_INTERRUPT; @@ -1012,14 +995,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc) */ h = tb_jmp_cache_hash_func(pc); jc = cpu->tb_jmp_cache; - if (cflags & CF_PCREL) { - jc->array[h].pc = pc; - /* Ensure pc is written first. */ - qatomic_store_release(&jc->array[h].tb, tb); - } else { - /* Use the pc value already stored in tb->pc. */ - qatomic_set(&jc->array[h].tb, tb); - } + jc->array[h].pc = pc; + qatomic_set(&jc->array[h].tb, tb); } #ifndef CONFIG_USER_ONLY @@ -1070,7 +1047,7 @@ int cpu_exec(CPUState *cpu) return EXCP_HALTED; } - rcu_read_lock(); + RCU_READ_LOCK_GUARD(); cpu_exec_enter(cpu); /* @@ -1084,18 +1061,15 @@ int cpu_exec(CPUState *cpu) ret = cpu_exec_setjmp(cpu, &sc); cpu_exec_exit(cpu); - rcu_read_unlock(); - return ret; } bool tcg_exec_realizefn(CPUState *cpu, Error **errp) { static bool tcg_target_initialized; - CPUClass *cc = CPU_GET_CLASS(cpu); if (!tcg_target_initialized) { - cc->tcg_ops->initialize(); + cpu->cc->tcg_ops->initialize(); tcg_target_initialized = true; } diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index db3f93fda99..93b1ca810bf 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1145,14 +1145,11 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, " prot=%x idx=%d\n", addr, full->phys_addr, prot, mmu_idx); - read_flags = 0; + read_flags = full->tlb_fill_flags; if (full->lg_page_size < TARGET_PAGE_BITS) { /* Repeat the MMU check and TLB fill on every access. */ read_flags |= TLB_INVALID_MASK; } - if (full->attrs.byte_swap) { - read_flags |= TLB_BSWAP; - } is_ram = memory_region_is_ram(section->mr); is_romd = memory_region_is_romd(section->mr); @@ -1456,9 +1453,8 @@ static int probe_access_internal(CPUState *cpu, vaddr addr, flags |= full->slow_flags[access_type]; /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ - if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY)) - || - (access_type != MMU_INST_FETCH && force_mmio)) { + if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED)) + || (access_type != MMU_INST_FETCH && force_mmio)) { *phost = NULL; return TLB_MMIO; } @@ -1601,7 +1597,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, void *p; (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH, - cpu_mmu_index(env, true), false, + cpu_mmu_index(env_cpu(env), true), false, &p, &full, 0, false); if (p == NULL) { return -1; @@ -1839,6 +1835,31 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, tcg_debug_assert((flags & TLB_BSWAP) == 0); } + /* + * This alignment check differs from the one above, in that this is + * based on the atomicity of the operation. The intended use case is + * the ARM memory type field of each PTE, where access to pages with + * Device memory type require alignment. + */ + if (unlikely(flags & TLB_CHECK_ALIGNED)) { + MemOp size = l->memop & MO_SIZE; + + switch (l->memop & MO_ATOM_MASK) { + case MO_ATOM_NONE: + size = MO_8; + break; + case MO_ATOM_IFALIGN_PAIR: + case MO_ATOM_WITHIN16_PAIR: + size = size ? 
size - 1 : 0; + break; + default: + break; + } + if (addr & ((1 << size) - 1)) { + cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra); + } + } + return crosspage; } @@ -1975,7 +1996,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, * @size: number of bytes * @mmu_idx: virtual address context * @ra: return address into tcg generated code, or 0 - * Context: iothread lock held + * Context: BQL held * * Load @size bytes from @addr, which is memory-mapped i/o. * The bytes are concatenated in big-endian order with @ret_be. @@ -2022,7 +2043,6 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, MemoryRegion *mr; hwaddr mr_offset; MemTxAttrs attrs; - uint64_t ret; tcg_debug_assert(size > 0 && size <= 8); @@ -2030,12 +2050,9 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); mr = section->mr; - qemu_mutex_lock_iothread(); - ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx, - type, ra, mr, mr_offset); - qemu_mutex_unlock_iothread(); - - return ret; + BQL_LOCK_GUARD(); + return int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx, + type, ra, mr, mr_offset); } static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, @@ -2054,13 +2071,11 @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); mr = section->mr; - qemu_mutex_lock_iothread(); + BQL_LOCK_GUARD(); a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx, MMU_DATA_LOAD, ra, mr, mr_offset); b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx, MMU_DATA_LOAD, ra, mr, mr_offset + size - 8); - qemu_mutex_unlock_iothread(); - return int128_make128(b, a); } @@ -2521,7 +2536,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr, * @size: number of bytes * @mmu_idx: virtual address context * @ra: return address into tcg generated code, or 0 - * Context: iothread lock held + * Context: BQL held * * Store @size bytes at @addr, which is memory-mapped i/o. 
* The bytes to store are extracted in little-endian order from @val_le; @@ -2569,7 +2584,6 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, hwaddr mr_offset; MemoryRegion *mr; MemTxAttrs attrs; - uint64_t ret; tcg_debug_assert(size > 0 && size <= 8); @@ -2577,12 +2591,9 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); mr = section->mr; - qemu_mutex_lock_iothread(); - ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx, - ra, mr, mr_offset); - qemu_mutex_unlock_iothread(); - - return ret; + BQL_LOCK_GUARD(); + return int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx, + ra, mr, mr_offset); } static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, @@ -2593,7 +2604,6 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, MemoryRegion *mr; hwaddr mr_offset; MemTxAttrs attrs; - uint64_t ret; tcg_debug_assert(size > 8 && size <= 16); @@ -2601,14 +2611,11 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); mr = section->mr; - qemu_mutex_lock_iothread(); + BQL_LOCK_GUARD(); int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8, mmu_idx, ra, mr, mr_offset); - ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8, - size - 8, mmu_idx, ra, mr, mr_offset + 8); - qemu_mutex_unlock_iothread(); - - return ret; + return int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8, + size - 8, mmu_idx, ra, mr, mr_offset + 8); } /* @@ -2959,26 +2966,30 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val, uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) { - MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); - return do_ld1_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH); + CPUState *cs = env_cpu(env); + MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true)); + return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH); } uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) { - MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); - return do_ld2_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH); + CPUState *cs = env_cpu(env); + MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true)); + return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH); } uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) { - MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); - return do_ld4_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH); + CPUState *cs = env_cpu(env); + MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true)); + return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH); } uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) { - MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true)); - return do_ld8_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH); + CPUState *cs = env_cpu(env); + MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true)); + return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH); } uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, diff --git a/accel/tcg/icount-common.c b/accel/tcg/icount-common.c index ec57192be82..a4a747d1dc9 100644 --- a/accel/tcg/icount-common.c +++ b/accel/tcg/icount-common.c @@ -49,21 +49,19 @@ static bool icount_sleep = true; /* Arbitrarily pick 1MIPS as the minimum allowable speed. */ #define MAX_ICOUNT_SHIFT 10 -/* - * 0 = Do not count executed instructions. 
- * 1 = Fixed conversion of insn to ns via "shift" option - * 2 = Runtime adaptive algorithm to compute shift - */ -int use_icount; +/* Do not count executed instructions */ +ICountMode use_icount = ICOUNT_DISABLED; static void icount_enable_precise(void) { - use_icount = 1; + /* Fixed conversion of insn to ns via "shift" option */ + use_icount = ICOUNT_PRECISE; } static void icount_enable_adaptive(void) { - use_icount = 2; + /* Runtime adaptive algorithm to compute shift */ + use_icount = ICOUNT_ADAPTATIVE; } /* @@ -256,7 +254,7 @@ static void icount_warp_rt(void) int64_t warp_delta; warp_delta = clock - timers_state.vm_clock_warp_start; - if (icount_enabled() == 2) { + if (icount_enabled() == ICOUNT_ADAPTATIVE) { /* * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too far * ahead of real time (it might already be ahead so careful not @@ -419,7 +417,7 @@ void icount_account_warp_timer(void) icount_warp_rt(); } -void icount_configure(QemuOpts *opts, Error **errp) +bool icount_configure(QemuOpts *opts, Error **errp) { const char *option = qemu_opt_get(opts, "shift"); bool sleep = qemu_opt_get_bool(opts, "sleep", true); @@ -429,27 +427,28 @@ void icount_configure(QemuOpts *opts, Error **errp) if (!option) { if (qemu_opt_get(opts, "align") != NULL) { error_setg(errp, "Please specify shift option when using align"); + return false; } - return; + return true; } if (align && !sleep) { error_setg(errp, "align=on and sleep=off are incompatible"); - return; + return false; } if (strcmp(option, "auto") != 0) { if (qemu_strtol(option, NULL, 0, &time_shift) < 0 || time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) { error_setg(errp, "icount: Invalid shift value"); - return; + return false; } } else if (icount_align_option) { error_setg(errp, "shift=auto and align=on are incompatible"); - return; + return false; } else if (!icount_sleep) { error_setg(errp, "shift=auto and sleep=off are incompatible"); - return; + return false; } icount_sleep = sleep; @@ -463,7 +462,7 @@ void icount_configure(QemuOpts *opts, Error **errp) if (time_shift >= 0) { timers_state.icount_time_shift = time_shift; icount_enable_precise(); - return; + return true; } icount_enable_adaptive(); @@ -491,11 +490,14 @@ void icount_configure(QemuOpts *opts, Error **errp) timer_mod(timers_state.icount_vm_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + NANOSECONDS_PER_SECOND / 10); + return true; } void icount_notify_exit(void) { - if (icount_enabled() && current_cpu) { + assert(icount_enabled()); + + if (current_cpu) { qemu_cpu_kick(current_cpu); qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc index 33a04dec52f..97dae70d530 100644 --- a/accel/tcg/ldst_atomicity.c.inc +++ b/accel/tcg/ldst_atomicity.c.inc @@ -76,7 +76,7 @@ static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop) /* * Examine the alignment of p to determine if there are subobjects * that must be aligned. Note that we only really need ctz4() -- - * any more sigificant bits are discarded by the immediately + * any more significant bits are discarded by the immediately * following comparison. 
*/ tmp = ctz32(p); diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc index 44833513fbc..c82048e377e 100644 --- a/accel/tcg/ldst_common.c.inc +++ b/accel/tcg/ldst_common.c.inc @@ -354,7 +354,8 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) { - return cpu_ldub_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra); } int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) @@ -364,7 +365,8 @@ int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) { - return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra); } int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) @@ -374,17 +376,20 @@ int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) { - return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra); } uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) { - return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra); } uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) { - return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra); } int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) @@ -394,54 +399,63 @@ int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) { - return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra); } uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) { - return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra); } void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra) { - cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra); } void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra) { - cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra); } void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra) { - cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra); } void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr, uint64_t val, uintptr_t ra) { - cpu_stq_be_mmuidx_ra(env, addr, val, 
cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra); } void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra) { - cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra); } void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra) { - cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra); } void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr, uint64_t val, uintptr_t ra) { - cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra); } /*--------------------------*/ diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build index c7de4efba27..c83f7dc2b41 100644 --- a/accel/tcg/meson.build +++ b/accel/tcg/meson.build @@ -1,8 +1,8 @@ -tcg_ss = ss.source_set() common_ss.add(when: 'CONFIG_TCG', if_true: files( 'cpu-exec-common.c', )) -tcg_ss.add(files( +tcg_specific_ss = ss.source_set() +tcg_specific_ss.add(files( 'tcg-all.c', 'cpu-exec.c', 'tb-maint.c', @@ -14,17 +14,16 @@ tcg_ss.add(files( 'translate-all.c', 'translator.c', )) -tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c')) -tcg_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c')) +tcg_specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c')) +tcg_specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c')) if get_option('plugins') - tcg_ss.add(files('plugin-gen.c')) + tcg_specific_ss.add(files('plugin-gen.c')) endif -tcg_ss.add(when: libdw, if_true: files('debuginfo.c')) -tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c')) -specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss) +specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_specific_ss) specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files( 'cputlb.c', + 'watchpoint.c', )) system_ss.add(when: ['CONFIG_TCG'], if_true: files( diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c index 78b331b2510..cd78ef94a16 100644 --- a/accel/tcg/plugin-gen.c +++ b/accel/tcg/plugin-gen.c @@ -43,6 +43,7 @@ * CPU's index into a TCG temp, since the first callback did it already. */ #include "qemu/osdep.h" +#include "qemu/plugin.h" #include "cpu.h" #include "tcg/tcg.h" #include "tcg/tcg-temp-internal.h" @@ -56,12 +57,6 @@ #include "exec/helper-info.c.inc" #undef HELPER_H -#ifdef CONFIG_SOFTMMU -# define CONFIG_SOFTMMU_GATE 1 -#else -# define CONFIG_SOFTMMU_GATE 0 -#endif - /* * plugin_cb_start TCG op args[]: * 0: enum plugin_gen_from @@ -79,6 +74,7 @@ enum plugin_gen_from { enum plugin_gen_cb { PLUGIN_GEN_CB_UDATA, + PLUGIN_GEN_CB_UDATA_R, PLUGIN_GEN_CB_INLINE, PLUGIN_GEN_CB_MEM, PLUGIN_GEN_ENABLE_MEM_HELPER, @@ -90,7 +86,10 @@ enum plugin_gen_cb { * These helpers are stubs that get dynamically switched out for calls * direct to the plugin if they are subscribed to. 
*/ -void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata) +void HELPER(plugin_vcpu_udata_cb_no_wg)(uint32_t cpu_index, void *udata) +{ } + +void HELPER(plugin_vcpu_udata_cb_no_rwg)(uint32_t cpu_index, void *udata) { } void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index, @@ -98,7 +97,7 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index, void *userdata) { } -static void gen_empty_udata_cb(void) +static void gen_empty_udata_cb(void (*gen_helper)(TCGv_i32, TCGv_ptr)) { TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); TCGv_ptr udata = tcg_temp_ebb_new_ptr(); @@ -106,28 +105,50 @@ static void gen_empty_udata_cb(void) tcg_gen_movi_ptr(udata, 0); tcg_gen_ld_i32(cpu_index, tcg_env, -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); - gen_helper_plugin_vcpu_udata_cb(cpu_index, udata); + gen_helper(cpu_index, udata); tcg_temp_free_ptr(udata); tcg_temp_free_i32(cpu_index); } +static void gen_empty_udata_cb_no_wg(void) +{ + gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_wg); +} + +static void gen_empty_udata_cb_no_rwg(void) +{ + gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_rwg); +} + /* * For now we only support addi_i64. * When we support more ops, we can generate one empty inline cb for each. */ static void gen_empty_inline_cb(void) { + TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); + TCGv_ptr cpu_index_as_ptr = tcg_temp_ebb_new_ptr(); TCGv_i64 val = tcg_temp_ebb_new_i64(); TCGv_ptr ptr = tcg_temp_ebb_new_ptr(); + tcg_gen_ld_i32(cpu_index, tcg_env, + -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); + /* second operand will be replaced by immediate value */ + tcg_gen_mul_i32(cpu_index, cpu_index, cpu_index); + tcg_gen_ext_i32_ptr(cpu_index_as_ptr, cpu_index); + tcg_gen_movi_ptr(ptr, 0); + tcg_gen_add_ptr(ptr, ptr, cpu_index_as_ptr); tcg_gen_ld_i64(val, ptr, 0); - /* pass an immediate != 0 so that it doesn't get optimized away */ - tcg_gen_addi_i64(val, val, 0xdeadface); + /* second operand will be replaced by immediate value */ + tcg_gen_add_i64(val, val, val); + tcg_gen_st_i64(val, ptr, 0); tcg_temp_free_ptr(ptr); tcg_temp_free_i64(val); + tcg_temp_free_ptr(cpu_index_as_ptr); + tcg_temp_free_i32(cpu_index); } static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info) @@ -192,7 +213,8 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from) gen_empty_mem_helper); /* fall through */ case PLUGIN_GEN_FROM_TB: - gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb); + gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb_no_rwg); + gen_wrapped(from, PLUGIN_GEN_CB_UDATA_R, gen_empty_udata_cb_no_wg); gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb); break; default: @@ -274,12 +296,37 @@ static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr) return op; } +static TCGOp *copy_ld_i32(TCGOp **begin_op, TCGOp *op) +{ + return copy_op(begin_op, op, INDEX_op_ld_i32); +} + +static TCGOp *copy_ext_i32_ptr(TCGOp **begin_op, TCGOp *op) +{ + if (UINTPTR_MAX == UINT32_MAX) { + op = copy_op(begin_op, op, INDEX_op_mov_i32); + } else { + op = copy_op(begin_op, op, INDEX_op_ext_i32_i64); + } + return op; +} + +static TCGOp *copy_add_ptr(TCGOp **begin_op, TCGOp *op) +{ + if (UINTPTR_MAX == UINT32_MAX) { + op = copy_op(begin_op, op, INDEX_op_add_i32); + } else { + op = copy_op(begin_op, op, INDEX_op_add_i64); + } + return op; +} + static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op) { if (TCG_TARGET_REG_BITS == 32) { /* 2x ld_i32 */ - op = copy_op(begin_op, op, INDEX_op_ld_i32); - op = copy_op(begin_op, op, INDEX_op_ld_i32); + op 
= copy_ld_i32(begin_op, op); + op = copy_ld_i32(begin_op, op); } else { /* ld_i64 */ op = copy_op(begin_op, op, INDEX_op_ld_i64); @@ -315,6 +362,13 @@ static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v) return op; } +static TCGOp *copy_mul_i32(TCGOp **begin_op, TCGOp *op, uint32_t v) +{ + op = copy_op(begin_op, op, INDEX_op_mul_i32); + op->args[2] = tcgv_i32_arg(tcg_constant_i32(v)); + return op; +} + static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op) { if (UINTPTR_MAX == UINT32_MAX) { @@ -380,18 +434,19 @@ static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb, TCGOp *begin_op, TCGOp *op, int *unused) { - /* const_ptr */ - op = copy_const_ptr(&begin_op, op, cb->userp); + char *ptr = cb->inline_insn.entry.score->data->data; + size_t elem_size = g_array_get_element_size( + cb->inline_insn.entry.score->data); + size_t offset = cb->inline_insn.entry.offset; - /* ld_i64 */ + op = copy_ld_i32(&begin_op, op); + op = copy_mul_i32(&begin_op, op, elem_size); + op = copy_ext_i32_ptr(&begin_op, op); + op = copy_const_ptr(&begin_op, op, ptr + offset); + op = copy_add_ptr(&begin_op, op); op = copy_ld_i64(&begin_op, op); - - /* add_i64 */ op = copy_add_i64(&begin_op, op, cb->inline_insn.imm); - - /* st_i64 */ op = copy_st_i64(&begin_op, op); - return op; } @@ -588,6 +643,12 @@ static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb, inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op); } +static void plugin_gen_tb_udata_r(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op) +{ + inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR_R], begin_op); +} + static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb, TCGOp *begin_op) { @@ -602,6 +663,14 @@ static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb, inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op); } +static void plugin_gen_insn_udata_r(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + + inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR_R], begin_op); +} + static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb, TCGOp *begin_op, int insn_idx) { @@ -721,6 +790,9 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb) case PLUGIN_GEN_CB_UDATA: plugin_gen_tb_udata(plugin_tb, op); break; + case PLUGIN_GEN_CB_UDATA_R: + plugin_gen_tb_udata_r(plugin_tb, op); + break; case PLUGIN_GEN_CB_INLINE: plugin_gen_tb_inline(plugin_tb, op); break; @@ -737,6 +809,9 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb) case PLUGIN_GEN_CB_UDATA: plugin_gen_insn_udata(plugin_tb, op, insn_idx); break; + case PLUGIN_GEN_CB_UDATA_R: + plugin_gen_insn_udata_r(plugin_tb, op, insn_idx); + break; case PLUGIN_GEN_CB_INLINE: plugin_gen_insn_inline(plugin_tb, op, insn_idx); break; @@ -796,7 +871,7 @@ bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db, { bool ret = false; - if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) { + if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_state->event_mask)) { struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; int i; diff --git a/accel/tcg/plugin-helpers.h b/accel/tcg/plugin-helpers.h index 8e685e06545..11796436f35 100644 --- a/accel/tcg/plugin-helpers.h +++ b/accel/tcg/plugin-helpers.h @@ -1,4 +1,5 @@ #ifdef CONFIG_PLUGIN -DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, ptr) +DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb_no_wg, TCG_CALL_NO_WG | TCG_CALL_PLUGIN, 
void, i32, ptr) +DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb_no_rwg, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, ptr) DEF_HELPER_FLAGS_4(plugin_vcpu_mem_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, i32, i64, ptr) #endif diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h index bb424c8a05b..4ab8553afcc 100644 --- a/accel/tcg/tb-jmp-cache.h +++ b/accel/tcg/tb-jmp-cache.h @@ -13,9 +13,11 @@ #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) /* - * Accessed in parallel; all accesses to 'tb' must be atomic. - * For CF_PCREL, accesses to 'pc' must be protected by a - * load_acquire/store_release to 'tb'. + * Invalidated in parallel; all accesses to 'tb' must be atomic. + * A valid entry is read/written by a single CPU, therefore there is + * no need for qatomic_rcu_read() and pc is always consistent with a + * non-NULL value of 'tb'. Strictly speaking pc is only needed for + * CF_PCREL, but it's used always for simplicity. */ struct CPUJumpCache { struct rcu_head rcu; diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c index 0d069a081ec..de20e7da156 100644 --- a/accel/tcg/tb-maint.c +++ b/accel/tcg/tb-maint.c @@ -1021,7 +1021,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) * Called with mmap_lock held for user-mode emulation * NOTE: this function must not be called while a TB is running. */ -void tb_invalidate_phys_page(tb_page_addr_t addr) +static void tb_invalidate_phys_page(tb_page_addr_t addr) { tb_page_addr_t start, last; @@ -1160,28 +1160,6 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, #endif } -/* - * Invalidate all TBs which intersect with the target physical - * address page @addr. - */ -void tb_invalidate_phys_page(tb_page_addr_t addr) -{ - struct page_collection *pages; - tb_page_addr_t start, last; - PageDesc *p; - - p = page_find(addr >> TARGET_PAGE_BITS); - if (p == NULL) { - return; - } - - start = addr & TARGET_PAGE_MASK; - last = addr | ~TARGET_PAGE_MASK; - pages = page_collection_lock(start, last); - tb_invalidate_phys_page_range__locked(pages, p, start, last, 0); - page_collection_unlock(pages); -} - /* * Invalidate all TBs which intersect with the target physical address range * [start;last]. NOTE: start and end may refer to *different* physical pages. diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c index b25685fb712..9e1ae66f651 100644 --- a/accel/tcg/tcg-accel-ops-icount.c +++ b/accel/tcg/tcg-accel-ops-icount.c @@ -123,12 +123,12 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget) if (cpu->icount_budget == 0) { /* - * We're called without the iothread lock, so must take it while + * We're called without the BQL, so must take it while * we're calling timer handlers. 
*/ - qemu_mutex_lock_iothread(); + bql_lock(); icount_notify_aio_contexts(); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c index fac80095bbd..c552b45b8ed 100644 --- a/accel/tcg/tcg-accel-ops-mttcg.c +++ b/accel/tcg/tcg-accel-ops-mttcg.c @@ -76,7 +76,7 @@ static void *mttcg_cpu_thread_fn(void *arg) rcu_add_force_rcu_notifier(&force_rcu.notifier); tcg_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); @@ -91,9 +91,9 @@ static void *mttcg_cpu_thread_fn(void *arg) do { if (cpu_can_run(cpu)) { int r; - qemu_mutex_unlock_iothread(); - r = tcg_cpus_exec(cpu); - qemu_mutex_lock_iothread(); + bql_unlock(); + r = tcg_cpu_exec(cpu); + bql_lock(); switch (r) { case EXCP_DEBUG: cpu_handle_guest_debug(cpu); @@ -105,9 +105,9 @@ static void *mttcg_cpu_thread_fn(void *arg) */ break; case EXCP_ATOMIC: - qemu_mutex_unlock_iothread(); + bql_unlock(); cpu_exec_step_atomic(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); default: /* Ignore everything else? */ break; @@ -118,8 +118,8 @@ static void *mttcg_cpu_thread_fn(void *arg) qemu_wait_io_event(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); - tcg_cpus_destroy(cpu); - qemu_mutex_unlock_iothread(); + tcg_cpu_destroy(cpu); + bql_unlock(); rcu_remove_force_rcu_notifier(&force_rcu.notifier); rcu_unregister_thread(); return NULL; diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c index 611932f3c3a..894e73e52cb 100644 --- a/accel/tcg/tcg-accel-ops-rr.c +++ b/accel/tcg/tcg-accel-ops-rr.c @@ -109,9 +109,9 @@ static void rr_wait_io_event(void) { CPUState *cpu; - while (all_cpu_threads_idle()) { + while (all_cpu_threads_idle() && replay_can_wait()) { rr_stop_kick_timer(); - qemu_cond_wait_iothread(first_cpu->halt_cond); + qemu_cond_wait_bql(first_cpu->halt_cond); } rr_start_kick_timer(); @@ -131,7 +131,7 @@ static void rr_deal_with_unplugged_cpus(void) CPU_FOREACH(cpu) { if (cpu->unplug && !cpu_can_run(cpu)) { - tcg_cpus_destroy(cpu); + tcg_cpu_destroy(cpu); break; } } @@ -188,7 +188,7 @@ static void *rr_cpu_thread_fn(void *arg) rcu_add_force_rcu_notifier(&force_rcu); tcg_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); @@ -198,7 +198,7 @@ static void *rr_cpu_thread_fn(void *arg) /* wait for initial kick-off after machine start */ while (first_cpu->stopped) { - qemu_cond_wait_iothread(first_cpu->halt_cond); + qemu_cond_wait_bql(first_cpu->halt_cond); /* process any pending work */ CPU_FOREACH(cpu) { @@ -218,9 +218,9 @@ static void *rr_cpu_thread_fn(void *arg) /* Only used for icount_enabled() */ int64_t cpu_budget = 0; - qemu_mutex_unlock_iothread(); + bql_unlock(); replay_mutex_lock(); - qemu_mutex_lock_iothread(); + bql_lock(); if (icount_enabled()) { int cpu_count = rr_cpu_count(); @@ -254,23 +254,23 @@ static void *rr_cpu_thread_fn(void *arg) if (cpu_can_run(cpu)) { int r; - qemu_mutex_unlock_iothread(); + bql_unlock(); if (icount_enabled()) { icount_prepare_for_run(cpu, cpu_budget); } - r = tcg_cpus_exec(cpu); + r = tcg_cpu_exec(cpu); if (icount_enabled()) { icount_process_data(cpu); } - qemu_mutex_lock_iothread(); + bql_lock(); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); break; } else if (r == EXCP_ATOMIC) { - qemu_mutex_unlock_iothread(); + bql_unlock(); cpu_exec_step_atomic(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); break; } } else if (cpu->stop) { diff --git 
a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c index 1b572906820..9c957f421c7 100644 --- a/accel/tcg/tcg-accel-ops.c +++ b/accel/tcg/tcg-accel-ops.c @@ -63,12 +63,12 @@ void tcg_cpu_init_cflags(CPUState *cpu, bool parallel) cpu->tcg_cflags |= cflags; } -void tcg_cpus_destroy(CPUState *cpu) +void tcg_cpu_destroy(CPUState *cpu) { cpu_thread_signal_destroyed(cpu); } -int tcg_cpus_exec(CPUState *cpu) +int tcg_cpu_exec(CPUState *cpu) { int ret; assert(tcg_enabled()); @@ -88,7 +88,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu) /* mask must never be zero, except for A20 change call */ void tcg_handle_interrupt(CPUState *cpu, int mask) { - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); cpu->interrupt_request |= mask; diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h index f9bc6330e2d..44c4079972a 100644 --- a/accel/tcg/tcg-accel-ops.h +++ b/accel/tcg/tcg-accel-ops.h @@ -14,8 +14,8 @@ #include "sysemu/cpus.h" -void tcg_cpus_destroy(CPUState *cpu); -int tcg_cpus_exec(CPUState *cpu); +void tcg_cpu_destroy(CPUState *cpu); +int tcg_cpu_exec(CPUState *cpu); void tcg_handle_interrupt(CPUState *cpu, int mask); void tcg_cpu_init_cflags(CPUState *cpu, bool parallel); diff --git a/accel/tcg/tcg-runtime-sym-vec.c b/accel/tcg/tcg-runtime-sym-vec.c index e698f76cfe1..ce86a961d8d 100644 --- a/accel/tcg/tcg-runtime-sym-vec.c +++ b/accel/tcg/tcg-runtime-sym-vec.c @@ -96,10 +96,26 @@ static void *build_expression_for_vector_vector_op( uint64_t vector_size, uint64_t vece, void *(*symbolic_operation)(void *, void *) ) { - g_assert(vector_size == 64 || vector_size == 128 || vector_size == 256); uint64_t element_size = vece_element_size(vece); - g_assert(element_size <= vector_size); + g_assert(vector_size % element_size == 0); + uint64_t element_count = vector_size / element_size; + + static void** arg1_elts = NULL; + static void** arg2_elts = NULL; + + static uint64_t arg_len = 0; + + uint64_t arg_len_required = element_count * sizeof(void*); + if (arg_len < arg_len_required) { + arg1_elts = g_realloc(arg1_elts, arg_len_required); + arg2_elts = g_realloc(arg2_elts, arg_len_required); + + arg_len = arg_len_required; + } + + g_assert(vector_size == 64 || vector_size == 128 || vector_size == 256); + g_assert(element_size <= vector_size); if (arg1_symbolic == NULL && arg2_symbolic == NULL) { return NULL; @@ -116,13 +132,10 @@ static void *build_expression_for_vector_vector_op( g_assert(_sym_bits_helper(arg1_symbolic) == _sym_bits_helper(arg2_symbolic) && _sym_bits_helper(arg1_symbolic) == vector_size); - uint64_t element_count = vector_size / element_size; - void *arg1_elements[element_count]; - void *arg2_elements[element_count]; - split_expression(arg1_symbolic, element_count, element_size, arg1_elements); - split_expression(arg2_symbolic, element_count, element_size, arg2_elements); + split_expression(arg1_symbolic, element_count, element_size, arg1_elts); + split_expression(arg2_symbolic, element_count, element_size, arg2_elts); - void *result = apply_op_and_merge(symbolic_operation, arg1_elements, arg2_elements, element_count); + void *result = apply_op_and_merge(symbolic_operation, arg1_elts, arg2_elts, element_count); g_assert(_sym_bits_helper(result) == vector_size); return result; } @@ -150,10 +163,26 @@ build_expression_for_vector_int32_op( uint64_t vector_size, uint64_t vece, void *(*symbolic_operation)(void *, void *) ) { - g_assert(vector_size == 64 || vector_size == 128 || vector_size == 256); uint64_t element_size = vece_element_size(vece); - 
g_assert(element_size <= vector_size); + g_assert(vector_size % element_size == 0); + uint64_t element_count = vector_size / element_size; + + static void** arg1_elts = NULL; + static void** arg2_elts = NULL; + + static uint64_t arg_len = 0; + + uint64_t arg_len_required = element_count * sizeof(void*); + if (arg_len < arg_len_required) { + arg1_elts = g_realloc(arg1_elts, arg_len_required); + arg2_elts = g_realloc(arg2_elts, arg_len_required); + + arg_len = arg_len_required; + } + + g_assert(vector_size == 64 || vector_size == 128 || vector_size == 256); + g_assert(element_size <= vector_size); if (arg1_symbolic == NULL && arg2_symbolic == NULL) { return NULL; @@ -174,15 +203,12 @@ build_expression_for_vector_int32_op( arg2_symbolic = _sym_build_zext(arg2_symbolic, element_size - 32); } - uint64_t element_count = vector_size / element_size; - void *arg1_elements[element_count]; - void *arg2_elements[element_count]; - split_expression(arg1_symbolic, element_count, element_size, arg1_elements); + split_expression(arg1_symbolic, element_count, element_size, arg1_elts); for (uint64_t i = 0; i < element_count; i++) { - arg2_elements[i] = arg2_symbolic; + arg2_elts[i] = arg2_symbolic; } - void *result = apply_op_and_merge(symbolic_operation, arg1_elements, arg2_elements, element_count); + void *result = apply_op_and_merge(symbolic_operation, arg1_elts, arg2_elts, element_count); g_assert(_sym_bits_helper(result) == vector_size); return result; } @@ -390,8 +416,25 @@ void *HELPER(sym_cmp_vec)( /* TCGCond */ uint32_t comparison_operator, void *result_concrete, uint64_t vector_size, uint64_t vece ) { - g_assert(vector_size == 64 || vector_size == 128 || vector_size == 256); uint64_t element_size = vece_element_size(vece); + + g_assert(vector_size % element_size == 0); + uint64_t element_count = vector_size / element_size; + + static void** arg1_elts = NULL; + static void** arg2_elts = NULL; + + static uint64_t arg_len = 0; + + uint64_t arg_len_required = element_count * sizeof(void*); + if (arg_len < arg_len_required) { + arg1_elts = g_realloc(arg1_elts, arg_len_required); + arg2_elts = g_realloc(arg2_elts, arg_len_required); + + arg_len = arg_len_required; + } + + g_assert(vector_size == 64 || vector_size == 128 || vector_size == 256); g_assert(element_size <= vector_size); g_assert(vector_size % element_size == 0); @@ -410,13 +453,8 @@ void *HELPER(sym_cmp_vec)( g_assert(_sym_bits_helper(arg1_symbolic) == _sym_bits_helper(arg2_symbolic) && _sym_bits_helper(arg1_symbolic) == vector_size); - uint64_t element_count = vector_size / element_size; - - void *arg1_elements[element_count]; - void *arg2_elements[element_count]; - - split_expression(arg1_symbolic, element_count, element_size, arg1_elements); - split_expression(arg2_symbolic, element_count, element_size, arg2_elements); + split_expression(arg1_symbolic, element_count, element_size, arg1_elts); + split_expression(arg2_symbolic, element_count, element_size, arg2_elts); for (uint64_t i = 0; i < element_count; i++) { /* For each element, the comparison was true iff the element of the result is equal to -1. 
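The tcg-runtime-sym-vec.c hunks above and below all make the same change: per-call variable-length arrays holding the split-out vector elements are replaced by function-local static buffers grown on demand with g_realloc(). A minimal sketch of that grow-only scratch-buffer pattern, with illustrative names, and assuming the helpers are never entered concurrently (function-local statics are not thread-safe; whether that holds here is an assumption):

/*
 * Grow-only scratch buffer: persists across calls, reallocated only
 * when a larger element count is requested, never freed or shrunk.
 * Illustrative sketch; assumes single-threaded callers.
 */
#include <glib.h>
#include <stdint.h>
#include <stdio.h>

static void **get_scratch(uint64_t element_count)
{
    static void **buf;          /* survives between calls */
    static uint64_t buf_len;    /* current capacity in bytes */

    uint64_t required = element_count * sizeof(void *);
    if (buf_len < required) {
        buf = g_realloc(buf, required);   /* g_realloc(NULL, n) == g_malloc(n) */
        buf_len = required;
    }
    return buf;
}

int main(void)
{
    void **elts = get_scratch(8);    /* first call allocates 8 slots */
    elts[0] = elts;                  /* use the scratch space */
    elts = get_scratch(4);           /* smaller request: no reallocation */
    printf("scratch at %p\n", (void *)elts);
    return 0;
}

The capacity only ratchets upward, so the reallocation cost is paid once per new maximum vector width rather than on every helper invocation.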
@@ -424,8 +462,8 @@ void *HELPER(sym_cmp_vec)( uint8_t is_taken = *(uint8_t *) element_address(result_concrete, i, element_size, vector_size); build_and_push_path_constraint( env, - arg1_elements[i], - arg2_elements[i], + arg1_elts[i], + arg2_elts[i], comparison_operator, is_taken ); @@ -471,10 +509,38 @@ void *HELPER(sym_ternary_vec)( /* TCGCond */ uint32_t comparison_operator, void *concrete_result, uint64_t vector_size, uint64_t vece ) { - g_assert(vector_size == 64 || vector_size == 128 || vector_size == 256); uint64_t element_size = vece_element_size(vece); - g_assert(element_size <= vector_size); + g_assert(vector_size % element_size == 0); + uint64_t element_count = vector_size / element_size; + + static void** arg1_elts = NULL; + static void** arg2_elts = NULL; + + /* For each element, the condition of the ternary was true iff the element of the result is equal to the element + * of arg1. */ + static int* concrete_condition_was_true = NULL; + + static uint64_t arg_len = 0; + static uint64_t concrete_condition_len = 0; + + uint64_t arg_len_required = element_count * sizeof(void*); + if (arg_len < arg_len_required) { + arg1_elts = g_realloc(arg1_elts, arg_len_required); + arg2_elts = g_realloc(arg2_elts, arg_len_required); + + arg_len = arg_len_required; + } + + uint64_t concrete_condition_len_required = element_count * sizeof(int); + if (concrete_condition_len < concrete_condition_len_required) { + concrete_condition_was_true = g_realloc(concrete_condition_was_true, concrete_condition_len_required); + + concrete_condition_len = concrete_condition_len_required; + } + + g_assert(vector_size == 64 || vector_size == 128 || vector_size == 256); + g_assert(element_size <= vector_size); if (arg1_symbolic == NULL && arg2_symbolic == NULL) { return NULL; @@ -491,17 +557,8 @@ void *HELPER(sym_ternary_vec)( g_assert(_sym_bits_helper(arg1_symbolic) == _sym_bits_helper(arg2_symbolic) && _sym_bits_helper(arg1_symbolic) == vector_size); - uint64_t element_count = vector_size / element_size; - - void *arg1_elements[element_count]; - void *arg2_elements[element_count]; - - split_expression(arg1_symbolic, element_count, element_size, arg1_elements); - split_expression(arg2_symbolic, element_count, element_size, arg2_elements); - - /* For each element, the condition of the ternary was true iff the element of the result is equal to the element - * of arg1. */ - int concrete_condition_was_true[element_count]; + split_expression(arg1_symbolic, element_count, element_size, arg1_elts); + split_expression(arg2_symbolic, element_count, element_size, arg2_elts); for (int i = 0; i < element_count; i++) { void *result_element_ptr = element_address(concrete_result, i, element_size, vector_size); @@ -512,16 +569,16 @@ void *HELPER(sym_ternary_vec)( for (int i = 0; i < element_count; i++) { build_and_push_path_constraint( env, - arg1_elements[i], - arg2_elements[i], + arg1_elts[i], + arg2_elts[i], comparison_operator, concrete_condition_was_true[i] ); } - void *result_expression = concrete_condition_was_true[0] ? arg1_elements[0] : arg2_elements[0]; + void *result_expression = concrete_condition_was_true[0] ? arg1_elts[0] : arg2_elts[0]; for (int i = 1; i < element_count; i++) { - result_expression = _sym_concat_helper(concrete_condition_was_true[i] ? arg1_elements[i] : arg2_elements[i], + result_expression = _sym_concat_helper(concrete_condition_was_true[i] ? 
arg1_elts[i] : arg2_elts[i], result_expression); } diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index c1708afcb02..661252d92d9 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -63,7 +63,7 @@ #include "tb-context.h" #include "internal-common.h" #include "internal-target.h" -#include "perf.h" +#include "tcg/perf.h" #include "tcg/insn-start-words.h" TBContext tb_ctx; @@ -256,7 +256,6 @@ bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data) void page_init(void) { - page_size_init(); page_table_config_init(); } @@ -633,7 +632,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n; if (qemu_loglevel_mask(CPU_LOG_EXEC)) { - vaddr pc = log_pc(cpu, tb); + vaddr pc = cpu->cc->get_pc(cpu); if (qemu_log_in_addr_range(pc)) { qemu_log("cpu_io_recompile: rewound execution of TB to %016" VADDR_PRIx "\n", pc); @@ -647,7 +646,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) void cpu_interrupt(CPUState *cpu, int mask) { - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); cpu->interrupt_request |= mask; qatomic_set(&cpu->neg.icount_decr.u16.high, -1); } diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c index 0a783be91c2..762f84c8311 100644 --- a/accel/tcg/translator.c +++ b/accel/tcg/translator.c @@ -18,20 +18,14 @@ static void set_can_do_io(DisasContextBase *db, bool val) { - if (db->saved_can_do_io != val) { - db->saved_can_do_io = val; - - QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1); - tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env, - offsetof(ArchCPU, parent_obj.neg.can_do_io) - - offsetof(ArchCPU, env)); - } + QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1); + tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env, + offsetof(ArchCPU, parent_obj.neg.can_do_io) - + offsetof(ArchCPU, env)); } bool translator_io_start(DisasContextBase *db) { - set_can_do_io(db, true); - /* * Ensure that this instruction will be the last in the TB. * The target may override this to something more forceful. @@ -84,13 +78,6 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags) - offsetof(ArchCPU, env)); } - /* - * cpu->neg.can_do_io is set automatically here at the beginning of - * each translation block. The cost is minimal, plus it would be - * very easy to forget doing it in the translator. - */ - set_can_do_io(db, db->max_insns == 1); - TCGv_i64 block = tcg_constant_i64((int64_t) db->tb); gen_helper_sym_notify_block(block); // tcg_temp_free_i64(block); TODO: free is reserved for internal now, is it ok in that case? 
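The set_can_do_io() hunk above stores straight into cpu->neg.can_do_io through tcg_env by computing the field's offset relative to the embedded env, offsetof(ArchCPU, parent_obj.neg.can_do_io) - offsetof(ArchCPU, env), which comes out negative because parent_obj is laid out before env inside ArchCPU. A standalone sketch of that container-relative offset arithmetic, using mock struct names rather than QEMU's real layout:

/* Mock types only: Parent/Arch stand in for CPUState/ArchCPU. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct Parent {                     /* role of CPUState */
    int other_state;
    uint8_t can_do_io;              /* field reached via a negative offset */
};

struct Arch {                       /* role of ArchCPU */
    struct Parent parent_obj;       /* parent comes first ... */
    struct { long regs[4]; } env;   /* ... env follows; tcg_env points here */
};

int main(void)
{
    struct Arch cpu = { .parent_obj.can_do_io = 0 };
    char *env_ptr = (char *)&cpu.env;               /* what TCG hands around */

    /* Same arithmetic as in the hunk: target offset minus env offset. */
    ptrdiff_t off = offsetof(struct Arch, parent_obj.can_do_io)
                    - offsetof(struct Arch, env);

    *(uint8_t *)(env_ptr + off) = 1;                /* st8 at env + off */
    printf("can_do_io=%d via offset %td\n", cpu.parent_obj.can_do_io, off);
    return 0;
}

The tcg_gen_st8_i32() call in the hunk emits the equivalent store into the generated code, so the translator can flip can_do_io without ever needing a host pointer to the CPUState at translation time.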
@@ -133,6 +120,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, { uint32_t cflags = tb_cflags(tb); TCGOp *icount_start_insn; + TCGOp *first_insn_start = NULL; bool plugin_enabled; /* Initialize DisasContext */ @@ -143,7 +131,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, db->num_insns = 0; db->max_insns = *max_insns; db->singlestep_enabled = cflags & CF_SINGLE_STEP; - db->saved_can_do_io = -1; + db->insn_start = NULL; db->host_addr[0] = host_pc; db->host_addr[1] = NULL; @@ -161,6 +149,10 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, while (true) { *max_insns = ++db->num_insns; ops->insn_start(db, cpu); + db->insn_start = tcg_last_op(); + if (first_insn_start == NULL) { + first_insn_start = db->insn_start; + } tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ if (plugin_enabled) { @@ -173,10 +165,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, * done next -- either exiting this loop or locate the start of * the next instruction. */ - if (db->num_insns == db->max_insns) { - /* Accept I/O on the last instruction. */ - set_can_do_io(db, true); - } ops->translate_insn(db, cpu); /* @@ -209,6 +197,21 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, ops->tb_stop(db, cpu); gen_tb_end(tb, cflags, icount_start_insn, db->num_insns); + /* + * Manage can_do_io for the translation block: set to false before + * the first insn and set to true before the last insn. + */ + if (db->num_insns == 1) { + tcg_debug_assert(first_insn_start == db->insn_start); + } else { + tcg_debug_assert(first_insn_start != db->insn_start); + tcg_ctx->emit_before_op = first_insn_start; + set_can_do_io(db, false); + } + tcg_ctx->emit_before_op = db->insn_start; + set_can_do_io(db, true); + tcg_ctx->emit_before_op = NULL; + if (plugin_enabled) { plugin_gen_tb_end(cpu, db->num_insns); } diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 68b252cb8e8..3cac3a78c4c 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -651,16 +651,17 @@ void page_protect(tb_page_addr_t address) { PageFlagsNode *p; target_ulong start, last; + int host_page_size = qemu_real_host_page_size(); int prot; assert_memory_lock(); - if (qemu_host_page_size <= TARGET_PAGE_SIZE) { + if (host_page_size <= TARGET_PAGE_SIZE) { start = address & TARGET_PAGE_MASK; last = start + TARGET_PAGE_SIZE - 1; } else { - start = address & qemu_host_page_mask; - last = start + qemu_host_page_size - 1; + start = address & -host_page_size; + last = start + host_page_size - 1; } p = pageflags_find(start, last); @@ -671,7 +672,7 @@ void page_protect(tb_page_addr_t address) if (unlikely(p->itree.last < last)) { /* More than one protection region covers the one host page. */ - assert(TARGET_PAGE_SIZE < qemu_host_page_size); + assert(TARGET_PAGE_SIZE < host_page_size); while ((p = pageflags_next(p, start, last)) != NULL) { prot |= p->flags; } @@ -679,7 +680,7 @@ void page_protect(tb_page_addr_t address) if (prot & PAGE_WRITE) { pageflags_set_clear(start, last, 0, PAGE_WRITE); - mprotect(g2h_untagged(start), qemu_host_page_size, + mprotect(g2h_untagged(start), last - start + 1, prot & (PAGE_READ | PAGE_EXEC) ? 
PROT_READ : PROT_NONE); } } @@ -725,18 +726,19 @@ int page_unprotect(target_ulong address, uintptr_t pc) } #endif } else { + int host_page_size = qemu_real_host_page_size(); target_ulong start, len, i; int prot; - if (qemu_host_page_size <= TARGET_PAGE_SIZE) { + if (host_page_size <= TARGET_PAGE_SIZE) { start = address & TARGET_PAGE_MASK; len = TARGET_PAGE_SIZE; prot = p->flags | PAGE_WRITE; pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0); current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc); } else { - start = address & qemu_host_page_mask; - len = qemu_host_page_size; + start = address & -host_page_size; + len = host_page_size; prot = 0; for (i = 0; i < len; i += TARGET_PAGE_SIZE) { @@ -862,7 +864,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, typedef struct TargetPageDataNode { struct rcu_head rcu; IntervalTreeNode itree; - char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned)); + char data[] __attribute__((aligned)); } TargetPageDataNode; static IntervalTreeRoot targetdata_root; @@ -900,7 +902,8 @@ void page_reset_target_data(target_ulong start, target_ulong last) n_last = MIN(last, n->last); p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS; - memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE); + memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0, + p_len * TARGET_PAGE_DATA_SIZE); } } @@ -908,7 +911,7 @@ void *page_get_target_data(target_ulong address) { IntervalTreeNode *n; TargetPageDataNode *t; - target_ulong page, region; + target_ulong page, region, p_ofs; page = address & TARGET_PAGE_MASK; region = address & TBD_MASK; @@ -924,7 +927,8 @@ void *page_get_target_data(target_ulong address) mmap_lock(); n = interval_tree_iter_first(&targetdata_root, page, page); if (!n) { - t = g_new0(TargetPageDataNode, 1); + t = g_malloc0(sizeof(TargetPageDataNode) + + TPD_PAGES * TARGET_PAGE_DATA_SIZE); n = &t->itree; n->start = region; n->last = region | ~TBD_MASK; @@ -934,7 +938,8 @@ void *page_get_target_data(target_ulong address) } t = container_of(n, TargetPageDataNode, itree); - return t->data[(page - region) >> TARGET_PAGE_BITS]; + p_ofs = (page - region) >> TARGET_PAGE_BITS; + return t->data + p_ofs * TARGET_PAGE_DATA_SIZE; } #else void page_reset_target_data(target_ulong start, target_ulong last) { } diff --git a/accel/tcg/watchpoint.c b/accel/tcg/watchpoint.c new file mode 100644 index 00000000000..d3aab114588 --- /dev/null +++ b/accel/tcg/watchpoint.c @@ -0,0 +1,143 @@ +/* + * CPU watchpoints + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/error-report.h" +#include "exec/exec-all.h" +#include "exec/translate-all.h" +#include "sysemu/tcg.h" +#include "sysemu/replay.h" +#include "hw/core/tcg-cpu-ops.h" +#include "hw/core/cpu.h" + +/* + * Return true if this watchpoint address matches the specified + * access (ie the address range covered by the watchpoint overlaps + * partially or completely with the address range covered by the + * access). + */ +static inline bool watchpoint_address_matches(CPUWatchpoint *wp, + vaddr addr, vaddr len) +{ + /* + * We know the lengths are non-zero, but a little caution is + * required to avoid errors in the case where the range ends + * exactly at the top of the address space and so addr + len + * wraps round to zero. + */ + vaddr wpend = wp->vaddr + wp->len - 1; + vaddr addrend = addr + len - 1; + + return !(addr > wpend || wp->vaddr > addrend); +} + +/* Return flags for watchpoints that match addr + prot. */ +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) +{ + CPUWatchpoint *wp; + int ret = 0; + + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + if (watchpoint_address_matches(wp, addr, len)) { + ret |= wp->flags; + } + } + return ret; +} + +/* Generate a debug exception if a watchpoint has been hit. */ +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs attrs, int flags, uintptr_t ra) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUWatchpoint *wp; + + assert(tcg_enabled()); + if (cpu->watchpoint_hit) { + /* + * We re-entered the check after replacing the TB. + * Now raise the debug interrupt so that it will + * trigger after the current instruction. + */ + bql_lock(); + cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); + bql_unlock(); + return; + } + + if (cc->tcg_ops->adjust_watchpoint_address) { + /* this is currently used only by ARM BE32 */ + addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len); + } + + assert((flags & ~BP_MEM_ACCESS) == 0); + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + int hit_flags = wp->flags & flags; + + if (hit_flags && watchpoint_address_matches(wp, addr, len)) { + if (replay_running_debug()) { + /* + * replay_breakpoint reads icount. + * Force recompile to succeed, because icount may + * be read only at the end of the block. + */ + if (!cpu->neg.can_do_io) { + /* Force execution of one insn next time. */ + cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); + cpu_loop_exit_restore(cpu, ra); + } + /* + * Don't process the watchpoints when we are + * in a reverse debugging operation. + */ + replay_breakpoint(); + return; + } + + wp->flags |= hit_flags << BP_HIT_SHIFT; + wp->hitaddr = MAX(addr, wp->vaddr); + wp->hitattrs = attrs; + + if (wp->flags & BP_CPU + && cc->tcg_ops->debug_check_watchpoint + && !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { + wp->flags &= ~BP_WATCHPOINT_HIT; + continue; + } + cpu->watchpoint_hit = wp; + + mmap_lock(); + /* This call also restores vCPU state */ + tb_check_watchpoint(cpu, ra); + if (wp->flags & BP_STOP_BEFORE_ACCESS) { + cpu->exception_index = EXCP_DEBUG; + mmap_unlock(); + cpu_loop_exit(cpu); + } else { + /* Force execution of one insn next time. 
*/ + cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); + mmap_unlock(); + cpu_loop_exit_noexc(cpu); + } + } else { + wp->flags &= ~BP_WATCHPOINT_HIT; + } + } +} diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c index 5ff0cb8bd9e..0bdefce5375 100644 --- a/accel/xen/xen-all.c +++ b/accel/xen/xen-all.c @@ -15,6 +15,7 @@ #include "hw/xen/xen_native.h" #include "hw/xen/xen-legacy-backend.h" #include "hw/xen/xen_pt.h" +#include "hw/xen/xen_igd.h" #include "chardev/char.h" #include "qemu/accel.h" #include "sysemu/cpus.h" diff --git a/audio/audio.c b/audio/audio.c index 7ac74f9e16e..af0ae33fedb 100644 --- a/audio/audio.c +++ b/audio/audio.c @@ -1683,7 +1683,7 @@ static const VMStateDescription vmstate_audio = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_audio_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() } }; diff --git a/audio/coreaudio.m b/audio/coreaudio.m index 8cd129a27d0..ab632b9bbbb 100644 --- a/audio/coreaudio.m +++ b/audio/coreaudio.m @@ -299,7 +299,7 @@ static ret_type glue(coreaudio_, name)args_decl \ #undef COREAUDIO_WRAPPER_FUNC /* - * callback to feed audiooutput buffer. called without iothread lock. + * callback to feed audiooutput buffer. called without BQL. * allowed to lock "buf_mutex", but disallowed to have any other locks. */ static OSStatus audioDeviceIOProc( @@ -538,7 +538,7 @@ static void update_device_playback_state(coreaudioVoiceOut *core) } } -/* called without iothread lock. */ +/* called without BQL. */ static OSStatus handle_voice_change( AudioObjectID in_object_id, UInt32 in_number_addresses, @@ -547,7 +547,7 @@ static OSStatus handle_voice_change( { coreaudioVoiceOut *core = in_client_data; - qemu_mutex_lock_iothread(); + bql_lock(); if (core->outputDeviceID) { fini_out_device(core); @@ -557,7 +557,7 @@ static OSStatus handle_voice_change( update_device_playback_state(core); } - qemu_mutex_unlock_iothread(); + bql_unlock(); return 0; } diff --git a/audio/pwaudio.c b/audio/pwaudio.c index 3ce5f6507b4..3b14e04fbb0 100644 --- a/audio/pwaudio.c +++ b/audio/pwaudio.c @@ -11,7 +11,6 @@ #include "qemu/osdep.h" #include "qemu/module.h" #include "audio.h" -#include #include "qemu/error-report.h" #include "qapi/error.h" #include diff --git a/backends/Kconfig b/backends/Kconfig index f35abc16092..2cb23f62fa1 100644 --- a/backends/Kconfig +++ b/backends/Kconfig @@ -1 +1,5 @@ source tpm/Kconfig + +config IOMMUFD + bool + depends on VFIO diff --git a/backends/cryptodev-builtin.c b/backends/cryptodev-builtin.c index 39d04552807..a514bbb3101 100644 --- a/backends/cryptodev-builtin.c +++ b/backends/cryptodev-builtin.c @@ -427,7 +427,9 @@ static int cryptodev_builtin_close_session( CRYPTODEV_BACKEND_BUILTIN(backend); CryptoDevBackendBuiltinSession *session; - assert(session_id < MAX_NUM_SESSIONS && builtin->sessions[session_id]); + if (session_id >= MAX_NUM_SESSIONS || !builtin->sessions[session_id]) { + return -VIRTIO_CRYPTO_INVSESS; + } session = builtin->sessions[session_id]; if (session->cipher) { diff --git a/backends/dbus-vmstate.c b/backends/dbus-vmstate.c index a9d8cb0acd5..be6c4d8e0ae 100644 --- a/backends/dbus-vmstate.c +++ b/backends/dbus-vmstate.c @@ -393,7 +393,7 @@ static const VMStateDescription dbus_vmstate = { .version_id = 0, .pre_save = dbus_vmstate_pre_save, .post_load = dbus_vmstate_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(data_size, DBusVMState), VMSTATE_VBUFFER_ALLOC_UINT32(data, DBusVMState, 0, 0, data_size), VMSTATE_END_OF_LIST() 
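The cryptodev-builtin hunk above replaces an assert() on a guest-supplied session_id with an explicit range-and-liveness check that returns -VIRTIO_CRYPTO_INVSESS, so a misbehaving guest can no longer abort the emulator. A compilable sketch of the same validate-don't-assert pattern, with hypothetical names standing in for the real backend state:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_SESSIONS        256
#define ERR_INVALID_SESSION (-1)   /* stand-in for -VIRTIO_CRYPTO_INVSESS */

struct session;                             /* opaque per-session state */
static struct session *sessions[MAX_SESSIONS];

static int close_session(uint64_t session_id)
{
    /*
     * session_id originates in the guest, so treat it as untrusted:
     * reject bad values with an error code instead of assert()ing,
     * which would turn guest misbehaviour into a host-side abort.
     */
    if (session_id >= MAX_SESSIONS || sessions[session_id] == NULL) {
        return ERR_INVALID_SESSION;
    }
    /* ... tear down sessions[session_id] here ... */
    sessions[session_id] = NULL;
    return 0;
}

int main(void)
{
    printf("out of range  -> %d\n", close_session(MAX_SESSIONS + 7));
    printf("never opened  -> %d\n", close_session(0));
    return 0;
}

The rule of thumb: assertions guard internal invariants, while anything reachable from guest-controlled input gets a checked error path.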
diff --git a/backends/hostmem-epc.c b/backends/hostmem-epc.c index 4e162d6789e..735e2e1cf84 100644 --- a/backends/hostmem-epc.c +++ b/backends/hostmem-epc.c @@ -17,31 +17,29 @@ #include "sysemu/hostmem.h" #include "hw/i386/hostmem-epc.h" -static void +static bool sgx_epc_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) { + g_autofree char *name = NULL; uint32_t ram_flags; - char *name; int fd; if (!backend->size) { error_setg(errp, "can't create backend with size 0"); - return; + return false; } fd = qemu_open_old("/dev/sgx_vepc", O_RDWR); if (fd < 0) { error_setg_errno(errp, errno, "failed to open /dev/sgx_vepc to alloc SGX EPC"); - return; + return false; } name = object_get_canonical_path(OBJECT(backend)); ram_flags = (backend->share ? RAM_SHARED : 0) | RAM_PROTECTED; - memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), - name, backend->size, ram_flags, - fd, 0, errp); - g_free(name); + return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name, + backend->size, ram_flags, fd, 0, errp); } static void sgx_epc_backend_instance_init(Object *obj) diff --git a/backends/hostmem-file.c b/backends/hostmem-file.c index 361d4a8103e..ac3e433cbdd 100644 --- a/backends/hostmem-file.c +++ b/backends/hostmem-file.c @@ -36,24 +36,25 @@ struct HostMemoryBackendFile { OnOffAuto rom; }; -static void +static bool file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) { #ifndef CONFIG_POSIX error_setg(errp, "backend '%s' not supported on this host", object_get_typename(OBJECT(backend))); + return false; #else HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(backend); + g_autofree gchar *name = NULL; uint32_t ram_flags; - gchar *name; if (!backend->size) { error_setg(errp, "can't create backend with size 0"); - return; + return false; } if (!fb->mem_path) { error_setg(errp, "mem-path property not set"); - return; + return false; } switch (fb->rom) { @@ -65,18 +66,18 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) if (!fb->readonly) { error_setg(errp, "property 'rom' = 'on' is not supported with" " 'readonly' = 'off'"); - return; + return false; } break; case ON_OFF_AUTO_OFF: if (fb->readonly && backend->share) { error_setg(errp, "property 'rom' = 'off' is incompatible with" " 'readonly' = 'on' and 'share' = 'on'"); - return; + return false; } break; default: - assert(false); + g_assert_not_reached(); } name = host_memory_backend_get_name(backend); @@ -86,10 +87,9 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) ram_flags |= backend->reserve ? 0 : RAM_NORESERVE; ram_flags |= fb->is_pmem ? 
RAM_PMEM : 0; ram_flags |= RAM_NAMED_FILE; - memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name, - backend->size, fb->align, ram_flags, - fb->mem_path, fb->offset, errp); - g_free(name); + return memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name, + backend->size, fb->align, ram_flags, + fb->mem_path, fb->offset, errp); #endif } diff --git a/backends/hostmem-memfd.c b/backends/hostmem-memfd.c index 3fc85c3db81..3923ea9364d 100644 --- a/backends/hostmem-memfd.c +++ b/backends/hostmem-memfd.c @@ -31,17 +31,17 @@ struct HostMemoryBackendMemfd { bool seal; }; -static void +static bool memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) { HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(backend); + g_autofree char *name = NULL; uint32_t ram_flags; - char *name; int fd; if (!backend->size) { error_setg(errp, "can't create backend with size 0"); - return; + return false; } fd = qemu_memfd_create(TYPE_MEMORY_BACKEND_MEMFD, backend->size, @@ -49,15 +49,14 @@ memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL : 0, errp); if (fd == -1) { - return; + return false; } name = host_memory_backend_get_name(backend); ram_flags = backend->share ? RAM_SHARED : 0; ram_flags |= backend->reserve ? 0 : RAM_NORESERVE; - memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name, - backend->size, ram_flags, fd, 0, errp); - g_free(name); + return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name, + backend->size, ram_flags, fd, 0, errp); } static bool diff --git a/backends/hostmem-ram.c b/backends/hostmem-ram.c index b8e55cdbd0f..d121249f0f4 100644 --- a/backends/hostmem-ram.c +++ b/backends/hostmem-ram.c @@ -16,23 +16,23 @@ #include "qemu/module.h" #include "qom/object_interfaces.h" -static void +static bool ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) { + g_autofree char *name = NULL; uint32_t ram_flags; - char *name; if (!backend->size) { error_setg(errp, "can't create backend with size 0"); - return; + return false; } name = host_memory_backend_get_name(backend); ram_flags = backend->share ? RAM_SHARED : 0; ram_flags |= backend->reserve ? 
0 : RAM_NORESERVE; - memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend), name, - backend->size, ram_flags, errp); - g_free(name); + return memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend), + name, backend->size, + ram_flags, errp); } static void diff --git a/backends/hostmem.c b/backends/hostmem.c index 747e7838c03..81a72ce40b7 100644 --- a/backends/hostmem.c +++ b/backends/hostmem.c @@ -20,6 +20,7 @@ #include "qom/object_interfaces.h" #include "qemu/mmap-alloc.h" #include "qemu/madvise.h" +#include "hw/qdev-core.h" #ifdef CONFIG_NUMA #include @@ -219,7 +220,6 @@ static bool host_memory_backend_get_prealloc(Object *obj, Error **errp) static void host_memory_backend_set_prealloc(Object *obj, bool value, Error **errp) { - Error *local_err = NULL; HostMemoryBackend *backend = MEMORY_BACKEND(obj); if (!backend->reserve && value) { @@ -237,10 +237,8 @@ static void host_memory_backend_set_prealloc(Object *obj, bool value, void *ptr = memory_region_get_ram_ptr(&backend->mr); uint64_t sz = memory_region_size(&backend->mr); - qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads, - backend->prealloc_context, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads, + backend->prealloc_context, false, errp)) { return; } backend->prealloc = true; @@ -324,91 +322,92 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp) { HostMemoryBackend *backend = MEMORY_BACKEND(uc); HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc); - Error *local_err = NULL; void *ptr; uint64_t sz; + bool async = !phase_check(PHASE_LATE_BACKENDS_CREATED); - if (bc->alloc) { - bc->alloc(backend, &local_err); - if (local_err) { - goto out; - } + if (!bc->alloc) { + return; + } + if (!bc->alloc(backend, errp)) { + return; + } - ptr = memory_region_get_ram_ptr(&backend->mr); - sz = memory_region_size(&backend->mr); + ptr = memory_region_get_ram_ptr(&backend->mr); + sz = memory_region_size(&backend->mr); - if (backend->merge) { - qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE); - } - if (!backend->dump) { - qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP); - } + if (backend->merge) { + qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE); + } + if (!backend->dump) { + qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP); + } #ifdef CONFIG_NUMA - unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES); - /* lastbit == MAX_NODES means maxnode = 0 */ - unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1); - /* ensure policy won't be ignored in case memory is preallocated - * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so - * this doesn't catch hugepage case. */ - unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE; - int mode = backend->policy; - - /* check for invalid host-nodes and policies and give more verbose - * error messages than mbind(). */ - if (maxnode && backend->policy == MPOL_DEFAULT) { - error_setg(errp, "host-nodes must be empty for policy default," - " or you should explicitly specify a policy other" - " than default"); - return; - } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) { - error_setg(errp, "host-nodes must be set for policy %s", - HostMemPolicy_str(backend->policy)); - return; - } + unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES); + /* lastbit == MAX_NODES means maxnode = 0 */ + unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1); + /* + * Ensure policy won't be ignored in case memory is preallocated + * before mbind(). 
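The hostmem hunks above convert each backend's ->alloc hook from void to bool and switch the region name to g_autofree, so every error path both reports through errp and returns false without needing a manual g_free(). The sketch below shows the same convention using plain GLib as a stand-in: Backend, backend_memory_alloc and the "hostmem" error domain are invented for illustration and are not QEMU's Error API.

    /* build: gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a host-memory backend object. */
    typedef struct { size_t size; bool share; } Backend;

    static bool backend_memory_alloc(Backend *b, GError **errp)
    {
        /* Freed automatically on every return path, success or failure. */
        g_autofree char *name = g_strdup_printf("backend-%p", (void *)b);

        if (b->size == 0) {
            g_set_error(errp, g_quark_from_static_string("hostmem"), 1,
                        "can't create backend with size 0");
            return false;   /* bool return mirrors the new hook signature */
        }

        printf("allocating %zu bytes for %s (share=%d)\n",
               b->size, name, b->share);
        return true;
    }

    int main(void)
    {
        Backend bad = { .size = 0 }, good = { .size = 4096, .share = true };
        g_autoptr(GError) err = NULL;

        if (!backend_memory_alloc(&bad, &err)) {
            fprintf(stderr, "alloc failed: %s\n", err->message);
            g_clear_error(&err);
        }
        return backend_memory_alloc(&good, &err) ? 0 : 1;
    }
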
note: MPOL_MF_STRICT is ignored on hugepages so + * this doesn't catch hugepage case. + */ + unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE; + int mode = backend->policy; + + /* check for invalid host-nodes and policies and give more verbose + * error messages than mbind(). */ + if (maxnode && backend->policy == MPOL_DEFAULT) { + error_setg(errp, "host-nodes must be empty for policy default," + " or you should explicitly specify a policy other" + " than default"); + return; + } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) { + error_setg(errp, "host-nodes must be set for policy %s", + HostMemPolicy_str(backend->policy)); + return; + } - /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1 - * as argument to mbind() due to an old Linux bug (feature?) which - * cuts off the last specified node. This means backend->host_nodes - * must have MAX_NODES+1 bits available. - */ - assert(sizeof(backend->host_nodes) >= - BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long)); - assert(maxnode <= MAX_NODES); + /* + * We can have up to MAX_NODES nodes, but we need to pass maxnode+1 + * as argument to mbind() due to an old Linux bug (feature?) which + * cuts off the last specified node. This means backend->host_nodes + * must have MAX_NODES+1 bits available. + */ + assert(sizeof(backend->host_nodes) >= + BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long)); + assert(maxnode <= MAX_NODES); #ifdef HAVE_NUMA_HAS_PREFERRED_MANY - if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) { - /* - * Replace with MPOL_PREFERRED_MANY otherwise the mbind() below - * silently picks the first node. - */ - mode = MPOL_PREFERRED_MANY; - } + if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) { + /* + * Replace with MPOL_PREFERRED_MANY otherwise the mbind() below + * silently picks the first node. + */ + mode = MPOL_PREFERRED_MANY; + } #endif - if (maxnode && - mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) { - if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) { - error_setg_errno(errp, errno, - "cannot bind memory to host NUMA nodes"); - return; - } + if (maxnode && + mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) { + if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) { + error_setg_errno(errp, errno, + "cannot bind memory to host NUMA nodes"); + return; } + } #endif - /* Preallocate memory after the NUMA policy has been instantiated. - * This is necessary to guarantee memory is allocated with - * specified NUMA policy in place. - */ - if (backend->prealloc) { - qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz, - backend->prealloc_threads, - backend->prealloc_context, &local_err); - if (local_err) { - goto out; - } - } + /* + * Preallocate memory after the NUMA policy has been instantiated. + * This is necessary to guarantee memory is allocated with + * specified NUMA policy in place. + */ + if (backend->prealloc && !qemu_prealloc_mem(memory_region_get_fd(&backend->mr), + ptr, sz, + backend->prealloc_threads, + backend->prealloc_context, + async, errp)) { + return; } -out: - error_propagate(errp, local_err); } static bool diff --git a/backends/iommufd.c b/backends/iommufd.c new file mode 100644 index 00000000000..62a79fa6b04 --- /dev/null +++ b/backends/iommufd.c @@ -0,0 +1,234 @@ +/* + * iommufd container backend + * + * Copyright (C) 2023 Intel Corporation. + * Copyright Red Hat, Inc. 
2023 + * + * Authors: Yi Liu + * Eric Auger + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "sysemu/iommufd.h" +#include "qapi/error.h" +#include "qapi/qmp/qerror.h" +#include "qemu/module.h" +#include "qom/object_interfaces.h" +#include "qemu/error-report.h" +#include "monitor/monitor.h" +#include "trace.h" +#include +#include + +static void iommufd_backend_init(Object *obj) +{ + IOMMUFDBackend *be = IOMMUFD_BACKEND(obj); + + be->fd = -1; + be->users = 0; + be->owned = true; +} + +static void iommufd_backend_finalize(Object *obj) +{ + IOMMUFDBackend *be = IOMMUFD_BACKEND(obj); + + if (be->owned) { + close(be->fd); + be->fd = -1; + } +} + +static void iommufd_backend_set_fd(Object *obj, const char *str, Error **errp) +{ + ERRP_GUARD(); + IOMMUFDBackend *be = IOMMUFD_BACKEND(obj); + int fd = -1; + + fd = monitor_fd_param(monitor_cur(), str, errp); + if (fd == -1) { + error_prepend(errp, "Could not parse remote object fd %s:", str); + return; + } + be->fd = fd; + be->owned = false; + trace_iommu_backend_set_fd(be->fd); +} + +static bool iommufd_backend_can_be_deleted(UserCreatable *uc) +{ + IOMMUFDBackend *be = IOMMUFD_BACKEND(uc); + + return !be->users; +} + +static void iommufd_backend_class_init(ObjectClass *oc, void *data) +{ + UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); + + ucc->can_be_deleted = iommufd_backend_can_be_deleted; + + object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd); +} + +int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp) +{ + int fd, ret = 0; + + if (be->owned && !be->users) { + fd = qemu_open_old("/dev/iommu", O_RDWR); + if (fd < 0) { + error_setg_errno(errp, errno, "/dev/iommu opening failed"); + ret = fd; + goto out; + } + be->fd = fd; + } + be->users++; +out: + trace_iommufd_backend_connect(be->fd, be->owned, + be->users, ret); + return ret; +} + +void iommufd_backend_disconnect(IOMMUFDBackend *be) +{ + if (!be->users) { + goto out; + } + be->users--; + if (!be->users && be->owned) { + close(be->fd); + be->fd = -1; + } +out: + trace_iommufd_backend_disconnect(be->fd, be->users); +} + +int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id, + Error **errp) +{ + int ret, fd = be->fd; + struct iommu_ioas_alloc alloc_data = { + .size = sizeof(alloc_data), + .flags = 0, + }; + + ret = ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data); + if (ret) { + error_setg_errno(errp, errno, "Failed to allocate ioas"); + return ret; + } + + *ioas_id = alloc_data.out_ioas_id; + trace_iommufd_backend_alloc_ioas(fd, *ioas_id, ret); + + return ret; +} + +void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id) +{ + int ret, fd = be->fd; + struct iommu_destroy des = { + .size = sizeof(des), + .id = id, + }; + + ret = ioctl(fd, IOMMU_DESTROY, &des); + trace_iommufd_backend_free_id(fd, id, ret); + if (ret) { + error_report("Failed to free id: %u %m", id); + } +} + +int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova, + ram_addr_t size, void *vaddr, bool readonly) +{ + int ret, fd = be->fd; + struct iommu_ioas_map map = { + .size = sizeof(map), + .flags = IOMMU_IOAS_MAP_READABLE | + IOMMU_IOAS_MAP_FIXED_IOVA, + .ioas_id = ioas_id, + .__reserved = 0, + .user_va = (uintptr_t)vaddr, + .iova = iova, + .length = size, + }; + + if (!readonly) { + map.flags |= IOMMU_IOAS_MAP_WRITEABLE; + } + + ret = ioctl(fd, IOMMU_IOAS_MAP, &map); + trace_iommufd_backend_map_dma(fd, ioas_id, iova, size, + vaddr, readonly, ret); + if (ret) { + ret = -errno; + + /* TODO: Not support mapping hardware 
PCI BAR region for now. */ + if (errno == EFAULT) { + warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?"); + } else { + error_report("IOMMU_IOAS_MAP failed: %m"); + } + } + return ret; +} + +int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id, + hwaddr iova, ram_addr_t size) +{ + int ret, fd = be->fd; + struct iommu_ioas_unmap unmap = { + .size = sizeof(unmap), + .ioas_id = ioas_id, + .iova = iova, + .length = size, + }; + + ret = ioctl(fd, IOMMU_IOAS_UNMAP, &unmap); + /* + * IOMMUFD takes mapping as some kind of object, unmapping + * nonexistent mapping is treated as deleting a nonexistent + * object and return ENOENT. This is different from legacy + * backend which allows it. vIOMMU may trigger a lot of + * redundant unmapping, to avoid flush the log, treat them + * as succeess for IOMMUFD just like legacy backend. + */ + if (ret && errno == ENOENT) { + trace_iommufd_backend_unmap_dma_non_exist(fd, ioas_id, iova, size, ret); + ret = 0; + } else { + trace_iommufd_backend_unmap_dma(fd, ioas_id, iova, size, ret); + } + + if (ret) { + ret = -errno; + error_report("IOMMU_IOAS_UNMAP failed: %m"); + } + return ret; +} + +static const TypeInfo iommufd_backend_info = { + .name = TYPE_IOMMUFD_BACKEND, + .parent = TYPE_OBJECT, + .instance_size = sizeof(IOMMUFDBackend), + .instance_init = iommufd_backend_init, + .instance_finalize = iommufd_backend_finalize, + .class_size = sizeof(IOMMUFDBackendClass), + .class_init = iommufd_backend_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_USER_CREATABLE }, + { } + } +}; + +static void register_types(void) +{ + type_register_static(&iommufd_backend_info); +} + +type_init(register_types); diff --git a/backends/meson.build b/backends/meson.build index 914c7c4afb9..8b2b111497f 100644 --- a/backends/meson.build +++ b/backends/meson.build @@ -10,9 +10,13 @@ system_ss.add([files( 'confidential-guest-support.c', ), numa]) -system_ss.add(when: 'CONFIG_POSIX', if_true: files('rng-random.c')) -system_ss.add(when: 'CONFIG_POSIX', if_true: files('hostmem-file.c')) -system_ss.add(when: 'CONFIG_LINUX', if_true: files('hostmem-memfd.c')) +if host_os != 'windows' + system_ss.add(files('rng-random.c')) + system_ss.add(files('hostmem-file.c')) +endif +if host_os == 'linux' + system_ss.add(files('hostmem-memfd.c')) +endif if keyutils.found() system_ss.add(keyutils, files('cryptodev-lkcf.c')) endif @@ -20,6 +24,7 @@ if have_vhost_user system_ss.add(when: 'CONFIG_VIRTIO', if_true: files('vhost-user.c')) endif system_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost.c')) +system_ss.add(when: 'CONFIG_IOMMUFD', if_true: files('iommufd.c')) if have_vhost_user_crypto system_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost-user.c')) endif diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c index f7f1b4ad7a8..5a8fba9bded 100644 --- a/backends/tpm/tpm_emulator.c +++ b/backends/tpm/tpm_emulator.c @@ -904,7 +904,7 @@ static void tpm_emulator_vm_state_change(void *opaque, bool running, trace_tpm_emulator_vm_state_change(running, state); - if (!running || state != RUN_STATE_RUNNING || !tpm_emu->relock_storage) { + if (!running || !tpm_emu->relock_storage) { return; } @@ -939,7 +939,7 @@ static const VMStateDescription vmstate_tpm_emulator = { .version_id = 0, .pre_save = tpm_emulator_pre_save, .post_load = tpm_emulator_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(state_blobs.permanent_flags, TPMEmulator), VMSTATE_UINT32(state_blobs.permanent.size, TPMEmulator), 
VMSTATE_VBUFFER_ALLOC_UINT32(state_blobs.permanent.buffer, diff --git a/backends/trace-events b/backends/trace-events index 652eb76a572..d45c6e31a67 100644 --- a/backends/trace-events +++ b/backends/trace-events @@ -5,3 +5,13 @@ dbus_vmstate_pre_save(void) dbus_vmstate_post_load(int version_id) "version_id: %d" dbus_vmstate_loading(const char *id) "id: %s" dbus_vmstate_saving(const char *id) "id: %s" + +# iommufd.c +iommufd_backend_connect(int fd, bool owned, uint32_t users, int ret) "fd=%d owned=%d users=%d (%d)" +iommufd_backend_disconnect(int fd, uint32_t users) "fd=%d users=%d" +iommu_backend_set_fd(int fd) "pre-opened /dev/iommu fd=%d" +iommufd_backend_map_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, void *vaddr, bool readonly, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" addr=%p readonly=%d (%d)" +iommufd_backend_unmap_dma_non_exist(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " Unmap nonexistent mapping: iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)" +iommufd_backend_unmap_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)" +iommufd_backend_alloc_ioas(int iommufd, uint32_t ioas, int ret) " iommufd=%d ioas=%d (%d)" +iommufd_backend_free_id(int iommufd, uint32_t id, int ret) " iommufd=%d id=%d (%d)" diff --git a/block.c b/block.c index bfb0861ec61..468cf5e67d7 100644 --- a/block.c +++ b/block.c @@ -534,9 +534,9 @@ typedef struct CreateCo { int coroutine_fn bdrv_co_create(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp) { + ERRP_GUARD(); int ret; GLOBAL_STATE_CODE(); - ERRP_GUARD(); if (!drv->bdrv_co_create_opts) { error_setg(errp, "Driver '%s' does not support image creation", @@ -633,6 +633,7 @@ int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv, QemuOpts *opts, Error **errp) { + ERRP_GUARD(); BlockBackend *blk; QDict *options; int64_t size = 0; @@ -1309,11 +1310,14 @@ static void bdrv_backing_detach(BdrvChild *c) } static int bdrv_backing_update_filename(BdrvChild *c, BlockDriverState *base, - const char *filename, Error **errp) + const char *filename, + bool backing_mask_protocol, + Error **errp) { BlockDriverState *parent = c->opaque; bool read_only = bdrv_is_read_only(parent); int ret; + const char *format_name; GLOBAL_STATE_CODE(); if (read_only) { @@ -1323,9 +1327,23 @@ static int bdrv_backing_update_filename(BdrvChild *c, BlockDriverState *base, } } - ret = bdrv_change_backing_file(parent, filename, - base->drv ? 
base->drv->format_name : "", - false); + if (base->drv) { + /* + * If the new base image doesn't have a format driver layer, which we + * detect by the fact that @base is a protocol driver, we record + * 'raw' as the format instead of putting the protocol name as the + * backing format + */ + if (backing_mask_protocol && base->drv->protocol_name) { + format_name = "raw"; + } else { + format_name = base->drv->format_name; + } + } else { + format_name = ""; + } + + ret = bdrv_change_backing_file(parent, filename, format_name, false); if (ret < 0) { error_setg_errno(errp, -ret, "Could not update backing file link"); } @@ -1479,10 +1497,14 @@ static void GRAPH_WRLOCK bdrv_child_cb_detach(BdrvChild *child) } static int bdrv_child_cb_update_filename(BdrvChild *c, BlockDriverState *base, - const char *filename, Error **errp) + const char *filename, + bool backing_mask_protocol, + Error **errp) { if (c->role & BDRV_CHILD_COW) { - return bdrv_backing_update_filename(c, base, filename, errp); + return bdrv_backing_update_filename(c, base, filename, + backing_mask_protocol, + errp); } return 0; } @@ -1616,16 +1638,10 @@ static void bdrv_assign_node_name(BlockDriverState *bs, g_free(gen_node_name); } -/* - * The caller must always hold @bs AioContext lock, because this function calls - * bdrv_refresh_total_sectors() which polls when called from non-coroutine - * context. - */ static int no_coroutine_fn GRAPH_UNLOCKED bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name, QDict *options, int open_flags, Error **errp) { - AioContext *ctx; Error *local_err = NULL; int i, ret; GLOBAL_STATE_CODE(); @@ -1673,21 +1689,15 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name, bs->supported_read_flags |= BDRV_REQ_REGISTERED_BUF; bs->supported_write_flags |= BDRV_REQ_REGISTERED_BUF; - /* Get the context after .bdrv_open, it can change the context */ - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); - ret = bdrv_refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { error_setg_errno(errp, -ret, "Could not refresh total sector count"); - aio_context_release(ctx); return ret; } bdrv_graph_rdlock_main_loop(); bdrv_refresh_limits(bs, NULL, &local_err); bdrv_graph_rdunlock_main_loop(); - aio_context_release(ctx); if (local_err) { error_propagate(errp, local_err); @@ -1708,12 +1718,12 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name, open_failed: bs->drv = NULL; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); if (bs->file != NULL) { bdrv_unref_child(bs, bs->file); assert(!bs->file); } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); g_free(bs->opaque); bs->opaque = NULL; @@ -1989,6 +1999,7 @@ static int bdrv_open_common(BlockDriverState *bs, BlockBackend *file, static QDict *parse_json_filename(const char *filename, Error **errp) { + ERRP_GUARD(); QObject *options_obj; QDict *options; int ret; @@ -2908,7 +2919,7 @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm) * Replaces the node that a BdrvChild points to without updating permissions. * * If @new_bs is non-NULL, the parent of @child must already be drained through - * @child and the caller must hold the AioContext lock for @new_bs. + * @child. */ static void GRAPH_WRLOCK bdrv_replace_child_noperm(BdrvChild *child, BlockDriverState *new_bs) @@ -3048,9 +3059,8 @@ static TransactionActionDrv bdrv_attach_child_common_drv = { * * Returns new created child. * - * The caller must hold the AioContext lock for @child_bs. 
Both @parent_bs and - * @child_bs can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. + * Both @parent_bs and @child_bs can move to a different AioContext in this + * function. */ static BdrvChild * GRAPH_WRLOCK bdrv_attach_child_common(BlockDriverState *child_bs, @@ -3062,7 +3072,7 @@ bdrv_attach_child_common(BlockDriverState *child_bs, Transaction *tran, Error **errp) { BdrvChild *new_child; - AioContext *parent_ctx, *new_child_ctx; + AioContext *parent_ctx; AioContext *child_ctx = bdrv_get_aio_context(child_bs); assert(child_class->get_parent_desc); @@ -3114,12 +3124,6 @@ bdrv_attach_child_common(BlockDriverState *child_bs, } } - new_child_ctx = bdrv_get_aio_context(child_bs); - if (new_child_ctx != child_ctx) { - aio_context_release(child_ctx); - aio_context_acquire(new_child_ctx); - } - bdrv_ref(child_bs); /* * Let every new BdrvChild start with a drained parent. Inserting the child @@ -3149,20 +3153,14 @@ bdrv_attach_child_common(BlockDriverState *child_bs, }; tran_add(tran, &bdrv_attach_child_common_drv, s); - if (new_child_ctx != child_ctx) { - aio_context_release(new_child_ctx); - aio_context_acquire(child_ctx); - } - return new_child; } /* * Function doesn't update permissions, caller is responsible for this. * - * The caller must hold the AioContext lock for @child_bs. Both @parent_bs and - * @child_bs can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. + * Both @parent_bs and @child_bs can move to a different AioContext in this + * function. * * After calling this function, the transaction @tran may only be completed * while holding a writer lock for the graph. @@ -3202,9 +3200,6 @@ bdrv_attach_child_noperm(BlockDriverState *parent_bs, * * On failure NULL is returned, errp is set and the reference to * child_bs is also dropped. - * - * The caller must hold the AioContext lock @child_bs, but not that of @ctx - * (unless @child_bs is already in @ctx). */ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, const char *child_name, @@ -3244,9 +3239,6 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, * * On failure NULL is returned, errp is set and the reference to * child_bs is also dropped. - * - * If @parent_bs and @child_bs are in different AioContexts, the caller must - * hold the AioContext lock for @child_bs, but not for @parent_bs. */ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, @@ -3436,9 +3428,8 @@ static BdrvChildRole bdrv_backing_role(BlockDriverState *bs) * * Function doesn't update permissions, caller is responsible for this. * - * The caller must hold the AioContext lock for @child_bs. Both @parent_bs and - * @child_bs can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. + * Both @parent_bs and @child_bs can move to a different AioContext in this + * function. * * After calling this function, the transaction @tran may only be completed * while holding a writer lock for the graph. @@ -3531,9 +3522,8 @@ bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs, } /* - * The caller must hold the AioContext lock for @backing_hd. Both @bs and - * @backing_hd can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. 
+ * Both @bs and @backing_hd can move to a different AioContext in this + * function. * * If a backing child is already present (i.e. we're detaching a node), that * child node must be drained. @@ -3575,9 +3565,9 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, bdrv_ref(drain_bs); bdrv_drained_begin(drain_bs); - bdrv_graph_wrlock(backing_hd); + bdrv_graph_wrlock(); ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp); - bdrv_graph_wrunlock(backing_hd); + bdrv_graph_wrunlock(); bdrv_drained_end(drain_bs); bdrv_unref(drain_bs); @@ -3592,20 +3582,18 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, * itself, all options starting with "${bdref_key}." are considered part of the * BlockdevRef. * - * The caller must hold the main AioContext lock. - * * TODO Can this be unified with bdrv_open_image()? */ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, const char *bdref_key, Error **errp) { + ERRP_GUARD(); char *backing_filename = NULL; char *bdref_key_dot; const char *reference = NULL; int ret = 0; bool implicit_backing = false; BlockDriverState *backing_hd; - AioContext *backing_hd_ctx; QDict *options; QDict *tmp_parent_options = NULL; Error *local_err = NULL; @@ -3691,11 +3679,8 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, /* Hook up the backing file link; drop our reference, bs owns the * backing_hd reference now */ - backing_hd_ctx = bdrv_get_aio_context(backing_hd); - aio_context_acquire(backing_hd_ctx); ret = bdrv_set_backing_hd(bs, backing_hd, errp); bdrv_unref(backing_hd); - aio_context_release(backing_hd_ctx); if (ret < 0) { goto free_exit; @@ -3767,9 +3752,7 @@ bdrv_open_child_bs(const char *filename, QDict *options, const char *bdref_key, * * The BlockdevRef will be removed from the options QDict. * - * The caller must hold the lock of the main AioContext and no other AioContext. - * @parent can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. + * @parent can move to a different AioContext in this function. */ BdrvChild *bdrv_open_child(const char *filename, QDict *options, const char *bdref_key, @@ -3780,7 +3763,6 @@ BdrvChild *bdrv_open_child(const char *filename, { BlockDriverState *bs; BdrvChild *child; - AioContext *ctx; GLOBAL_STATE_CODE(); @@ -3790,13 +3772,10 @@ BdrvChild *bdrv_open_child(const char *filename, return NULL; } - bdrv_graph_wrlock(NULL); - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); + bdrv_graph_wrlock(); child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role, errp); - aio_context_release(ctx); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); return child; } @@ -3804,9 +3783,7 @@ BdrvChild *bdrv_open_child(const char *filename, /* * Wrapper on bdrv_open_child() for most popular case: open primary child of bs. * - * The caller must hold the lock of the main AioContext and no other AioContext. - * @parent can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. + * @parent can move to a different AioContext in this function. 
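bdrv_open_backing_file() and a few neighbouring functions gain ERRP_GUARD(), which exists so a function can inspect or decorate the error it is about to return even when the caller passed errp == NULL. The following is only a GLib analogue of that idea, with invented parse_number()/load_config() helpers; it is not how ERRP_GUARD() is actually implemented.

    #include <glib.h>
    #include <stdio.h>
    #include <stdlib.h>

    static gboolean parse_number(const char *s, long *out, GError **errp)
    {
        char *end;

        *out = strtol(s, &end, 10);
        if (*end != '\0') {
            g_set_error(errp, G_OPTION_ERROR, G_OPTION_ERROR_BAD_VALUE,
                        "'%s' is not a number", s);
            return FALSE;
        }
        return TRUE;
    }

    /* Using a local error first lets this function look at (and prefix) the
     * failure before handing it up, even if the caller passed errp == NULL;
     * that is the problem ERRP_GUARD() solves inside QEMU. */
    static gboolean load_config(const char *value, long *out, GError **errp)
    {
        GError *local_err = NULL;

        if (!parse_number(value, out, &local_err)) {
            g_prefix_error(&local_err, "config is unusable: ");
            g_propagate_error(errp, local_err);
            return FALSE;
        }
        return TRUE;
    }

    int main(void)
    {
        long v;
        g_autoptr(GError) err = NULL;

        if (!load_config("12x", &v, &err)) {
            fprintf(stderr, "%s\n", err->message);
        }
        return 0;
    }
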
*/ int bdrv_open_file_child(const char *filename, QDict *options, const char *bdref_key, @@ -3877,11 +3854,11 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, QDict *snapshot_options, Error **errp) { + ERRP_GUARD(); g_autofree char *tmp_filename = NULL; int64_t total_size; QemuOpts *opts = NULL; BlockDriverState *bs_snapshot = NULL; - AioContext *ctx = bdrv_get_aio_context(bs); int ret; GLOBAL_STATE_CODE(); @@ -3890,9 +3867,7 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, instead of opening 'filename' directly */ /* Get the required size from the image */ - aio_context_acquire(ctx); total_size = bdrv_getlength(bs); - aio_context_release(ctx); if (total_size < 0) { error_setg_errno(errp, -total_size, "Could not get image size"); @@ -3927,10 +3902,7 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, goto out; } - aio_context_acquire(ctx); ret = bdrv_append(bs_snapshot, bs, errp); - aio_context_release(ctx); - if (ret < 0) { bs_snapshot = NULL; goto out; @@ -3955,8 +3927,6 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, * The reference parameter may be used to specify an existing block device which * should be opened. If specified, neither options nor a filename may be given, * nor can an existing BDS be reused (that is, *pbs has to be NULL). - * - * The caller must always hold the main AioContext lock. */ static BlockDriverState * no_coroutine_fn bdrv_open_inherit(const char *filename, const char *reference, QDict *options, @@ -3974,7 +3944,6 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, Error *local_err = NULL; QDict *snapshot_options = NULL; int snapshot_flags = 0; - AioContext *ctx = qemu_get_aio_context(); assert(!child_class || !flags); assert(!child_class == !parent); @@ -4115,12 +4084,10 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, /* Not requesting BLK_PERM_CONSISTENT_READ because we're only * looking at the header to guess the image format. This works even * in cases where a guest would not see a consistent state. */ - ctx = bdrv_get_aio_context(file_bs); - aio_context_acquire(ctx); + AioContext *ctx = bdrv_get_aio_context(file_bs); file = blk_new(ctx, 0, BLK_PERM_ALL); blk_insert_bs(file, file_bs, &local_err); bdrv_unref(file_bs); - aio_context_release(ctx); if (local_err) { goto fail; @@ -4167,13 +4134,8 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, goto fail; } - /* The AioContext could have changed during bdrv_open_common() */ - ctx = bdrv_get_aio_context(bs); - if (file) { - aio_context_acquire(ctx); blk_unref(file); - aio_context_release(ctx); file = NULL; } @@ -4231,16 +4193,13 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, * (snapshot_bs); thus, we have to drop the strong reference to bs * (which we obtained by calling bdrv_new()). bs will not be deleted, * though, because the overlay still has a reference to it. 
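The open paths in these hunks lean on bdrv_ref()/bdrv_unref() so that, for example, the overlay keeps the original node alive after the opener drops its own reference. A generic reference-counting sketch of that ownership hand-off, using an invented Image type and C11 atomics; QEMU's block-layer refcounting is main-loop-only and implemented differently.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Image {
        atomic_int refcnt;
        char name[32];
    } Image;

    static Image *image_new(const char *name)
    {
        Image *img = calloc(1, sizeof(*img));

        if (!img) {
            abort();
        }
        atomic_init(&img->refcnt, 1);            /* creator holds one reference */
        snprintf(img->name, sizeof(img->name), "%s", name);
        return img;
    }

    static void image_ref(Image *img)
    {
        atomic_fetch_add(&img->refcnt, 1);
    }

    static void image_unref(Image *img)
    {
        /* Freed only when the last reference is dropped, so one holder can
         * safely let go while another (e.g. the overlay) still uses it. */
        if (atomic_fetch_sub(&img->refcnt, 1) == 1) {
            printf("freeing %s\n", img->name);
            free(img);
        }
    }

    int main(void)
    {
        Image *node = image_new("base");

        image_ref(node);      /* the overlay takes its own reference */
        image_unref(node);    /* opener drops its reference ... */
        image_unref(node);    /* ... object goes away with the last user */
        return 0;
    }
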
*/ - aio_context_acquire(ctx); bdrv_unref(bs); - aio_context_release(ctx); bs = snapshot_bs; } return bs; fail: - aio_context_acquire(ctx); blk_unref(file); qobject_unref(snapshot_options); qobject_unref(bs->explicit_options); @@ -4249,21 +4208,17 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, bs->options = NULL; bs->explicit_options = NULL; bdrv_unref(bs); - aio_context_release(ctx); error_propagate(errp, local_err); return NULL; close_and_fail: - aio_context_acquire(ctx); bdrv_unref(bs); - aio_context_release(ctx); qobject_unref(snapshot_options); qobject_unref(options); error_propagate(errp, local_err); return NULL; } -/* The caller must always hold the main AioContext lock. */ BlockDriverState *bdrv_open(const char *filename, const char *reference, QDict *options, int flags, Error **errp) { @@ -4540,12 +4495,7 @@ void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue) if (bs_queue) { BlockReopenQueueEntry *bs_entry, *next; QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { - AioContext *ctx = bdrv_get_aio_context(bs_entry->state.bs); - - aio_context_acquire(ctx); bdrv_drained_end(bs_entry->state.bs); - aio_context_release(ctx); - qobject_unref(bs_entry->state.explicit_options); qobject_unref(bs_entry->state.options); g_free(bs_entry); @@ -4577,7 +4527,6 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) { int ret = -1; BlockReopenQueueEntry *bs_entry, *next; - AioContext *ctx; Transaction *tran = tran_new(); g_autoptr(GSList) refresh_list = NULL; @@ -4586,10 +4535,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) GLOBAL_STATE_CODE(); QTAILQ_FOREACH(bs_entry, bs_queue, entry) { - ctx = bdrv_get_aio_context(bs_entry->state.bs); - aio_context_acquire(ctx); ret = bdrv_flush(bs_entry->state.bs); - aio_context_release(ctx); if (ret < 0) { error_setg_errno(errp, -ret, "Error flushing drive"); goto abort; @@ -4598,10 +4544,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) QTAILQ_FOREACH(bs_entry, bs_queue, entry) { assert(bs_entry->state.bs->quiesce_counter > 0); - ctx = bdrv_get_aio_context(bs_entry->state.bs); - aio_context_acquire(ctx); ret = bdrv_reopen_prepare(&bs_entry->state, bs_queue, tran, errp); - aio_context_release(ctx); if (ret < 0) { goto abort; } @@ -4644,24 +4587,18 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) * to first element. 
*/ QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) { - ctx = bdrv_get_aio_context(bs_entry->state.bs); - aio_context_acquire(ctx); bdrv_reopen_commit(&bs_entry->state); - aio_context_release(ctx); } - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); tran_commit(tran); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) { BlockDriverState *bs = bs_entry->state.bs; if (bs->drv->bdrv_reopen_commit_post) { - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); bs->drv->bdrv_reopen_commit_post(&bs_entry->state); - aio_context_release(ctx); } } @@ -4669,16 +4606,13 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) goto cleanup; abort: - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); tran_abort(tran); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { if (bs_entry->prepared) { - ctx = bdrv_get_aio_context(bs_entry->state.bs); - aio_context_acquire(ctx); bdrv_reopen_abort(&bs_entry->state); - aio_context_release(ctx); } } @@ -4691,24 +4625,13 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts, Error **errp) { - AioContext *ctx = bdrv_get_aio_context(bs); BlockReopenQueue *queue; - int ret; GLOBAL_STATE_CODE(); queue = bdrv_reopen_queue(NULL, bs, opts, keep_old_opts); - if (ctx != qemu_get_aio_context()) { - aio_context_release(ctx); - } - ret = bdrv_reopen_multiple(queue, errp); - - if (ctx != qemu_get_aio_context()) { - aio_context_acquire(ctx); - } - - return ret; + return bdrv_reopen_multiple(queue, errp); } int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, @@ -4743,10 +4666,7 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, * * Return 0 on success, otherwise return < 0 and set @errp. * - * The caller must hold the AioContext lock of @reopen_state->bs. * @reopen_state->bs can move to a different AioContext in this function. - * Callers must make sure that their AioContext locking is still correct after - * this. */ static int GRAPH_UNLOCKED bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, @@ -4760,7 +4680,6 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, const char *child_name = is_backing ? "backing" : "file"; QObject *value; const char *str; - AioContext *ctx, *old_ctx; bool has_child; int ret; @@ -4844,25 +4763,13 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, bdrv_drained_begin(old_child_bs); } - old_ctx = bdrv_get_aio_context(bs); - ctx = bdrv_get_aio_context(new_child_bs); - if (old_ctx != ctx) { - aio_context_release(old_ctx); - aio_context_acquire(ctx); - } - bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(new_child_bs); + bdrv_graph_wrlock(); ret = bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing, tran, errp); - bdrv_graph_wrunlock_ctx(ctx); - - if (old_ctx != ctx) { - aio_context_release(ctx); - aio_context_acquire(old_ctx); - } + bdrv_graph_wrunlock(); if (old_child_bs) { bdrv_drained_end(old_child_bs); @@ -4892,8 +4799,6 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, * It is the responsibility of the caller to then call the abort() or * commit() for any other BDS that have been left in a prepare() state * - * The caller must hold the AioContext lock of @reopen_state->bs. - * * After calling this function, the transaction @change_child_tran may only be * completed while holding a writer lock for the graph. 
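bdrv_reopen_multiple() above drives the change through a transaction: each prepared step records commit and abort behaviour, and tran_commit()/tran_abort() replay them depending on the outcome. Here is a stripped-down model of that prepare/commit/abort pattern, with invented Action/Transaction types rather than QEMU's actual transaction API.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* A recorded action: either finalize it (commit) or undo it (abort). */
    typedef struct Action {
        void (*commit)(void *opaque);
        void (*abort_cb)(void *opaque);
        void *opaque;
        struct Action *next;
    } Action;

    typedef struct { Action *head; } Transaction;

    static void tran_add(Transaction *t, void (*commit)(void *),
                         void (*abort_cb)(void *), void *opaque)
    {
        Action *a = malloc(sizeof(*a));

        if (!a) {
            abort();
        }
        a->commit = commit;
        a->abort_cb = abort_cb;
        a->opaque = opaque;
        a->next = t->head;      /* LIFO: later actions are undone first */
        t->head = a;
    }

    static void tran_finalize(Transaction *t, bool success)
    {
        for (Action *a = t->head, *next; a; a = next) {
            next = a->next;
            (success ? a->commit : a->abort_cb)(a->opaque);
            free(a);
        }
        t->head = NULL;
    }

    static void set_flag_commit(void *opaque) { *(int *)opaque = 1; }
    static void set_flag_abort(void *opaque)  { *(int *)opaque = 0; }

    int main(void)
    {
        int reopened = 0;
        Transaction t = { NULL };

        tran_add(&t, set_flag_commit, set_flag_abort, &reopened);
        /* Pretend the prepare phase failed, so everything is rolled back. */
        tran_finalize(&t, false);
        printf("reopened=%d\n", reopened);
        return 0;
    }
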
*/ @@ -5209,14 +5114,14 @@ static void bdrv_close(BlockDriverState *bs) bs->drv = NULL; } - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); QLIST_FOREACH_SAFE(child, &bs->children, next, next) { bdrv_unref_child(bs, child); } assert(!bs->backing); assert(!bs->file); - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); g_free(bs->opaque); bs->opaque = NULL; @@ -5509,9 +5414,9 @@ int bdrv_drop_filter(BlockDriverState *bs, Error **errp) bdrv_graph_rdunlock_main_loop(); bdrv_drained_begin(child_bs); - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); ret = bdrv_replace_node_common(bs, child_bs, true, true, errp); - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); bdrv_drained_end(child_bs); return ret; @@ -5528,8 +5433,6 @@ int bdrv_drop_filter(BlockDriverState *bs, Error **errp) * child. * * This function does not create any image files. - * - * The caller must hold the AioContext lock for @bs_top. */ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, Error **errp) @@ -5537,7 +5440,6 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, int ret; BdrvChild *child; Transaction *tran = tran_new(); - AioContext *old_context, *new_context = NULL; GLOBAL_STATE_CODE(); @@ -5545,23 +5447,10 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, assert(!bs_new->backing); bdrv_graph_rdunlock_main_loop(); - old_context = bdrv_get_aio_context(bs_top); bdrv_drained_begin(bs_top); - - /* - * bdrv_drained_begin() requires that only the AioContext of the drained - * node is locked, and at this point it can still differ from the AioContext - * of bs_top. - */ - new_context = bdrv_get_aio_context(bs_new); - aio_context_release(old_context); - aio_context_acquire(new_context); bdrv_drained_begin(bs_new); - aio_context_release(new_context); - aio_context_acquire(old_context); - new_context = NULL; - bdrv_graph_wrlock(bs_top); + bdrv_graph_wrlock(); child = bdrv_attach_child_noperm(bs_new, bs_top, "backing", &child_of_bds, bdrv_backing_role(bs_new), @@ -5571,18 +5460,6 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, goto out; } - /* - * bdrv_attach_child_noperm could change the AioContext of bs_top and - * bs_new, but at least they are in the same AioContext now. This is the - * AioContext that we need to lock for the rest of the function. 
- */ - new_context = bdrv_get_aio_context(bs_top); - - if (old_context != new_context) { - aio_context_release(old_context); - aio_context_acquire(new_context); - } - ret = bdrv_replace_node_noperm(bs_top, bs_new, true, tran, errp); if (ret < 0) { goto out; @@ -5593,16 +5470,11 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, tran_finalize(tran, ret); bdrv_refresh_limits(bs_top, NULL, NULL); - bdrv_graph_wrunlock(bs_top); + bdrv_graph_wrunlock(); bdrv_drained_end(bs_top); bdrv_drained_end(bs_new); - if (new_context && old_context != new_context) { - aio_context_release(new_context); - aio_context_acquire(old_context); - } - return ret; } @@ -5620,7 +5492,7 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs, bdrv_ref(old_bs); bdrv_drained_begin(old_bs); bdrv_drained_begin(new_bs); - bdrv_graph_wrlock(new_bs); + bdrv_graph_wrlock(); bdrv_replace_child_tran(child, new_bs, tran); @@ -5631,7 +5503,7 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs, tran_finalize(tran, ret); - bdrv_graph_wrunlock(new_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(old_bs); bdrv_drained_end(new_bs); bdrv_unref(old_bs); @@ -5667,9 +5539,8 @@ static void bdrv_delete(BlockDriverState *bs) * after the call (even on failure), so if the caller intends to reuse the * dictionary, it needs to use qobject_ref() before calling bdrv_open. * - * The caller holds the AioContext lock for @bs. It must make sure that @bs - * stays in the same AioContext, i.e. @options must not refer to nodes in a - * different AioContext. + * The caller must make sure that @bs stays in the same AioContext, i.e. + * @options must not refer to nodes in a different AioContext. */ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options, int flags, Error **errp) @@ -5697,12 +5568,8 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options, GLOBAL_STATE_CODE(); - aio_context_release(ctx); - aio_context_acquire(qemu_get_aio_context()); new_node_bs = bdrv_new_open_driver_opts(drv, node_name, options, flags, errp); - aio_context_release(qemu_get_aio_context()); - aio_context_acquire(ctx); assert(bdrv_get_aio_context(bs) == ctx); options = NULL; /* bdrv_new_open_driver() eats options */ @@ -5718,9 +5585,9 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options, bdrv_ref(bs); bdrv_drained_begin(bs); bdrv_drained_begin(new_node_bs); - bdrv_graph_wrlock(new_node_bs); + bdrv_graph_wrlock(); ret = bdrv_replace_node(bs, new_node_bs, errp); - bdrv_graph_wrunlock(new_node_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(new_node_bs); bdrv_drained_end(bs); bdrv_unref(bs); @@ -5961,7 +5828,8 @@ void bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base) * */ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, - const char *backing_file_str) + const char *backing_file_str, + bool backing_mask_protocol) { BlockDriverState *explicit_top = top; bool update_inherits_from; @@ -5975,7 +5843,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, bdrv_ref(top); bdrv_drained_begin(base); - bdrv_graph_wrlock(base); + bdrv_graph_wrlock(); if (!top->drv || !base->drv) { goto exit_wrlock; @@ -6015,7 +5883,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, * That's a FIXME. 
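The graph updates in these hunks stay bracketed by bdrv_drained_begin()/bdrv_drained_end(), which is what keeps them safe once the per-context lock juggling is gone: no requests are in flight while the edges are rewritten. Below is only a toy quiesce point built from an in-flight counter and a condition variable, with invented names and none of the coroutine machinery of the real drain code.

    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t idle;
        int in_flight;
        int quiesced;
    } Node;

    static void request_start(Node *n)
    {
        pthread_mutex_lock(&n->lock);
        n->in_flight++;
        pthread_mutex_unlock(&n->lock);
    }

    static void request_end(Node *n)
    {
        pthread_mutex_lock(&n->lock);
        if (--n->in_flight == 0) {
            pthread_cond_broadcast(&n->idle);
        }
        pthread_mutex_unlock(&n->lock);
    }

    static void drained_begin(Node *n)
    {
        pthread_mutex_lock(&n->lock);
        n->quiesced = 1;                 /* new requests would be held off here */
        while (n->in_flight > 0) {
            pthread_cond_wait(&n->idle, &n->lock);
        }
        pthread_mutex_unlock(&n->lock);
    }

    static void drained_end(Node *n)
    {
        pthread_mutex_lock(&n->lock);
        n->quiesced = 0;
        pthread_mutex_unlock(&n->lock);
    }

    int main(void)
    {
        Node n = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 };

        request_start(&n);
        request_end(&n);
        drained_begin(&n);               /* safe to edit the graph now */
        printf("quiesced=%d in_flight=%d\n", n.quiesced, n.in_flight);
        drained_end(&n);
        return 0;
    }
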
*/ bdrv_replace_node_common(top, base, false, false, &local_err); - bdrv_graph_wrunlock(base); + bdrv_graph_wrunlock(); if (local_err) { error_report_err(local_err); @@ -6027,6 +5895,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, if (c->klass->update_filename) { ret = c->klass->update_filename(c, base, backing_file_str, + backing_mask_protocol, &local_err); if (ret < 0) { /* @@ -6052,7 +5921,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, goto exit; exit_wrlock: - bdrv_graph_wrunlock(base); + bdrv_graph_wrunlock(); exit: bdrv_drained_end(base); bdrv_unref(top); @@ -7037,12 +6906,9 @@ void bdrv_activate_all(Error **errp) GRAPH_RDLOCK_GUARD_MAINLOOP(); for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { - AioContext *aio_context = bdrv_get_aio_context(bs); int ret; - aio_context_acquire(aio_context); ret = bdrv_activate(bs, errp); - aio_context_release(aio_context); if (ret < 0) { bdrv_next_cleanup(&it); return; @@ -7137,20 +7003,10 @@ int bdrv_inactivate_all(void) BlockDriverState *bs = NULL; BdrvNextIterator it; int ret = 0; - GSList *aio_ctxs = NULL, *ctx; GLOBAL_STATE_CODE(); GRAPH_RDLOCK_GUARD_MAINLOOP(); - for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { - AioContext *aio_context = bdrv_get_aio_context(bs); - - if (!g_slist_find(aio_ctxs, aio_context)) { - aio_ctxs = g_slist_prepend(aio_ctxs, aio_context); - aio_context_acquire(aio_context); - } - } - for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { /* Nodes with BDS parents are covered by recursion from the last * parent that gets inactivated. Don't inactivate them a second @@ -7161,17 +7017,10 @@ int bdrv_inactivate_all(void) ret = bdrv_inactivate_recurse(bs); if (ret < 0) { bdrv_next_cleanup(&it); - goto out; + break; } } -out: - for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) { - AioContext *aio_context = ctx->data; - aio_context_release(aio_context); - } - g_slist_free(aio_ctxs); - return ret; } @@ -7257,11 +7106,8 @@ void bdrv_unref(BlockDriverState *bs) static void bdrv_schedule_unref_bh(void *opaque) { BlockDriverState *bs = opaque; - AioContext *ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); bdrv_unref(bs); - aio_context_release(ctx); } /* @@ -7398,8 +7244,6 @@ void bdrv_img_create(const char *filename, const char *fmt, return; } - aio_context_acquire(qemu_get_aio_context()); - /* Create parameter list */ create_opts = qemu_opts_append(create_opts, drv->create_opts); create_opts = qemu_opts_append(create_opts, proto_drv->create_opts); @@ -7517,7 +7361,10 @@ void bdrv_img_create(const char *filename, const char *fmt, goto out; } - if (size == -1) { + /* Parameter 'size' is not needed for detached LUKS header */ + if (size == -1 && + !(!strcmp(fmt, "luks") && + qemu_opt_get_bool(opts, "detached-header", false))) { error_setg(errp, "Image creation needs a size parameter"); goto out; } @@ -7549,7 +7396,6 @@ void bdrv_img_create(const char *filename, const char *fmt, qemu_opts_del(opts); qemu_opts_free(create_opts); error_propagate(errp, local_err); - aio_context_release(qemu_get_aio_context()); } AioContext *bdrv_get_aio_context(BlockDriverState *bs) @@ -7583,33 +7429,6 @@ void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx) bdrv_dec_in_flight(bs); } -void coroutine_fn bdrv_co_lock(BlockDriverState *bs) -{ - AioContext *ctx = bdrv_get_aio_context(bs); - - /* In the main thread, bs->aio_context won't change concurrently */ - assert(qemu_get_current_aio_context() == qemu_get_aio_context()); - - /* - * We're in coroutine 
context, so we already hold the lock of the main - * loop AioContext. Don't lock it twice to avoid deadlocks. - */ - assert(qemu_in_coroutine()); - if (ctx != qemu_get_aio_context()) { - aio_context_acquire(ctx); - } -} - -void coroutine_fn bdrv_co_unlock(BlockDriverState *bs) -{ - AioContext *ctx = bdrv_get_aio_context(bs); - - assert(qemu_in_coroutine()); - if (ctx != qemu_get_aio_context()) { - aio_context_release(ctx); - } -} - static void bdrv_do_remove_aio_context_notifier(BdrvAioNotifier *ban) { GLOBAL_STATE_CODE(); @@ -7728,21 +7547,8 @@ static void bdrv_set_aio_context_commit(void *opaque) BdrvStateSetAioContext *state = (BdrvStateSetAioContext *) opaque; BlockDriverState *bs = (BlockDriverState *) state->bs; AioContext *new_context = state->new_ctx; - AioContext *old_context = bdrv_get_aio_context(bs); - /* - * Take the old AioContex when detaching it from bs. - * At this point, new_context lock is already acquired, and we are now - * also taking old_context. This is safe as long as bdrv_detach_aio_context - * does not call AIO_POLL_WHILE(). - */ - if (old_context != qemu_get_aio_context()) { - aio_context_acquire(old_context); - } bdrv_detach_aio_context(bs); - if (old_context != qemu_get_aio_context()) { - aio_context_release(old_context); - } bdrv_attach_aio_context(bs, new_context); } @@ -7757,10 +7563,6 @@ static TransactionActionDrv set_aio_context = { * * Must be called from the main AioContext. * - * The caller must own the AioContext lock for the old AioContext of bs, but it - * must not own the AioContext lock for new_context (unless new_context is the - * same as the current context of bs). - * * @visited will accumulate all visited BdrvChild objects. The caller is * responsible for freeing the list afterwards. */ @@ -7813,13 +7615,6 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, * * If ignore_child is not NULL, that child (and its subgraph) will not * be touched. - * - * This function still requires the caller to take the bs current - * AioContext lock, otherwise draining will fail since AIO_WAIT_WHILE - * assumes the lock is always held if bs is in another AioContext. - * For the same reason, it temporarily also holds the new AioContext, since - * bdrv_drained_end calls BDRV_POLL_WHILE that assumes the lock is taken too. - * Therefore the new AioContext lock must not be taken by the caller. */ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, BdrvChild *ignore_child, Error **errp) @@ -7827,7 +7622,6 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, Transaction *tran; GHashTable *visited; int ret; - AioContext *old_context = bdrv_get_aio_context(bs); GLOBAL_STATE_CODE(); /* @@ -7846,8 +7640,8 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, /* * Linear phase: go through all callbacks collected in the transaction. - * Run all callbacks collected in the recursion to switch all nodes - * AioContext lock (transaction commit), or undo all changes done in the + * Run all callbacks collected in the recursion to switch every node's + * AioContext (transaction commit), or undo all changes done in the * recursion (transaction abort). */ @@ -7857,34 +7651,7 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, return -EPERM; } - /* - * Release old AioContext, it won't be needed anymore, as all - * bdrv_drained_begin() have been called already. 
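With bdrv_co_lock()/bdrv_co_unlock() removed, the remaining protection for these paths is the rule, asserted through GLOBAL_STATE_CODE(), that they only run in the main loop thread. The helper below is a hypothetical stand-in for that kind of assertion, not the actual QEMU macro.

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Record the "main loop" thread once, then assert on it instead of taking
     * a per-object lock before touching global state. */
    static pthread_t main_thread;
    static int global_counter;

    static void global_state_init(void)
    {
        main_thread = pthread_self();
    }

    static void global_state_code(void)
    {
        assert(pthread_equal(pthread_self(), main_thread));
    }

    static void update_global_state(void)
    {
        global_state_code();     /* cheaper and stricter than lock/unlock pairs */
        global_counter++;
    }

    int main(void)
    {
        global_state_init();
        update_global_state();
        printf("counter=%d\n", global_counter);
        return 0;
    }
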
- */ - if (qemu_get_aio_context() != old_context) { - aio_context_release(old_context); - } - - /* - * Acquire new AioContext since bdrv_drained_end() is going to be called - * after we switched all nodes in the new AioContext, and the function - * assumes that the lock of the bs is always taken. - */ - if (qemu_get_aio_context() != ctx) { - aio_context_acquire(ctx); - } - tran_commit(tran); - - if (qemu_get_aio_context() != ctx) { - aio_context_release(ctx); - } - - /* Re-acquire the old AioContext, since the caller takes and releases it. */ - if (qemu_get_aio_context() != old_context) { - aio_context_acquire(old_context); - } - return 0; } @@ -8006,7 +7773,6 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs, const char *node_name, Error **errp) { BlockDriverState *to_replace_bs = bdrv_find_node(node_name); - AioContext *aio_context; GLOBAL_STATE_CODE(); @@ -8015,12 +7781,8 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs, return NULL; } - aio_context = bdrv_get_aio_context(to_replace_bs); - aio_context_acquire(aio_context); - if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) { - to_replace_bs = NULL; - goto out; + return NULL; } /* We don't want arbitrary node of the BDS chain to be replaced only the top @@ -8033,12 +7795,9 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs, "because it cannot be guaranteed that doing so would not " "lead to an abrupt change of visible data", node_name, parent_bs->node_name); - to_replace_bs = NULL; - goto out; + return NULL; } -out: - aio_context_release(aio_context); return to_replace_bs; } diff --git a/block/backup.c b/block/backup.c index 8aae5836d76..ec29d6b8108 100644 --- a/block/backup.c +++ b/block/backup.c @@ -496,10 +496,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, block_copy_set_speed(bcs, speed); /* Required permissions are taken by copy-before-write filter target */ - bdrv_graph_wrlock(target); + bdrv_graph_wrlock(); block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL, &error_abort); - bdrv_graph_wrunlock(target); + bdrv_graph_wrunlock(); return &job->common; diff --git a/block/blkio.c b/block/blkio.c index bc2f21784c7..882e1c297b4 100644 --- a/block/blkio.c +++ b/block/blkio.c @@ -89,6 +89,9 @@ static int blkio_resize_bounce_pool(BDRVBlkioState *s, int64_t bytes) /* Pad size to reduce frequency of resize calls */ bytes += 128 * 1024; + /* Align the pool size to avoid blkio_alloc_mem_region() failure */ + bytes = QEMU_ALIGN_UP(bytes, s->mem_region_alignment); + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { int ret; diff --git a/block/blklogwrites.c b/block/blklogwrites.c index 84e03f309f3..ed38a93f21b 100644 --- a/block/blklogwrites.c +++ b/block/blklogwrites.c @@ -3,7 +3,7 @@ * * Copyright (c) 2017 Tuomas Tynkkynen * Copyright (c) 2018 Aapo Vienamo - * Copyright (c) 2018 Ari Sundholm + * Copyright (c) 2018-2024 Ari Sundholm * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. @@ -55,9 +55,34 @@ typedef struct { BdrvChild *log_file; uint32_t sectorsize; uint32_t sectorbits; + uint64_t update_interval; + + /* + * The mutable state of the driver, consisting of the current log sector + * and the number of log entries. + * + * May be read and/or written from multiple threads, and the mutex must be + * held when accessing these fields. 
+ */ uint64_t cur_log_sector; uint64_t nr_entries; - uint64_t update_interval; + QemuMutex mutex; + + /* + * The super block sequence number. Non-zero if a super block update is in + * progress. + * + * The mutex must be held when accessing this field. + */ + uint64_t super_update_seq; + + /* + * A coroutine-aware queue to serialize super block updates. + * + * Used with the mutex to ensure that only one thread be updating the super + * block at a time. + */ + CoQueue super_update_queue; } BDRVBlkLogWritesState; static QemuOptsList runtime_opts = { @@ -169,6 +194,9 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags, goto fail; } + qemu_mutex_init(&s->mutex); + qemu_co_queue_init(&s->super_update_queue); + log_append = qemu_opt_get_bool(opts, "log-append", false); if (log_append) { @@ -231,6 +259,8 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags, s->nr_entries = 0; } + s->super_update_seq = 0; + if (!blk_log_writes_sector_size_valid(log_sector_size)) { ret = -EINVAL; error_setg(errp, "Invalid log sector size %"PRIu64, log_sector_size); @@ -251,10 +281,11 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags, ret = 0; fail_log: if (ret < 0) { - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, s->log_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); s->log_file = NULL; + qemu_mutex_destroy(&s->mutex); } fail: qemu_opts_del(opts); @@ -265,10 +296,11 @@ static void blk_log_writes_close(BlockDriverState *bs) { BDRVBlkLogWritesState *s = bs->opaque; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, s->log_file); s->log_file = NULL; - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); + qemu_mutex_destroy(&s->mutex); } static int64_t coroutine_fn GRAPH_RDLOCK @@ -295,7 +327,7 @@ static void blk_log_writes_child_perm(BlockDriverState *bs, BdrvChild *c, static void blk_log_writes_refresh_limits(BlockDriverState *bs, Error **errp) { - BDRVBlkLogWritesState *s = bs->opaque; + const BDRVBlkLogWritesState *s = bs->opaque; bs->bl.request_alignment = s->sectorsize; } @@ -338,15 +370,18 @@ blk_log_writes_co_do_log(BlkLogWritesLogReq *lr) * driver may be modified by other driver operations while waiting for the * I/O to complete. */ + qemu_mutex_lock(&s->mutex); const uint64_t entry_start_sector = s->cur_log_sector; const uint64_t entry_offset = entry_start_sector << s->sectorbits; const uint64_t qiov_aligned_size = ROUND_UP(lr->qiov->size, s->sectorsize); const uint64_t entry_aligned_size = qiov_aligned_size + ROUND_UP(lr->zero_size, s->sectorsize); const uint64_t entry_nr_sectors = entry_aligned_size >> s->sectorbits; + const uint64_t entry_seq = s->nr_entries + 1; - s->nr_entries++; + s->nr_entries = entry_seq; s->cur_log_sector += entry_nr_sectors; + qemu_mutex_unlock(&s->mutex); /* * Write the log entry. 
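blk_log_writes_co_do_log() now takes s->mutex only long enough to reserve the entry's start sector and sequence number, and performs the actual log write after dropping it. The same reservation pattern in plain pthreads, with an invented Log type standing in for the driver state:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        pthread_mutex_t lock;
        uint64_t cur_sector;     /* next free sector in the log */
        uint64_t nr_entries;     /* entries reserved so far */
    } Log;

    /* Reserve space for one entry while holding the lock; the slow write
     * happens after the lock is dropped, so writers serialize only on the
     * bookkeeping, not on the I/O itself. */
    static void log_reserve(Log *log, uint64_t entry_sectors,
                            uint64_t *start_sector, uint64_t *seq)
    {
        pthread_mutex_lock(&log->lock);
        *start_sector = log->cur_sector;
        *seq = ++log->nr_entries;
        log->cur_sector += entry_sectors;
        pthread_mutex_unlock(&log->lock);
    }

    int main(void)
    {
        Log log = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };
        uint64_t sector, seq;

        log_reserve(&log, 8, &sector, &seq);
        printf("entry %llu starts at sector %llu\n",
               (unsigned long long)seq, (unsigned long long)sector);
        /* ...issue the entry write to `sector` here, outside the lock... */
        return 0;
    }
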
Note that if this is a "write zeroes" operation, @@ -366,17 +401,44 @@ blk_log_writes_co_do_log(BlkLogWritesLogReq *lr) /* Update super block on flush or every update interval */ if (lr->log_ret == 0 && ((lr->entry.flags & LOG_FLUSH_FLAG) - || (s->nr_entries % s->update_interval == 0))) + || (entry_seq % s->update_interval == 0))) { struct log_write_super super = { .magic = cpu_to_le64(WRITE_LOG_MAGIC), .version = cpu_to_le64(WRITE_LOG_VERSION), - .nr_entries = cpu_to_le64(s->nr_entries), + .nr_entries = 0, /* updated below */ .sectorsize = cpu_to_le32(s->sectorsize), }; - void *zeroes = g_malloc0(s->sectorsize - sizeof(super)); + void *zeroes; QEMUIOVector qiov; + /* + * Wait if a super block update is already in progress. + * Bail out if a newer update got its turn before us. + */ + WITH_QEMU_LOCK_GUARD(&s->mutex) { + CoQueueWaitFlags wait_flags = 0; + while (s->super_update_seq) { + if (entry_seq < s->super_update_seq) { + return; + } + qemu_co_queue_wait_flags(&s->super_update_queue, + &s->mutex, wait_flags); + + /* + * In case the wait condition remains true after wakeup, + * to avoid starvation, make sure that this request is + * scheduled to rerun next by pushing it to the front of the + * queue. + */ + wait_flags = CO_QUEUE_WAIT_FRONT; + } + s->super_update_seq = entry_seq; + super.nr_entries = cpu_to_le64(s->nr_entries); + } + + zeroes = g_malloc0(s->sectorsize - sizeof(super)); + qemu_iovec_init(&qiov, 2); qemu_iovec_add(&qiov, &super, sizeof(super)); qemu_iovec_add(&qiov, zeroes, s->sectorsize - sizeof(super)); @@ -386,6 +448,13 @@ blk_log_writes_co_do_log(BlkLogWritesLogReq *lr) if (lr->log_ret == 0) { lr->log_ret = bdrv_co_flush(s->log_file->bs); } + + /* The super block has been updated. Let another request have a go. */ + qemu_mutex_lock(&s->mutex); + s->super_update_seq = 0; + (void) qemu_co_queue_next(&s->super_update_queue); + qemu_mutex_unlock(&s->mutex); + qemu_iovec_destroy(&qiov); g_free(zeroes); } @@ -405,7 +474,7 @@ blk_log_writes_co_log(BlockDriverState *bs, uint64_t offset, uint64_t bytes, { QEMUIOVector log_qiov; size_t niov = qiov ? qiov->niov : 0; - BDRVBlkLogWritesState *s = bs->opaque; + const BDRVBlkLogWritesState *s = bs->opaque; BlkLogWritesFileReq fr = { .bs = bs, .offset = offset, diff --git a/block/blkverify.c b/block/blkverify.c index 9b17c466443..ec45d8335ed 100644 --- a/block/blkverify.c +++ b/block/blkverify.c @@ -151,10 +151,10 @@ static void blkverify_close(BlockDriverState *bs) { BDRVBlkverifyState *s = bs->opaque; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, s->test_file); s->test_file = NULL; - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); } static int64_t coroutine_fn GRAPH_RDLOCK diff --git a/block/block-backend.c b/block/block-backend.c index ec211488069..db6f9b92a39 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -44,7 +44,7 @@ struct BlockBackend { char *name; int refcnt; BdrvChild *root; - AioContext *ctx; + AioContext *ctx; /* access with atomic operations only */ DriveInfo *legacy_dinfo; /* null unless created by drive_new() */ QTAILQ_ENTRY(BlockBackend) link; /* for block_backends */ QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */ @@ -390,8 +390,6 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm) * Both sets of permissions can be changed later using blk_set_perm(). * * Return the new BlockBackend on success, null on failure. - * - * Callers must hold the AioContext lock of @bs. 
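The super-block update added above is serialized by a sequence number plus a coroutine queue: a request waits its turn and bails out entirely if a newer entry has already claimed the update. The code below is only a threads-and-condvar approximation of that logic (it omits the requeue-at-the-front refinement), with invented names rather than QEMU's CoQueue API.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t queue;        /* stand-in for the coroutine wait queue */
        uint64_t update_in_flight;   /* sequence being written, 0 = idle */
    } Super;

    /* Returns false if a newer update overtook us and ours is redundant. */
    static bool super_update_begin(Super *s, uint64_t my_seq)
    {
        pthread_mutex_lock(&s->lock);
        while (s->update_in_flight) {
            if (my_seq < s->update_in_flight) {
                pthread_mutex_unlock(&s->lock);
                return false;            /* someone newer already covers us */
            }
            pthread_cond_wait(&s->queue, &s->lock);
        }
        s->update_in_flight = my_seq;
        pthread_mutex_unlock(&s->lock);
        return true;
    }

    static void super_update_end(Super *s)
    {
        pthread_mutex_lock(&s->lock);
        s->update_in_flight = 0;
        pthread_cond_signal(&s->queue);  /* let the next waiter have a go */
        pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
        Super s = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

        if (super_update_begin(&s, 42)) {
            /* ...write the super block covering entry 42 here... */
            super_update_end(&s);
        }
        printf("done\n");
        return 0;
    }
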
*/ BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm, Error **errp) @@ -416,8 +414,6 @@ BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm, * Just as with bdrv_open(), after having called this function the reference to * @options belongs to the block layer (even on failure). * - * Called without holding an AioContext lock. - * * TODO: Remove @filename and @flags; it should be possible to specify a whole * BDS tree just by specifying the @options QDict (or @reference, * alternatively). At the time of adding this function, this is not possible, @@ -429,7 +425,6 @@ BlockBackend *blk_new_open(const char *filename, const char *reference, { BlockBackend *blk; BlockDriverState *bs; - AioContext *ctx; uint64_t perm = 0; uint64_t shared = BLK_PERM_ALL; @@ -459,23 +454,18 @@ BlockBackend *blk_new_open(const char *filename, const char *reference, shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED; } - aio_context_acquire(qemu_get_aio_context()); bs = bdrv_open(filename, reference, options, flags, errp); - aio_context_release(qemu_get_aio_context()); if (!bs) { return NULL; } /* bdrv_open() could have moved bs to a different AioContext */ - ctx = bdrv_get_aio_context(bs); blk = blk_new(bdrv_get_aio_context(bs), perm, shared); blk->perm = perm; blk->shared_perm = shared; - aio_context_acquire(ctx); blk_insert_bs(blk, bs, errp); bdrv_unref(bs); - aio_context_release(ctx); if (!blk->root) { blk_unref(blk); @@ -577,13 +567,9 @@ void blk_remove_all_bs(void) GLOBAL_STATE_CODE(); while ((blk = blk_all_next(blk)) != NULL) { - AioContext *ctx = blk_get_aio_context(blk); - - aio_context_acquire(ctx); if (blk->root) { blk_remove_bs(blk); } - aio_context_release(ctx); } } @@ -613,14 +599,14 @@ BlockDriverState *bdrv_next(BdrvNextIterator *it) /* Must be called from the main loop */ assert(qemu_get_current_aio_context() == qemu_get_aio_context()); + old_bs = it->bs; + /* First, return all root nodes of BlockBackends. In order to avoid * returning a BDS twice when multiple BBs refer to it, we only return it * if the BB is the first one in the parent list of the BDS. */ if (it->phase == BDRV_NEXT_BACKEND_ROOTS) { BlockBackend *old_blk = it->blk; - old_bs = old_blk ? blk_bs(old_blk) : NULL; - do { it->blk = blk_all_next(it->blk); bs = it->blk ? blk_bs(it->blk) : NULL; @@ -634,11 +620,10 @@ BlockDriverState *bdrv_next(BdrvNextIterator *it) if (bs) { bdrv_ref(bs); bdrv_unref(old_bs); + it->bs = bs; return bs; } it->phase = BDRV_NEXT_MONITOR_OWNED; - } else { - old_bs = it->bs; } /* Then return the monitor-owned BDSes without a BB attached. Ignore all @@ -678,13 +663,10 @@ void bdrv_next_cleanup(BdrvNextIterator *it) /* Must be called from the main loop */ assert(qemu_get_current_aio_context() == qemu_get_aio_context()); - if (it->phase == BDRV_NEXT_BACKEND_ROOTS) { - if (it->blk) { - bdrv_unref(blk_bs(it->blk)); - blk_unref(it->blk); - } - } else { - bdrv_unref(it->bs); + bdrv_unref(it->bs); + + if (it->phase == BDRV_NEXT_BACKEND_ROOTS && it->blk) { + blk_unref(it->blk); } bdrv_next_reset(it); @@ -882,14 +864,11 @@ BlockBackend *blk_by_public(BlockBackendPublic *public) /* * Disassociates the currently associated BlockDriverState from @blk. - * - * The caller must hold the AioContext lock for the BlockBackend. 
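bdrv_next() above now remembers the node it last returned in it->bs and drops that reference on the following call, instead of recomputing it from the BlockBackend. The general shape of such a reference-holding iterator, sketched with a hypothetical refcounted list:

    #include <stddef.h>

    typedef struct Node {
        int refcnt;
        struct Node *next;
    } Node;

    static void node_ref(Node *n)   { if (n) n->refcnt++; }
    static void node_unref(Node *n) { if (n) n->refcnt--; /* free at 0 in real code */ }

    typedef struct {
        Node *cur;   /* iterator's own reference to the last returned node */
    } Iter;

    /* Return the next node, keeping a reference on behalf of the iterator
     * and releasing the reference to the previously returned one. */
    static Node *iter_next(Iter *it, Node *head)
    {
        Node *old = it->cur;
        Node *next = old ? old->next : head;

        node_ref(next);
        node_unref(old);
        it->cur = next;
        return next;
    }
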
*/ void blk_remove_bs(BlockBackend *blk) { ThrottleGroupMember *tgm = &blk->public.throttle_group_member; BdrvChild *root; - AioContext *ctx; GLOBAL_STATE_CODE(); @@ -919,30 +898,26 @@ void blk_remove_bs(BlockBackend *blk) root = blk->root; blk->root = NULL; - ctx = bdrv_get_aio_context(root->bs); - bdrv_graph_wrlock(root->bs); + bdrv_graph_wrlock(); bdrv_root_unref_child(root); - bdrv_graph_wrunlock_ctx(ctx); + bdrv_graph_wrunlock(); } /* * Associates a new BlockDriverState with @blk. - * - * Callers must hold the AioContext lock of @bs. */ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) { ThrottleGroupMember *tgm = &blk->public.throttle_group_member; - AioContext *ctx = bdrv_get_aio_context(bs); GLOBAL_STATE_CODE(); bdrv_ref(bs); - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); blk->root = bdrv_root_attach_child(bs, "root", &child_root, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, blk->perm, blk->shared_perm, blk, errp); - bdrv_graph_wrunlock_ctx(ctx); + bdrv_graph_wrunlock(); if (blk->root == NULL) { return -EPERM; } @@ -2435,22 +2410,22 @@ void blk_op_unblock_all(BlockBackend *blk, Error *reason) } } +/** + * Return BB's current AioContext. Note that this context may change + * concurrently at any time, with one exception: If the BB has a root node + * attached, its context will only change through bdrv_try_change_aio_context(), + * which creates a drained section. Therefore, incrementing such a BB's + * in-flight counter will prevent its context from changing. + */ AioContext *blk_get_aio_context(BlockBackend *blk) { - BlockDriverState *bs; IO_CODE(); if (!blk) { return qemu_get_aio_context(); } - bs = blk_bs(blk); - if (bs) { - AioContext *ctx = bdrv_get_aio_context(blk_bs(blk)); - assert(ctx == blk->ctx); - } - - return blk->ctx; + return qatomic_read(&blk->ctx); } int blk_set_aio_context(BlockBackend *blk, AioContext *new_context, @@ -2463,7 +2438,7 @@ int blk_set_aio_context(BlockBackend *blk, AioContext *new_context, GLOBAL_STATE_CODE(); if (!bs) { - blk->ctx = new_context; + qatomic_set(&blk->ctx, new_context); return 0; } @@ -2492,7 +2467,7 @@ static void blk_root_set_aio_ctx_commit(void *opaque) AioContext *new_context = s->new_ctx; ThrottleGroupMember *tgm = &blk->public.throttle_group_member; - blk->ctx = new_context; + qatomic_set(&blk->ctx, new_context); if (tgm->throttle_state) { throttle_group_detach_aio_context(tgm); throttle_group_attach_aio_context(tgm, new_context); @@ -2739,20 +2714,16 @@ int blk_commit_all(void) GRAPH_RDLOCK_GUARD_MAINLOOP(); while ((blk = blk_all_next(blk)) != NULL) { - AioContext *aio_context = blk_get_aio_context(blk); BlockDriverState *unfiltered_bs = bdrv_skip_filters(blk_bs(blk)); - aio_context_acquire(aio_context); if (blk_is_inserted(blk) && bdrv_cow_child(unfiltered_bs)) { int ret; ret = bdrv_commit(unfiltered_bs); if (ret < 0) { - aio_context_release(aio_context); return ret; } } - aio_context_release(aio_context); } return 0; } diff --git a/block/commit.c b/block/commit.c index 69cc75be0c3..7c3fdcb0cae 100644 --- a/block/commit.c +++ b/block/commit.c @@ -42,6 +42,7 @@ typedef struct CommitBlockJob { bool base_read_only; bool chain_frozen; char *backing_file_str; + bool backing_mask_protocol; } CommitBlockJob; static int commit_prepare(Job *job) @@ -61,7 +62,8 @@ static int commit_prepare(Job *job) /* FIXME: bdrv_drop_intermediate treats total failures and partial failures * identically. Further work is needed to disambiguate these cases. 
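With the AioContext lock gone, blk->ctx above is published and read with atomic accessors rather than being protected by a lock. A C11 sketch of that pattern (AioContext stands in as an opaque type here; this is not the QEMU accessor itself):

    #include <stdatomic.h>

    typedef struct AioContext AioContext;   /* opaque stand-in */

    typedef struct {
        _Atomic(AioContext *) ctx;   /* may be read from any thread */
    } Backend;

    static AioContext *backend_get_context(Backend *b)
    {
        /* Readers may race with a concurrent context switch... */
        return atomic_load(&b->ctx);
    }

    static void backend_set_context(Backend *b, AioContext *new_ctx)
    {
        /* ...so the pointer is published atomically instead of under a lock. */
        atomic_store(&b->ctx, new_ctx);
    }
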
*/ return bdrv_drop_intermediate(s->commit_top_bs, s->base_bs, - s->backing_file_str); + s->backing_file_str, + s->backing_mask_protocol); } static void commit_abort(Job *job) @@ -100,9 +102,9 @@ static void commit_abort(Job *job) bdrv_graph_rdunlock_main_loop(); bdrv_drained_begin(commit_top_backing_bs); - bdrv_graph_wrlock(commit_top_backing_bs); + bdrv_graph_wrlock(); bdrv_replace_node(s->commit_top_bs, commit_top_backing_bs, &error_abort); - bdrv_graph_wrunlock(commit_top_backing_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(commit_top_backing_bs); bdrv_unref(s->commit_top_bs); @@ -254,6 +256,7 @@ void commit_start(const char *job_id, BlockDriverState *bs, BlockDriverState *base, BlockDriverState *top, int creation_flags, int64_t speed, BlockdevOnError on_error, const char *backing_file_str, + bool backing_mask_protocol, const char *filter_node_name, Error **errp) { CommitBlockJob *s; @@ -339,7 +342,7 @@ void commit_start(const char *job_id, BlockDriverState *bs, * this is the responsibility of the interface (i.e. whoever calls * commit_start()). */ - bdrv_graph_wrlock(top); + bdrv_graph_wrlock(); s->base_overlay = bdrv_find_overlay(top, base); assert(s->base_overlay); @@ -370,19 +373,19 @@ void commit_start(const char *job_id, BlockDriverState *bs, ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, iter_shared_perms, errp); if (ret < 0) { - bdrv_graph_wrunlock(top); + bdrv_graph_wrunlock(); goto fail; } } if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) { - bdrv_graph_wrunlock(top); + bdrv_graph_wrunlock(); goto fail; } s->chain_frozen = true; ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp); - bdrv_graph_wrunlock(top); + bdrv_graph_wrunlock(); if (ret < 0) { goto fail; @@ -408,6 +411,7 @@ void commit_start(const char *job_id, BlockDriverState *bs, blk_set_disable_request_queuing(s->top, true); s->backing_file_str = g_strdup(backing_file_str); + s->backing_mask_protocol = backing_mask_protocol; s->on_error = on_error; trace_commit_start(bs, base, top, s); @@ -434,9 +438,9 @@ void commit_start(const char *job_id, BlockDriverState *bs, * otherwise this would fail because of lack of permissions. */ if (commit_top_bs) { bdrv_drained_begin(top); - bdrv_graph_wrlock(top); + bdrv_graph_wrlock(); bdrv_replace_node(commit_top_bs, top, &error_abort); - bdrv_graph_wrunlock(top); + bdrv_graph_wrunlock(); bdrv_drained_end(top); } } diff --git a/block/copy-before-write.c b/block/copy-before-write.c index 13972879b1b..8aba27a71d6 100644 --- a/block/copy-before-write.c +++ b/block/copy-before-write.c @@ -407,12 +407,12 @@ static BlockdevOptions *cbw_parse_options(QDict *options, Error **errp) static int cbw_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { + ERRP_GUARD(); BDRVCopyBeforeWriteState *s = bs->opaque; BdrvDirtyBitmap *bitmap = NULL; int64_t cluster_size; g_autoptr(BlockdevOptions) full_opts = NULL; BlockdevOptionsCbw *opts; - AioContext *ctx; int ret; full_opts = cbw_parse_options(options, errp); @@ -435,15 +435,11 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, GRAPH_RDLOCK_GUARD_MAINLOOP(); - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); - if (opts->bitmap) { bitmap = block_dirty_bitmap_lookup(opts->bitmap->node, opts->bitmap->name, NULL, errp); if (!bitmap) { - ret = -EINVAL; - goto out; + return -EINVAL; } } s->on_cbw_error = opts->has_on_cbw_error ? 
opts->on_cbw_error : @@ -461,24 +457,21 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, s->bcs = block_copy_state_new(bs->file, s->target, bitmap, errp); if (!s->bcs) { error_prepend(errp, "Cannot create block-copy-state: "); - ret = -EINVAL; - goto out; + return -EINVAL; } cluster_size = block_copy_cluster_size(s->bcs); s->done_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp); if (!s->done_bitmap) { - ret = -EINVAL; - goto out; + return -EINVAL; } bdrv_disable_dirty_bitmap(s->done_bitmap); /* s->access_bitmap starts equal to bcs bitmap */ s->access_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp); if (!s->access_bitmap) { - ret = -EINVAL; - goto out; + return -EINVAL; } bdrv_disable_dirty_bitmap(s->access_bitmap); bdrv_dirty_bitmap_merge_internal(s->access_bitmap, @@ -487,11 +480,7 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, qemu_co_mutex_init(&s->lock); QLIST_INIT(&s->frozen_read_reqs); - - ret = 0; -out: - aio_context_release(ctx); - return ret; + return 0; } static void cbw_close(BlockDriverState *bs) diff --git a/block/crypto.c b/block/crypto.c index 921933a5e5f..21eed909c1f 100644 --- a/block/crypto.c +++ b/block/crypto.c @@ -39,6 +39,7 @@ typedef struct BlockCrypto BlockCrypto; struct BlockCrypto { QCryptoBlock *block; bool updating_keys; + BdrvChild *header; /* Reference to the detached LUKS header */ }; @@ -63,12 +64,14 @@ static int block_crypto_read_func(QCryptoBlock *block, Error **errp) { BlockDriverState *bs = opaque; + BlockCrypto *crypto = bs->opaque; ssize_t ret; GLOBAL_STATE_CODE(); GRAPH_RDLOCK_GUARD_MAINLOOP(); - ret = bdrv_pread(bs->file, offset, buflen, buf, 0); + ret = bdrv_pread(crypto->header ? crypto->header : bs->file, + offset, buflen, buf, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read encryption header"); return ret; @@ -84,12 +87,14 @@ static int block_crypto_write_func(QCryptoBlock *block, Error **errp) { BlockDriverState *bs = opaque; + BlockCrypto *crypto = bs->opaque; ssize_t ret; GLOBAL_STATE_CODE(); GRAPH_RDLOCK_GUARD_MAINLOOP(); - ret = bdrv_pwrite(bs->file, offset, buflen, buf, 0); + ret = bdrv_pwrite(crypto->header ? 
crypto->header : bs->file, + offset, buflen, buf, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write encryption header"); return ret; @@ -157,6 +162,48 @@ block_crypto_create_init_func(QCryptoBlock *block, size_t headerlen, return ret; } +static int coroutine_fn GRAPH_UNLOCKED +block_crypto_co_format_luks_payload(BlockdevCreateOptionsLUKS *luks_opts, + Error **errp) +{ + BlockDriverState *bs = NULL; + BlockBackend *blk = NULL; + Error *local_error = NULL; + int ret; + + if (luks_opts->size > INT64_MAX) { + return -EFBIG; + } + + bs = bdrv_co_open_blockdev_ref(luks_opts->file, errp); + if (bs == NULL) { + return -EIO; + } + + blk = blk_co_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, + BLK_PERM_ALL, errp); + if (!blk) { + ret = -EPERM; + goto fail; + } + + ret = blk_truncate(blk, luks_opts->size, true, + luks_opts->preallocation, 0, &local_error); + if (ret < 0) { + if (ret == -EFBIG) { + /* Replace the error message with a better one */ + error_free(local_error); + error_setg(errp, "The requested file size is too large"); + } + goto fail; + } + + ret = 0; + +fail: + bdrv_co_unref(bs); + return ret; +} static QemuOptsList block_crypto_runtime_opts_luks = { .name = "crypto", @@ -184,6 +231,7 @@ static QemuOptsList block_crypto_create_opts_luks = { BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG(""), BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG(""), BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME(""), + BLOCK_CRYPTO_OPT_DEF_LUKS_DETACHED_HEADER(""), { /* end of list */ } }, }; @@ -262,6 +310,8 @@ static int block_crypto_open_generic(QCryptoBlockFormat format, int flags, Error **errp) { + ERRP_GUARD(); + BlockCrypto *crypto = bs->opaque; QemuOpts *opts = NULL; int ret; @@ -276,6 +326,13 @@ static int block_crypto_open_generic(QCryptoBlockFormat format, return ret; } + crypto->header = bdrv_open_child(NULL, options, "header", bs, + &child_of_bds, BDRV_CHILD_METADATA, + true, errp); + if (*errp != NULL) { + return -EINVAL; + } + GRAPH_RDLOCK_GUARD_MAINLOOP(); bs->supported_write_flags = BDRV_REQ_FUA & @@ -299,6 +356,9 @@ static int block_crypto_open_generic(QCryptoBlockFormat format, if (flags & BDRV_O_NO_IO) { cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; } + if (crypto->header != NULL) { + cflags |= QCRYPTO_BLOCK_OPEN_DETACHED; + } crypto->block = qcrypto_block_open(open_opts, NULL, block_crypto_read_func, bs, @@ -324,7 +384,9 @@ static int block_crypto_open_generic(QCryptoBlockFormat format, static int coroutine_fn GRAPH_UNLOCKED block_crypto_co_create_generic(BlockDriverState *bs, int64_t size, QCryptoBlockCreateOptions *opts, - PreallocMode prealloc, Error **errp) + PreallocMode prealloc, + unsigned int flags, + Error **errp) { int ret; BlockBackend *blk; @@ -344,7 +406,7 @@ block_crypto_co_create_generic(BlockDriverState *bs, int64_t size, data = (struct BlockCryptoCreateData) { .blk = blk, - .size = size, + .size = flags & QCRYPTO_BLOCK_CREATE_DETACHED ? 
0 : size, .prealloc = prealloc, }; @@ -352,6 +414,7 @@ block_crypto_co_create_generic(BlockDriverState *bs, int64_t size, block_crypto_create_init_func, block_crypto_create_write_func, &data, + flags, errp); if (!crypto) { @@ -638,17 +701,27 @@ static int coroutine_fn GRAPH_UNLOCKED block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp) { BlockdevCreateOptionsLUKS *luks_opts; + BlockDriverState *hdr_bs = NULL; BlockDriverState *bs = NULL; QCryptoBlockCreateOptions create_opts; PreallocMode preallocation = PREALLOC_MODE_OFF; + unsigned int cflags = 0; int ret; assert(create_options->driver == BLOCKDEV_DRIVER_LUKS); luks_opts = &create_options->u.luks; - bs = bdrv_co_open_blockdev_ref(luks_opts->file, errp); - if (bs == NULL) { - return -EIO; + if (luks_opts->header == NULL && luks_opts->file == NULL) { + error_setg(errp, "Either the parameter 'header' or 'file' must " + "be specified"); + return -EINVAL; + } + + if ((luks_opts->preallocation != PREALLOC_MODE_OFF) && + (luks_opts->file == NULL)) { + error_setg(errp, "Parameter 'preallocation' requires 'file' to be " + "specified for formatting LUKS disk"); + return -EINVAL; } create_opts = (QCryptoBlockCreateOptions) { @@ -660,15 +733,52 @@ block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp) preallocation = luks_opts->preallocation; } - ret = block_crypto_co_create_generic(bs, luks_opts->size, &create_opts, - preallocation, errp); - if (ret < 0) { - goto fail; + if (luks_opts->header) { + /* LUKS volume with detached header */ + hdr_bs = bdrv_co_open_blockdev_ref(luks_opts->header, errp); + if (hdr_bs == NULL) { + return -EIO; + } + + cflags |= QCRYPTO_BLOCK_CREATE_DETACHED; + + /* Format the LUKS header node */ + ret = block_crypto_co_create_generic(hdr_bs, 0, &create_opts, + PREALLOC_MODE_OFF, cflags, errp); + if (ret < 0) { + goto fail; + } + + /* Format the LUKS payload node */ + if (luks_opts->file) { + ret = block_crypto_co_format_luks_payload(luks_opts, errp); + if (ret < 0) { + goto fail; + } + } + } else if (luks_opts->file) { + /* LUKS volume with none-detached header */ + bs = bdrv_co_open_blockdev_ref(luks_opts->file, errp); + if (bs == NULL) { + return -EIO; + } + + ret = block_crypto_co_create_generic(bs, luks_opts->size, &create_opts, + preallocation, cflags, errp); + if (ret < 0) { + goto fail; + } } ret = 0; fail: - bdrv_co_unref(bs); + if (hdr_bs != NULL) { + bdrv_co_unref(hdr_bs); + } + + if (bs != NULL) { + bdrv_co_unref(bs); + } return ret; } @@ -682,6 +792,9 @@ block_crypto_co_create_opts_luks(BlockDriver *drv, const char *filename, PreallocMode prealloc; char *buf = NULL; int64_t size; + bool detached_hdr = + qemu_opt_get_bool(opts, "detached-header", false); + unsigned int cflags = 0; int ret; Error *local_err = NULL; @@ -721,8 +834,13 @@ block_crypto_co_create_opts_luks(BlockDriver *drv, const char *filename, goto fail; } + if (detached_hdr) { + cflags |= QCRYPTO_BLOCK_CREATE_DETACHED; + } + /* Create format layer */ - ret = block_crypto_co_create_generic(bs, size, create_opts, prealloc, errp); + ret = block_crypto_co_create_generic(bs, size, create_opts, + prealloc, cflags, errp); if (ret < 0) { goto fail; } diff --git a/block/crypto.h b/block/crypto.h index 72e792c9af1..dc3d2d5ed91 100644 --- a/block/crypto.h +++ b/block/crypto.h @@ -41,6 +41,7 @@ #define BLOCK_CRYPTO_OPT_LUKS_IVGEN_HASH_ALG "ivgen-hash-alg" #define BLOCK_CRYPTO_OPT_LUKS_HASH_ALG "hash-alg" #define BLOCK_CRYPTO_OPT_LUKS_ITER_TIME "iter-time" +#define BLOCK_CRYPTO_OPT_LUKS_DETACHED_HEADER 
"detached-header" #define BLOCK_CRYPTO_OPT_LUKS_KEYSLOT "keyslot" #define BLOCK_CRYPTO_OPT_LUKS_STATE "state" #define BLOCK_CRYPTO_OPT_LUKS_OLD_SECRET "old-secret" @@ -100,6 +101,13 @@ .help = "Select new state of affected keyslots (active/inactive)",\ } +#define BLOCK_CRYPTO_OPT_DEF_LUKS_DETACHED_HEADER(prefix) \ + { \ + .name = prefix BLOCK_CRYPTO_OPT_LUKS_DETACHED_HEADER, \ + .type = QEMU_OPT_BOOL, \ + .help = "Create a detached LUKS header", \ + } + #define BLOCK_CRYPTO_OPT_DEF_LUKS_KEYSLOT(prefix) \ { \ .name = prefix BLOCK_CRYPTO_OPT_LUKS_KEYSLOT, \ diff --git a/block/export/export.c b/block/export/export.c index a8f274e5268..6d51ae8ed78 100644 --- a/block/export/export.c +++ b/block/export/export.c @@ -114,7 +114,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) } ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); if (export->iothread) { IOThread *iothread; @@ -133,8 +132,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) set_context_errp = fixed_iothread ? errp : NULL; ret = bdrv_try_change_aio_context(bs, new_ctx, NULL, set_context_errp); if (ret == 0) { - aio_context_release(ctx); - aio_context_acquire(new_ctx); ctx = new_ctx; } else if (fixed_iothread) { goto fail; @@ -191,8 +188,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) assert(exp->blk != NULL); QLIST_INSERT_HEAD(&block_exports, exp, next); - - aio_context_release(ctx); return exp; fail: @@ -200,7 +195,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) blk_set_dev_ops(blk, NULL, NULL); blk_unref(blk); } - aio_context_release(ctx); if (exp) { g_free(exp->id); g_free(exp); @@ -218,9 +212,6 @@ void blk_exp_ref(BlockExport *exp) static void blk_exp_delete_bh(void *opaque) { BlockExport *exp = opaque; - AioContext *aio_context = exp->ctx; - - aio_context_acquire(aio_context); assert(exp->refcount == 0); QLIST_REMOVE(exp, next); @@ -230,8 +221,6 @@ static void blk_exp_delete_bh(void *opaque) qapi_event_send_block_export_deleted(exp->id); g_free(exp->id); g_free(exp); - - aio_context_release(aio_context); } void blk_exp_unref(BlockExport *exp) @@ -249,22 +238,16 @@ void blk_exp_unref(BlockExport *exp) * connections and other internally held references start to shut down. When * the function returns, there may still be active references while the export * is in the process of shutting down. - * - * Acquires exp->ctx internally. Callers must *not* hold the lock. */ void blk_exp_request_shutdown(BlockExport *exp) { - AioContext *aio_context = exp->ctx; - - aio_context_acquire(aio_context); - /* * If the user doesn't own the export any more, it is already shutting * down. We must not call .request_shutdown and decrease the refcount a * second time. 
*/ if (!exp->user_owned) { - goto out; + return; } exp->drv->request_shutdown(exp); @@ -272,9 +255,6 @@ void blk_exp_request_shutdown(BlockExport *exp) assert(exp->user_owned); exp->user_owned = false; blk_exp_unref(exp); - -out: - aio_context_release(aio_context); } /* diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c index 16f48388d38..50c358e8cd5 100644 --- a/block/export/vhost-user-blk-server.c +++ b/block/export/vhost-user-blk-server.c @@ -278,7 +278,6 @@ static void vu_blk_exp_resize(void *opaque) vu_config_change_msg(&vexp->vu_server.vu_dev); } -/* Called with vexp->export.ctx acquired */ static void vu_blk_drained_begin(void *opaque) { VuBlkExport *vexp = opaque; @@ -287,7 +286,6 @@ static void vu_blk_drained_begin(void *opaque) vhost_user_server_detach_aio_context(&vexp->vu_server); } -/* Called with vexp->export.blk AioContext acquired */ static void vu_blk_drained_end(void *opaque) { VuBlkExport *vexp = opaque; @@ -300,8 +298,6 @@ static void vu_blk_drained_end(void *opaque) * Ensures that bdrv_drained_begin() waits until in-flight requests complete * and the server->co_trip coroutine has terminated. It will be restarted in * vhost_user_server_attach_aio_context(). - * - * Called with vexp->export.ctx acquired. */ static bool vu_blk_drained_poll(void *opaque) { diff --git a/block/file-posix.c b/block/file-posix.c index b862406c719..35684f7e21c 100644 --- a/block/file-posix.c +++ b/block/file-posix.c @@ -712,17 +712,11 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, #ifdef CONFIG_LINUX_AIO /* Currently Linux does AIO only for files opened with O_DIRECT */ - if (s->use_linux_aio) { - if (!(s->open_flags & O_DIRECT)) { - error_setg(errp, "aio=native was specified, but it requires " - "cache.direct=on, which was not specified."); - ret = -EINVAL; - goto fail; - } - if (!aio_setup_linux_aio(bdrv_get_aio_context(bs), errp)) { - error_prepend(errp, "Unable to use native AIO: "); - goto fail; - } + if (s->use_linux_aio && !(s->open_flags & O_DIRECT)) { + error_setg(errp, "aio=native was specified, but it requires " + "cache.direct=on, which was not specified."); + ret = -EINVAL; + goto fail; } #else if (s->use_linux_aio) { @@ -733,14 +727,7 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, } #endif /* !defined(CONFIG_LINUX_AIO) */ -#ifdef CONFIG_LINUX_IO_URING - if (s->use_linux_io_uring) { - if (!aio_setup_linux_io_uring(bdrv_get_aio_context(bs), errp)) { - error_prepend(errp, "Unable to use io_uring: "); - goto fail; - } - } -#else +#ifndef CONFIG_LINUX_IO_URING if (s->use_linux_io_uring) { error_setg(errp, "aio=io_uring was specified, but is not supported " "in this build."); @@ -2444,6 +2431,48 @@ static bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) return true; } +#ifdef CONFIG_LINUX_IO_URING +static inline bool raw_check_linux_io_uring(BDRVRawState *s) +{ + Error *local_err = NULL; + AioContext *ctx; + + if (!s->use_linux_io_uring) { + return false; + } + + ctx = qemu_get_current_aio_context(); + if (unlikely(!aio_setup_linux_io_uring(ctx, &local_err))) { + error_reportf_err(local_err, "Unable to use linux io_uring, " + "falling back to thread pool: "); + s->use_linux_io_uring = false; + return false; + } + return true; +} +#endif + +#ifdef CONFIG_LINUX_AIO +static inline bool raw_check_linux_aio(BDRVRawState *s) +{ + Error *local_err = NULL; + AioContext *ctx; + + if (!s->use_linux_aio) { + return false; + } + + ctx = qemu_get_current_aio_context(); + if 
(unlikely(!aio_setup_linux_aio(ctx, &local_err))) { + error_reportf_err(local_err, "Unable to use Linux AIO, " + "falling back to thread pool: "); + s->use_linux_aio = false; + return false; + } + return true; +} +#endif + static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr, uint64_t bytes, QEMUIOVector *qiov, int type) { @@ -2474,13 +2503,13 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr, if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) { type |= QEMU_AIO_MISALIGNED; #ifdef CONFIG_LINUX_IO_URING - } else if (s->use_linux_io_uring) { + } else if (raw_check_linux_io_uring(s)) { assert(qiov->size == bytes); ret = luring_co_submit(bs, s->fd, offset, qiov, type); goto out; #endif #ifdef CONFIG_LINUX_AIO - } else if (s->use_linux_aio) { + } else if (raw_check_linux_aio(s)) { assert(qiov->size == bytes); ret = laio_co_submit(s->fd, offset, qiov, type, s->aio_max_batch); @@ -2567,39 +2596,13 @@ static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs) }; #ifdef CONFIG_LINUX_IO_URING - if (s->use_linux_io_uring) { + if (raw_check_linux_io_uring(s)) { return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH); } #endif return raw_thread_pool_submit(handle_aiocb_flush, &acb); } -static void raw_aio_attach_aio_context(BlockDriverState *bs, - AioContext *new_context) -{ - BDRVRawState __attribute__((unused)) *s = bs->opaque; -#ifdef CONFIG_LINUX_AIO - if (s->use_linux_aio) { - Error *local_err = NULL; - if (!aio_setup_linux_aio(new_context, &local_err)) { - error_reportf_err(local_err, "Unable to use native AIO, " - "falling back to thread pool: "); - s->use_linux_aio = false; - } - } -#endif -#ifdef CONFIG_LINUX_IO_URING - if (s->use_linux_io_uring) { - Error *local_err = NULL; - if (!aio_setup_linux_io_uring(new_context, &local_err)) { - error_reportf_err(local_err, "Unable to use linux io_uring, " - "falling back to thread pool: "); - s->use_linux_io_uring = false; - } - } -#endif -} - static void raw_close(BlockDriverState *bs) { BDRVRawState *s = bs->opaque; @@ -3896,7 +3899,6 @@ BlockDriver bdrv_file = { .bdrv_co_copy_range_from = raw_co_copy_range_from, .bdrv_co_copy_range_to = raw_co_copy_range_to, .bdrv_refresh_limits = raw_refresh_limits, - .bdrv_attach_aio_context = raw_aio_attach_aio_context, .bdrv_co_truncate = raw_co_truncate, .bdrv_co_getlength = raw_co_getlength, @@ -4266,7 +4268,6 @@ static BlockDriver bdrv_host_device = { .bdrv_co_copy_range_from = raw_co_copy_range_from, .bdrv_co_copy_range_to = raw_co_copy_range_to, .bdrv_refresh_limits = raw_refresh_limits, - .bdrv_attach_aio_context = raw_aio_attach_aio_context, .bdrv_co_truncate = raw_co_truncate, .bdrv_co_getlength = raw_co_getlength, @@ -4402,7 +4403,6 @@ static BlockDriver bdrv_host_cdrom = { .bdrv_co_pwritev = raw_co_pwritev, .bdrv_co_flush_to_disk = raw_co_flush_to_disk, .bdrv_refresh_limits = cdrom_refresh_limits, - .bdrv_attach_aio_context = raw_aio_attach_aio_context, .bdrv_co_truncate = raw_co_truncate, .bdrv_co_getlength = raw_co_getlength, @@ -4528,7 +4528,6 @@ static BlockDriver bdrv_host_cdrom = { .bdrv_co_pwritev = raw_co_pwritev, .bdrv_co_flush_to_disk = raw_co_flush_to_disk, .bdrv_refresh_limits = cdrom_refresh_limits, - .bdrv_attach_aio_context = raw_aio_attach_aio_context, .bdrv_co_truncate = raw_co_truncate, .bdrv_co_getlength = raw_co_getlength, diff --git a/block/graph-lock.c b/block/graph-lock.c index 079e878d9b4..c81162b1473 100644 --- a/block/graph-lock.c +++ b/block/graph-lock.c @@ -106,27 +106,12 @@ static uint32_t 
reader_count(void) return rd; } -void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs) +void no_coroutine_fn bdrv_graph_wrlock(void) { - AioContext *ctx = NULL; - GLOBAL_STATE_CODE(); assert(!qatomic_read(&has_writer)); assert(!qemu_in_coroutine()); - /* - * Release only non-mainloop AioContext. The mainloop often relies on the - * BQL and doesn't lock the main AioContext before doing things. - */ - if (bs) { - ctx = bdrv_get_aio_context(bs); - if (ctx != qemu_get_aio_context()) { - aio_context_release(ctx); - } else { - ctx = NULL; - } - } - /* Make sure that constantly arriving new I/O doesn't cause starvation */ bdrv_drain_all_begin_nopoll(); @@ -155,27 +140,13 @@ void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs) } while (reader_count() >= 1); bdrv_drain_all_end(); - - if (ctx) { - aio_context_acquire(bdrv_get_aio_context(bs)); - } } -void no_coroutine_fn bdrv_graph_wrunlock_ctx(AioContext *ctx) +void no_coroutine_fn bdrv_graph_wrunlock(void) { GLOBAL_STATE_CODE(); assert(qatomic_read(&has_writer)); - /* - * Release only non-mainloop AioContext. The mainloop often relies on the - * BQL and doesn't lock the main AioContext before doing things. - */ - if (ctx && ctx != qemu_get_aio_context()) { - aio_context_release(ctx); - } else { - ctx = NULL; - } - WITH_QEMU_LOCK_GUARD(&aio_context_list_lock) { /* * No need for memory barriers, this works in pair with @@ -197,17 +168,6 @@ void no_coroutine_fn bdrv_graph_wrunlock_ctx(AioContext *ctx) * progress. */ aio_bh_poll(qemu_get_aio_context()); - - if (ctx) { - aio_context_acquire(ctx); - } -} - -void no_coroutine_fn bdrv_graph_wrunlock(BlockDriverState *bs) -{ - AioContext *ctx = bs ? bdrv_get_aio_context(bs) : NULL; - - bdrv_graph_wrunlock_ctx(ctx); } void coroutine_fn bdrv_graph_co_rdlock(void) diff --git a/block/io.c b/block/io.c index d202987770d..7217cf811b4 100644 --- a/block/io.c +++ b/block/io.c @@ -294,8 +294,6 @@ static void bdrv_co_drain_bh_cb(void *opaque) BlockDriverState *bs = data->bs; if (bs) { - AioContext *ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); bdrv_dec_in_flight(bs); if (data->begin) { bdrv_do_drained_begin(bs, data->parent, data->poll); @@ -303,7 +301,6 @@ static void bdrv_co_drain_bh_cb(void *opaque) assert(!data->poll); bdrv_do_drained_end(bs, data->parent); } - aio_context_release(ctx); } else { assert(data->begin); bdrv_drain_all_begin(); @@ -320,8 +317,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, { BdrvCoDrainData data; Coroutine *self = qemu_coroutine_self(); - AioContext *ctx = bdrv_get_aio_context(bs); - AioContext *co_ctx = qemu_coroutine_get_aio_context(self); /* Calling bdrv_drain() from a BH ensures the current coroutine yields and * other coroutines run if they were queued by aio_co_enter(). */ @@ -340,17 +335,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, bdrv_inc_in_flight(bs); } - /* - * Temporarily drop the lock across yield or we would get deadlocks. - * bdrv_co_drain_bh_cb() reaquires the lock as needed. - * - * When we yield below, the lock for the current context will be - * released, so if this is actually the lock that protects bs, don't drop - * it a second time. 
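bdrv_graph_wrlock() above no longer has to juggle an AioContext argument: the writer simply marks itself, quiesces new I/O and waits for the reader count to reach zero. A heavily simplified, single-writer sketch of that wait (the real lock keeps per-context reader counts and drains in-flight requests first; writers are assumed to be serialized already, as the BQL does in QEMU):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <sched.h>

    static atomic_int readers;      /* number of active readers */
    static atomic_bool has_writer;  /* a writer is waiting or active */

    static void read_lock(void)
    {
        for (;;) {
            atomic_fetch_add(&readers, 1);
            if (!atomic_load(&has_writer)) {
                return;                     /* no writer: read side held */
            }
            atomic_fetch_sub(&readers, 1);  /* back off and let the writer in */
            while (atomic_load(&has_writer)) {
                sched_yield();
            }
        }
    }

    static void read_unlock(void)
    {
        atomic_fetch_sub(&readers, 1);
    }

    static void write_lock(void)
    {
        atomic_store(&has_writer, true);
        while (atomic_load(&readers) > 0) { /* wait for in-flight readers */
            sched_yield();
        }
    }

    static void write_unlock(void)
    {
        atomic_store(&has_writer, false);
    }
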
- */ - if (ctx != co_ctx) { - aio_context_release(ctx); - } replay_bh_schedule_oneshot_event(qemu_get_aio_context(), bdrv_co_drain_bh_cb, &data); @@ -358,11 +342,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, /* If we are resumed from some other event (such as an aio completion or a * timer callback), it is a bug in the caller that should be fixed. */ assert(data.done); - - /* Reacquire the AioContext of bs if we dropped it */ - if (ctx != co_ctx) { - aio_context_acquire(ctx); - } } static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent, @@ -478,13 +457,12 @@ static bool bdrv_drain_all_poll(void) GLOBAL_STATE_CODE(); GRAPH_RDLOCK_GUARD_MAINLOOP(); - /* bdrv_drain_poll() can't make changes to the graph and we are holding the - * main AioContext lock, so iterating bdrv_next_all_states() is safe. */ + /* + * bdrv_drain_poll() can't make changes to the graph and we hold the BQL, + * so iterating bdrv_next_all_states() is safe. + */ while ((bs = bdrv_next_all_states(bs))) { - AioContext *aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); result |= bdrv_drain_poll(bs, NULL, true); - aio_context_release(aio_context); } return result; @@ -525,11 +503,7 @@ void bdrv_drain_all_begin_nopoll(void) /* Quiesce all nodes, without polling in-flight requests yet. The graph * cannot change during this loop. */ while ((bs = bdrv_next_all_states(bs))) { - AioContext *aio_context = bdrv_get_aio_context(bs); - - aio_context_acquire(aio_context); bdrv_do_drained_begin(bs, NULL, false); - aio_context_release(aio_context); } } @@ -588,11 +562,7 @@ void bdrv_drain_all_end(void) } while ((bs = bdrv_next_all_states(bs))) { - AioContext *aio_context = bdrv_get_aio_context(bs); - - aio_context_acquire(aio_context); bdrv_do_drained_end(bs, NULL); - aio_context_release(aio_context); } assert(qemu_get_current_aio_context() == qemu_get_aio_context()); @@ -1756,22 +1726,29 @@ static int bdrv_pad_request(BlockDriverState *bs, return 0; } - sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes, - &sliced_head, &sliced_tail, - &sliced_niov); - - /* Guaranteed by bdrv_check_request32() */ - assert(*bytes <= SIZE_MAX); - ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov, - sliced_head, *bytes); - if (ret < 0) { - bdrv_padding_finalize(pad); - return ret; + /* + * For prefetching in stream_populate(), no qiov is passed along, because + * only copy-on-read matters. 
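As the comment above notes, stream prefetching passes no caller buffer, so the padding code that follows only builds a bounce buffer when one was supplied. A standalone sketch of that alignment arithmetic, with a hypothetical helper:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Grow [*offset, *offset + *bytes) to the given alignment.  When no
     * caller buffer exists (e.g. a pure prefetch), only the range is
     * adjusted and no padded buffer needs to be built. */
    static void pad_request(uint64_t *offset, uint64_t *bytes,
                            uint64_t align, const void *caller_buf,
                            bool *build_padded_buf)
    {
        uint64_t head = *offset % align;
        uint64_t tail = (align - (*offset + *bytes) % align) % align;

        *offset -= head;
        *bytes  += head + tail;
        *build_padded_buf = caller_buf != NULL && (head || tail);
    }
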
+ */ + if (*qiov) { + sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes, + &sliced_head, &sliced_tail, + &sliced_niov); + + /* Guaranteed by bdrv_check_request32() */ + assert(*bytes <= SIZE_MAX); + ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov, + sliced_head, *bytes); + if (ret < 0) { + bdrv_padding_finalize(pad); + return ret; + } + *qiov = &pad->local_qiov; + *qiov_offset = 0; } + *bytes += pad->head + pad->tail; *offset -= pad->head; - *qiov = &pad->local_qiov; - *qiov_offset = 0; if (padded) { *padded = true; } @@ -2368,15 +2345,10 @@ int bdrv_flush_all(void) } for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { - AioContext *aio_context = bdrv_get_aio_context(bs); - int ret; - - aio_context_acquire(aio_context); - ret = bdrv_flush(bs); + int ret = bdrv_flush(bs); if (ret < 0 && !result) { result = ret; } - aio_context_release(aio_context); } return result; diff --git a/block/io_uring.c b/block/io_uring.c index 7cdd00e9f16..d11b2051abd 100644 --- a/block/io_uring.c +++ b/block/io_uring.c @@ -49,7 +49,7 @@ typedef struct LuringQueue { QSIMPLEQ_HEAD(, LuringAIOCB) submit_queue; } LuringQueue; -typedef struct LuringState { +struct LuringState { AioContext *aio_context; struct io_uring ring; @@ -58,7 +58,7 @@ typedef struct LuringState { LuringQueue io_q; QEMUBH *completion_bh; -} LuringState; +}; /** * luring_resubmit: @@ -102,7 +102,7 @@ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, /* Update sqe */ luringcb->sqeq.off += nread; - luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov; + luringcb->sqeq.addr = (uintptr_t)luringcb->resubmit_qiov.iov; luringcb->sqeq.len = luringcb->resubmit_qiov.niov; luring_resubmit(s, luringcb); @@ -432,7 +432,7 @@ LuringState *luring_init(Error **errp) rc = io_uring_queue_init(MAX_ENTRIES, ring, 0); if (rc < 0) { - error_setg_errno(errp, errno, "failed to init linux io_uring ring"); + error_setg_errno(errp, -rc, "failed to init linux io_uring ring"); g_free(s); return NULL; } diff --git a/block/meson.build b/block/meson.build index 59ff6d380c0..e1f03fd773e 100644 --- a/block/meson.build +++ b/block/meson.build @@ -88,10 +88,15 @@ if get_option('parallels').allowed() block_ss.add(files('parallels.c', 'parallels-ext.c')) endif -block_ss.add(when: 'CONFIG_WIN32', if_true: files('file-win32.c', 'win32-aio.c')) -block_ss.add(when: 'CONFIG_POSIX', if_true: [files('file-posix.c'), coref, iokit]) +if host_os == 'windows' + block_ss.add(files('file-win32.c', 'win32-aio.c')) +else + block_ss.add(files('file-posix.c'), coref, iokit) +endif block_ss.add(when: libiscsi, if_true: files('iscsi-opts.c')) -block_ss.add(when: 'CONFIG_LINUX', if_true: files('nvme.c')) +if host_os == 'linux' + block_ss.add(files('nvme.c')) +endif if get_option('replication').allowed() block_ss.add(files('replication.c')) endif diff --git a/block/mirror.c b/block/mirror.c index cd9d3ad4a80..1bdce3b657d 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -479,9 +479,9 @@ static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset, return bytes_handled; } -static void coroutine_fn GRAPH_RDLOCK mirror_iteration(MirrorBlockJob *s) +static void coroutine_fn GRAPH_UNLOCKED mirror_iteration(MirrorBlockJob *s) { - BlockDriverState *source = s->mirror_top_bs->backing->bs; + BlockDriverState *source; MirrorOp *pseudo_op; int64_t offset; /* At least the first dirty chunk is mirrored in one iteration. 
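The io_uring change above reports the call's negative return value instead of errno, since liburing-style functions return -errno and need not leave the global errno meaningful. A small illustration of the difference (init_ring is a stand-in, not the liburing API):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Imitates a liburing-style call: returns 0 or a negative errno value
     * and does not necessarily set the global errno. */
    static int init_ring(unsigned entries)
    {
        return entries == 0 ? -EINVAL : 0;
    }

    static int setup(unsigned entries)
    {
        int rc = init_ring(entries);
        if (rc < 0) {
            /* Use the return value, not errno, which may be unrelated. */
            fprintf(stderr, "ring setup failed: %s\n", strerror(-rc));
            return rc;
        }
        return 0;
    }
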
*/ @@ -489,6 +489,10 @@ static void coroutine_fn GRAPH_RDLOCK mirror_iteration(MirrorBlockJob *s) bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target)); int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES); + bdrv_graph_co_rdlock(); + source = s->mirror_top_bs->backing->bs; + bdrv_graph_co_rdunlock(); + bdrv_dirty_bitmap_lock(s->dirty_bitmap); offset = bdrv_dirty_iter_next(s->dbi); if (offset < 0) { @@ -662,7 +666,6 @@ static int mirror_exit_common(Job *job) MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); BlockJob *bjob = &s->common; MirrorBDSOpaque *bs_opaque; - AioContext *replace_aio_context = NULL; BlockDriverState *src; BlockDriverState *target_bs; BlockDriverState *mirror_top_bs; @@ -677,7 +680,6 @@ static int mirror_exit_common(Job *job) } s->prepared = true; - aio_context_acquire(qemu_get_aio_context()); bdrv_graph_rdlock_main_loop(); mirror_top_bs = s->mirror_top_bs; @@ -742,11 +744,6 @@ static int mirror_exit_common(Job *job) } bdrv_graph_rdunlock_main_loop(); - if (s->to_replace) { - replace_aio_context = bdrv_get_aio_context(s->to_replace); - aio_context_acquire(replace_aio_context); - } - if (s->should_complete && !abort) { BlockDriverState *to_replace = s->to_replace ?: src; bool ro = bdrv_is_read_only(to_replace); @@ -764,7 +761,7 @@ static int mirror_exit_common(Job *job) * check for an op blocker on @to_replace, and we have our own * there. */ - bdrv_graph_wrlock(target_bs); + bdrv_graph_wrlock(); if (bdrv_recurse_can_replace(src, to_replace)) { bdrv_replace_node(to_replace, target_bs, &local_err); } else { @@ -773,7 +770,7 @@ static int mirror_exit_common(Job *job) "would not lead to an abrupt change of visible data", to_replace->node_name, target_bs->node_name); } - bdrv_graph_wrunlock(target_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(to_replace); if (local_err) { error_report_err(local_err); @@ -785,9 +782,6 @@ static int mirror_exit_common(Job *job) error_free(s->replace_blocker); bdrv_unref(s->to_replace); } - if (replace_aio_context) { - aio_context_release(replace_aio_context); - } g_free(s->replaces); /* @@ -796,9 +790,9 @@ static int mirror_exit_common(Job *job) * valid. */ block_job_remove_all_bdrv(bjob); - bdrv_graph_wrlock(mirror_top_bs); + bdrv_graph_wrlock(); bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort); - bdrv_graph_wrunlock(mirror_top_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(target_bs); bdrv_unref(target_bs); @@ -811,8 +805,6 @@ static int mirror_exit_common(Job *job) bdrv_unref(mirror_top_bs); bdrv_unref(src); - aio_context_release(qemu_get_aio_context()); - return ret; } @@ -1078,9 +1070,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) mirror_wait_for_free_in_flight_slot(s); continue; } else if (cnt != 0) { - bdrv_graph_co_rdlock(); mirror_iteration(s); - bdrv_graph_co_rdunlock(); } } @@ -1191,24 +1181,17 @@ static void mirror_complete(Job *job, Error **errp) /* block all operations on to_replace bs */ if (s->replaces) { - AioContext *replace_aio_context; - s->to_replace = bdrv_find_node(s->replaces); if (!s->to_replace) { error_setg(errp, "Node name '%s' not found", s->replaces); return; } - replace_aio_context = bdrv_get_aio_context(s->to_replace); - aio_context_acquire(replace_aio_context); - /* TODO Translate this into child freeze system. 
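mirror_iteration() above now takes the graph read lock itself, only around the dereference of the backing pointer, and runs the rest of the iteration unlocked. The locking shape, sketched with a POSIX rwlock (keeping the node alive is handled separately in the real code, by references and drained sections):

    #include <pthread.h>

    typedef struct Node Node;

    static pthread_rwlock_t graph_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Snapshot a pointer that is protected by the graph lock, then do the
     * long-running work without holding the lock. */
    static void iteration(Node **source_field)
    {
        Node *source;

        pthread_rwlock_rdlock(&graph_lock);
        source = *source_field;          /* the only access needing the lock */
        pthread_rwlock_unlock(&graph_lock);

        /* ... issue I/O against 'source' here, lock not held ... */
        (void)source;
    }
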
*/ error_setg(&s->replace_blocker, "block device is in use by block-job-complete"); bdrv_op_block_all(s->to_replace, s->replace_blocker); bdrv_ref(s->to_replace); - - aio_context_release(replace_aio_context); } s->should_complete = true; @@ -1914,13 +1897,13 @@ static BlockJob *mirror_start_job( */ bdrv_disable_dirty_bitmap(s->dirty_bitmap); - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); ret = block_job_add_bdrv(&s->common, "source", bs, 0, BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE | BLK_PERM_CONSISTENT_READ, errp); if (ret < 0) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); goto fail; } @@ -1965,17 +1948,17 @@ static BlockJob *mirror_start_job( ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, iter_shared_perms, errp); if (ret < 0) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); goto fail; } } if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); goto fail; } } - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); QTAILQ_INIT(&s->ops_in_flight); @@ -2001,12 +1984,12 @@ static BlockJob *mirror_start_job( bs_opaque->stop = true; bdrv_drained_begin(bs); - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); assert(mirror_top_bs->backing->bs == bs); bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing, &error_abort); bdrv_replace_node(mirror_top_bs, bs, &error_abort); - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); bdrv_drained_end(bs); bdrv_unref(mirror_top_bs); diff --git a/block/monitor/bitmap-qmp-cmds.c b/block/monitor/bitmap-qmp-cmds.c index 70d01a37763..a738e7bbf7a 100644 --- a/block/monitor/bitmap-qmp-cmds.c +++ b/block/monitor/bitmap-qmp-cmds.c @@ -95,7 +95,6 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, { BlockDriverState *bs; BdrvDirtyBitmap *bitmap; - AioContext *aio_context; if (!name || name[0] == '\0') { error_setg(errp, "Bitmap name cannot be empty"); @@ -107,14 +106,11 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - if (has_granularity) { if (granularity < 512 || !is_power_of_2(granularity)) { error_setg(errp, "Granularity must be power of 2 " "and at least 512"); - goto out; + return; } } else { /* Default to cluster size, if available: */ @@ -132,12 +128,12 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, if (persistent && !bdrv_can_store_new_dirty_bitmap(bs, name, granularity, errp)) { - goto out; + return; } bitmap = bdrv_create_dirty_bitmap(bs, granularity, name, errp); if (bitmap == NULL) { - goto out; + return; } if (disabled) { @@ -145,9 +141,6 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, } bdrv_dirty_bitmap_set_persistence(bitmap, persistent); - -out: - aio_context_release(aio_context); } BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name, @@ -157,7 +150,6 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name, { BlockDriverState *bs; BdrvDirtyBitmap *bitmap; - AioContext *aio_context; GLOBAL_STATE_CODE(); @@ -166,19 +158,14 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name, return NULL; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_BUSY | BDRV_BITMAP_RO, errp)) { - aio_context_release(aio_context); return NULL; } if (bdrv_dirty_bitmap_get_persistence(bitmap) && bdrv_remove_persistent_dirty_bitmap(bs, name, errp) < 0) { - 
aio_context_release(aio_context); return NULL; } @@ -190,7 +177,6 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name, *bitmap_bs = bs; } - aio_context_release(aio_context); return release ? NULL : bitmap; } diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c index c729cbf1eb8..d954bec6f1e 100644 --- a/block/monitor/block-hmp-cmds.c +++ b/block/monitor/block-hmp-cmds.c @@ -141,7 +141,6 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict) const char *id = qdict_get_str(qdict, "id"); BlockBackend *blk; BlockDriverState *bs; - AioContext *aio_context; Error *local_err = NULL; GLOBAL_STATE_CODE(); @@ -168,14 +167,10 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict) return; } - aio_context = blk_get_aio_context(blk); - aio_context_acquire(aio_context); - bs = blk_bs(blk); if (bs) { if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, &local_err)) { error_report_err(local_err); - aio_context_release(aio_context); return; } @@ -196,8 +191,6 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict) } else { blk_unref(blk); } - - aio_context_release(aio_context); } void hmp_commit(Monitor *mon, const QDict *qdict) @@ -213,7 +206,6 @@ void hmp_commit(Monitor *mon, const QDict *qdict) ret = blk_commit_all(); } else { BlockDriverState *bs; - AioContext *aio_context; blk = blk_by_name(device); if (!blk) { @@ -222,18 +214,13 @@ void hmp_commit(Monitor *mon, const QDict *qdict) } bs = bdrv_skip_implicit_filters(blk_bs(blk)); - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); if (!blk_is_available(blk)) { error_report("Device '%s' has no medium", device); - aio_context_release(aio_context); return; } ret = bdrv_commit(bs); - - aio_context_release(aio_context); } if (ret < 0) { error_report("'commit' error for '%s': %s", device, strerror(-ret)); @@ -509,7 +496,7 @@ void hmp_block_stream(Monitor *mon, const QDict *qdict) const char *base = qdict_get_try_str(qdict, "base"); int64_t speed = qdict_get_try_int(qdict, "speed", 0); - qmp_block_stream(device, device, base, NULL, NULL, NULL, + qmp_block_stream(device, device, base, NULL, NULL, false, false, NULL, qdict_haskey(qdict, "speed"), speed, true, BLOCKDEV_ON_ERROR_REPORT, NULL, false, false, false, false, &error); @@ -560,7 +547,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict) BlockBackend *blk = NULL; BlockDriverState *bs = NULL; BlockBackend *local_blk = NULL; - AioContext *ctx = NULL; bool qdev = qdict_get_try_bool(qdict, "qdev", false); const char *device = qdict_get_str(qdict, "device"); const char *command = qdict_get_str(qdict, "command"); @@ -582,9 +568,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict) } } - ctx = blk ? 
blk_get_aio_context(blk) : bdrv_get_aio_context(bs); - aio_context_acquire(ctx); - if (bs) { blk = local_blk = blk_new(bdrv_get_aio_context(bs), 0, BLK_PERM_ALL); ret = blk_insert_bs(blk, bs, &err); @@ -622,11 +605,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict) fail: blk_unref(local_blk); - - if (ctx) { - aio_context_release(ctx); - } - hmp_handle_error(mon, err); } @@ -882,7 +860,6 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict) int nb_sns, i; int total; int *global_snapshots; - AioContext *aio_context; typedef struct SnapshotEntry { QEMUSnapshotInfo sn; @@ -909,11 +886,8 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict) error_report_err(err); return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); nb_sns = bdrv_snapshot_list(bs, &sn_tab); - aio_context_release(aio_context); if (nb_sns < 0) { monitor_printf(mon, "bdrv_snapshot_list: error %d\n", nb_sns); @@ -924,9 +898,7 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict) int bs1_nb_sns = 0; ImageEntry *ie; SnapshotEntry *se; - AioContext *ctx = bdrv_get_aio_context(bs1); - aio_context_acquire(ctx); if (bdrv_can_snapshot(bs1)) { sn = NULL; bs1_nb_sns = bdrv_snapshot_list(bs1, &sn); @@ -944,7 +916,6 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict) } g_free(sn); } - aio_context_release(ctx); } if (no_snapshot) { diff --git a/block/nbd.c b/block/nbd.c index b9d4f935e01..ef05f7cdfd6 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -852,6 +852,7 @@ static coroutine_fn int nbd_co_do_receive_one_chunk( BDRVNBDState *s, uint64_t cookie, bool only_structured, int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp) { + ERRP_GUARD(); int ret; int i = COOKIE_TO_INDEX(cookie); void *local_payload = NULL; diff --git a/block/nvme.c b/block/nvme.c index 0a0a0a6b36c..3a3c6da73d2 100644 --- a/block/nvme.c +++ b/block/nvme.c @@ -168,6 +168,7 @@ static QemuOptsList runtime_opts = { static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q, unsigned nentries, size_t entry_bytes, Error **errp) { + ERRP_GUARD(); size_t bytes; int r; @@ -221,6 +222,7 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s, unsigned idx, size_t size, Error **errp) { + ERRP_GUARD(); int i, r; NVMeQueuePair *q; uint64_t prp_list_iova; @@ -535,6 +537,7 @@ static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd) /* Returns true on success, false on failure. */ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp) { + ERRP_GUARD(); BDRVNVMeState *s = bs->opaque; bool ret = false; QEMU_AUTO_VFREE union { diff --git a/block/qapi-sysemu.c b/block/qapi-sysemu.c index 1618cd225a3..e4282631d23 100644 --- a/block/qapi-sysemu.c +++ b/block/qapi-sysemu.c @@ -174,7 +174,6 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp) { BlockBackend *blk; BlockDriverState *bs; - AioContext *aio_context; bool has_attached_device; GLOBAL_STATE_CODE(); @@ -204,13 +203,10 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp) return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - bdrv_graph_rdlock_main_loop(); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) { bdrv_graph_rdunlock_main_loop(); - goto out; + return; } bdrv_graph_rdunlock_main_loop(); @@ -223,9 +219,6 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp) * value passed here (i.e. false). 
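Several hunks above add ERRP_GUARD() so the function may inspect *errp after a sub-call. The underlying problem is that callers may pass a NULL errp; the guard re-points errp at a local object so dereferencing it is always safe. A simplified illustration with a stand-in Error type (the real macro also handles error_fatal/error_abort and auto-propagation):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct Error { const char *msg; } Error;

    static bool do_step(Error **errp)
    {
        static Error failure = { "step failed" };
        *errp = &failure;                /* report an error to the caller */
        return false;
    }

    static bool open_things(Error **errp)
    {
        Error *local_err = NULL;

        if (errp == NULL) {
            errp = &local_err;           /* make *errp always safe to read */
        }

        do_step(errp);
        if (*errp != NULL) {             /* now valid even for NULL callers */
            return false;
        }
        return true;
    }
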
*/ blk_dev_change_media_cb(blk, false, &error_abort); } - -out: - aio_context_release(aio_context); } void qmp_blockdev_remove_medium(const char *id, Error **errp) @@ -237,7 +230,6 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk, BlockDriverState *bs, Error **errp) { Error *local_err = NULL; - AioContext *ctx; bool has_device; int ret; @@ -259,11 +251,7 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk, return; } - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); ret = blk_insert_bs(blk, bs, errp); - aio_context_release(ctx); - if (ret < 0) { return; } @@ -374,9 +362,7 @@ void qmp_blockdev_change_medium(const char *device, qdict_put_str(options, "driver", format); } - aio_context_acquire(qemu_get_aio_context()); medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp); - aio_context_release(qemu_get_aio_context()); if (!medium_bs) { goto fail; @@ -437,20 +423,16 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp) ThrottleConfig cfg; BlockDriverState *bs; BlockBackend *blk; - AioContext *aio_context; blk = qmp_get_blk(arg->device, arg->id, errp); if (!blk) { return; } - aio_context = blk_get_aio_context(blk); - aio_context_acquire(aio_context); - bs = blk_bs(blk); if (!bs) { error_setg(errp, "Device has no medium"); - goto out; + return; } throttle_config_init(&cfg); @@ -505,7 +487,7 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp) } if (!throttle_is_valid(&cfg, errp)) { - goto out; + return; } if (throttle_enabled(&cfg)) { @@ -522,9 +504,6 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp) /* If all throttling settings are set to 0, disable I/O limits */ blk_io_limits_disable(blk); } - -out: - aio_context_release(aio_context); } void qmp_block_latency_histogram_set( diff --git a/block/qapi.c b/block/qapi.c index 82a30b38fe7..2b5793f1d9b 100644 --- a/block/qapi.c +++ b/block/qapi.c @@ -46,11 +46,11 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, bool flat, Error **errp) { + ERRP_GUARD(); ImageInfo **p_image_info; ImageInfo *backing_info; BlockDriverState *backing; BlockDeviceInfo *info; - ERRP_GUARD(); if (!bs->drv) { error_setg(errp, "Block device %s is ejected", bs->node_name); @@ -234,13 +234,11 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp) int ret; Error *err = NULL; - aio_context_acquire(bdrv_get_aio_context(bs)); - size = bdrv_getlength(bs); if (size < 0) { error_setg_errno(errp, -size, "Can't get image size '%s'", bs->exact_filename); - goto out; + return; } bdrv_refresh_filename(bs); @@ -265,7 +263,7 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp) info->format_specific = bdrv_get_specific_info(bs, &err); if (err) { error_propagate(errp, err); - goto out; + return; } backing_filename = bs->backing_file; if (backing_filename[0] != '\0') { @@ -300,11 +298,8 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp) break; default: error_propagate(errp, err); - goto out; + return; } - -out: - aio_context_release(bdrv_get_aio_context(bs)); } /** @@ -335,8 +330,8 @@ void bdrv_query_image_info(BlockDriverState *bs, bool skip_implicit_filters, Error **errp) { - ImageInfo *info; ERRP_GUARD(); + ImageInfo *info; info = g_new0(ImageInfo, 1); bdrv_do_query_node_info(bs, qapi_ImageInfo_base(info), errp); @@ -387,10 +382,10 @@ void bdrv_query_block_graph_info(BlockDriverState *bs, BlockGraphInfo **p_info, Error **errp) { + ERRP_GUARD(); BlockGraphInfo *info; BlockChildInfoList 
**children_list_tail; BdrvChild *c; - ERRP_GUARD(); info = g_new0(BlockGraphInfo, 1); bdrv_do_query_node_info(bs, qapi_BlockGraphInfo_base(info), errp); @@ -709,15 +704,10 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes, /* Just to be safe if query_nodes is not always initialized */ if (has_query_nodes && query_nodes) { for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) { - AioContext *ctx = bdrv_get_aio_context(bs); - - aio_context_acquire(ctx); QAPI_LIST_APPEND(tail, bdrv_query_bds_stats(bs, false)); - aio_context_release(ctx); } } else { for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) { - AioContext *ctx = blk_get_aio_context(blk); BlockStats *s; char *qdev; @@ -725,7 +715,6 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes, continue; } - aio_context_acquire(ctx); s = bdrv_query_bds_stats(blk_bs(blk), true); s->device = g_strdup(blk_name(blk)); @@ -737,7 +726,6 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes, } bdrv_query_blk_stats(s->stats, blk); - aio_context_release(ctx); QAPI_LIST_APPEND(tail, s); } @@ -754,15 +742,15 @@ void bdrv_snapshot_dump(QEMUSnapshotInfo *sn) char *sizing = NULL; if (!sn) { - qemu_printf("%-10s%-17s%8s%20s%13s%11s", - "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK", "ICOUNT"); + qemu_printf("%-7s %-16s %8s %19s %15s %10s", + "ID", "TAG", "VM_SIZE", "DATE", "VM_CLOCK", "ICOUNT"); } else { g_autoptr(GDateTime) date = g_date_time_new_from_unix_local(sn->date_sec); g_autofree char *date_buf = g_date_time_format(date, "%Y-%m-%d %H:%M:%S"); secs = sn->vm_clock_nsec / 1000000000; snprintf(clock_buf, sizeof(clock_buf), - "%02d:%02d:%02d.%03d", + "%04d:%02d:%02d.%03d", (int)(secs / 3600), (int)((secs / 60) % 60), (int)(secs % 60), @@ -771,8 +759,10 @@ void bdrv_snapshot_dump(QEMUSnapshotInfo *sn) if (sn->icount != -1ULL) { snprintf(icount_buf, sizeof(icount_buf), "%"PRId64, sn->icount); + } else { + snprintf(icount_buf, sizeof(icount_buf), "--"); } - qemu_printf("%-9s %-16s %8s%20s%13s%11s", + qemu_printf("%-7s %-16s %8s %19s %15s %10s", sn->id_str, sn->name, sizing, date_buf, diff --git a/block/qcow.c b/block/qcow.c index c6d0e15f1ef..ca8e1d5ec8a 100644 --- a/block/qcow.c +++ b/block/qcow.c @@ -885,7 +885,7 @@ qcow_co_create(BlockdevCreateOptions *opts, Error **errp) header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES); crypto = qcrypto_block_create(qcow_opts->encrypt, "encrypt.", - NULL, NULL, NULL, errp); + NULL, NULL, NULL, 0, errp); if (!crypto) { ret = -EINVAL; goto exit; diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c index 0e567ed588d..874ea569485 100644 --- a/block/qcow2-bitmap.c +++ b/block/qcow2-bitmap.c @@ -1710,6 +1710,7 @@ bool coroutine_fn qcow2_co_can_store_new_dirty_bitmap(BlockDriverState *bs, uint32_t granularity, Error **errp) { + ERRP_GUARD(); BDRVQcow2State *s = bs->opaque; BdrvDirtyBitmap *bitmap; uint64_t bitmap_directory_size = 0; diff --git a/block/qcow2.c b/block/qcow2.c index 13e032bd5e2..956128b4094 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -2807,9 +2807,9 @@ qcow2_do_close(BlockDriverState *bs, bool close_data_file) if (close_data_file && has_data_file(bs)) { GLOBAL_STATE_CODE(); bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, s->data_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); s->data_file = NULL; bdrv_graph_rdlock_main_loop(); } @@ -3216,7 +3216,7 @@ qcow2_set_up_encryption(BlockDriverState *bs, crypto = qcrypto_block_create(cryptoopts, "encrypt.", qcow2_crypto_hdr_init_func, 
qcow2_crypto_hdr_write_func, - bs, errp); + bs, 0, errp); if (!crypto) { return -EINVAL; } @@ -3483,6 +3483,7 @@ static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, static int coroutine_fn GRAPH_UNLOCKED qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) { + ERRP_GUARD(); BlockdevCreateOptionsQcow2 *qcow2_opts; QDict *options; @@ -4283,6 +4284,7 @@ static int coroutine_fn GRAPH_RDLOCK qcow2_co_truncate(BlockDriverState *bs, int64_t offset, bool exact, PreallocMode prealloc, BdrvRequestFlags flags, Error **errp) { + ERRP_GUARD(); BDRVQcow2State *s = bs->opaque; uint64_t old_length; int64_t new_l1_size; diff --git a/block/qed.c b/block/qed.c index bc2f0a61c0a..fa5bc110855 100644 --- a/block/qed.c +++ b/block/qed.c @@ -1579,6 +1579,7 @@ bdrv_qed_co_change_backing_file(BlockDriverState *bs, const char *backing_file, static void coroutine_fn GRAPH_RDLOCK bdrv_qed_co_invalidate_cache(BlockDriverState *bs, Error **errp) { + ERRP_GUARD(); BDRVQEDState *s = bs->opaque; int ret; diff --git a/block/quorum.c b/block/quorum.c index 505b8b3e18e..db8fe891c4b 100644 --- a/block/quorum.c +++ b/block/quorum.c @@ -1037,14 +1037,14 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags, close_exit: /* cleanup on error */ - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); for (i = 0; i < s->num_children; i++) { if (!opened[i]) { continue; } bdrv_unref_child(bs, s->children[i]); } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); g_free(s->children); g_free(opened); exit: @@ -1057,11 +1057,11 @@ static void quorum_close(BlockDriverState *bs) BDRVQuorumState *s = bs->opaque; int i; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); for (i = 0; i < s->num_children; i++) { bdrv_unref_child(bs, s->children[i]); } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); g_free(s->children); } diff --git a/block/raw-format.c b/block/raw-format.c index 1111dffd54f..ac7e8495f66 100644 --- a/block/raw-format.c +++ b/block/raw-format.c @@ -470,7 +470,6 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVRawState *s = bs->opaque; - AioContext *ctx; bool has_size; uint64_t offset, size; BdrvChildRole file_role; @@ -522,11 +521,7 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags, bs->file->bs->filename); } - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); ret = raw_apply_options(bs, s, offset, has_size, size, errp); - aio_context_release(ctx); - if (ret < 0) { return ret; } diff --git a/block/replication.c b/block/replication.c index 5ded5f1ca94..ca6bd0a7205 100644 --- a/block/replication.c +++ b/block/replication.c @@ -394,14 +394,7 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable, } if (reopen_queue) { - AioContext *ctx = bdrv_get_aio_context(bs); - if (ctx != qemu_get_aio_context()) { - aio_context_release(ctx); - } bdrv_reopen_multiple(reopen_queue, errp); - if (ctx != qemu_get_aio_context()) { - aio_context_acquire(ctx); - } } } @@ -462,14 +455,11 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, BlockDriverState *top_bs; BdrvChild *active_disk, *hidden_disk, *secondary_disk; int64_t active_length, hidden_length, disk_length; - AioContext *aio_context; Error *local_err = NULL; BackupPerf perf = { .use_copy_range = true, .max_workers = 1 }; GLOBAL_STATE_CODE(); - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); s = bs->opaque; if (s->stage == BLOCK_REPLICATION_DONE || @@ -479,20 +469,17 @@ static void 
replication_start(ReplicationState *rs, ReplicationMode mode, * Ignore the request because the secondary side of replication * doesn't have to do anything anymore. */ - aio_context_release(aio_context); return; } if (s->stage != BLOCK_REPLICATION_NONE) { error_setg(errp, "Block replication is running or done"); - aio_context_release(aio_context); return; } if (s->mode != mode) { error_setg(errp, "The parameter mode's value is invalid, needs %d," " but got %d", s->mode, mode); - aio_context_release(aio_context); return; } @@ -505,7 +492,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, if (!active_disk || !active_disk->bs || !active_disk->bs->backing) { error_setg(errp, "Active disk doesn't have backing file"); bdrv_graph_rdunlock_main_loop(); - aio_context_release(aio_context); return; } @@ -513,7 +499,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, if (!hidden_disk->bs || !hidden_disk->bs->backing) { error_setg(errp, "Hidden disk doesn't have backing file"); bdrv_graph_rdunlock_main_loop(); - aio_context_release(aio_context); return; } @@ -521,7 +506,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, if (!secondary_disk->bs || !bdrv_has_blk(secondary_disk->bs)) { error_setg(errp, "The secondary disk doesn't have block backend"); bdrv_graph_rdunlock_main_loop(); - aio_context_release(aio_context); return; } bdrv_graph_rdunlock_main_loop(); @@ -534,7 +518,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, active_length != hidden_length || hidden_length != disk_length) { error_setg(errp, "Active disk, hidden disk, secondary disk's length" " are not the same"); - aio_context_release(aio_context); return; } @@ -546,7 +529,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, !hidden_disk->bs->drv->bdrv_make_empty) { error_setg(errp, "Active disk or hidden disk doesn't support make_empty"); - aio_context_release(aio_context); bdrv_graph_rdunlock_main_loop(); return; } @@ -556,11 +538,10 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, reopen_backing_file(bs, true, &local_err); if (local_err) { error_propagate(errp, local_err); - aio_context_release(aio_context); return; } - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); bdrv_ref(hidden_disk->bs); s->hidden_disk = bdrv_attach_child(bs, hidden_disk->bs, "hidden disk", @@ -568,8 +549,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, &local_err); if (local_err) { error_propagate(errp, local_err); - bdrv_graph_wrunlock(bs); - aio_context_release(aio_context); + bdrv_graph_wrunlock(); return; } @@ -579,8 +559,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, BDRV_CHILD_DATA, &local_err); if (local_err) { error_propagate(errp, local_err); - bdrv_graph_wrunlock(bs); - aio_context_release(aio_context); + bdrv_graph_wrunlock(); return; } @@ -592,15 +571,14 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, if (!top_bs || !bdrv_is_root_node(top_bs) || !check_top_bs(top_bs, bs)) { error_setg(errp, "No top_bs or it is invalid"); - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); reopen_backing_file(bs, false, NULL); - aio_context_release(aio_context); return; } bdrv_op_block_all(top_bs, s->blocker); bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker); - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); s->backup_job = backup_job_create( NULL, s->secondary_disk->bs, s->hidden_disk->bs, @@ -612,13 
+590,11 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, if (local_err) { error_propagate(errp, local_err); backup_job_cleanup(bs); - aio_context_release(aio_context); return; } job_start(&s->backup_job->job); break; default: - aio_context_release(aio_context); abort(); } @@ -629,18 +605,12 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, } s->error = 0; - aio_context_release(aio_context); } static void replication_do_checkpoint(ReplicationState *rs, Error **errp) { BlockDriverState *bs = rs->opaque; - BDRVReplicationState *s; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - s = bs->opaque; + BDRVReplicationState *s = bs->opaque; if (s->stage == BLOCK_REPLICATION_DONE || s->stage == BLOCK_REPLICATION_FAILOVER) { @@ -649,38 +619,28 @@ static void replication_do_checkpoint(ReplicationState *rs, Error **errp) * Ignore the request because the secondary side of replication * doesn't have to do anything anymore. */ - aio_context_release(aio_context); return; } if (s->mode == REPLICATION_MODE_SECONDARY) { secondary_do_checkpoint(bs, errp); } - aio_context_release(aio_context); } static void replication_get_error(ReplicationState *rs, Error **errp) { BlockDriverState *bs = rs->opaque; - BDRVReplicationState *s; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - s = bs->opaque; + BDRVReplicationState *s = bs->opaque; if (s->stage == BLOCK_REPLICATION_NONE) { error_setg(errp, "Block replication is not running"); - aio_context_release(aio_context); return; } if (s->error) { error_setg(errp, "I/O error occurred"); - aio_context_release(aio_context); return; } - aio_context_release(aio_context); } static void replication_done(void *opaque, int ret) @@ -691,12 +651,12 @@ static void replication_done(void *opaque, int ret) if (ret == 0) { s->stage = BLOCK_REPLICATION_DONE; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, s->secondary_disk); s->secondary_disk = NULL; bdrv_unref_child(bs, s->hidden_disk); s->hidden_disk = NULL; - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); s->error = 0; } else { @@ -708,12 +668,7 @@ static void replication_done(void *opaque, int ret) static void replication_stop(ReplicationState *rs, bool failover, Error **errp) { BlockDriverState *bs = rs->opaque; - BDRVReplicationState *s; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - s = bs->opaque; + BDRVReplicationState *s = bs->opaque; if (s->stage == BLOCK_REPLICATION_DONE || s->stage == BLOCK_REPLICATION_FAILOVER) { @@ -722,13 +677,11 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) * Ignore the request because the secondary side of replication * doesn't have to do anything anymore. */ - aio_context_release(aio_context); return; } if (s->stage != BLOCK_REPLICATION_RUNNING) { error_setg(errp, "Block replication is not running"); - aio_context_release(aio_context); return; } @@ -744,15 +697,12 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) * disk, secondary disk in backup_job_completed(). 
*/ if (s->backup_job) { - aio_context_release(aio_context); job_cancel_sync(&s->backup_job->job, true); - aio_context_acquire(aio_context); } if (!failover) { secondary_do_checkpoint(bs, errp); s->stage = BLOCK_REPLICATION_DONE; - aio_context_release(aio_context); return; } @@ -765,10 +715,8 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) bdrv_graph_rdunlock_main_loop(); break; default: - aio_context_release(aio_context); abort(); } - aio_context_release(aio_context); } static const char *const replication_strong_runtime_opts[] = { diff --git a/block/snapshot.c b/block/snapshot.c index c4d40e80dd2..8242b4abac4 100644 --- a/block/snapshot.c +++ b/block/snapshot.c @@ -292,9 +292,9 @@ int bdrv_snapshot_goto(BlockDriverState *bs, } /* .bdrv_open() will re-attach it */ - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, fallback); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp); open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err); @@ -527,9 +527,7 @@ static bool GRAPH_RDLOCK bdrv_all_snapshots_includes_bs(BlockDriverState *bs) return bdrv_has_blk(bs) || QLIST_EMPTY(&bs->parents); } -/* Group operations. All block drivers are involved. - * These functions will properly handle dataplane (take aio_context_acquire - * when appropriate for appropriate block drivers) */ +/* Group operations. All block drivers are involved. */ bool bdrv_all_can_snapshot(bool has_devices, strList *devices, Error **errp) @@ -547,14 +545,11 @@ bool bdrv_all_can_snapshot(bool has_devices, strList *devices, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); bool ok = true; - aio_context_acquire(ctx); if (devices || bdrv_all_snapshots_includes_bs(bs)) { ok = bdrv_can_snapshot(bs); } - aio_context_release(ctx); if (!ok) { error_setg(errp, "Device '%s' is writable but does not support " "snapshots", bdrv_get_device_or_node_name(bs)); @@ -571,6 +566,7 @@ int bdrv_all_delete_snapshot(const char *name, bool has_devices, strList *devices, Error **errp) { + ERRP_GUARD(); g_autoptr(GList) bdrvs = NULL; GList *iterbdrvs; @@ -584,18 +580,15 @@ int bdrv_all_delete_snapshot(const char *name, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); QEMUSnapshotInfo sn1, *snapshot = &sn1; int ret = 0; - aio_context_acquire(ctx); if ((devices || bdrv_all_snapshots_includes_bs(bs)) && bdrv_snapshot_find(bs, snapshot, name) >= 0) { ret = bdrv_snapshot_delete(bs, snapshot->id_str, snapshot->name, errp); } - aio_context_release(ctx); if (ret < 0) { error_prepend(errp, "Could not delete snapshot '%s' on '%s': ", name, bdrv_get_device_or_node_name(bs)); @@ -613,6 +606,7 @@ int bdrv_all_goto_snapshot(const char *name, bool has_devices, strList *devices, Error **errp) { + ERRP_GUARD(); g_autoptr(GList) bdrvs = NULL; GList *iterbdrvs; int ret; @@ -630,17 +624,14 @@ int bdrv_all_goto_snapshot(const char *name, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); bool all_snapshots_includes_bs; - aio_context_acquire(ctx); bdrv_graph_rdlock_main_loop(); all_snapshots_includes_bs = bdrv_all_snapshots_includes_bs(bs); bdrv_graph_rdunlock_main_loop(); ret = (devices || all_snapshots_includes_bs) ? 
bdrv_snapshot_goto(bs, name, errp) : 0; - aio_context_release(ctx); if (ret < 0) { bdrv_graph_rdlock_main_loop(); error_prepend(errp, "Could not load snapshot '%s' on '%s': ", @@ -672,15 +663,12 @@ int bdrv_all_has_snapshot(const char *name, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); QEMUSnapshotInfo sn; int ret = 0; - aio_context_acquire(ctx); if (devices || bdrv_all_snapshots_includes_bs(bs)) { ret = bdrv_snapshot_find(bs, &sn, name); } - aio_context_release(ctx); if (ret < 0) { if (ret == -ENOENT) { return 0; @@ -717,10 +705,8 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); int ret = 0; - aio_context_acquire(ctx); if (bs == vm_state_bs) { sn->vm_state_size = vm_state_size; ret = bdrv_snapshot_create(bs, sn); @@ -728,7 +714,6 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn, sn->vm_state_size = 0; ret = bdrv_snapshot_create(bs, sn); } - aio_context_release(ctx); if (ret < 0) { error_setg(errp, "Could not create snapshot '%s' on '%s'", sn->name, bdrv_get_device_or_node_name(bs)); @@ -759,13 +744,10 @@ BlockDriverState *bdrv_all_find_vmstate_bs(const char *vmstate_bs, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); bool found = false; - aio_context_acquire(ctx); found = (devices || bdrv_all_snapshots_includes_bs(bs)) && bdrv_can_snapshot(bs); - aio_context_release(ctx); if (vmstate_bs) { if (g_str_equal(vmstate_bs, diff --git a/block/stream.c b/block/stream.c index 01fe7c0f166..7031eef12b6 100644 --- a/block/stream.c +++ b/block/stream.c @@ -39,6 +39,7 @@ typedef struct StreamBlockJob { BlockDriverState *target_bs; BlockdevOnError on_error; char *backing_file_str; + bool backing_mask_protocol; bool bs_read_only; } StreamBlockJob; @@ -95,13 +96,18 @@ static int stream_prepare(Job *job) if (unfiltered_base) { base_id = s->backing_file_str ?: unfiltered_base->filename; if (unfiltered_base->drv) { - base_fmt = unfiltered_base->drv->format_name; + if (s->backing_mask_protocol && + unfiltered_base->drv->protocol_name) { + base_fmt = "raw"; + } else { + base_fmt = unfiltered_base->drv->format_name; + } } } - bdrv_graph_wrlock(s->target_bs); + bdrv_graph_wrlock(); bdrv_set_backing_hd_drained(unfiltered_bs, base, &local_err); - bdrv_graph_wrunlock(s->target_bs); + bdrv_graph_wrunlock(); /* * This call will do I/O, so the graph can change again from here on. @@ -247,6 +253,7 @@ static const BlockJobDriver stream_job_driver = { void stream_start(const char *job_id, BlockDriverState *bs, BlockDriverState *base, const char *backing_file_str, + bool backing_mask_protocol, BlockDriverState *bottom, int creation_flags, int64_t speed, BlockdevOnError on_error, @@ -366,10 +373,10 @@ void stream_start(const char *job_id, BlockDriverState *bs, * already have our own plans. Also don't allow resize as the image size is * queried only at the job start and then cached. 
*/ - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); if (block_job_add_bdrv(&s->common, "active node", bs, 0, basic_flags | BLK_PERM_WRITE, errp)) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); goto fail; } @@ -389,15 +396,16 @@ void stream_start(const char *job_id, BlockDriverState *bs, ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, basic_flags, errp); if (ret < 0) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); goto fail; } } - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); s->base_overlay = base_overlay; s->above_base = above_base; s->backing_file_str = g_strdup(backing_file_str); + s->backing_mask_protocol = backing_mask_protocol; s->cor_filter_bs = cor_filter_bs; s->target_bs = bs; s->bs_read_only = bs_read_only; diff --git a/block/vdi.c b/block/vdi.c index 3b57becb9fe..6363da08cee 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -738,6 +738,7 @@ static int coroutine_fn GRAPH_UNLOCKED vdi_co_do_create(BlockdevCreateOptions *create_options, size_t block_size, Error **errp) { + ERRP_GUARD(); BlockdevCreateOptionsVdi *vdi_opts; int ret = 0; uint64_t bytes = 0; diff --git a/block/vmdk.c b/block/vmdk.c index d6971c70675..3b82979fdf4 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -272,7 +272,7 @@ static void vmdk_free_extents(BlockDriverState *bs) BDRVVmdkState *s = bs->opaque; VmdkExtent *e; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); for (i = 0; i < s->num_extents; i++) { e = &s->extents[i]; g_free(e->l1_table); @@ -283,7 +283,7 @@ static void vmdk_free_extents(BlockDriverState *bs) bdrv_unref_child(bs, e->file); } } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); g_free(s->extents); } @@ -1147,6 +1147,7 @@ static int GRAPH_RDLOCK vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, Error **errp) { + ERRP_GUARD(); int ret; int matches; char access[11]; @@ -1247,9 +1248,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, 0, 0, 0, 0, 0, &extent, errp); if (ret < 0) { bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); goto out; } @@ -1266,9 +1267,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, g_free(buf); if (ret) { bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); goto out; } @@ -1277,9 +1278,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, ret = vmdk_open_se_sparse(bs, extent_file, bs->open_flags, errp); if (ret) { bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); goto out; } @@ -1287,9 +1288,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, } else { error_setg(errp, "Unsupported extent type '%s'", type); bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); ret = -ENOTSUP; goto out; diff --git a/block/write-threshold.c b/block/write-threshold.c index 76d8885677e..56fe88de811 100644 --- a/block/write-threshold.c +++ b/block/write-threshold.c @@ -33,7 +33,6 @@ void qmp_block_set_write_threshold(const char 
*node_name, Error **errp) { BlockDriverState *bs; - AioContext *aio_context; bs = bdrv_find_node(node_name); if (!bs) { @@ -41,12 +40,7 @@ void qmp_block_set_write_threshold(const char *node_name, return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - bdrv_write_threshold_set(bs, threshold_bytes); - - aio_context_release(aio_context); } void bdrv_write_threshold_check_write(BlockDriverState *bs, int64_t offset, diff --git a/blockdev.c b/blockdev.c index c91f49e7b62..057601dcf03 100644 --- a/blockdev.c +++ b/blockdev.c @@ -662,7 +662,6 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts, /* Takes the ownership of bs_opts */ BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp) { - BlockDriverState *bs; int bdrv_flags = 0; GLOBAL_STATE_CODE(); @@ -677,11 +676,7 @@ BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp) bdrv_flags |= BDRV_O_INACTIVE; } - aio_context_acquire(qemu_get_aio_context()); - bs = bdrv_open(NULL, NULL, bs_opts, bdrv_flags, errp); - aio_context_release(qemu_get_aio_context()); - - return bs; + return bdrv_open(NULL, NULL, bs_opts, bdrv_flags, errp); } void blockdev_close_all_bdrv_states(void) @@ -690,11 +685,7 @@ void blockdev_close_all_bdrv_states(void) GLOBAL_STATE_CODE(); QTAILQ_FOREACH_SAFE(bs, &monitor_bdrv_states, monitor_list, next_bs) { - AioContext *ctx = bdrv_get_aio_context(bs); - - aio_context_acquire(ctx); bdrv_unref(bs); - aio_context_release(ctx); } } @@ -1048,7 +1039,6 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type, static BlockDriverState *qmp_get_root_bs(const char *name, Error **errp) { BlockDriverState *bs; - AioContext *aio_context; GRAPH_RDLOCK_GUARD_MAINLOOP(); @@ -1062,16 +1052,11 @@ static BlockDriverState *qmp_get_root_bs(const char *name, Error **errp) return NULL; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - if (!bdrv_is_inserted(bs)) { error_setg(errp, "Device has no medium"); bs = NULL; } - aio_context_release(aio_context); - return bs; } @@ -1141,7 +1126,6 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, Error **errp) { BlockDriverState *bs; - AioContext *aio_context; QEMUSnapshotInfo sn; Error *local_err = NULL; SnapshotInfo *info = NULL; @@ -1154,39 +1138,35 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, if (!bs) { return NULL; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); if (!id && !name) { error_setg(errp, "Name or id must be provided"); - goto out_aio_context; + return NULL; } if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) { - goto out_aio_context; + return NULL; } ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err); if (local_err) { error_propagate(errp, local_err); - goto out_aio_context; + return NULL; } if (!ret) { error_setg(errp, "Snapshot with id '%s' and name '%s' does not exist on " "device '%s'", STR_OR_NULL(id), STR_OR_NULL(name), device); - goto out_aio_context; + return NULL; } bdrv_snapshot_delete(bs, id, name, &local_err); if (local_err) { error_propagate(errp, local_err); - goto out_aio_context; + return NULL; } - aio_context_release(aio_context); - info = g_new0(SnapshotInfo, 1); info->id = g_strdup(sn.id_str); info->name = g_strdup(sn.name); @@ -1201,10 +1181,6 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, } return info; - -out_aio_context: - aio_context_release(aio_context); - return NULL; } /* 
internal snapshot private data */ @@ -1232,7 +1208,6 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, bool ret; int64_t rt; InternalSnapshotState *state = g_new0(InternalSnapshotState, 1); - AioContext *aio_context; int ret1; GLOBAL_STATE_CODE(); @@ -1248,33 +1223,30 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - state->bs = bs; /* Paired with .clean() */ bdrv_drained_begin(bs); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) { - goto out; + return; } if (bdrv_is_read_only(bs)) { error_setg(errp, "Device '%s' is read only", device); - goto out; + return; } if (!bdrv_can_snapshot(bs)) { error_setg(errp, "Block format '%s' used by device '%s' " "does not support internal snapshots", bs->drv->format_name, device); - goto out; + return; } if (!strlen(name)) { error_setg(errp, "Name is empty"); - goto out; + return; } /* check whether a snapshot with name exist */ @@ -1282,12 +1254,12 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, &local_err); if (local_err) { error_propagate(errp, local_err); - goto out; + return; } else if (ret) { error_setg(errp, "Snapshot with name '%s' already exists on device '%s'", name, device); - goto out; + return; } /* 3. take the snapshot */ @@ -1308,14 +1280,11 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, error_setg_errno(errp, -ret1, "Failed to create snapshot '%s' on device '%s'", name, device); - goto out; + return; } /* 4. succeed, mark a snapshot is created */ state->created = true; - -out: - aio_context_release(aio_context); } static void internal_snapshot_abort(void *opaque) @@ -1323,7 +1292,6 @@ static void internal_snapshot_abort(void *opaque) InternalSnapshotState *state = opaque; BlockDriverState *bs = state->bs; QEMUSnapshotInfo *sn = &state->sn; - AioContext *aio_context; Error *local_error = NULL; GLOBAL_STATE_CODE(); @@ -1333,9 +1301,6 @@ static void internal_snapshot_abort(void *opaque) return; } - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); - if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) { error_reportf_err(local_error, "Failed to delete snapshot with id '%s' and " @@ -1343,25 +1308,17 @@ static void internal_snapshot_abort(void *opaque) sn->id_str, sn->name, bdrv_get_device_name(bs)); } - - aio_context_release(aio_context); } static void internal_snapshot_clean(void *opaque) { g_autofree InternalSnapshotState *state = opaque; - AioContext *aio_context; if (!state->bs) { return; } - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); - bdrv_drained_end(state->bs); - - aio_context_release(aio_context); } /* external snapshot private data */ @@ -1395,7 +1352,6 @@ static void external_snapshot_action(TransactionAction *action, /* File name of the new image (for 'blockdev-snapshot-sync') */ const char *new_image_file; ExternalSnapshotState *state = g_new0(ExternalSnapshotState, 1); - AioContext *aio_context; uint64_t perm, shared; /* TODO We'll eventually have to take a writer lock in this function */ @@ -1435,26 +1391,24 @@ static void external_snapshot_action(TransactionAction *action, return; } - aio_context = bdrv_get_aio_context(state->old_bs); - aio_context_acquire(aio_context); - /* Paired with .clean() */ bdrv_drained_begin(state->old_bs); if (!bdrv_is_inserted(state->old_bs)) { - error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, 
device); - goto out; + error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, + bdrv_get_device_or_node_name(state->old_bs)); + return; } if (bdrv_op_is_blocked(state->old_bs, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) { - goto out; + return; } if (!bdrv_is_read_only(state->old_bs)) { if (bdrv_flush(state->old_bs)) { error_setg(errp, QERR_IO_ERROR); - goto out; + return; } } @@ -1466,13 +1420,13 @@ static void external_snapshot_action(TransactionAction *action, if (node_name && !snapshot_node_name) { error_setg(errp, "New overlay node-name missing"); - goto out; + return; } if (snapshot_node_name && bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) { error_setg(errp, "New overlay node-name already in use"); - goto out; + return; } flags = state->old_bs->open_flags; @@ -1485,20 +1439,18 @@ static void external_snapshot_action(TransactionAction *action, int64_t size = bdrv_getlength(state->old_bs); if (size < 0) { error_setg_errno(errp, -size, "bdrv_getlength failed"); - goto out; + return; } bdrv_refresh_filename(state->old_bs); - aio_context_release(aio_context); bdrv_img_create(new_image_file, format, state->old_bs->filename, state->old_bs->drv->format_name, NULL, size, flags, false, &local_err); - aio_context_acquire(aio_context); if (local_err) { error_propagate(errp, local_err); - goto out; + return; } } @@ -1508,20 +1460,15 @@ static void external_snapshot_action(TransactionAction *action, } qdict_put_str(options, "driver", format); } - aio_context_release(aio_context); - aio_context_acquire(qemu_get_aio_context()); state->new_bs = bdrv_open(new_image_file, snapshot_ref, options, flags, errp); - aio_context_release(qemu_get_aio_context()); /* We will manually add the backing_hd field to the bs later */ if (!state->new_bs) { return; } - aio_context_acquire(aio_context); - /* * Allow attaching a backing file to an overlay that's already in use only * if the parents don't assume that they are already seeing a valid image. 
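For review orientation only, and not part of the patch itself: the hunks above and below apply the same mechanical transformation over and over, so a condensed before/after sketch of the pattern may be easier to keep in mind than any single hunk. The handler name qmp_example_before()/qmp_example_after() is hypothetical; the calls it makes (bdrv_find_node(), bdrv_get_aio_context(), aio_context_acquire()/aio_context_release(), bdrv_op_is_blocked(), bdrv_getlength()) are the real APIs touched elsewhere in this series, and the includes are the usual QEMU headers rather than a verified minimal set.

#include "qemu/osdep.h"
#include "block/aio.h"
#include "block/block.h"
#include "qapi/error.h"

/* Before this series (sketch): block-layer calls are bracketed by the
 * AioContext lock, so every error path funnels through an unlock label. */
void qmp_example_before(const char *node_name, Error **errp)
{
    BlockDriverState *bs = bdrv_find_node(node_name);
    AioContext *aio_context;
    int64_t len;

    if (!bs) {
        error_setg(errp, "Node '%s' not found", node_name);
        return;
    }

    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, errp)) {
        goto out;                    /* must drop the lock on every path */
    }
    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "bdrv_getlength failed");
        goto out;
    }
    /* ... act on bs while holding the AioContext lock ... */

out:
    aio_context_release(aio_context);
}

/* After this series (sketch): the AioContext lock is gone, the block layer
 * is called directly, and error paths simply return. */
void qmp_example_after(const char *node_name, Error **errp)
{
    BlockDriverState *bs = bdrv_find_node(node_name);
    int64_t len;

    if (!bs) {
        error_setg(errp, "Node '%s' not found", node_name);
        return;
    }
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, errp)) {
        return;                      /* nothing to unlock any more */
    }
    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "bdrv_getlength failed");
        return;
    }
    /* ... act on bs; the block layer handles its own synchronization ... */
}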
@@ -1530,41 +1477,34 @@ static void external_snapshot_action(TransactionAction *action, bdrv_get_cumulative_perm(state->new_bs, &perm, &shared); if (perm & BLK_PERM_CONSISTENT_READ) { error_setg(errp, "The overlay is already in use"); - goto out; + return; } if (state->new_bs->drv->is_filter) { error_setg(errp, "Filters cannot be used as overlays"); - goto out; + return; } if (bdrv_cow_child(state->new_bs)) { error_setg(errp, "The overlay already has a backing image"); - goto out; + return; } if (!state->new_bs->drv->supports_backing) { error_setg(errp, "The overlay does not support backing images"); - goto out; + return; } ret = bdrv_append(state->new_bs, state->old_bs, errp); if (ret < 0) { - goto out; + return; } state->overlay_appended = true; - -out: - aio_context_release(aio_context); } static void external_snapshot_commit(void *opaque) { ExternalSnapshotState *state = opaque; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(state->old_bs); - aio_context_acquire(aio_context); /* We don't need (or want) to use the transactional * bdrv_reopen_multiple() across all the entries at once, because we @@ -1572,8 +1512,6 @@ static void external_snapshot_commit(void *opaque) if (!qatomic_read(&state->old_bs->copy_on_read)) { bdrv_reopen_set_read_only(state->old_bs, true, NULL); } - - aio_context_release(aio_context); } static void external_snapshot_abort(void *opaque) @@ -1586,7 +1524,6 @@ static void external_snapshot_abort(void *opaque) int ret; aio_context = bdrv_get_aio_context(state->old_bs); - aio_context_acquire(aio_context); bdrv_ref(state->old_bs); /* we can't let bdrv_set_backind_hd() close state->old_bs; we need it */ @@ -1599,26 +1536,18 @@ static void external_snapshot_abort(void *opaque) */ tmp_context = bdrv_get_aio_context(state->old_bs); if (aio_context != tmp_context) { - aio_context_release(aio_context); - aio_context_acquire(tmp_context); - ret = bdrv_try_change_aio_context(state->old_bs, aio_context, NULL, NULL); assert(ret == 0); - - aio_context_release(tmp_context); - aio_context_acquire(aio_context); } bdrv_drained_begin(state->new_bs); - bdrv_graph_wrlock(state->old_bs); + bdrv_graph_wrlock(); bdrv_replace_node(state->new_bs, state->old_bs, &error_abort); - bdrv_graph_wrunlock(state->old_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(state->new_bs); bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */ - - aio_context_release(aio_context); } } } @@ -1626,19 +1555,13 @@ static void external_snapshot_abort(void *opaque) static void external_snapshot_clean(void *opaque) { g_autofree ExternalSnapshotState *state = opaque; - AioContext *aio_context; if (!state->old_bs) { return; } - aio_context = bdrv_get_aio_context(state->old_bs); - aio_context_acquire(aio_context); - bdrv_drained_end(state->old_bs); bdrv_unref(state->new_bs); - - aio_context_release(aio_context); } typedef struct DriveBackupState { @@ -1670,7 +1593,6 @@ static void drive_backup_action(DriveBackup *backup, BlockDriverState *target_bs; BlockDriverState *source = NULL; AioContext *aio_context; - AioContext *old_context; const char *format; QDict *options; Error *local_err = NULL; @@ -1698,7 +1620,6 @@ static void drive_backup_action(DriveBackup *backup, } aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); state->bs = bs; /* Paired with .clean() */ @@ -1713,7 +1634,7 @@ static void drive_backup_action(DriveBackup *backup, bdrv_graph_rdlock_main_loop(); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) { bdrv_graph_rdunlock_main_loop(); - 
goto out; + return; } flags = bs->open_flags | BDRV_O_RDWR; @@ -1744,7 +1665,7 @@ static void drive_backup_action(DriveBackup *backup, size = bdrv_getlength(bs); if (size < 0) { error_setg_errno(errp, -size, "bdrv_getlength failed"); - goto out; + return; } if (backup->mode != NEW_IMAGE_MODE_EXISTING) { @@ -1770,7 +1691,7 @@ static void drive_backup_action(DriveBackup *backup, if (local_err) { error_propagate(errp, local_err); - goto out; + return; } options = qdict_new(); @@ -1779,30 +1700,18 @@ static void drive_backup_action(DriveBackup *backup, if (format) { qdict_put_str(options, "driver", format); } - aio_context_release(aio_context); - aio_context_acquire(qemu_get_aio_context()); target_bs = bdrv_open(backup->target, NULL, options, flags, errp); - aio_context_release(qemu_get_aio_context()); - if (!target_bs) { return; } - /* Honor bdrv_try_change_aio_context() context acquisition requirements. */ - old_context = bdrv_get_aio_context(target_bs); - aio_context_acquire(old_context); - ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); if (ret < 0) { bdrv_unref(target_bs); - aio_context_release(old_context); return; } - aio_context_release(old_context); - aio_context_acquire(aio_context); - if (set_backing_hd) { if (bdrv_set_backing_hd(target_bs, source, errp) < 0) { goto unref; @@ -1815,22 +1724,14 @@ static void drive_backup_action(DriveBackup *backup, unref: bdrv_unref(target_bs); -out: - aio_context_release(aio_context); } static void drive_backup_commit(void *opaque) { DriveBackupState *state = opaque; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); assert(state->job); job_start(&state->job->job); - - aio_context_release(aio_context); } static void drive_backup_abort(void *opaque) @@ -1845,18 +1746,12 @@ static void drive_backup_abort(void *opaque) static void drive_backup_clean(void *opaque) { g_autofree DriveBackupState *state = opaque; - AioContext *aio_context; if (!state->bs) { return; } - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); - bdrv_drained_end(state->bs); - - aio_context_release(aio_context); } typedef struct BlockdevBackupState { @@ -1881,7 +1776,6 @@ static void blockdev_backup_action(BlockdevBackup *backup, BlockDriverState *bs; BlockDriverState *target_bs; AioContext *aio_context; - AioContext *old_context; int ret; tran_add(tran, &blockdev_backup_drv, state); @@ -1898,17 +1792,12 @@ static void blockdev_backup_action(BlockdevBackup *backup, /* Honor bdrv_try_change_aio_context() context acquisition requirements. 
*/ aio_context = bdrv_get_aio_context(bs); - old_context = bdrv_get_aio_context(target_bs); - aio_context_acquire(old_context); ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); if (ret < 0) { - aio_context_release(old_context); return; } - aio_context_release(old_context); - aio_context_acquire(aio_context); state->bs = bs; /* Paired with .clean() */ @@ -1917,22 +1806,14 @@ static void blockdev_backup_action(BlockdevBackup *backup, state->job = do_backup_common(qapi_BlockdevBackup_base(backup), bs, target_bs, aio_context, block_job_txn, errp); - - aio_context_release(aio_context); } static void blockdev_backup_commit(void *opaque) { BlockdevBackupState *state = opaque; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); assert(state->job); job_start(&state->job->job); - - aio_context_release(aio_context); } static void blockdev_backup_abort(void *opaque) @@ -1947,18 +1828,12 @@ static void blockdev_backup_abort(void *opaque) static void blockdev_backup_clean(void *opaque) { g_autofree BlockdevBackupState *state = opaque; - AioContext *aio_context; if (!state->bs) { return; } - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); - bdrv_drained_end(state->bs); - - aio_context_release(aio_context); } typedef struct BlockDirtyBitmapState { @@ -2378,8 +2253,7 @@ void coroutine_fn qmp_block_resize(const char *device, const char *node_name, } bdrv_graph_co_rdlock(); - if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) { - error_setg(errp, QERR_DEVICE_IN_USE, device); + if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, errp)) { bdrv_graph_co_rdunlock(); return; } @@ -2390,18 +2264,13 @@ void coroutine_fn qmp_block_resize(const char *device, const char *node_name, return; } - bdrv_co_lock(bs); bdrv_drained_begin(bs); - bdrv_co_unlock(bs); old_ctx = bdrv_co_enter(bs); blk_co_truncate(blk, size, false, PREALLOC_MODE_OFF, 0, errp); bdrv_co_leave(bs, old_ctx); - bdrv_co_lock(bs); bdrv_drained_end(bs); - bdrv_co_unlock(bs); - blk_co_unref(blk); } @@ -2409,6 +2278,8 @@ void qmp_block_stream(const char *job_id, const char *device, const char *base, const char *base_node, const char *backing_file, + bool has_backing_mask_protocol, + bool backing_mask_protocol, const char *bottom, bool has_speed, int64_t speed, bool has_on_error, BlockdevOnError on_error, @@ -2444,6 +2315,10 @@ void qmp_block_stream(const char *job_id, const char *device, return; } + if (!has_backing_mask_protocol) { + backing_mask_protocol = false; + } + if (!has_on_error) { on_error = BLOCKDEV_ON_ERROR_REPORT; } @@ -2454,7 +2329,6 @@ void qmp_block_stream(const char *job_id, const char *device, } aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); bdrv_graph_rdlock_main_loop(); if (base) { @@ -2521,7 +2395,7 @@ void qmp_block_stream(const char *job_id, const char *device, if (!base_bs && backing_file) { error_setg(errp, "backing file specified, but streaming the " "entire chain"); - goto out; + return; } if (has_auto_finalize && !auto_finalize) { @@ -2532,22 +2406,19 @@ void qmp_block_stream(const char *job_id, const char *device, } stream_start(job_id, bs, base_bs, backing_file, + backing_mask_protocol, bottom_bs, job_flags, has_speed ? 
speed : 0, on_error, filter_node_name, &local_err); if (local_err) { error_propagate(errp, local_err); - goto out; + return; } trace_qmp_block_stream(bs); - -out: - aio_context_release(aio_context); return; out_rdlock: bdrv_graph_rdunlock_main_loop(); - aio_context_release(aio_context); } void qmp_block_commit(const char *job_id, const char *device, @@ -2556,6 +2427,8 @@ void qmp_block_commit(const char *job_id, const char *device, const char *top_node, const char *top, const char *backing_file, + bool has_backing_mask_protocol, + bool backing_mask_protocol, bool has_speed, int64_t speed, bool has_on_error, BlockdevOnError on_error, const char *filter_node_name, @@ -2586,6 +2459,9 @@ void qmp_block_commit(const char *job_id, const char *device, if (has_auto_dismiss && !auto_dismiss) { job_flags |= JOB_MANUAL_DISMISS; } + if (!has_backing_mask_protocol) { + backing_mask_protocol = false; + } /* Important Note: * libvirt relies on the DeviceNotFound error class in order to probe for @@ -2606,10 +2482,9 @@ void qmp_block_commit(const char *job_id, const char *device, } aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, errp)) { - goto out; + return; } /* default top_bs is the active layer */ @@ -2617,16 +2492,16 @@ void qmp_block_commit(const char *job_id, const char *device, if (top_node && top) { error_setg(errp, "'top-node' and 'top' are mutually exclusive"); - goto out; + return; } else if (top_node) { top_bs = bdrv_lookup_bs(NULL, top_node, errp); if (top_bs == NULL) { - goto out; + return; } if (!bdrv_chain_contains(bs, top_bs)) { error_setg(errp, "'%s' is not in this backing file chain", top_node); - goto out; + return; } } else if (top) { /* This strcmp() is just a shortcut, there is no need to @@ -2640,35 +2515,35 @@ void qmp_block_commit(const char *job_id, const char *device, if (top_bs == NULL) { error_setg(errp, "Top image file %s not found", top ? 
top : "NULL"); - goto out; + return; } assert(bdrv_get_aio_context(top_bs) == aio_context); if (base_node && base) { error_setg(errp, "'base-node' and 'base' are mutually exclusive"); - goto out; + return; } else if (base_node) { base_bs = bdrv_lookup_bs(NULL, base_node, errp); if (base_bs == NULL) { - goto out; + return; } if (!bdrv_chain_contains(top_bs, base_bs)) { error_setg(errp, "'%s' is not in this backing file chain", base_node); - goto out; + return; } } else if (base) { base_bs = bdrv_find_backing_image(top_bs, base); if (base_bs == NULL) { error_setg(errp, "Can't find '%s' in the backing chain", base); - goto out; + return; } } else { base_bs = bdrv_find_base(top_bs); if (base_bs == NULL) { error_setg(errp, "There is no backimg image"); - goto out; + return; } } @@ -2678,14 +2553,14 @@ void qmp_block_commit(const char *job_id, const char *device, iter = bdrv_filter_or_cow_bs(iter)) { if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) { - goto out; + return; } } /* Do not allow attempts to commit an image into itself */ if (top_bs == base_bs) { error_setg(errp, "cannot commit an image into itself"); - goto out; + return; } /* @@ -2708,7 +2583,7 @@ void qmp_block_commit(const char *job_id, const char *device, error_setg(errp, "'backing-file' specified, but 'top' has a " "writer on it"); } - goto out; + return; } if (!job_id) { /* @@ -2724,19 +2599,17 @@ void qmp_block_commit(const char *job_id, const char *device, } else { BlockDriverState *overlay_bs = bdrv_find_overlay(bs, top_bs); if (bdrv_op_is_blocked(overlay_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) { - goto out; + return; } commit_start(job_id, bs, base_bs, top_bs, job_flags, speed, on_error, backing_file, + backing_mask_protocol, filter_node_name, &local_err); } if (local_err != NULL) { error_propagate(errp, local_err); - goto out; + return; } - -out: - aio_context_release(aio_context); } /* Common QMP interface for drive-backup and blockdev-backup */ @@ -2985,8 +2858,6 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, if (replaces) { BlockDriverState *to_replace_bs; - AioContext *aio_context; - AioContext *replace_aio_context; int64_t bs_size, replace_size; bs_size = bdrv_getlength(bs); @@ -3000,19 +2871,7 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, return; } - aio_context = bdrv_get_aio_context(bs); - replace_aio_context = bdrv_get_aio_context(to_replace_bs); - /* - * bdrv_getlength() is a co-wrapper and uses AIO_WAIT_WHILE. Be sure not - * to acquire the same AioContext twice. 
- */ - if (replace_aio_context != aio_context) { - aio_context_acquire(replace_aio_context); - } replace_size = bdrv_getlength(to_replace_bs); - if (replace_aio_context != aio_context) { - aio_context_release(replace_aio_context); - } if (replace_size < 0) { error_setg_errno(errp, -replace_size, @@ -3041,7 +2900,6 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) BlockDriverState *bs; BlockDriverState *target_backing_bs, *target_bs; AioContext *aio_context; - AioContext *old_context; BlockMirrorBackingMode backing_mode; Error *local_err = NULL; QDict *options = NULL; @@ -3064,7 +2922,6 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) } aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); if (!arg->has_mode) { arg->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; @@ -3088,14 +2945,14 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) size = bdrv_getlength(bs); if (size < 0) { error_setg_errno(errp, -size, "bdrv_getlength failed"); - goto out; + return; } if (arg->replaces) { if (!arg->node_name) { error_setg(errp, "a node-name must be provided when replacing a" " named node of the graph"); - goto out; + return; } } @@ -3143,7 +3000,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) if (local_err) { error_propagate(errp, local_err); - goto out; + return; } options = qdict_new(); @@ -3153,15 +3010,11 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) if (format) { qdict_put_str(options, "driver", format); } - aio_context_release(aio_context); /* Mirroring takes care of copy-on-write using the source's backing * file. */ - aio_context_acquire(qemu_get_aio_context()); target_bs = bdrv_open(arg->target, NULL, options, flags, errp); - aio_context_release(qemu_get_aio_context()); - if (!target_bs) { return; } @@ -3173,20 +3026,12 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) bdrv_graph_rdunlock_main_loop(); - /* Honor bdrv_try_change_aio_context() context acquisition requirements. */ - old_context = bdrv_get_aio_context(target_bs); - aio_context_acquire(old_context); - ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); if (ret < 0) { bdrv_unref(target_bs); - aio_context_release(old_context); return; } - aio_context_release(old_context); - aio_context_acquire(aio_context); - blockdev_mirror_common(arg->job_id, bs, target_bs, arg->replaces, arg->sync, backing_mode, zero_target, @@ -3202,8 +3047,6 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) arg->has_auto_dismiss, arg->auto_dismiss, errp); bdrv_unref(target_bs); -out: - aio_context_release(aio_context); } void qmp_blockdev_mirror(const char *job_id, @@ -3226,7 +3069,6 @@ void qmp_blockdev_mirror(const char *job_id, BlockDriverState *bs; BlockDriverState *target_bs; AioContext *aio_context; - AioContext *old_context; BlockMirrorBackingMode backing_mode = MIRROR_LEAVE_BACKING_CHAIN; bool zero_target; int ret; @@ -3243,18 +3085,11 @@ void qmp_blockdev_mirror(const char *job_id, zero_target = (sync == MIRROR_SYNC_MODE_FULL); - /* Honor bdrv_try_change_aio_context() context acquisition requirements. 
*/ - old_context = bdrv_get_aio_context(target_bs); aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(old_context); ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); - - aio_context_release(old_context); - aio_context_acquire(aio_context); - if (ret < 0) { - goto out; + return; } blockdev_mirror_common(job_id, bs, target_bs, @@ -3269,8 +3104,6 @@ void qmp_blockdev_mirror(const char *job_id, has_auto_finalize, auto_finalize, has_auto_dismiss, auto_dismiss, errp); -out: - aio_context_release(aio_context); } /* @@ -3433,7 +3266,6 @@ void qmp_change_backing_file(const char *device, Error **errp) { BlockDriverState *bs = NULL; - AioContext *aio_context; BlockDriverState *image_bs = NULL; Error *local_err = NULL; bool ro; @@ -3444,9 +3276,6 @@ void qmp_change_backing_file(const char *device, return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - bdrv_graph_rdlock_main_loop(); image_bs = bdrv_lookup_bs(NULL, image_node_name, &local_err); @@ -3485,7 +3314,7 @@ void qmp_change_backing_file(const char *device, if (ro) { if (bdrv_reopen_set_read_only(image_bs, false, errp) != 0) { - goto out; + return; } } @@ -3503,14 +3332,10 @@ void qmp_change_backing_file(const char *device, if (ro) { bdrv_reopen_set_read_only(image_bs, true, errp); } - -out: - aio_context_release(aio_context); return; out_rdlock: bdrv_graph_rdunlock_main_loop(); - aio_context_release(aio_context); } void qmp_blockdev_add(BlockdevOptions *options, Error **errp) @@ -3550,7 +3375,6 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp) for (; reopen_list != NULL; reopen_list = reopen_list->next) { BlockdevOptions *options = reopen_list->value; BlockDriverState *bs; - AioContext *ctx; QObject *obj; Visitor *v; QDict *qdict; @@ -3578,12 +3402,7 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp) qdict_flatten(qdict); - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); - queue = bdrv_reopen_queue(queue, bs, qdict, false); - - aio_context_release(ctx); } /* Perform the reopen operation */ @@ -3596,7 +3415,6 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp) void qmp_blockdev_del(const char *node_name, Error **errp) { - AioContext *aio_context; BlockDriverState *bs; GLOBAL_STATE_CODE(); @@ -3611,30 +3429,25 @@ void qmp_blockdev_del(const char *node_name, Error **errp) error_setg(errp, "Node %s is in use", node_name); return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, errp)) { - goto out; + return; } if (!QTAILQ_IN_USE(bs, monitor_list)) { error_setg(errp, "Node %s is not owned by the monitor", bs->node_name); - goto out; + return; } if (bs->refcnt > 1) { error_setg(errp, "Block device %s is in use", bdrv_get_device_or_node_name(bs)); - goto out; + return; } QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list); bdrv_unref(bs); - -out: - aio_context_release(aio_context); } static BdrvChild * GRAPH_RDLOCK @@ -3657,7 +3470,7 @@ void qmp_x_blockdev_change(const char *parent, const char *child, BlockDriverState *parent_bs, *new_bs = NULL; BdrvChild *p_child; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); parent_bs = bdrv_lookup_bs(parent, parent, errp); if (!parent_bs) { @@ -3693,7 +3506,7 @@ void qmp_x_blockdev_change(const char *parent, const char *child, } out: - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); } BlockJobInfoList *qmp_query_block_jobs(Error **errp) @@ -3724,7 +3537,6 @@ 
BlockJobInfoList *qmp_query_block_jobs(Error **errp) void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, bool has_force, bool force, Error **errp) { - AioContext *old_context; AioContext *new_context; BlockDriverState *bs; @@ -3756,12 +3568,7 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, new_context = qemu_get_aio_context(); } - old_context = bdrv_get_aio_context(bs); - aio_context_acquire(old_context); - bdrv_try_change_aio_context(bs, new_context, NULL, errp); - - aio_context_release(old_context); } QemuOptsList qemu_common_drive_opts = { diff --git a/blockjob.c b/blockjob.c index b7a29052b94..d5f29e14af2 100644 --- a/blockjob.c +++ b/blockjob.c @@ -198,9 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job) * one to make sure that such a concurrent access does not attempt * to process an already freed BdrvChild. */ - aio_context_release(job->job.aio_context); - bdrv_graph_wrlock(NULL); - aio_context_acquire(job->job.aio_context); + bdrv_graph_wrlock(); while (job->nodes) { GSList *l = job->nodes; BdrvChild *c = l->data; @@ -212,7 +210,7 @@ void block_job_remove_all_bdrv(BlockJob *job) g_slist_free_1(l); } - bdrv_graph_wrunlock_ctx(job->job.aio_context); + bdrv_graph_wrunlock(); } bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs) @@ -234,28 +232,12 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, uint64_t perm, uint64_t shared_perm, Error **errp) { BdrvChild *c; - AioContext *ctx = bdrv_get_aio_context(bs); - bool need_context_ops; GLOBAL_STATE_CODE(); bdrv_ref(bs); - need_context_ops = ctx != job->job.aio_context; - - if (need_context_ops) { - if (job->job.aio_context != qemu_get_aio_context()) { - aio_context_release(job->job.aio_context); - } - aio_context_acquire(ctx); - } c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job, errp); - if (need_context_ops) { - aio_context_release(ctx); - if (job->job.aio_context != qemu_get_aio_context()) { - aio_context_acquire(job->job.aio_context); - } - } if (c == NULL) { return -EPERM; } @@ -514,7 +496,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, int ret; GLOBAL_STATE_CODE(); - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); if (job_id == NULL && !(flags & JOB_INTERNAL)) { job_id = bdrv_get_device_name(bs); @@ -523,7 +505,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs), flags, cb, opaque, errp); if (job == NULL) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); return NULL; } @@ -563,11 +545,11 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, goto fail; } - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); return job; fail: - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); job_early_fail(&job->job); return NULL; } diff --git a/bsd-user/bsd-file.h b/bsd-user/bsd-file.h index 3c00dc00567..6fa2c30b4de 100644 --- a/bsd-user/bsd-file.h +++ b/bsd-user/bsd-file.h @@ -641,7 +641,7 @@ static abi_long do_bsd_readlink(CPUArchState *env, abi_long arg1, } if (strcmp(p1, "/proc/curproc/file") == 0) { CPUState *cpu = env_cpu(env); - TaskState *ts = (TaskState *)cpu->opaque; + TaskState *ts = get_task_state(cpu); strncpy(p2, ts->bprm->fullpath, arg3); ret = MIN((abi_long)strlen(ts->bprm->fullpath), arg3); } else { diff --git a/bsd-user/freebsd/os-proc.h b/bsd-user/freebsd/os-proc.h index d6418780344..3003c8cb637 100644 --- a/bsd-user/freebsd/os-proc.h +++ 
b/bsd-user/freebsd/os-proc.h @@ -208,7 +208,7 @@ static inline abi_long do_freebsd_fork(void *cpu_env) */ set_second_rval(cpu_env, child_flag); - fork_end(child_flag); + fork_end(ret); return ret; } @@ -252,7 +252,7 @@ static inline abi_long do_freebsd_rfork(void *cpu_env, abi_long flags) * value: 0 for parent process, 1 for child process. */ set_second_rval(cpu_env, child_flag); - fork_end(child_flag); + fork_end(ret); return ret; @@ -285,7 +285,7 @@ static inline abi_long do_freebsd_pdfork(void *cpu_env, abi_ulong target_fdp, * value: 0 for parent process, 1 for child process. */ set_second_rval(cpu_env, child_flag); - fork_end(child_flag); + fork_end(ret); return ret; } diff --git a/bsd-user/main.c b/bsd-user/main.c index e6014f517ee..01b313756e3 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -49,6 +49,13 @@ #include "host-os.h" #include "target_arch_cpu.h" + +/* + * TODO: Remove these and rely only on qemu_real_host_page_size(). + */ +uintptr_t qemu_host_page_size; +intptr_t qemu_host_page_mask; + static bool opt_one_insn_per_tb; uintptr_t guest_base; bool have_guest_base; @@ -106,10 +113,13 @@ void fork_start(void) start_exclusive(); cpu_list_lock(); mmap_fork_start(); + gdbserver_fork_start(); } -void fork_end(int child) +void fork_end(pid_t pid) { + bool child = pid == 0; + if (child) { CPUState *cpu, *next_cpu; /* @@ -127,10 +137,12 @@ void fork_end(int child) * state, so we don't need to end_exclusive() here. */ qemu_init_cpu_list(); - gdbserver_fork(thread_cpu); + get_task_state(thread_cpu)->ts_tid = qemu_get_thread_id(); + gdbserver_fork_end(thread_cpu, pid); } else { mmap_fork_end(child); cpu_list_unlock(); + gdbserver_fork_end(thread_cpu, pid); end_exclusive(); } } @@ -163,7 +175,6 @@ static void usage(void) " (use '-d help' for a list of log items)\n" "-D logfile write logs to 'logfile' (default stderr)\n" "-one-insn-per-tb run with one guest instruction per emulated TB\n" - "-singlestep deprecated synonym for -one-insn-per-tb\n" "-strace log system calls\n" "-trace [[enable=]][,events=][,file=]\n" " specify tracing options\n" @@ -308,6 +319,9 @@ int main(int argc, char **argv) (void) envlist_setenv(envlist, *wrk); } + qemu_host_page_size = getpagesize(); + qemu_host_page_size = MAX(qemu_host_page_size, TARGET_PAGE_SIZE); + cpu_model = NULL; qemu_add_opts(&qemu_trace_opts); @@ -365,11 +379,12 @@ int main(int argc, char **argv) } else if (!strcmp(r, "L")) { interp_prefix = argv[optind++]; } else if (!strcmp(r, "p")) { - qemu_host_page_size = atoi(argv[optind++]); - if (qemu_host_page_size == 0 || - (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) { - fprintf(stderr, "page size must be a power of two\n"); - exit(1); + unsigned size, want = qemu_real_host_page_size(); + + r = argv[optind++]; + if (qemu_strtoui(r, NULL, 10, &size) || size != want) { + warn_report("Deprecated page size option cannot " + "change host page size (%u)", want); } } else if (!strcmp(r, "g")) { gdbstub = g_strdup(argv[optind++]); @@ -378,10 +393,7 @@ int main(int argc, char **argv) } else if (!strcmp(r, "cpu")) { cpu_model = argv[optind++]; if (is_help_option(cpu_model)) { - /* XXX: implement xxx_cpu_list for targets that still miss it */ -#if defined(cpu_list) - cpu_list(); -#endif + list_cpus(); exit(1); } } else if (!strcmp(r, "B")) { @@ -394,7 +406,7 @@ int main(int argc, char **argv) (void) envlist_unsetenv(envlist, "LD_PRELOAD"); } else if (!strcmp(r, "seed")) { seed_optarg = optarg; - } else if (!strcmp(r, "singlestep") || !strcmp(r, "one-insn-per-tb")) { + } else if (!strcmp(r, 
"one-insn-per-tb")) { opt_one_insn_per_tb = true; } else if (!strcmp(r, "strace")) { do_strace = 1; @@ -407,6 +419,8 @@ int main(int argc, char **argv) } } + qemu_host_page_mask = -qemu_host_page_size; + /* init debug */ { int mask = 0; @@ -592,7 +606,7 @@ int main(int argc, char **argv) if (gdbstub) { gdbserver_start(gdbstub); - gdb_handlesig(cpu, 0); + gdb_handlesig(cpu, 0, NULL, NULL, 0); } cpu_loop(env); /* never exits */ diff --git a/bsd-user/meson.build b/bsd-user/meson.build index c6bfd3b2b53..39bad0ae33e 100644 --- a/bsd-user/meson.build +++ b/bsd-user/meson.build @@ -24,6 +24,6 @@ kvm = cc.find_library('kvm', required: true) bsd_user_ss.add(elf, procstat, kvm) # Pull in the OS-specific build glue, if any -subdir(targetos) +subdir(host_os) specific_ss.add_all(when: 'CONFIG_BSD_USER', if_true: bsd_user_ss) diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h index dc842fffa7d..1b0a591d2d2 100644 --- a/bsd-user/qemu.h +++ b/bsd-user/qemu.h @@ -39,6 +39,13 @@ extern char **environ; #include "qemu/clang-tsa.h" #include "qemu-os.h" +/* + * TODO: Remove these and rely only on qemu_real_host_page_size(). + */ +extern uintptr_t qemu_host_page_size; +extern intptr_t qemu_host_page_mask; +#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size) + /* * This struct is used to hold certain information about the image. Basically, * it replicates in user space what would be certain task_struct fields in the @@ -110,6 +117,11 @@ typedef struct TaskState { struct target_sigaltstack sigaltstack_used; } __attribute__((aligned(16))) TaskState; +static inline TaskState *get_task_state(CPUState *cs) +{ + return cs->opaque; +} + void stop_all_tasks(void); extern const char *interp_prefix; extern const char *qemu_uname_release; @@ -180,7 +192,7 @@ void cpu_loop(CPUArchState *env); char *target_strerror(int err); int get_osversion(void); void fork_start(void); -void fork_end(int child); +void fork_end(pid_t pid); #include "qemu/log.h" diff --git a/bsd-user/signal.c b/bsd-user/signal.c index ca31470772f..e5a773dddee 100644 --- a/bsd-user/signal.c +++ b/bsd-user/signal.c @@ -27,6 +27,9 @@ #include "hw/core/tcg-cpu-ops.h" #include "host-signal.h" +/* target_siginfo_t must fit in gdbstub's siginfo save area. 
*/ +QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH); + static struct target_sigaction sigact_table[TARGET_NSIG]; static void host_signal_handler(int host_sig, siginfo_t *info, void *puc); static void target_to_host_sigset_internal(sigset_t *d, @@ -319,7 +322,7 @@ void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info) int block_signals(void) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); sigset_t set; /* @@ -359,7 +362,7 @@ void dump_core_and_abort(int target_sig) { CPUState *cpu = thread_cpu; CPUArchState *env = cpu_env(cpu); - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); int core_dumped = 0; int host_sig; struct sigaction act; @@ -421,7 +424,7 @@ void queue_signal(CPUArchState *env, int sig, int si_type, target_siginfo_t *info) { CPUState *cpu = env_cpu(env); - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); trace_user_queue_signal(env, sig); @@ -463,20 +466,19 @@ static int fatal_signal(int sig) void force_sig_fault(int sig, int code, abi_ulong addr) { CPUState *cpu = thread_cpu; - CPUArchState *env = cpu_env(cpu); target_siginfo_t info = {}; info.si_signo = sig; info.si_errno = 0; info.si_code = code; info.si_addr = addr; - queue_signal(env, sig, QEMU_SI_FAULT, &info); + queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info); } static void host_signal_handler(int host_sig, siginfo_t *info, void *puc) { CPUState *cpu = thread_cpu; - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); target_siginfo_t tinfo; ucontext_t *uc = puc; struct emulated_sigtable *k; @@ -585,7 +587,7 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc) /* compare to kern/kern_sig.c sys_sigaltstack() and kern_sigaltstack() */ abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); int ret; target_stack_t oss; @@ -714,7 +716,7 @@ int do_sigaction(int sig, const struct target_sigaction *act, static inline abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env, size_t frame_size) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); abi_ulong sp; /* Use default user stack */ @@ -789,7 +791,7 @@ static int reset_signal_mask(target_ucontext_t *ucontext) int i; sigset_t blocked; target_sigset_t target_set; - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); for (i = 0; i < TARGET_NSIG_WORDS; i++) { __get_user(target_set.__bits[i], &ucontext->uc_sigmask.__bits[i]); @@ -839,7 +841,7 @@ long do_sigreturn(CPUArchState *env, abi_ulong addr) void signal_init(void) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); struct sigaction act; struct sigaction oact; int i; @@ -878,7 +880,7 @@ static void handle_pending_signal(CPUArchState *env, int sig, struct emulated_sigtable *k) { CPUState *cpu = env_cpu(env); - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); struct target_sigaction *sa; int code; sigset_t set; @@ -890,7 +892,7 @@ static void handle_pending_signal(CPUArchState *env, int sig, k->pending = 0; - sig = gdb_handlesig(cpu, sig); + sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info)); if (!sig) { sa = NULL; handler = TARGET_SIG_IGN; @@ -967,7 +969,7 @@ void process_pending_signals(CPUArchState *env) int sig; sigset_t *blocked_set, set; struct 
emulated_sigtable *k; - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); while (qatomic_read(&ts->signal_pending)) { sigfillset(&set); @@ -1022,7 +1024,7 @@ void process_pending_signals(CPUArchState *env) void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, MMUAccessType access_type, bool maperr, uintptr_t ra) { - const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; if (tcg_ops->record_sigsegv) { tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra); @@ -1038,7 +1040,7 @@ void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, MMUAccessType access_type, uintptr_t ra) { - const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; if (tcg_ops->record_sigbus) { tcg_ops->record_sigbus(cpu, addr, access_type, ra); diff --git a/chardev/char-fe.c b/chardev/char-fe.c index 7789f7be9c8..66cee8475ac 100644 --- a/chardev/char-fe.c +++ b/chardev/char-fe.c @@ -199,26 +199,27 @@ bool qemu_chr_fe_init(CharBackend *b, Chardev *s, Error **errp) MuxChardev *d = MUX_CHARDEV(s); if (d->mux_cnt >= MAX_MUX) { - goto unavailable; + error_setg(errp, + "too many uses of multiplexed chardev '%s'" + " (maximum is " stringify(MAX_MUX) ")", + s->label); + return false; } d->backends[d->mux_cnt] = b; tag = d->mux_cnt++; } else if (s->be) { - goto unavailable; + error_setg(errp, "chardev '%s' is already in use", s->label); + return false; } else { s->be = b; } } - b->fe_open = false; + b->fe_is_open = false; b->tag = tag; b->chr = s; return true; - -unavailable: - error_setg(errp, QERR_DEVICE_IN_USE, s->label); - return false; } void qemu_chr_fe_deinit(CharBackend *b, bool del) @@ -257,7 +258,7 @@ void qemu_chr_fe_set_handlers_full(CharBackend *b, bool sync_state) { Chardev *s; - int fe_open; + bool fe_open; s = b->chr; if (!s) { @@ -265,10 +266,10 @@ void qemu_chr_fe_set_handlers_full(CharBackend *b, } if (!opaque && !fd_can_read && !fd_read && !fd_event) { - fe_open = 0; + fe_open = false; remove_fd_in_watch(s); } else { - fe_open = 1; + fe_open = true; } b->chr_can_read = fd_can_read; b->chr_read = fd_read; @@ -336,7 +337,7 @@ void qemu_chr_fe_set_echo(CharBackend *be, bool echo) } } -void qemu_chr_fe_set_open(CharBackend *be, int fe_open) +void qemu_chr_fe_set_open(CharBackend *be, bool is_open) { Chardev *chr = be->chr; @@ -344,12 +345,12 @@ void qemu_chr_fe_set_open(CharBackend *be, int fe_open) return; } - if (be->fe_open == fe_open) { + if (be->fe_is_open == is_open) { return; } - be->fe_open = fe_open; + be->fe_is_open = is_open; if (CHARDEV_GET_CLASS(chr)->chr_set_fe_open) { - CHARDEV_GET_CLASS(chr)->chr_set_fe_open(chr, fe_open); + CHARDEV_GET_CLASS(chr)->chr_set_fe_open(chr, is_open); } } diff --git a/chardev/char-io.c b/chardev/char-io.c index 4451128cba5..dab77b112e3 100644 --- a/chardev/char-io.c +++ b/chardev/char-io.c @@ -33,6 +33,7 @@ typedef struct IOWatchPoll { IOCanReadHandler *fd_can_read; GSourceFunc fd_read; void *opaque; + GMainContext *context; } IOWatchPoll; static IOWatchPoll *io_watch_poll_from_source(GSource *source) @@ -50,28 +51,59 @@ static gboolean io_watch_poll_prepare(GSource *source, return FALSE; } + /* + * We do not register the QIOChannel watch as a child GSource. + * The 'prepare' function on the parent GSource will be + * skipped if a child GSource's 'prepare' function indicates + * readiness. 
We need this prepare function be guaranteed + * to run on *every* iteration of the main loop, because + * it is critical to ensure we remove the QIOChannel watch + * if 'fd_can_read' indicates the frontend cannot receive + * more data. + */ if (now_active) { iwp->src = qio_channel_create_watch( iwp->ioc, G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL); g_source_set_callback(iwp->src, iwp->fd_read, iwp->opaque, NULL); - g_source_add_child_source(source, iwp->src); - g_source_unref(iwp->src); + g_source_attach(iwp->src, iwp->context); } else { - g_source_remove_child_source(source, iwp->src); + g_source_destroy(iwp->src); + g_source_unref(iwp->src); iwp->src = NULL; } return FALSE; } +static gboolean io_watch_poll_check(GSource *source) +{ + return FALSE; +} + static gboolean io_watch_poll_dispatch(GSource *source, GSourceFunc callback, gpointer user_data) { - return G_SOURCE_CONTINUE; + abort(); +} + +static void io_watch_poll_finalize(GSource *source) +{ + /* + * Due to a glib bug, removing the last reference to a source + * inside a finalize callback causes recursive locking (and a + * deadlock). This is not a problem inside other callbacks, + * including dispatch callbacks, so we call io_remove_watch_poll + * to remove this source. At this point, iwp->src must + * be NULL, or we would leak it. + */ + IOWatchPoll *iwp = io_watch_poll_from_source(source); + assert(iwp->src == NULL); } static GSourceFuncs io_watch_poll_funcs = { .prepare = io_watch_poll_prepare, + .check = io_watch_poll_check, .dispatch = io_watch_poll_dispatch, + .finalize = io_watch_poll_finalize, }; GSource *io_add_watch_poll(Chardev *chr, @@ -91,6 +123,7 @@ GSource *io_add_watch_poll(Chardev *chr, iwp->ioc = ioc; iwp->fd_read = (GSourceFunc) fd_read; iwp->src = NULL; + iwp->context = context; name = g_strdup_printf("chardev-iowatch-%s", chr->label); g_source_set_name((GSource *)iwp, name); @@ -101,10 +134,23 @@ GSource *io_add_watch_poll(Chardev *chr, return (GSource *)iwp; } +static void io_remove_watch_poll(GSource *source) +{ + IOWatchPoll *iwp; + + iwp = io_watch_poll_from_source(source); + if (iwp->src) { + g_source_destroy(iwp->src); + g_source_unref(iwp->src); + iwp->src = NULL; + } + g_source_destroy(&iwp->parent); +} + void remove_fd_in_watch(Chardev *chr) { if (chr->gsource) { - g_source_destroy(chr->gsource); + io_remove_watch_poll(chr->gsource); chr->gsource = NULL; } } diff --git a/chardev/char-parallel.c b/chardev/char-parallel.c index a5164f975af..78697d7522d 100644 --- a/chardev/char-parallel.c +++ b/chardev/char-parallel.c @@ -164,13 +164,13 @@ static void qemu_chr_open_pp_fd(Chardev *chr, { ParallelChardev *drv = PARALLEL_CHARDEV(chr); + drv->fd = fd; + if (ioctl(fd, PPCLAIM) < 0) { error_setg_errno(errp, errno, "not a parallel port"); - close(fd); return; } - drv->fd = fd; drv->mode = IEEE1284_MODE_COMPAT; } #endif /* __linux__ */ @@ -238,6 +238,7 @@ static void qemu_chr_open_pp_fd(Chardev *chr, } #endif +#ifdef HAVE_CHARDEV_PARALLEL static void qmp_chardev_open_parallel(Chardev *chr, ChardevBackend *backend, bool *be_opened, @@ -306,3 +307,5 @@ static void register_types(void) } type_init(register_types); + +#endif /* HAVE_CHARDEV_PARALLEL */ diff --git a/chardev/char-socket.c b/chardev/char-socket.c index 034840593d7..2c4dffc0e6f 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -378,6 +378,10 @@ static void tcp_chr_free_connection(Chardev *chr) char_socket_yank_iochannel, QIO_CHANNEL(s->sioc)); } + + if (s->ioc) { + qio_channel_close(s->ioc, NULL); + } object_unref(OBJECT(s->sioc)); 
s->sioc = NULL; object_unref(OBJECT(s->ioc)); @@ -597,6 +601,22 @@ static void update_ioc_handlers(SocketChardev *s) remove_hup_source(s); s->hup_source = qio_channel_create_watch(s->ioc, G_IO_HUP); + /* + * poll() is liable to return POLLHUP even when there is + * still incoming data available to read on the FD. If + * we have the hup_source at the same priority as the + * main io_add_watch_poll GSource, then we might end up + * processing the POLLHUP event first, closing the FD, + * and as a result silently discard data we should have + * read. + * + * By setting the hup_source to G_PRIORITY_DEFAULT + 1, + * we ensure that io_add_watch_poll GSource will always + * be dispatched first, thus guaranteeing we will be + * able to process all incoming data before closing the + * FD + */ + g_source_set_priority(s->hup_source, G_PRIORITY_DEFAULT + 1); g_source_set_callback(s->hup_source, (GSourceFunc)tcp_chr_hup, chr, NULL); g_source_attach(s->hup_source, chr->gcontext); @@ -1504,7 +1524,7 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend, }; } else { addr->type = SOCKET_ADDRESS_TYPE_FD; - addr->u.fd.data = g_new(String, 1); + addr->u.fd.data = g_new(FdSocketAddress, 1); addr->u.fd.data->str = g_strdup(fd); } sock->addr = addr; diff --git a/chardev/char.c b/chardev/char.c index 119b5487841..3c43fb1278f 100644 --- a/chardev/char.c +++ b/chardev/char.c @@ -171,6 +171,18 @@ int qemu_chr_write(Chardev *s, const uint8_t *buf, int len, bool write_all) return res; } + if (replay_mode == REPLAY_MODE_RECORD) { + /* + * When recording we don't want temporary conditions to + * perturb the result. By ensuring we write everything we can + * while recording we avoid playback being out of sync if it + * doesn't encounter the same temporary conditions (usually + * triggered by external programs not reading the chardev fast + * enough and pipes filling up). 
+ */ + write_all = true; + } + res = qemu_chr_write_buffer(s, buf, len, &offset, write_all); if (qemu_chr_replay(s) && replay_mode == REPLAY_MODE_RECORD) { @@ -750,7 +762,7 @@ static int qmp_query_chardev_foreach(Object *obj, void *data) value->label = g_strdup(chr->label); value->filename = g_strdup(chr->filename); - value->frontend_open = chr->be && chr->be->fe_open; + value->frontend_open = chr->be && chr->be->fe_is_open; QAPI_LIST_PREPEND(*list, value); diff --git a/chardev/meson.build b/chardev/meson.build index 6d56ad32fdb..70070a8279a 100644 --- a/chardev/meson.build +++ b/chardev/meson.build @@ -12,21 +12,21 @@ chardev_ss.add(files( 'char-udp.c', 'char.c', )) -chardev_ss.add(when: 'CONFIG_POSIX', if_true: [files( - 'char-fd.c', - 'char-pty.c', -), util]) -if targetos in ['linux', 'gnu/kfreebsd', 'freebsd', 'dragonfly'] - chardev_ss.add(files('char-parallel.c')) +if host_os == 'windows' + chardev_ss.add(files( + 'char-console.c', + 'char-win-stdio.c', + 'char-win.c', + )) +else + chardev_ss.add(files( + 'char-fd.c', + 'char-parallel.c', + 'char-pty.c', + ), util) endif -chardev_ss.add(when: 'CONFIG_WIN32', if_true: files( - 'char-console.c', - 'char-win-stdio.c', - 'char-win.c', -)) - -chardev_ss = chardev_ss.apply(config_targetos, strict: false) +chardev_ss = chardev_ss.apply({}) system_ss.add(files( 'char-hmp-cmds.c', diff --git a/configs/devices/arm-softmmu/default.mak b/configs/devices/arm-softmmu/default.mak index 980c48a7d99..6ee31bc1ab9 100644 --- a/configs/devices/arm-softmmu/default.mak +++ b/configs/devices/arm-softmmu/default.mak @@ -13,12 +13,14 @@ CONFIG_ARM_VIRT=y # CONFIG_INTEGRATOR=n # CONFIG_FSL_IMX31=n # CONFIG_MUSICPAL=n +# CONFIG_MPS3R=n # CONFIG_MUSCA=n # CONFIG_CHEETAH=n # CONFIG_SX1=n # CONFIG_NSERIES=n # CONFIG_STELLARIS=n # CONFIG_STM32VLDISCOVERY=n +# CONFIG_B_L475E_IOT01A=n # CONFIG_REALVIEW=n # CONFIG_VERSATILE=n # CONFIG_VEXPRESS=n diff --git a/configs/devices/m68k-softmmu/default.mak b/configs/devices/m68k-softmmu/default.mak index 7f8619e4278..8dcaa28ed38 100644 --- a/configs/devices/m68k-softmmu/default.mak +++ b/configs/devices/m68k-softmmu/default.mak @@ -1,7 +1,5 @@ # Default configuration for m68k-softmmu -CONFIG_SEMIHOSTING=y - # Boards: # CONFIG_AN5206=y diff --git a/configs/devices/mips-softmmu/common.mak b/configs/devices/mips-softmmu/common.mak index 7da99327a77..416a5d353e8 100644 --- a/configs/devices/mips-softmmu/common.mak +++ b/configs/devices/mips-softmmu/common.mak @@ -1,31 +1,8 @@ # Common mips*-softmmu CONFIG defines -# CONFIG_SEMIHOSTING is always required on this architecture -CONFIG_SEMIHOSTING=y +# Uncomment the following lines to disable these optional devices: +# CONFIG_PCI_DEVICES=n +# CONFIG_TEST_DEVICES=n -CONFIG_ISA_BUS=y -CONFIG_PCI=y -CONFIG_PCI_DEVICES=y -CONFIG_VGA_ISA=y -CONFIG_VGA_MMIO=y -CONFIG_VGA_CIRRUS=y -CONFIG_VMWARE_VGA=y -CONFIG_SERIAL=y -CONFIG_SERIAL_ISA=y -CONFIG_PARALLEL=y -CONFIG_I8254=y -CONFIG_PCSPK=y -CONFIG_PCKBD=y -CONFIG_FDC=y -CONFIG_I8257=y -CONFIG_IDE_ISA=y -CONFIG_PFLASH_CFI01=y -CONFIG_I8259=y -CONFIG_MC146818RTC=y -CONFIG_MIPS_CPS=y -CONFIG_MIPS_ITU=y CONFIG_MALTA=y -CONFIG_PCNET_PCI=y CONFIG_MIPSSIM=y -CONFIG_SMBUS_EEPROM=y -CONFIG_TEST_DEVICES=y diff --git a/configs/devices/mips64el-softmmu/default.mak b/configs/devices/mips64el-softmmu/default.mak index d5188f7ea58..88a37cf27f1 100644 --- a/configs/devices/mips64el-softmmu/default.mak +++ b/configs/devices/mips64el-softmmu/default.mak @@ -3,8 +3,5 @@ include ../mips-softmmu/common.mak CONFIG_FULOONG=y CONFIG_LOONGSON3V=y -CONFIG_ATI_VGA=y 
-CONFIG_RTL8139_PCI=y CONFIG_JAZZ=y -CONFIG_VT82C686=y CONFIG_MIPS_BOSTON=y diff --git a/configs/devices/nios2-softmmu/default.mak b/configs/devices/nios2-softmmu/default.mak index 1bc4082ea99..e130d024e62 100644 --- a/configs/devices/nios2-softmmu/default.mak +++ b/configs/devices/nios2-softmmu/default.mak @@ -1,7 +1,5 @@ # Default configuration for nios2-softmmu -CONFIG_SEMIHOSTING=y - # Boards: # CONFIG_NIOS2_10M50=y diff --git a/configs/devices/riscv32-softmmu/default.mak b/configs/devices/riscv32-softmmu/default.mak index d847bd5692e..94a236c9c25 100644 --- a/configs/devices/riscv32-softmmu/default.mak +++ b/configs/devices/riscv32-softmmu/default.mak @@ -3,8 +3,6 @@ # Uncomment the following lines to disable these optional devices: # #CONFIG_PCI_DEVICES=n -CONFIG_SEMIHOSTING=y -CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y # Boards: # diff --git a/configs/devices/riscv64-softmmu/default.mak b/configs/devices/riscv64-softmmu/default.mak index bc69301fa4a..3f680594484 100644 --- a/configs/devices/riscv64-softmmu/default.mak +++ b/configs/devices/riscv64-softmmu/default.mak @@ -3,8 +3,6 @@ # Uncomment the following lines to disable these optional devices: # #CONFIG_PCI_DEVICES=n -CONFIG_SEMIHOSTING=y -CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y # Boards: # diff --git a/configs/devices/xtensa-softmmu/default.mak b/configs/devices/xtensa-softmmu/default.mak index 4fe1bf00c94..49e4c9da88c 100644 --- a/configs/devices/xtensa-softmmu/default.mak +++ b/configs/devices/xtensa-softmmu/default.mak @@ -1,7 +1,5 @@ # Default configuration for Xtensa -CONFIG_SEMIHOSTING=y - # Boards: # CONFIG_XTENSA_SIM=y diff --git a/configure b/configure index 163729c3ec9..3cd736b139f 100755 --- a/configure +++ b/configure @@ -334,30 +334,30 @@ EOF } if check_define __linux__ ; then - targetos=linux + host_os=linux elif check_define _WIN32 ; then - targetos=windows + host_os=windows elif check_define __OpenBSD__ ; then - targetos=openbsd + host_os=openbsd elif check_define __sun__ ; then - targetos=sunos + host_os=sunos elif check_define __HAIKU__ ; then - targetos=haiku + host_os=haiku elif check_define __FreeBSD__ ; then - targetos=freebsd + host_os=freebsd elif check_define __FreeBSD_kernel__ && check_define __GLIBC__; then - targetos=gnu/kfreebsd + host_os=gnu/kfreebsd elif check_define __DragonFly__ ; then - targetos=dragonfly + host_os=dragonfly elif check_define __NetBSD__; then - targetos=netbsd + host_os=netbsd elif check_define __APPLE__; then - targetos=darwin + host_os=darwin else # This is a fatal error, but don't report it yet, because we # might be going to just print the --help text, or it might # be the result of a missing compiler. - targetos=bogus + host_os=bogus fi if test ! 
-z "$cpu" ; then @@ -445,6 +445,7 @@ case "$cpu" in loongarch*) cpu=loongarch64 host_arch=loongarch64 + linux_arch=loongarch ;; mips64*) @@ -573,13 +574,13 @@ do fi done -if test "$targetos" = "windows" ; then +if test "$host_os" = "windows" ; then EXESUF=".exe" fi meson_option_build_array() { printf '[' - (if test "$targetos" = windows; then + (if test "$host_os" = windows; then IFS=\; else IFS=: @@ -802,7 +803,7 @@ mak_wilds="" if [ -n "$host_arch" ] && [ -d "$source_path/common-user/host/$host_arch" ]; then if [ "$linux_user" != no ]; then - if [ "$targetos" = linux ]; then + if [ "$host_os" = linux ]; then linux_user=yes elif [ "$linux_user" = yes ]; then error_exit "linux-user not supported on this architecture" @@ -813,9 +814,9 @@ if [ -n "$host_arch" ] && [ -d "$source_path/common-user/host/$host_arch" ]; the fi if [ "$bsd_user" != no ]; then if [ "$bsd_user" = "" ]; then - test $targetos = freebsd && bsd_user=yes + test $host_os = freebsd && bsd_user=yes fi - if [ "$bsd_user" = yes ] && ! [ -d "$source_path/bsd-user/$targetos" ]; then + if [ "$bsd_user" = yes ] && ! [ -d "$source_path/bsd-user/$host_os" ]; then error_exit "bsd-user not supported on this host OS" fi if [ "$bsd_user" = "yes" ]; then @@ -998,7 +999,7 @@ if test -z "$ninja"; then fi fi -if test "$targetos" = "bogus"; then +if test "$host_os" = "bogus"; then # Now that we know that we're not printing the help and that # the compiler works (so the results of the check_defines we used # to identify the OS are reliable), if we didn't recognize the @@ -1007,7 +1008,7 @@ if test "$targetos" = "bogus"; then fi # test for any invalid configuration combinations -if test "$targetos" = "windows" && ! has "$dlltool"; then +if test "$host_os" = "windows" && ! has "$dlltool"; then if test "$plugins" = "yes"; then error_exit "TCG plugins requires dlltool to build on Windows platforms" fi @@ -1041,7 +1042,7 @@ static THREAD int tls_var; int main(void) { return tls_var; } EOF -if test "$targetos" = windows || test "$targetos" = haiku; then +if test "$host_os" = windows || test "$host_os" = haiku; then if test "$pie" = "yes"; then error_exit "PIE not available due to missing OS support" fi @@ -1231,6 +1232,7 @@ probe_target_compiler() { got_cross_cc=no container_image= container_hosts= + container_cross_prefix= container_cross_cc= container_cross_ar= container_cross_as= @@ -1272,16 +1274,33 @@ probe_target_compiler() { test "$container" != no || continue test "$host" = "$cpu" || continue case $target_arch in + # debian-all-test-cross architectures + + hppa|m68k|mips|riscv64|sparc64) + container_image=debian-all-test-cross + ;; + mips64) + container_image=debian-all-test-cross + container_cross_prefix=mips64-linux-gnuabi64- + ;; + ppc|ppc64|ppc64le) + container_image=debian-all-test-cross + container_cross_prefix=powerpc${target_arch#ppc}-linux-gnu- + ;; + + # debian-legacy-test-cross architectures (need Debian 11) + # - libc6.1-dev-alpha-cross: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1054412 + # - sh4-linux-user: binaries don't run with bookworm compiler + + alpha|sh4) + container_image=debian-legacy-test-cross + ;; + + # architectures with individual containers + aarch64) # We don't have any bigendian build tools so we only use this for AArch64 container_image=debian-arm64-cross - container_cross_prefix=aarch64-linux-gnu- - container_cross_cc=${container_cross_prefix}gcc - ;; - alpha) - container_image=debian-legacy-test-cross - container_cross_prefix=alpha-linux-gnu- - container_cross_cc=${container_cross_prefix}gcc ;; arm) 
# We don't have any bigendian build tools so we only use this for ARM @@ -1290,18 +1309,11 @@ probe_target_compiler() { ;; cris) container_image=fedora-cris-cross - container_cross_prefix=cris-linux-gnu- ;; hexagon) - container_image=debian-hexagon-cross container_cross_prefix=hexagon-unknown-linux-musl- container_cross_cc=${container_cross_prefix}clang ;; - hppa) - container_image=debian-all-test-cross - container_cross_prefix=hppa-linux-gnu- - container_cross_cc=${container_cross_prefix}gcc - ;; i386) container_image=debian-i686-cross container_cross_prefix=i686-linux-gnu- @@ -1310,59 +1322,19 @@ probe_target_compiler() { container_image=debian-loongarch-cross container_cross_prefix=loongarch64-unknown-linux-gnu- ;; - m68k) - container_image=debian-all-test-cross - container_cross_prefix=m68k-linux-gnu- - container_cross_cc=${container_cross_prefix}gcc - ;; microblaze) - container_image=debian-microblaze-cross container_cross_prefix=microblaze-linux-musl- ;; mips64el) container_image=debian-mips64el-cross container_cross_prefix=mips64el-linux-gnuabi64- ;; - mips64) - container_image=debian-all-test-cross - container_cross_prefix=mips64-linux-gnuabi64- - ;; - mips) - container_image=debian-all-test-cross - container_cross_prefix=mips-linux-gnu- - ;; - nios2) - container_image=debian-nios2-cross - container_cross_prefix=nios2-linux-gnu- - ;; - ppc) - container_image=debian-all-test-cross - container_cross_prefix=powerpc-linux-gnu- - container_cross_cc=${container_cross_prefix}gcc - ;; - ppc64|ppc64le) - container_image=debian-all-test-cross - container_cross_prefix=powerpc${target_arch#ppc}-linux-gnu- - ;; - riscv64) - container_image=debian-all-test-cross - container_cross_prefix=riscv64-linux-gnu- - ;; - sh4) - container_image=debian-legacy-test-cross - container_cross_prefix=sh4-linux-gnu- - ;; - sparc64) - container_image=debian-all-test-cross - container_cross_prefix=sparc64-linux-gnu- - ;; tricore) container_image=debian-tricore-cross container_cross_prefix=tricore- ;; x86_64) container_image=debian-amd64-cross - container_cross_prefix=x86_64-linux-gnu- ;; xtensa*) container_image=debian-xtensa-cross @@ -1370,12 +1342,10 @@ probe_target_compiler() { # default to the dc232b cpu container_cross_prefix=/opt/2020.07/xtensa-dc232b-elf/bin/xtensa-dc232b-elf- ;; - *) - # Debian and GNU architecture names usually match - container_image=debian-$target_arch-cross - container_cross_prefix=$target_arch-linux-gnu- - ;; esac + # Debian and GNU architecture names usually match + : ${container_image:=debian-$target_arch-cross} + : ${container_cross_prefix:=$target_arch-linux-gnu-} : ${container_cross_cc:=${container_cross_prefix}gcc} : ${container_cross_ar:=${container_cross_prefix}ar} : ${container_cross_as:=${container_cross_prefix}as} @@ -1559,7 +1529,7 @@ LINKS="$LINKS pc-bios/s390-ccw/Makefile" LINKS="$LINKS pc-bios/vof/Makefile" LINKS="$LINKS .gdbinit scripts" # scripts needed by relative path in .gdbinit LINKS="$LINKS tests/avocado tests/data" -LINKS="$LINKS tests/qemu-iotests/check" +LINKS="$LINKS tests/qemu-iotests/check tests/qemu-iotests/Makefile" LINKS="$LINKS python" LINKS="$LINKS contrib/plugins/Makefile " for f in $LINKS ; do @@ -1568,12 +1538,17 @@ for f in $LINKS ; do fi done +# use included Linux headers for KVM architectures +if test "$host_os" = "linux" && test -n "$linux_arch"; then + symlink "$source_path/linux-headers/asm-$linux_arch" linux-headers/asm +fi + echo "# Automatically generated by configure - do not modify" > Makefile.prereqs # Mac OS X ships with a broken 
assembler if have_target i386-softmmu x86_64-softmmu && \ - test "$targetos" != "darwin" && test "$targetos" != "sunos" && \ - test "$targetos" != "haiku" && \ + test "$host_os" != "darwin" && test "$host_os" != "sunos" && \ + test "$host_os" != "haiku" && \ probe_target_compiler i386-softmmu; then subdirs="$subdirs pc-bios/optionrom" config_mak=pc-bios/optionrom/config.mak @@ -1635,21 +1610,6 @@ echo "GENISOIMAGE=$genisoimage" >> $config_host_mak echo "MESON=$meson" >> $config_host_mak echo "NINJA=$ninja" >> $config_host_mak echo "EXESUF=$EXESUF" >> $config_host_mak - -# use included Linux headers for KVM architectures -if test "$targetos" = "linux" && test -n "$linux_arch"; then - symlink "$source_path/linux-headers/asm-$linux_arch" linux-headers/asm -fi - -for target in $target_list; do - target_dir="$target" - target_name=$(echo $target | cut -d '-' -f 1)$EXESUF - case $target in - *-user) symlink "../qemu-$target_name" "$target_dir/qemu-$target_name" ;; - *) symlink "../qemu-system-$target_name" "$target_dir/qemu-system-$target_name" ;; - esac -done - if test "$default_targets" = "yes"; then echo "CONFIG_DEFAULT_TARGETS=y" >> $config_host_mak fi @@ -1660,23 +1620,22 @@ echo "SRC_PATH=$source_path/contrib/plugins" >> contrib/plugins/$config_host_mak echo "PKG_CONFIG=${pkg_config}" >> contrib/plugins/$config_host_mak echo "CC=$cc $CPU_CFLAGS" >> contrib/plugins/$config_host_mak echo "CFLAGS=${CFLAGS-$default_cflags} $EXTRA_CFLAGS" >> contrib/plugins/$config_host_mak -if test "$targetos" = windows; then +if test "$host_os" = windows; then echo "DLLTOOL=$dlltool" >> contrib/plugins/$config_host_mak fi -if test "$targetos" = darwin; then +if test "$host_os" = darwin; then echo "CONFIG_DARWIN=y" >> contrib/plugins/$config_host_mak fi -if test "$targetos" = windows; then +if test "$host_os" = windows; then echo "CONFIG_WIN32=y" >> contrib/plugins/$config_host_mak fi # tests/tcg configuration -(config_host_mak=tests/tcg/config-host.mak mkdir -p tests/tcg -echo "# Automatically generated by configure - do not modify" > $config_host_mak -echo "SRC_PATH=$source_path" >> $config_host_mak +echo "# Automatically generated by configure - do not modify" > tests/tcg/$config_host_mak +echo "SRC_PATH=$source_path" >> tests/tcg/$config_host_mak if test "$plugins" = "yes" ; then - echo "CONFIG_PLUGIN=y" >> $config_host_mak + echo "CONFIG_PLUGIN=y" >> tests/tcg/$config_host_mak fi tcg_tests_targets= @@ -1720,9 +1679,8 @@ for target in $target_list; do done if test "$tcg" = "enabled"; then - echo "TCG_TESTS_TARGETS=$tcg_tests_targets" >> config-host.mak + echo "TCG_TESTS_TARGETS=$tcg_tests_targets" >> $config_host_mak fi -) if test "$skip_meson" = no; then cross="config-meson.cross.new" @@ -1751,7 +1709,7 @@ if test "$skip_meson" = no; then echo "# environment defaults, can still be overridden on " >> $cross echo "# the command line" >> $cross if test -e "$source_path/.git" && \ - { test "$targetos" = linux || test "$targetos" = "windows"; }; then + { test "$host_os" = linux || test "$host_os" = "windows"; }; then echo 'werror = true' >> $cross fi echo "[project options]" >> $cross @@ -1788,7 +1746,7 @@ if test "$skip_meson" = no; then echo "windmc = [$(meson_quote $windmc)]" >> $cross if test "$cross_compile" = "yes"; then echo "[host_machine]" >> $cross - echo "system = '$targetos'" >> $cross + echo "system = '$host_os'" >> $cross case "$cpu" in i386) echo "cpu_family = 'x86'" >> $cross @@ -1814,8 +1772,8 @@ if test "$skip_meson" = no; then fi mv $cross config-meson.cross meson_add_machine_file 
config-meson.cross - if test -f "$source_path/configs/meson/$targetos.txt"; then - meson_add_machine_file $source_path/configs/meson/$targetos.txt + if test -f "$source_path/configs/meson/$host_os.txt"; then + meson_add_machine_file $source_path/configs/meson/$host_os.txt fi rm -rf meson-private meson-info meson-logs diff --git a/contrib/elf2dmp/addrspace.c b/contrib/elf2dmp/addrspace.c index 6f608a517b1..81295a11534 100644 --- a/contrib/elf2dmp/addrspace.c +++ b/contrib/elf2dmp/addrspace.c @@ -22,7 +22,7 @@ static struct pa_block *pa_space_find_block(struct pa_space *ps, uint64_t pa) return NULL; } -static uint8_t *pa_space_resolve(struct pa_space *ps, uint64_t pa) +static void *pa_space_resolve(struct pa_space *ps, uint64_t pa) { struct pa_block *block = pa_space_find_block(ps, pa); @@ -33,6 +33,19 @@ static uint8_t *pa_space_resolve(struct pa_space *ps, uint64_t pa) return block->addr + (pa - block->paddr); } +static bool pa_space_read64(struct pa_space *ps, uint64_t pa, uint64_t *value) +{ + uint64_t *resolved = pa_space_resolve(ps, pa); + + if (!resolved) { + return false; + } + + *value = *resolved; + + return true; +} + static void pa_block_align(struct pa_block *b) { uint64_t low_align = ((b->paddr - 1) | ELF2DMP_PAGE_MASK) + 1 - b->paddr; @@ -57,7 +70,7 @@ static void pa_block_align(struct pa_block *b) b->paddr += low_align; } -int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf) +void pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf) { Elf64_Half phdr_nr = elf_getphdrnum(qemu_elf->map); Elf64_Phdr *phdr = elf64_getphdr(qemu_elf->map); @@ -75,11 +88,12 @@ int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf) ps->block = g_new(struct pa_block, ps->block_nr); for (i = 0; i < phdr_nr; i++) { - if (phdr[i].p_type == PT_LOAD) { + if (phdr[i].p_type == PT_LOAD && phdr[i].p_offset < qemu_elf->size) { ps->block[block_i] = (struct pa_block) { .addr = (uint8_t *)qemu_elf->map + phdr[i].p_offset, .paddr = phdr[i].p_paddr, - .size = phdr[i].p_filesz, + .size = MIN(phdr[i].p_filesz, + qemu_elf->size - phdr[i].p_offset), }; pa_block_align(&ps->block[block_i]); block_i = ps->block[block_i].size ? 
(block_i + 1) : block_i; @@ -87,8 +101,6 @@ int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf) } ps->block_nr = block_i; - - return 0; } void pa_space_destroy(struct pa_space *ps) @@ -108,19 +120,20 @@ void va_space_create(struct va_space *vs, struct pa_space *ps, uint64_t dtb) va_space_set_dtb(vs, dtb); } -static uint64_t get_pml4e(struct va_space *vs, uint64_t va) +static bool get_pml4e(struct va_space *vs, uint64_t va, uint64_t *value) { uint64_t pa = (vs->dtb & 0xffffffffff000) | ((va & 0xff8000000000) >> 36); - return *(uint64_t *)pa_space_resolve(vs->ps, pa); + return pa_space_read64(vs->ps, pa, value); } -static uint64_t get_pdpi(struct va_space *vs, uint64_t va, uint64_t pml4e) +static bool get_pdpi(struct va_space *vs, uint64_t va, uint64_t pml4e, + uint64_t *value) { uint64_t pdpte_paddr = (pml4e & 0xffffffffff000) | ((va & 0x7FC0000000) >> 27); - return *(uint64_t *)pa_space_resolve(vs->ps, pdpte_paddr); + return pa_space_read64(vs->ps, pdpte_paddr, value); } static uint64_t pde_index(uint64_t va) @@ -133,11 +146,12 @@ static uint64_t pdba_base(uint64_t pdpe) return pdpe & 0xFFFFFFFFFF000; } -static uint64_t get_pgd(struct va_space *vs, uint64_t va, uint64_t pdpe) +static bool get_pgd(struct va_space *vs, uint64_t va, uint64_t pdpe, + uint64_t *value) { uint64_t pgd_entry = pdba_base(pdpe) + pde_index(va) * 8; - return *(uint64_t *)pa_space_resolve(vs->ps, pgd_entry); + return pa_space_read64(vs->ps, pgd_entry, value); } static uint64_t pte_index(uint64_t va) @@ -150,11 +164,12 @@ static uint64_t ptba_base(uint64_t pde) return pde & 0xFFFFFFFFFF000; } -static uint64_t get_pte(struct va_space *vs, uint64_t va, uint64_t pgd) +static bool get_pte(struct va_space *vs, uint64_t va, uint64_t pgd, + uint64_t *value) { uint64_t pgd_val = ptba_base(pgd) + pte_index(va) * 8; - return *(uint64_t *)pa_space_resolve(vs->ps, pgd_val); + return pa_space_read64(vs->ps, pgd_val, value); } static uint64_t get_paddr(uint64_t va, uint64_t pte) @@ -186,13 +201,11 @@ static uint64_t va_space_va2pa(struct va_space *vs, uint64_t va) { uint64_t pml4e, pdpe, pgd, pte; - pml4e = get_pml4e(vs, va); - if (!is_present(pml4e)) { + if (!get_pml4e(vs, va, &pml4e) || !is_present(pml4e)) { return INVALID_PA; } - pdpe = get_pdpi(vs, va, pml4e); - if (!is_present(pdpe)) { + if (!get_pdpi(vs, va, pml4e, &pdpe) || !is_present(pdpe)) { return INVALID_PA; } @@ -200,8 +213,7 @@ static uint64_t va_space_va2pa(struct va_space *vs, uint64_t va) return get_1GB_paddr(va, pdpe); } - pgd = get_pgd(vs, va, pdpe); - if (!is_present(pgd)) { + if (!get_pgd(vs, va, pdpe, &pgd) || !is_present(pgd)) { return INVALID_PA; } @@ -209,8 +221,7 @@ static uint64_t va_space_va2pa(struct va_space *vs, uint64_t va) return get_2MB_paddr(va, pgd); } - pte = get_pte(vs, va, pgd); - if (!is_present(pte)) { + if (!get_pte(vs, va, pgd, &pte) || !is_present(pte)) { return INVALID_PA; } @@ -228,8 +239,8 @@ void *va_space_resolve(struct va_space *vs, uint64_t va) return pa_space_resolve(vs->ps, pa); } -int va_space_rw(struct va_space *vs, uint64_t addr, - void *buf, size_t size, int is_write) +bool va_space_rw(struct va_space *vs, uint64_t addr, + void *buf, size_t size, int is_write) { while (size) { uint64_t page = addr & ELF2DMP_PFN_MASK; @@ -240,7 +251,7 @@ int va_space_rw(struct va_space *vs, uint64_t addr, ptr = va_space_resolve(vs, addr); if (!ptr) { - return 1; + return false; } if (is_write) { @@ -254,5 +265,5 @@ int va_space_rw(struct va_space *vs, uint64_t addr, addr += s; } - return 0; + return true; } diff --git 
a/contrib/elf2dmp/addrspace.h b/contrib/elf2dmp/addrspace.h index 039c70c5b07..2ad30a9da48 100644 --- a/contrib/elf2dmp/addrspace.h +++ b/contrib/elf2dmp/addrspace.h @@ -33,13 +33,13 @@ struct va_space { struct pa_space *ps; }; -int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf); +void pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf); void pa_space_destroy(struct pa_space *ps); void va_space_create(struct va_space *vs, struct pa_space *ps, uint64_t dtb); void va_space_set_dtb(struct va_space *vs, uint64_t dtb); void *va_space_resolve(struct va_space *vs, uint64_t va); -int va_space_rw(struct va_space *vs, uint64_t addr, - void *buf, size_t size, int is_write); +bool va_space_rw(struct va_space *vs, uint64_t addr, + void *buf, size_t size, int is_write); #endif /* ADDRSPACE_H */ diff --git a/contrib/elf2dmp/download.c b/contrib/elf2dmp/download.c index bd7650a7a27..21306b3fd4c 100644 --- a/contrib/elf2dmp/download.c +++ b/contrib/elf2dmp/download.c @@ -9,19 +9,18 @@ #include <curl/curl.h> #include "download.h" -int download_url(const char *name, const char *url) +bool download_url(const char *name, const char *url) { - int err = 0; + bool success = false; FILE *file; CURL *curl = curl_easy_init(); if (!curl) { - return 1; + return false; } file = fopen(name, "wb"); if (!file) { - err = 1; goto out_curl; } @@ -33,13 +32,12 @@ int download_url(const char *name, const char *url) || curl_easy_perform(curl) != CURLE_OK) { unlink(name); fclose(file); - err = 1; } else { - err = fclose(file); + success = !fclose(file); } out_curl: curl_easy_cleanup(curl); - return err; + return success; } diff --git a/contrib/elf2dmp/download.h b/contrib/elf2dmp/download.h index 5c274925f7a..f65adb5d089 100644 --- a/contrib/elf2dmp/download.h +++ b/contrib/elf2dmp/download.h @@ -8,6 +8,6 @@ #ifndef DOWNLOAD_H #define DOWNLOAD_H -int download_url(const char *name, const char *url); +bool download_url(const char *name, const char *url); #endif /* DOWNLOAD_H */ diff --git a/contrib/elf2dmp/main.c b/contrib/elf2dmp/main.c index cbc38a7c103..d046a72ae67 100644 --- a/contrib/elf2dmp/main.c +++ b/contrib/elf2dmp/main.c @@ -6,6 +6,7 @@ */ #include "qemu/osdep.h" +#include "qemu/bitops.h" #include "err.h" #include "addrspace.h" @@ -47,11 +48,6 @@ static const uint64_t SharedUserData = 0xfffff78000000000; s ? 
printf(#s" = 0x%016"PRIx64"\n", s) :\ eprintf("Failed to resolve "#s"\n"), s) -static uint64_t rol(uint64_t x, uint64_t y) -{ - return (x << y) | (x >> (64 - y)); -} - /* * Decoding algorithm can be found in Volatility project */ @@ -64,7 +60,7 @@ static void kdbg_decode(uint64_t *dst, uint64_t *src, size_t size, uint64_t block; block = src[i]; - block = rol(block ^ kwn, (uint8_t)kwn); + block = rol64(block ^ kwn, kwn); block = __builtin_bswap64(block ^ kdbe) ^ kwa; dst[i] = block; } @@ -79,9 +75,9 @@ static KDDEBUGGER_DATA64 *get_kdbg(uint64_t KernBase, struct pdb_reader *pdb, bool decode = false; uint64_t kwn, kwa, KdpDataBlockEncoded; - if (va_space_rw(vs, - KdDebuggerDataBlock + offsetof(KDDEBUGGER_DATA64, Header), - &kdbg_hdr, sizeof(kdbg_hdr), 0)) { + if (!va_space_rw(vs, + KdDebuggerDataBlock + offsetof(KDDEBUGGER_DATA64, Header), + &kdbg_hdr, sizeof(kdbg_hdr), 0)) { eprintf("Failed to extract KDBG header\n"); return NULL; } @@ -97,8 +93,8 @@ static KDDEBUGGER_DATA64 *get_kdbg(uint64_t KernBase, struct pdb_reader *pdb, return NULL; } - if (va_space_rw(vs, KiWaitNever, &kwn, sizeof(kwn), 0) || - va_space_rw(vs, KiWaitAlways, &kwa, sizeof(kwa), 0)) { + if (!va_space_rw(vs, KiWaitNever, &kwn, sizeof(kwn), 0) || + !va_space_rw(vs, KiWaitAlways, &kwa, sizeof(kwa), 0)) { return NULL; } @@ -122,7 +118,7 @@ static KDDEBUGGER_DATA64 *get_kdbg(uint64_t KernBase, struct pdb_reader *pdb, kdbg = g_malloc(kdbg_hdr.Size); - if (va_space_rw(vs, KdDebuggerDataBlock, kdbg, kdbg_hdr.Size, 0)) { + if (!va_space_rw(vs, KdDebuggerDataBlock, kdbg, kdbg_hdr.Size, 0)) { eprintf("Failed to extract entire KDBG\n"); g_free(kdbg); return NULL; @@ -186,13 +182,13 @@ static void win_context_init_from_qemu_cpu_state(WinContext64 *ctx, * Finds paging-structure hierarchy base, * if previously set doesn't give access to kernel structures */ -static int fix_dtb(struct va_space *vs, QEMU_Elf *qe) +static bool fix_dtb(struct va_space *vs, QEMU_Elf *qe) { /* * Firstly, test previously set DTB. 
*/ if (va_space_resolve(vs, SharedUserData)) { - return 0; + return true; } /* @@ -206,7 +202,7 @@ static int fix_dtb(struct va_space *vs, QEMU_Elf *qe) va_space_set_dtb(vs, s->cr[3]); printf("DTB 0x%016"PRIx64" has been found from CPU #%zu" " as system task CR3\n", vs->dtb, i); - return !(va_space_resolve(vs, SharedUserData)); + return va_space_resolve(vs, SharedUserData); } } @@ -220,16 +216,16 @@ static int fix_dtb(struct va_space *vs, QEMU_Elf *qe) uint64_t *cr3 = va_space_resolve(vs, Prcb + 0x7000); if (!cr3) { - return 1; + return false; } va_space_set_dtb(vs, *cr3); printf("DirectoryTableBase = 0x%016"PRIx64" has been found from CPU #0" " as interrupt handling CR3\n", vs->dtb); - return !(va_space_resolve(vs, SharedUserData)); + return va_space_resolve(vs, SharedUserData); } - return 1; + return true; } static void try_merge_runs(struct pa_space *ps, @@ -268,9 +264,10 @@ static void try_merge_runs(struct pa_space *ps, } } -static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps, - struct va_space *vs, uint64_t KdDebuggerDataBlock, - KDDEBUGGER_DATA64 *kdbg, uint64_t KdVersionBlock, int nr_cpus) +static bool fill_header(WinDumpHeader64 *hdr, struct pa_space *ps, + struct va_space *vs, uint64_t KdDebuggerDataBlock, + KDDEBUGGER_DATA64 *kdbg, uint64_t KdVersionBlock, + int nr_cpus) { uint32_t *suite_mask = va_space_resolve(vs, SharedUserData + KUSD_OFFSET_SUITE_MASK); @@ -283,12 +280,12 @@ static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps, QEMU_BUILD_BUG_ON(KUSD_OFFSET_PRODUCT_TYPE >= ELF2DMP_PAGE_SIZE); if (!suite_mask || !product_type) { - return 1; + return false; } - if (va_space_rw(vs, KdVersionBlock, &kvb, sizeof(kvb), 0)) { + if (!va_space_rw(vs, KdVersionBlock, &kvb, sizeof(kvb), 0)) { eprintf("Failed to extract KdVersionBlock\n"); - return 1; + return false; } h = (WinDumpHeader64) { @@ -333,11 +330,16 @@ static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps, *hdr = h; - return 0; + return true; } -static int fill_context(KDDEBUGGER_DATA64 *kdbg, - struct va_space *vs, QEMU_Elf *qe) +/* + * fill_context() continues even if it fails to fill contexts of some CPUs. + * A dump may still contain valuable information even if it lacks contexts of + * some CPUs due to dump corruption or a failure before starting CPUs. 
+ */ +static void fill_context(KDDEBUGGER_DATA64 *kdbg, + struct va_space *vs, QEMU_Elf *qe) { int i; @@ -347,10 +349,10 @@ static int fill_context(KDDEBUGGER_DATA64 *kdbg, WinContext64 ctx; QEMUCPUState *s = qe->state[i]; - if (va_space_rw(vs, kdbg->KiProcessorBlock + sizeof(Prcb) * i, - &Prcb, sizeof(Prcb), 0)) { + if (!va_space_rw(vs, kdbg->KiProcessorBlock + sizeof(Prcb) * i, + &Prcb, sizeof(Prcb), 0)) { eprintf("Failed to read CPU #%d PRCB location\n", i); - return 1; + continue; } if (!Prcb) { @@ -358,26 +360,24 @@ static int fill_context(KDDEBUGGER_DATA64 *kdbg, continue; } - if (va_space_rw(vs, Prcb + kdbg->OffsetPrcbContext, - &Context, sizeof(Context), 0)) { + if (!va_space_rw(vs, Prcb + kdbg->OffsetPrcbContext, + &Context, sizeof(Context), 0)) { eprintf("Failed to read CPU #%d ContextFrame location\n", i); - return 1; + continue; } printf("Filling context for CPU #%d...\n", i); win_context_init_from_qemu_cpu_state(&ctx, s); - if (va_space_rw(vs, Context, &ctx, sizeof(ctx), 1)) { + if (!va_space_rw(vs, Context, &ctx, sizeof(ctx), 1)) { eprintf("Failed to fill CPU #%d context\n", i); - return 1; + continue; } } - - return 0; } -static int pe_get_data_dir_entry(uint64_t base, void *start_addr, int idx, - void *entry, size_t size, struct va_space *vs) +static bool pe_get_data_dir_entry(uint64_t base, void *start_addr, int idx, + void *entry, size_t size, struct va_space *vs) { const char e_magic[2] = "MZ"; const char Signature[4] = "PE\0\0"; @@ -390,40 +390,38 @@ static int pe_get_data_dir_entry(uint64_t base, void *start_addr, int idx, QEMU_BUILD_BUG_ON(sizeof(*dos_hdr) >= ELF2DMP_PAGE_SIZE); if (memcmp(&dos_hdr->e_magic, e_magic, sizeof(e_magic))) { - return 1; + return false; } - if (va_space_rw(vs, base + dos_hdr->e_lfanew, - &nt_hdrs, sizeof(nt_hdrs), 0)) { - return 1; + if (!va_space_rw(vs, base + dos_hdr->e_lfanew, + &nt_hdrs, sizeof(nt_hdrs), 0)) { + return false; } if (memcmp(&nt_hdrs.Signature, Signature, sizeof(Signature)) || file_hdr->Machine != 0x8664 || opt_hdr->Magic != 0x020b) { - return 1; + return false; } - if (va_space_rw(vs, - base + data_dir[idx].VirtualAddress, - entry, size, 0)) { - return 1; + if (!va_space_rw(vs, base + data_dir[idx].VirtualAddress, entry, size, 0)) { + return false; } printf("Data directory entry #%d: RVA = 0x%08"PRIx32"\n", idx, (uint32_t)data_dir[idx].VirtualAddress); - return 0; + return true; } -static int write_dump(struct pa_space *ps, - WinDumpHeader64 *hdr, const char *name) +static bool write_dump(struct pa_space *ps, + WinDumpHeader64 *hdr, const char *name) { FILE *dmp_file = fopen(name, "wb"); size_t i; if (!dmp_file) { eprintf("Failed to open output file \'%s\'\n", name); - return 1; + return false; } printf("Writing header to file...\n"); @@ -431,7 +429,7 @@ static int write_dump(struct pa_space *ps, if (fwrite(hdr, sizeof(*hdr), 1, dmp_file) != 1) { eprintf("Failed to write dump header\n"); fclose(dmp_file); - return 1; + return false; } for (i = 0; i < ps->block_nr; i++) { @@ -442,11 +440,11 @@ static int write_dump(struct pa_space *ps, if (fwrite(b->addr, b->size, 1, dmp_file) != 1) { eprintf("Failed to write block\n"); fclose(dmp_file); - return 1; + return false; } } - return fclose(dmp_file); + return !fclose(dmp_file); } static bool pe_check_pdb_name(uint64_t base, void *start_addr, @@ -456,8 +454,8 @@ static bool pe_check_pdb_name(uint64_t base, void *start_addr, IMAGE_DEBUG_DIRECTORY debug_dir; char pdb_name[sizeof(PDB_NAME)]; - if (pe_get_data_dir_entry(base, start_addr, IMAGE_FILE_DEBUG_DIRECTORY, - &debug_dir, 
sizeof(debug_dir), vs)) { + if (!pe_get_data_dir_entry(base, start_addr, IMAGE_FILE_DEBUG_DIRECTORY, + &debug_dir, sizeof(debug_dir), vs)) { eprintf("Failed to get Debug Directory\n"); return false; } @@ -467,9 +465,8 @@ static bool pe_check_pdb_name(uint64_t base, void *start_addr, return false; } - if (va_space_rw(vs, - base + debug_dir.AddressOfRawData, - rsds, sizeof(*rsds), 0)) { + if (!va_space_rw(vs, base + debug_dir.AddressOfRawData, + rsds, sizeof(*rsds), 0)) { eprintf("Failed to resolve OMFSignatureRSDS\n"); return false; } @@ -485,9 +482,9 @@ static bool pe_check_pdb_name(uint64_t base, void *start_addr, return false; } - if (va_space_rw(vs, base + debug_dir.AddressOfRawData + - offsetof(OMFSignatureRSDS, name), pdb_name, sizeof(PDB_NAME), - 0)) { + if (!va_space_rw(vs, base + debug_dir.AddressOfRawData + + offsetof(OMFSignatureRSDS, name), + pdb_name, sizeof(PDB_NAME), 0)) { eprintf("Failed to resolve PDB name\n"); return false; } @@ -511,7 +508,7 @@ static void pe_get_pdb_symstore_hash(OMFSignatureRSDS *rsds, char *hash) int main(int argc, char *argv[]) { - int err = 0; + int err = 1; QEMU_Elf qemu_elf; struct pa_space ps; struct va_space vs; @@ -535,33 +532,27 @@ int main(int argc, char *argv[]) return 1; } - if (QEMU_Elf_init(&qemu_elf, argv[1])) { + if (!QEMU_Elf_init(&qemu_elf, argv[1])) { eprintf("Failed to initialize QEMU ELF dump\n"); return 1; } - if (pa_space_create(&ps, &qemu_elf)) { - eprintf("Failed to initialize physical address space\n"); - err = 1; - goto out_elf; - } + pa_space_create(&ps, &qemu_elf); state = qemu_elf.state[0]; printf("CPU #0 CR3 is 0x%016"PRIx64"\n", state->cr[3]); va_space_create(&vs, &ps, state->cr[3]); - if (fix_dtb(&vs, &qemu_elf)) { + if (!fix_dtb(&vs, &qemu_elf)) { eprintf("Failed to find paging base\n"); - err = 1; - goto out_elf; + goto out_ps; } printf("CPU #0 IDT is at 0x%016"PRIx64"\n", state->idt.base); - if (va_space_rw(&vs, state->idt.base, - &first_idt_desc, sizeof(first_idt_desc), 0)) { + if (!va_space_rw(&vs, state->idt.base, + &first_idt_desc, sizeof(first_idt_desc), 0)) { eprintf("Failed to get CPU #0 IDT[0]\n"); - err = 1; goto out_ps; } printf("CPU #0 IDT[0] -> 0x%016"PRIx64"\n", idt_desc_addr(first_idt_desc)); @@ -586,7 +577,6 @@ int main(int argc, char *argv[]) if (!kernel_found) { eprintf("Failed to find NT kernel image\n"); - err = 1; goto out_ps; } @@ -598,47 +588,40 @@ int main(int argc, char *argv[]) sprintf(pdb_url, "%s%s/%s/%s", SYM_URL_BASE, PDB_NAME, pdb_hash, PDB_NAME); printf("PDB URL is %s\n", pdb_url); - if (download_url(PDB_NAME, pdb_url)) { + if (!download_url(PDB_NAME, pdb_url)) { eprintf("Failed to download PDB file\n"); - err = 1; goto out_ps; } - if (pdb_init_from_file(PDB_NAME, &pdb)) { + if (!pdb_init_from_file(PDB_NAME, &pdb)) { eprintf("Failed to initialize PDB reader\n"); - err = 1; goto out_pdb_file; } if (!SYM_RESOLVE(KernBase, &pdb, KdDebuggerDataBlock) || !SYM_RESOLVE(KernBase, &pdb, KdVersionBlock)) { - err = 1; goto out_pdb; } kdbg = get_kdbg(KernBase, &pdb, &vs, KdDebuggerDataBlock); if (!kdbg) { - err = 1; goto out_pdb; } - if (fill_header(&header, &ps, &vs, KdDebuggerDataBlock, kdbg, - KdVersionBlock, qemu_elf.state_nr)) { - err = 1; + if (!fill_header(&header, &ps, &vs, KdDebuggerDataBlock, kdbg, + KdVersionBlock, qemu_elf.state_nr)) { goto out_kdbg; } - if (fill_context(kdbg, &vs, &qemu_elf)) { - err = 1; - goto out_kdbg; - } + fill_context(kdbg, &vs, &qemu_elf); - if (write_dump(&ps, &header, argv[2])) { + if (!write_dump(&ps, &header, argv[2])) { eprintf("Failed to save dump\n"); - 
err = 1; goto out_kdbg; } + err = 0; + out_kdbg: g_free(kdbg); out_pdb: @@ -647,7 +630,6 @@ int main(int argc, char *argv[]) unlink(PDB_NAME); out_ps: pa_space_destroy(&ps); -out_elf: QEMU_Elf_exit(&qemu_elf); return err; diff --git a/contrib/elf2dmp/pdb.c b/contrib/elf2dmp/pdb.c index 40991f5f4c3..492aca4434c 100644 --- a/contrib/elf2dmp/pdb.c +++ b/contrib/elf2dmp/pdb.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/bswap.h" #include "pdb.h" #include "err.h" @@ -158,36 +159,35 @@ static void *pdb_ds_read_file(struct pdb_reader* r, uint32_t file_number) return pdb_ds_read(r->ds.header, block_list, file_size[file_number]); } -static int pdb_init_segments(struct pdb_reader *r) +static bool pdb_init_segments(struct pdb_reader *r) { unsigned stream_idx = r->segments; r->segs = pdb_ds_read_file(r, stream_idx); if (!r->segs) { - return 1; + return false; } r->segs_size = pdb_get_file_size(r, stream_idx); if (!r->segs_size) { - return 1; + return false; } - return 0; + return true; } -static int pdb_init_symbols(struct pdb_reader *r) +static bool pdb_init_symbols(struct pdb_reader *r) { - int err = 0; PDB_SYMBOLS *symbols; symbols = pdb_ds_read_file(r, 3); if (!symbols) { - return 1; + return false; } r->symbols = symbols; - r->segments = *(uint16_t *)((const char *)symbols + sizeof(PDB_SYMBOLS) + + r->segments = lduw_le_p((const char *)symbols + sizeof(PDB_SYMBOLS) + symbols->module_size + symbols->offset_size + symbols->hash_size + symbols->srcmodule_size + symbols->pdbimport_size + symbols->unknown2_size + @@ -196,22 +196,21 @@ static int pdb_init_symbols(struct pdb_reader *r) /* Read global symbol table */ r->modimage = pdb_ds_read_file(r, symbols->gsym_file); if (!r->modimage) { - err = 1; goto out_symbols; } - return 0; + return true; out_symbols: g_free(symbols); - return err; + return false; } -static int pdb_reader_ds_init(struct pdb_reader *r, PDB_DS_HEADER *hdr) +static bool pdb_reader_ds_init(struct pdb_reader *r, PDB_DS_HEADER *hdr) { if (hdr->block_size == 0) { - return 1; + return false; } memset(r->file_used, 0, sizeof(r->file_used)); @@ -220,42 +219,38 @@ static int pdb_reader_ds_init(struct pdb_reader *r, PDB_DS_HEADER *hdr) hdr->toc_page * hdr->block_size), hdr->toc_size); if (!r->ds.toc) { - return 1; + return false; } - return 0; + return true; } -static int pdb_reader_init(struct pdb_reader *r, void *data) +static bool pdb_reader_init(struct pdb_reader *r, void *data) { - int err = 0; const char pdb7[] = "Microsoft C/C++ MSF 7.00"; if (memcmp(data, pdb7, sizeof(pdb7) - 1)) { - return 1; + return false; } - if (pdb_reader_ds_init(r, data)) { - return 1; + if (!pdb_reader_ds_init(r, data)) { + return false; } r->ds.root = pdb_ds_read_file(r, 1); if (!r->ds.root) { - err = 1; goto out_ds; } - if (pdb_init_symbols(r)) { - err = 1; + if (!pdb_init_symbols(r)) { goto out_root; } - if (pdb_init_segments(r)) { - err = 1; + if (!pdb_init_segments(r)) { goto out_sym; } - return 0; + return true; out_sym: pdb_exit_symbols(r); @@ -264,7 +259,7 @@ static int pdb_reader_init(struct pdb_reader *r, void *data) out_ds: pdb_reader_ds_exit(r); - return err; + return false; } static void pdb_reader_exit(struct pdb_reader *r) @@ -275,32 +270,30 @@ static void pdb_reader_exit(struct pdb_reader *r) pdb_reader_ds_exit(r); } -int pdb_init_from_file(const char *name, struct pdb_reader *reader) +bool pdb_init_from_file(const char *name, struct pdb_reader *reader) { GError *gerr = NULL; - int err = 0; void *map; reader->gmf = g_mapped_file_new(name, TRUE, &gerr); if (gerr) { 
eprintf("Failed to map PDB file \'%s\'\n", name); g_error_free(gerr); - return 1; + return false; } reader->file_size = g_mapped_file_get_length(reader->gmf); map = g_mapped_file_get_contents(reader->gmf); - if (pdb_reader_init(reader, map)) { - err = 1; + if (!pdb_reader_init(reader, map)) { goto out_unmap; } - return 0; + return true; out_unmap: g_mapped_file_unref(reader->gmf); - return err; + return false; } void pdb_exit(struct pdb_reader *reader) diff --git a/contrib/elf2dmp/pdb.h b/contrib/elf2dmp/pdb.h index 2a50da56ac9..feddf1862f0 100644 --- a/contrib/elf2dmp/pdb.h +++ b/contrib/elf2dmp/pdb.h @@ -233,7 +233,7 @@ struct pdb_reader { size_t segs_size; }; -int pdb_init_from_file(const char *name, struct pdb_reader *reader); +bool pdb_init_from_file(const char *name, struct pdb_reader *reader); void pdb_exit(struct pdb_reader *reader); uint64_t pdb_resolve(uint64_t img_base, struct pdb_reader *r, const char *name); uint64_t pdb_find_public_v3_symbol(struct pdb_reader *reader, const char *name); diff --git a/contrib/elf2dmp/qemu_elf.c b/contrib/elf2dmp/qemu_elf.c index 055e6f8792e..c9bad6e82cf 100644 --- a/contrib/elf2dmp/qemu_elf.c +++ b/contrib/elf2dmp/qemu_elf.c @@ -6,6 +6,7 @@ */ #include "qemu/osdep.h" +#include "qemu/host-utils.h" #include "err.h" #include "qemu_elf.h" @@ -15,36 +16,11 @@ #define ROUND_UP(n, d) (((n) + (d) - 1) & -(0 ? (n) : (d))) #endif -#ifndef DIV_ROUND_UP -#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) -#endif - -#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \ - ((DIV_ROUND_UP((hdr_size), 4) + \ - DIV_ROUND_UP((name_size), 4) + \ - DIV_ROUND_UP((desc_size), 4)) * 4) - int is_system(QEMUCPUState *s) { return s->gs.base >> 63; } -static char *nhdr_get_name(Elf64_Nhdr *nhdr) -{ - return (char *)nhdr + ROUND_UP(sizeof(*nhdr), 4); -} - -static void *nhdr_get_desc(Elf64_Nhdr *nhdr) -{ - return nhdr_get_name(nhdr) + ROUND_UP(nhdr->n_namesz, 4); -} - -static Elf64_Nhdr *nhdr_get_next(Elf64_Nhdr *nhdr) -{ - return (void *)((uint8_t *)nhdr + ELF_NOTE_SIZE(sizeof(*nhdr), - nhdr->n_namesz, nhdr->n_descsz)); -} - Elf64_Phdr *elf64_getphdr(void *map) { Elf64_Ehdr *ehdr = map; @@ -60,54 +36,92 @@ Elf64_Half elf_getphdrnum(void *map) return ehdr->e_phnum; } -static int init_states(QEMU_Elf *qe) +static bool advance_note_offset(uint64_t *offsetp, uint64_t size, uint64_t end) +{ + uint64_t offset = *offsetp; + + if (uadd64_overflow(offset, size, &offset) || offset > UINT64_MAX - 3) { + return false; + } + + offset = ROUND_UP(offset, 4); + + if (offset > end) { + return false; + } + + *offsetp = offset; + + return true; +} + +static bool init_states(QEMU_Elf *qe) { Elf64_Phdr *phdr = elf64_getphdr(qe->map); - Elf64_Nhdr *start = (void *)((uint8_t *)qe->map + phdr[0].p_offset); - Elf64_Nhdr *end = (void *)((uint8_t *)start + phdr[0].p_memsz); Elf64_Nhdr *nhdr; - size_t cpu_nr = 0; + GPtrArray *states; + QEMUCPUState *state; + uint32_t state_size; + uint64_t offset; + uint64_t end_offset; + char *name; if (phdr[0].p_type != PT_NOTE) { eprintf("Failed to find PT_NOTE\n"); - return 1; + return false; } qe->has_kernel_gs_base = 1; + offset = phdr[0].p_offset; + states = g_ptr_array_new(); + + if (uadd64_overflow(offset, phdr[0].p_memsz, &end_offset) || + end_offset > qe->size) { + end_offset = qe->size; + } - for (nhdr = start; nhdr < end; nhdr = nhdr_get_next(nhdr)) { - if (!strcmp(nhdr_get_name(nhdr), QEMU_NOTE_NAME)) { - QEMUCPUState *state = nhdr_get_desc(nhdr); + while (offset < end_offset) { + nhdr = (void *)((uint8_t *)qe->map + offset); - if (state->size < 
sizeof(*state)) { - eprintf("CPU #%zu: QEMU CPU state size %u doesn't match\n", - cpu_nr, state->size); + if (!advance_note_offset(&offset, sizeof(*nhdr), end_offset)) { + break; + } + + name = (char *)qe->map + offset; + + if (!advance_note_offset(&offset, nhdr->n_namesz, end_offset)) { + break; + } + + state = (void *)((uint8_t *)qe->map + offset); + + if (!advance_note_offset(&offset, nhdr->n_descsz, end_offset)) { + break; + } + + if (!strcmp(name, QEMU_NOTE_NAME) && + nhdr->n_descsz >= offsetof(QEMUCPUState, kernel_gs_base)) { + state_size = MIN(state->size, nhdr->n_descsz); + + if (state_size < sizeof(*state)) { + eprintf("CPU #%u: QEMU CPU state size %u doesn't match\n", + states->len, state_size); /* * We assume either every QEMU CPU state has KERNEL_GS_BASE or * no one has. */ qe->has_kernel_gs_base = 0; } - cpu_nr++; + g_ptr_array_add(states, state); } } - printf("%zu CPU states has been found\n", cpu_nr); - - qe->state = g_new(QEMUCPUState*, cpu_nr); - - cpu_nr = 0; - - for (nhdr = start; nhdr < end; nhdr = nhdr_get_next(nhdr)) { - if (!strcmp(nhdr_get_name(nhdr), QEMU_NOTE_NAME)) { - qe->state[cpu_nr] = nhdr_get_desc(nhdr); - cpu_nr++; - } - } + printf("%u CPU states has been found\n", states->len); - qe->state_nr = cpu_nr; + qe->state_nr = states->len; + qe->state = (void *)g_ptr_array_free(states, FALSE); - return 0; + return true; } static void exit_states(QEMU_Elf *qe) @@ -118,6 +132,7 @@ static void exit_states(QEMU_Elf *qe) static bool check_ehdr(QEMU_Elf *qe) { Elf64_Ehdr *ehdr = qe->map; + uint64_t phendoff; if (sizeof(Elf64_Ehdr) > qe->size) { eprintf("Invalid input dump file size\n"); @@ -159,10 +174,17 @@ static bool check_ehdr(QEMU_Elf *qe) return false; } + if (umul64_overflow(ehdr->e_phnum, sizeof(Elf64_Phdr), &phendoff) || + uadd64_overflow(phendoff, ehdr->e_phoff, &phendoff) || + phendoff > qe->size) { + eprintf("phdrs do not fit in file\n"); + return false; + } + return true; } -static int QEMU_Elf_map(QEMU_Elf *qe, const char *filename) +static bool QEMU_Elf_map(QEMU_Elf *qe, const char *filename) { #ifdef CONFIG_LINUX struct stat st; @@ -173,13 +195,13 @@ static int QEMU_Elf_map(QEMU_Elf *qe, const char *filename) fd = open(filename, O_RDONLY, 0); if (fd == -1) { eprintf("Failed to open ELF dump file \'%s\'\n", filename); - return 1; + return false; } if (fstat(fd, &st)) { eprintf("Failed to get size of ELF dump file\n"); close(fd); - return 1; + return false; } qe->size = st.st_size; @@ -188,7 +210,7 @@ static int QEMU_Elf_map(QEMU_Elf *qe, const char *filename) if (qe->map == MAP_FAILED) { eprintf("Failed to map ELF file\n"); close(fd); - return 1; + return false; } close(fd); @@ -201,14 +223,14 @@ static int QEMU_Elf_map(QEMU_Elf *qe, const char *filename) if (gerr) { eprintf("Failed to map ELF dump file \'%s\'\n", filename); g_error_free(gerr); - return 1; + return false; } qe->map = g_mapped_file_get_contents(qe->gmf); qe->size = g_mapped_file_get_length(qe->gmf); #endif - return 0; + return true; } static void QEMU_Elf_unmap(QEMU_Elf *qe) @@ -220,25 +242,25 @@ static void QEMU_Elf_unmap(QEMU_Elf *qe) #endif } -int QEMU_Elf_init(QEMU_Elf *qe, const char *filename) +bool QEMU_Elf_init(QEMU_Elf *qe, const char *filename) { - if (QEMU_Elf_map(qe, filename)) { - return 1; + if (!QEMU_Elf_map(qe, filename)) { + return false; } if (!check_ehdr(qe)) { eprintf("Input file has the wrong format\n"); QEMU_Elf_unmap(qe); - return 1; + return false; } - if (init_states(qe)) { + if (!init_states(qe)) { eprintf("Failed to extract QEMU CPU states\n"); 
QEMU_Elf_unmap(qe); - return 1; + return false; } - return 0; + return true; } void QEMU_Elf_exit(QEMU_Elf *qe) diff --git a/contrib/elf2dmp/qemu_elf.h b/contrib/elf2dmp/qemu_elf.h index afa75f10b2d..adc50238b46 100644 --- a/contrib/elf2dmp/qemu_elf.h +++ b/contrib/elf2dmp/qemu_elf.h @@ -42,7 +42,7 @@ typedef struct QEMU_Elf { int has_kernel_gs_base; } QEMU_Elf; -int QEMU_Elf_init(QEMU_Elf *qe, const char *filename); +bool QEMU_Elf_init(QEMU_Elf *qe, const char *filename); void QEMU_Elf_exit(QEMU_Elf *qe); Elf64_Phdr *elf64_getphdr(void *map); diff --git a/contrib/ivshmem-client/meson.build b/contrib/ivshmem-client/meson.build index ce8dcca84dd..3c8b09af4bf 100644 --- a/contrib/ivshmem-client/meson.build +++ b/contrib/ivshmem-client/meson.build @@ -1,4 +1,4 @@ executable('ivshmem-client', files('ivshmem-client.c', 'main.c'), genh, dependencies: glib, - build_by_default: targetos == 'linux', + build_by_default: host_os == 'linux', install: false) diff --git a/contrib/ivshmem-server/meson.build b/contrib/ivshmem-server/meson.build index c6c3c82e89f..1c8fea6594d 100644 --- a/contrib/ivshmem-server/meson.build +++ b/contrib/ivshmem-server/meson.build @@ -1,4 +1,4 @@ executable('ivshmem-server', files('ivshmem-server.c', 'main.c'), genh, dependencies: [qemuutil, rt], - build_by_default: targetos == 'linux', + build_by_default: host_os == 'linux', install: false) diff --git a/contrib/plugins/cache.c b/contrib/plugins/cache.c index 9e7ade3b374..c5c8ac75a9c 100644 --- a/contrib/plugins/cache.c +++ b/contrib/plugins/cache.c @@ -767,7 +767,7 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, policy = LRU; - cores = sys ? qemu_plugin_n_vcpus() : 1; + cores = sys ? info->system.smp_vcpus : 1; for (i = 0; i < argc; i++) { char *opt = argv[i]; diff --git a/contrib/plugins/execlog.c b/contrib/plugins/execlog.c index 82dc2f584e2..fab18113d42 100644 --- a/contrib/plugins/execlog.c +++ b/contrib/plugins/execlog.c @@ -1,7 +1,7 @@ /* * Copyright (C) 2021, Alexandre Iooss * - * Log instruction execution with memory access. + * Log instruction execution with memory access and register changes * * License: GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. @@ -15,29 +15,40 @@ #include +typedef struct { + struct qemu_plugin_register *handle; + GByteArray *last; + GByteArray *new; + const char *name; +} Register; + +typedef struct CPU { + /* Store last executed instruction on each vCPU as a GString */ + GString *last_exec; + /* Ptr array of Register */ + GPtrArray *registers; +} CPU; + QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; -/* Store last executed instruction on each vCPU as a GString */ -static GPtrArray *last_exec; +static GArray *cpus; static GRWLock expand_array_lock; static GPtrArray *imatches; static GArray *amatches; +static GPtrArray *rmatches; +static bool disas_assist; +static GMutex add_reg_name_lock; +static GPtrArray *all_reg_names; -/* - * Expand last_exec array. - * - * As we could have multiple threads trying to do this we need to - * serialise the expansion under a lock. 
- */ -static void expand_last_exec(int cpu_index) +static CPU *get_cpu(int vcpu_index) { - g_rw_lock_writer_lock(&expand_array_lock); - while (cpu_index >= last_exec->len) { - GString *s = g_string_new(NULL); - g_ptr_array_add(last_exec, s); - } - g_rw_lock_writer_unlock(&expand_array_lock); + CPU *c; + g_rw_lock_reader_lock(&expand_array_lock); + c = &g_array_index(cpus, CPU, vcpu_index); + g_rw_lock_reader_unlock(&expand_array_lock); + + return c; } /** @@ -46,13 +57,10 @@ static void expand_last_exec(int cpu_index) static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info, uint64_t vaddr, void *udata) { - GString *s; + CPU *c = get_cpu(cpu_index); + GString *s = c->last_exec; /* Find vCPU in array */ - g_rw_lock_reader_lock(&expand_array_lock); - g_assert(cpu_index < last_exec->len); - s = g_ptr_array_index(last_exec, cpu_index); - g_rw_lock_reader_unlock(&expand_array_lock); /* Indicate type of memory access */ if (qemu_plugin_mem_is_store(info)) { @@ -73,32 +81,91 @@ static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info, } /** - * Log instruction execution + * Log instruction execution, outputting the last one. + * + * vcpu_insn_exec() is a copy and paste of vcpu_insn_exec_with_regs() + * without the checking of register values when we've attempted to + * optimise with disas_assist. */ -static void vcpu_insn_exec(unsigned int cpu_index, void *udata) +static void insn_check_regs(CPU *cpu) { - GString *s; + for (int n = 0; n < cpu->registers->len; n++) { + Register *reg = cpu->registers->pdata[n]; + int sz; - /* Find or create vCPU in array */ - g_rw_lock_reader_lock(&expand_array_lock); - if (cpu_index >= last_exec->len) { - g_rw_lock_reader_unlock(&expand_array_lock); - expand_last_exec(cpu_index); - g_rw_lock_reader_lock(&expand_array_lock); + g_byte_array_set_size(reg->new, 0); + sz = qemu_plugin_read_register(reg->handle, reg->new); + g_assert(sz == reg->last->len); + + if (memcmp(reg->last->data, reg->new->data, sz)) { + GByteArray *temp = reg->last; + g_string_append_printf(cpu->last_exec, ", %s -> 0x", reg->name); + /* TODO: handle BE properly */ + for (int i = sz - 1; i >= 0; i--) { + g_string_append_printf(cpu->last_exec, "%02x", + reg->new->data[i]); + } + reg->last = reg->new; + reg->new = temp; + } } - s = g_ptr_array_index(last_exec, cpu_index); - g_rw_lock_reader_unlock(&expand_array_lock); +} + +/* Log last instruction while checking registers */ +static void vcpu_insn_exec_with_regs(unsigned int cpu_index, void *udata) +{ + CPU *cpu = get_cpu(cpu_index); + + /* Print previous instruction in cache */ + if (cpu->last_exec->len) { + if (cpu->registers) { + insn_check_regs(cpu); + } + + qemu_plugin_outs(cpu->last_exec->str); + qemu_plugin_outs("\n"); + } + + /* Store new instruction in cache */ + /* vcpu_mem will add memory access information to last_exec */ + g_string_printf(cpu->last_exec, "%u, ", cpu_index); + g_string_append(cpu->last_exec, (char *)udata); +} + +/* Log last instruction while checking registers, ignore next */ +static void vcpu_insn_exec_only_regs(unsigned int cpu_index, void *udata) +{ + CPU *cpu = get_cpu(cpu_index); /* Print previous instruction in cache */ - if (s->len) { - qemu_plugin_outs(s->str); + if (cpu->last_exec->len) { + if (cpu->registers) { + insn_check_regs(cpu); + } + + qemu_plugin_outs(cpu->last_exec->str); + qemu_plugin_outs("\n"); + } + + /* reset */ + cpu->last_exec->len = 0; +} + +/* Log last instruction without checking regs, setup next */ +static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
+{ + CPU *cpu = get_cpu(cpu_index); + + /* Print previous instruction in cache */ + if (cpu->last_exec->len) { + qemu_plugin_outs(cpu->last_exec->str); qemu_plugin_outs("\n"); } /* Store new instruction in cache */ /* vcpu_mem will add memory access information to last_exec */ - g_string_printf(s, "%u, ", cpu_index); - g_string_append(s, (char *)udata); + g_string_printf(cpu->last_exec, "%u, ", cpu_index); + g_string_append(cpu->last_exec, (char *)udata); } /** @@ -111,6 +178,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) { struct qemu_plugin_insn *insn; bool skip = (imatches || amatches); + bool check_regs_this = rmatches; + bool check_regs_next = false; size_t n = qemu_plugin_tb_n_insns(tb); for (size_t i = 0; i < n; i++) { @@ -131,7 +200,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) /* * If we are filtering we better check out if we have any * hits. The skip "latches" so we can track memory accesses - * after the instruction we care about. + * after the instruction we care about. Also enable register + * checking on the next instruction. */ if (skip && imatches) { int j; @@ -139,6 +209,7 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) char *m = g_ptr_array_index(imatches, j); if (g_str_has_prefix(insn_disas, m)) { skip = false; + check_regs_next = rmatches; } } } @@ -153,8 +224,39 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) } } + /* + * Check the disassembly to see if a register we care about + * will be affected by this instruction. This relies on the + * disassembler doing something sensible for the registers we + * care about. + */ + if (disas_assist && rmatches) { + check_regs_next = false; + gchar *args = g_strstr_len(insn_disas, -1, " "); + for (int n = 0; n < all_reg_names->len; n++) { + gchar *reg = g_ptr_array_index(all_reg_names, n); + if (g_strrstr(args, reg)) { + check_regs_next = true; + skip = false; + } + } + } + + /* + * We now have 3 choices: + * + * - Log insn + * - Log insn while checking registers + * - Don't log this insn but check if last insn changed registers + */ + if (skip) { - g_free(insn_disas); + if (check_regs_this) { + qemu_plugin_register_vcpu_insn_exec_cb(insn, + vcpu_insn_exec_only_regs, + QEMU_PLUGIN_CB_R_REGS, + NULL); + } } else { uint32_t insn_opcode; insn_opcode = *((uint32_t *)qemu_plugin_insn_data(insn)); @@ -167,30 +269,142 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) QEMU_PLUGIN_MEM_RW, NULL); /* Register callback on instruction */ - qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec, - QEMU_PLUGIN_CB_NO_REGS, output); + if (check_regs_this) { + qemu_plugin_register_vcpu_insn_exec_cb( + insn, vcpu_insn_exec_with_regs, + QEMU_PLUGIN_CB_R_REGS, + output); + } else { + qemu_plugin_register_vcpu_insn_exec_cb( + insn, vcpu_insn_exec, + QEMU_PLUGIN_CB_NO_REGS, + output); + } /* reset skip */ skip = (imatches || amatches); } + /* set regs for next */ + if (disas_assist && rmatches) { + check_regs_this = check_regs_next; + } + + g_free(insn_disas); } } +static Register *init_vcpu_register(qemu_plugin_reg_descriptor *desc) +{ + Register *reg = g_new0(Register, 1); + g_autofree gchar *lower = g_utf8_strdown(desc->name, -1); + int r; + + reg->handle = desc->handle; + reg->name = g_intern_string(lower); + reg->last = g_byte_array_new(); + reg->new = g_byte_array_new(); + + /* read the initial value */ + r = qemu_plugin_read_register(reg->handle, reg->last); + g_assert(r > 0); + return reg; +}
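/*
 * The Register handling above keeps two GByteArrays per tracked register:
 * "last" holds the value that was most recently logged and "new" receives
 * the fresh read; when the two differ the new value is printed and the
 * buffers are swapped. The standalone sketch below (plain GLib, no plugin
 * API; the 8-byte "register" values are invented for illustration) shows
 * that compare-and-swap pattern in isolation.
 */
#include <glib.h>
#include <stdio.h>
#include <string.h>

/* stand-in for qemu_plugin_read_register(): writes an 8-byte LE value */
static void fake_read_register(GByteArray *dst, guint64 value)
{
    g_byte_array_set_size(dst, 0);
    for (int i = 0; i < 8; i++) {
        guint8 b = (value >> (i * 8)) & 0xff;
        g_byte_array_append(dst, &b, 1);
    }
}

int main(void)
{
    GByteArray *last = g_byte_array_new();
    GByteArray *new = g_byte_array_new();
    guint64 samples[] = { 1, 1, 2, 3 };   /* values seen after each insn */

    fake_read_register(last, samples[0]); /* initial value */

    for (gsize i = 1; i < G_N_ELEMENTS(samples); i++) {
        fake_read_register(new, samples[i]);
        if (memcmp(last->data, new->data, new->len)) {
            /* print little-endian bytes most-significant first */
            printf("reg -> 0x");
            for (int b = new->len - 1; b >= 0; b--) {
                printf("%02x", new->data[b]);
            }
            printf("\n");
            /* swap so "last" always holds the value we just reported */
            GByteArray *tmp = last;
            last = new;
            new = tmp;
        }
    }

    g_byte_array_free(last, TRUE);
    g_byte_array_free(new, TRUE);
    return 0;
}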
+ +/* + * g_pattern_match_string has been deprecated in Glib since 2.70 and + * will complain about it if you try to use it. Fortunately the + * signature of both functions is the same making it easy to work + * around. + */ +static inline +gboolean g_pattern_spec_match_string_qemu(GPatternSpec *pspec, + const gchar *string) +{ +#if GLIB_CHECK_VERSION(2, 70, 0) + return g_pattern_spec_match_string(pspec, string); +#else + return g_pattern_match_string(pspec, string); +#endif +}; +#define g_pattern_spec_match_string(p, s) g_pattern_spec_match_string_qemu(p, s) + +static GPtrArray *registers_init(int vcpu_index) +{ + g_autoptr(GPtrArray) registers = g_ptr_array_new(); + g_autoptr(GArray) reg_list = qemu_plugin_get_registers(); + + if (rmatches && reg_list->len) { + /* + * Go through each register in the complete list and + * see if we want to track it. + */ + for (int r = 0; r < reg_list->len; r++) { + qemu_plugin_reg_descriptor *rd = &g_array_index( + reg_list, qemu_plugin_reg_descriptor, r); + for (int p = 0; p < rmatches->len; p++) { + g_autoptr(GPatternSpec) pat = g_pattern_spec_new(rmatches->pdata[p]); + g_autofree gchar *rd_lower = g_utf8_strdown(rd->name, -1); + if (g_pattern_spec_match_string(pat, rd->name) || + g_pattern_spec_match_string(pat, rd_lower)) { + Register *reg = init_vcpu_register(rd); + g_ptr_array_add(registers, reg); + + /* we need a list of regnames at TB translation time */ + if (disas_assist) { + g_mutex_lock(&add_reg_name_lock); + if (!g_ptr_array_find(all_reg_names, reg->name, NULL)) { + g_ptr_array_add(all_reg_names, (gpointer)reg->name); + } + g_mutex_unlock(&add_reg_name_lock); + } + } + } + } + } + + return registers->len ? g_steal_pointer(®isters) : NULL; +} + +/* + * Initialise a new vcpu/thread with: + * - last_exec tracking data + * - list of tracked registers + * - initial value of registers + * + * As we could have multiple threads trying to do this we need to + * serialise the expansion under a lock. + */ +static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index) +{ + CPU *c; + + g_rw_lock_writer_lock(&expand_array_lock); + if (vcpu_index >= cpus->len) { + g_array_set_size(cpus, vcpu_index + 1); + } + g_rw_lock_writer_unlock(&expand_array_lock); + + c = get_cpu(vcpu_index); + c->last_exec = g_string_new(NULL); + c->registers = registers_init(vcpu_index); +} + /** * On plugin exit, print last instruction in cache */ static void plugin_exit(qemu_plugin_id_t id, void *p) { guint i; - GString *s; - for (i = 0; i < last_exec->len; i++) { - s = g_ptr_array_index(last_exec, i); - if (s->str) { - qemu_plugin_outs(s->str); + g_rw_lock_reader_lock(&expand_array_lock); + for (i = 0; i < cpus->len; i++) { + CPU *c = get_cpu(i); + if (c->last_exec && c->last_exec->str) { + qemu_plugin_outs(c->last_exec->str); qemu_plugin_outs("\n"); } } + g_rw_lock_reader_unlock(&expand_array_lock); } /* Add a match to the array of matches */ @@ -199,7 +413,7 @@ static void parse_insn_match(char *match) if (!imatches) { imatches = g_ptr_array_new(); } - g_ptr_array_add(imatches, match); + g_ptr_array_add(imatches, g_strdup(match)); } static void parse_vaddr_match(char *match) @@ -212,6 +426,18 @@ static void parse_vaddr_match(char *match) g_array_append_val(amatches, v); } +/* + * We have to wait until vCPUs are started before we can check the + * patterns find anything. 
+ */ +static void add_regpat(char *regpat) +{ + if (!rmatches) { + rmatches = g_ptr_array_new(); + } + g_ptr_array_add(rmatches, g_strdup(regpat)); +} + /** * Install the plugin */ @@ -223,11 +449,8 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, * Initialize dynamic array to cache vCPU instruction. In user mode * we don't know the size before emulation. */ - if (info->system_emulation) { - last_exec = g_ptr_array_sized_new(info->system.max_vcpus); - } else { - last_exec = g_ptr_array_new(); - } + cpus = g_array_sized_new(true, true, sizeof(CPU), + info->system_emulation ? info->system.max_vcpus : 1); for (int i = 0; i < argc; i++) { char *opt = argv[i]; @@ -236,13 +459,22 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, parse_insn_match(tokens[1]); } else if (g_strcmp0(tokens[0], "afilter") == 0) { parse_vaddr_match(tokens[1]); + } else if (g_strcmp0(tokens[0], "reg") == 0) { + add_regpat(tokens[1]); + } else if (g_strcmp0(tokens[0], "rdisas") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &disas_assist)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + all_reg_names = g_ptr_array_new(); } else { fprintf(stderr, "option parsing failed: %s\n", opt); return -1; } } - /* Register translation block and exit callbacks */ + /* Register init, translation block and exit callbacks */ + qemu_plugin_register_vcpu_init_cb(id, vcpu_init); qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); diff --git a/contrib/plugins/hotblocks.c b/contrib/plugins/hotblocks.c index 4de1b134944..02bc5078bdd 100644 --- a/contrib/plugins/hotblocks.c +++ b/contrib/plugins/hotblocks.c @@ -34,8 +34,8 @@ static guint64 limit = 20; */ typedef struct { uint64_t start_addr; - uint64_t exec_count; - int trans_count; + struct qemu_plugin_scoreboard *exec_count; + int trans_count; unsigned long insns; } ExecCount; @@ -43,7 +43,17 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b) { ExecCount *ea = (ExecCount *) a; ExecCount *eb = (ExecCount *) b; - return ea->exec_count > eb->exec_count ? -1 : 1; + uint64_t count_a = + qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(ea->exec_count)); + uint64_t count_b = + qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(eb->exec_count)); + return count_a > count_b ? 
-1 : 1; +} + +static void exec_count_free(gpointer key, gpointer value, gpointer user_data) +{ + ExecCount *cnt = value; + qemu_plugin_scoreboard_free(cnt->exec_count); } static void plugin_exit(qemu_plugin_id_t id, void *p) @@ -52,7 +62,6 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) GList *counts, *it; int i; - g_mutex_lock(&lock); g_string_append_printf(report, "%d entries in the hash table\n", g_hash_table_size(hotblocks)); counts = g_hash_table_get_values(hotblocks); @@ -63,16 +72,21 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) for (i = 0; i < limit && it->next; i++, it = it->next) { ExecCount *rec = (ExecCount *) it->data; - g_string_append_printf(report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n", - rec->start_addr, rec->trans_count, - rec->insns, rec->exec_count); + g_string_append_printf( + report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n", + rec->start_addr, rec->trans_count, + rec->insns, + qemu_plugin_u64_sum( + qemu_plugin_scoreboard_u64(rec->exec_count))); } g_list_free(it); } - g_mutex_unlock(&lock); qemu_plugin_outs(report->str); + + g_hash_table_foreach(hotblocks, exec_count_free, NULL); + g_hash_table_destroy(hotblocks); } static void plugin_init(void) @@ -82,15 +96,9 @@ static void plugin_init(void) static void vcpu_tb_exec(unsigned int cpu_index, void *udata) { - ExecCount *cnt; - uint64_t hash = (uint64_t) udata; - - g_mutex_lock(&lock); - cnt = (ExecCount *) g_hash_table_lookup(hotblocks, (gconstpointer) hash); - /* should always succeed */ - g_assert(cnt); - cnt->exec_count++; - g_mutex_unlock(&lock); + ExecCount *cnt = (ExecCount *)udata; + qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(cnt->exec_count), + cpu_index, 1); } /* @@ -114,18 +122,20 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) cnt->start_addr = pc; cnt->trans_count = 1; cnt->insns = insns; + cnt->exec_count = qemu_plugin_scoreboard_new(sizeof(uint64_t)); g_hash_table_insert(hotblocks, (gpointer) hash, (gpointer) cnt); } g_mutex_unlock(&lock); if (do_inline) { - qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64, - &cnt->exec_count, 1); + qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + tb, QEMU_PLUGIN_INLINE_ADD_U64, + qemu_plugin_scoreboard_u64(cnt->exec_count), 1); } else { qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec, QEMU_PLUGIN_CB_NO_REGS, - (void *)hash); + (void *)cnt); } } diff --git a/contrib/plugins/howvec.c b/contrib/plugins/howvec.c index 644a7856bb2..94bbc53820a 100644 --- a/contrib/plugins/howvec.c +++ b/contrib/plugins/howvec.c @@ -43,13 +43,13 @@ typedef struct { uint32_t mask; uint32_t pattern; CountType what; - uint64_t count; + qemu_plugin_u64 count; } InsnClassExecCount; typedef struct { char *insn; uint32_t opcode; - uint64_t count; + qemu_plugin_u64 count; InsnClassExecCount *class; } InsnExecCount; @@ -159,12 +159,15 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b) { InsnExecCount *ea = (InsnExecCount *) a; InsnExecCount *eb = (InsnExecCount *) b; - return ea->count > eb->count ? -1 : 1; + uint64_t count_a = qemu_plugin_u64_sum(ea->count); + uint64_t count_b = qemu_plugin_u64_sum(eb->count); + return count_a > count_b ? 
-1 : 1; } static void free_record(gpointer data) { InsnExecCount *rec = (InsnExecCount *) data; + qemu_plugin_scoreboard_free(rec->count.score); g_free(rec->insn); g_free(rec); } @@ -173,6 +176,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) { g_autoptr(GString) report = g_string_new("Instruction Classes:\n"); int i; + uint64_t total_count; GList *counts; InsnClassExecCount *class = NULL; @@ -180,11 +184,12 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) class = &class_table[i]; switch (class->what) { case COUNT_CLASS: - if (class->count || verbose) { + total_count = qemu_plugin_u64_sum(class->count); + if (total_count || verbose) { g_string_append_printf(report, "Class: %-24s\t(%" PRId64 " hits)\n", class->class, - class->count); + total_count); } break; case COUNT_INDIVIDUAL: @@ -212,7 +217,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) "Instr: %-24s\t(%" PRId64 " hits)" "\t(op=0x%08x/%s)\n", rec->insn, - rec->count, + qemu_plugin_u64_sum(rec->count), rec->opcode, rec->class ? rec->class->class : "un-categorised"); @@ -221,6 +226,12 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) } g_hash_table_destroy(insns); + for (i = 0; i < ARRAY_SIZE(class_tables); i++) { + for (int j = 0; j < class_tables[i].table_sz; ++j) { + qemu_plugin_scoreboard_free(class_tables[i].table[j].count.score); + } + } + qemu_plugin_outs(report->str); } @@ -232,11 +243,12 @@ static void plugin_init(void) static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata) { - uint64_t *count = (uint64_t *) udata; - (*count)++; + struct qemu_plugin_scoreboard *score = udata; + qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(score), cpu_index, 1); } -static uint64_t *find_counter(struct qemu_plugin_insn *insn) +static struct qemu_plugin_scoreboard *find_counter( + struct qemu_plugin_insn *insn) { int i; uint64_t *cnt = NULL; @@ -265,7 +277,7 @@ static uint64_t *find_counter(struct qemu_plugin_insn *insn) case COUNT_NONE: return NULL; case COUNT_CLASS: - return &class->count; + return class->count.score; case COUNT_INDIVIDUAL: { InsnExecCount *icount; @@ -279,13 +291,16 @@ static uint64_t *find_counter(struct qemu_plugin_insn *insn) icount->opcode = opcode; icount->insn = qemu_plugin_insn_disas(insn); icount->class = class; + struct qemu_plugin_scoreboard *score = + qemu_plugin_scoreboard_new(sizeof(uint64_t)); + icount->count = qemu_plugin_scoreboard_u64(score); g_hash_table_insert(insns, GUINT_TO_POINTER(opcode), (gpointer) icount); } g_mutex_unlock(&lock); - return &icount->count; + return icount->count.score; } default: g_assert_not_reached(); @@ -300,14 +315,14 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) size_t i; for (i = 0; i < n; i++) { - uint64_t *cnt; struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); - cnt = find_counter(insn); + struct qemu_plugin_scoreboard *cnt = find_counter(insn); if (cnt) { if (do_inline) { - qemu_plugin_register_vcpu_insn_exec_inline( - insn, QEMU_PLUGIN_INLINE_ADD_U64, cnt, 1); + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu( + insn, QEMU_PLUGIN_INLINE_ADD_U64, + qemu_plugin_scoreboard_u64(cnt), 1); } else { qemu_plugin_register_vcpu_insn_exec_cb( insn, vcpu_insn_exec_before, QEMU_PLUGIN_CB_NO_REGS, cnt); @@ -322,6 +337,14 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, { int i; + for (i = 0; i < ARRAY_SIZE(class_tables); i++) { + for (int j = 0; j < class_tables[i].table_sz; ++j) { + struct qemu_plugin_scoreboard *score = + qemu_plugin_scoreboard_new(sizeof(uint64_t)); + 
class_tables[i].table[j].count = qemu_plugin_scoreboard_u64(score); + } + } + /* Select a class table appropriate to the guest architecture */ for (i = 0; i < ARRAY_SIZE(class_tables); i++) { ClassSelector *entry = &class_tables[i]; diff --git a/contrib/vhost-user-blk/meson.build b/contrib/vhost-user-blk/meson.build index dcb9e2ffcd0..ac1eece37a4 100644 --- a/contrib/vhost-user-blk/meson.build +++ b/contrib/vhost-user-blk/meson.build @@ -1,4 +1,4 @@ executable('vhost-user-blk', files('vhost-user-blk.c'), dependencies: [qemuutil, vhost_user], - build_by_default: targetos == 'linux', + build_by_default: host_os == 'linux', install: false) diff --git a/contrib/vhost-user-input/meson.build b/contrib/vhost-user-input/meson.build index 21a9ed4f15e..840d866594b 100644 --- a/contrib/vhost-user-input/meson.build +++ b/contrib/vhost-user-input/meson.build @@ -1,4 +1,4 @@ executable('vhost-user-input', files('main.c'), dependencies: [qemuutil, vhost_user], - build_by_default: targetos == 'linux', + build_by_default: host_os == 'linux', install: false) diff --git a/contrib/vhost-user-scsi/meson.build b/contrib/vhost-user-scsi/meson.build index cc893f6f203..44be04853e4 100644 --- a/contrib/vhost-user-scsi/meson.build +++ b/contrib/vhost-user-scsi/meson.build @@ -1,6 +1,6 @@ if libiscsi.found() executable('vhost-user-scsi', files('vhost-user-scsi.c'), dependencies: [qemuutil, libiscsi, vhost_user], - build_by_default: targetos == 'linux', + build_by_default: host_os == 'linux', install: false) endif diff --git a/cpu-common.c b/cpu-common.c index c81fd72d16d..ce78273af59 100644 --- a/cpu-common.c +++ b/cpu-common.c @@ -351,11 +351,11 @@ void process_queued_cpu_work(CPUState *cpu) * BQL, so it goes to sleep; start_exclusive() is sleeping too, so * neither CPU can proceed. 
*/ - qemu_mutex_unlock_iothread(); + bql_unlock(); start_exclusive(); wi->func(cpu, wi->data); end_exclusive(); - qemu_mutex_lock_iothread(); + bql_lock(); } else { wi->func(cpu, wi->data); } diff --git a/cpu-target.c b/cpu-target.c index 508013e23d2..4c0621bf33b 100644 --- a/cpu-target.c +++ b/cpu-target.c @@ -24,12 +24,14 @@ #include "hw/qdev-core.h" #include "hw/qdev-properties.h" #include "qemu/error-report.h" +#include "qemu/qemu-print.h" #include "migration/vmstate.h" #ifdef CONFIG_USER_ONLY #include "qemu.h" #else #include "hw/core/sysemu-cpu-ops.h" #include "exec/address-spaces.h" +#include "exec/memory.h" #endif #include "sysemu/cpus.h" #include "sysemu/tcg.h" @@ -43,9 +45,6 @@ #include "trace/trace-root.h" #include "qemu/accel.h" -uintptr_t qemu_host_page_size; -intptr_t qemu_host_page_mask; - #ifndef CONFIG_USER_ONLY static int cpu_common_post_load(void *opaque, int version_id) { @@ -87,7 +86,7 @@ static const VMStateDescription vmstate_cpu_common_exception_index = { .version_id = 1, .minimum_version_id = 1, .needed = cpu_common_exception_index_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(exception_index, CPUState), VMSTATE_END_OF_LIST() } @@ -105,7 +104,7 @@ static const VMStateDescription vmstate_cpu_common_crash_occurred = { .version_id = 1, .minimum_version_id = 1, .needed = cpu_common_crash_occurred_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(crash_occurred, CPUState), VMSTATE_END_OF_LIST() } @@ -117,12 +116,12 @@ const VMStateDescription vmstate_cpu_common = { .minimum_version_id = 1, .pre_load = cpu_common_pre_load, .post_load = cpu_common_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(halted, CPUState), VMSTATE_UINT32(interrupt_request, CPUState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_cpu_common_exception_index, &vmstate_cpu_common_crash_occurred, NULL @@ -203,6 +202,7 @@ static Property cpu_common_props[] = { DEFINE_PROP_END_OF_LIST(), }; +#ifndef CONFIG_USER_ONLY static bool cpu_get_start_powered_off(Object *obj, Error **errp) { CPUState *cpu = CPU(obj); @@ -214,12 +214,13 @@ static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp) CPUState *cpu = CPU(obj); cpu->start_powered_off = value; } +#endif void cpu_class_init_props(DeviceClass *dc) { +#ifndef CONFIG_USER_ONLY ObjectClass *oc = OBJECT_CLASS(dc); - device_class_set_props(dc, cpu_common_props); /* * We can't use DEFINE_PROP_BOOL in the Property array for this * property, because we want this to be settable after realize. 
@@ -227,6 +228,9 @@ void cpu_class_init_props(DeviceClass *dc) object_class_property_add_bool(oc, "start-powered-off", cpu_get_start_powered_off, cpu_set_start_powered_off); +#endif + + device_class_set_props(dc, cpu_common_props); } void cpu_exec_initfn(CPUState *cpu) @@ -241,6 +245,21 @@ void cpu_exec_initfn(CPUState *cpu) #endif } +char *cpu_model_from_type(const char *typename) +{ + const char *suffix = "-" CPU_RESOLVING_TYPE; + + if (!object_class_by_name(typename)) { + return NULL; + } + + if (g_str_has_suffix(typename, suffix)) { + return g_strndup(typename, strlen(typename) - strlen(suffix)); + } + + return g_strdup(typename); +} + const char *parse_cpu_option(const char *cpu_option) { ObjectClass *oc; @@ -268,43 +287,36 @@ const char *parse_cpu_option(const char *cpu_option) return cpu_type; } -void list_cpus(void) +#ifndef cpu_list +static void cpu_list_entry(gpointer data, gpointer user_data) { - /* XXX: implement xxx_cpu_list for targets that still miss it */ -#if defined(cpu_list) - cpu_list(); -#endif + CPUClass *cc = CPU_CLASS(OBJECT_CLASS(data)); + const char *typename = object_class_get_name(OBJECT_CLASS(data)); + g_autofree char *model = cpu_model_from_type(typename); + + if (cc->deprecation_note) { + qemu_printf(" %s (deprecated)\n", model); + } else { + qemu_printf(" %s\n", model); + } } -#if defined(CONFIG_USER_ONLY) -void tb_invalidate_phys_addr(hwaddr addr) +static void cpu_list(void) { - mmap_lock(); - tb_invalidate_phys_page(addr); - mmap_unlock(); -} -#else -void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs) -{ - ram_addr_t ram_addr; - MemoryRegion *mr; - hwaddr l = 1; - - if (!tcg_enabled()) { - return; - } + GSList *list; - RCU_READ_LOCK_GUARD(); - mr = address_space_translate(as, addr, &addr, &l, false, attrs); - if (!(memory_region_is_ram(mr) - || memory_region_is_romd(mr))) { - return; - } - ram_addr = memory_region_get_ram_addr(mr) + addr; - tb_invalidate_phys_page(ram_addr); + list = object_class_get_list_sorted(TYPE_CPU, false); + qemu_printf("Available CPUs:\n"); + g_slist_foreach(list, cpu_list_entry, NULL); + g_slist_free(list); } #endif +void list_cpus(void) +{ + cpu_list(); +} + /* enable or disable single step mode. EXCP_DEBUG is returned by the CPU loop after each instruction */ void cpu_single_step(CPUState *cpu, int enabled) @@ -368,6 +380,9 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, vaddr l, page; void * p; uint8_t *buf = ptr; + ssize_t written; + int ret = -1; + int fd = -1; while (len > 0) { page = addr & TARGET_PAGE_MASK; @@ -375,30 +390,75 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, if (l > len) l = len; flags = page_get_flags(page); - if (!(flags & PAGE_VALID)) - return -1; + if (!(flags & PAGE_VALID)) { + goto out_close; + } if (is_write) { - if (!(flags & PAGE_WRITE)) - return -1; + if (flags & PAGE_WRITE) { + /* XXX: this code should not depend on lock_user */ + p = lock_user(VERIFY_WRITE, addr, l, 0); + if (!p) { + goto out_close; + } + memcpy(p, buf, l); + unlock_user(p, addr, l); + } else { + /* Bypass the host page protection using ptrace. */ + if (fd == -1) { + fd = open("/proc/self/mem", O_WRONLY); + if (fd == -1) { + goto out; + } + } + /* + * If there is a TranslationBlock and we weren't bypassing the + * host page protection, the memcpy() above would SEGV, + * ultimately leading to page_unprotect(). So invalidate the + * translations manually. Both invalidation and pwrite() must + * be under mmap_lock() in order to prevent the creation of + * another TranslationBlock in between. 
+ */ + mmap_lock(); + tb_invalidate_phys_range(addr, addr + l - 1); + written = pwrite(fd, buf, l, + (off_t)(uintptr_t)g2h_untagged(addr)); + mmap_unlock(); + if (written != l) { + goto out_close; + } + } + } else if (flags & PAGE_READ) { /* XXX: this code should not depend on lock_user */ - if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) - return -1; - memcpy(p, buf, l); - unlock_user(p, addr, l); - } else { - if (!(flags & PAGE_READ)) - return -1; - /* XXX: this code should not depend on lock_user */ - if (!(p = lock_user(VERIFY_READ, addr, l, 1))) - return -1; + p = lock_user(VERIFY_READ, addr, l, 1); + if (!p) { + goto out_close; + } memcpy(buf, p, l); unlock_user(p, addr, 0); + } else { + /* Bypass the host page protection using ptrace. */ + if (fd == -1) { + fd = open("/proc/self/mem", O_RDONLY); + if (fd == -1) { + goto out; + } + } + if (pread(fd, buf, l, + (off_t)(uintptr_t)g2h_untagged(addr)) != l) { + goto out_close; + } } len -= l; buf += l; addr += l; } - return 0; + ret = 0; +out_close: + if (fd != -1) { + close(fd); + } +out: + return ret; } #endif @@ -411,16 +471,3 @@ const char *target_name(void) { return TARGET_NAME; } - -void page_size_init(void) -{ - /* NOTE: we can always suppose that qemu_host_page_size >= - TARGET_PAGE_SIZE */ - if (qemu_host_page_size == 0) { - qemu_host_page_size = qemu_real_host_page_size(); - } - if (qemu_host_page_size < TARGET_PAGE_SIZE) { - qemu_host_page_size = TARGET_PAGE_SIZE; - } - qemu_host_page_mask = -(intptr_t)qemu_host_page_size; -} diff --git a/crypto/block-luks.c b/crypto/block-luks.c index fb01ec38bbf..3ee928fb5ad 100644 --- a/crypto/block-luks.c +++ b/crypto/block-luks.c @@ -95,12 +95,23 @@ qcrypto_block_luks_cipher_size_map_twofish[] = { { 0, 0 }, }; +#ifdef CONFIG_CRYPTO_SM4 +static const QCryptoBlockLUKSCipherSizeMap +qcrypto_block_luks_cipher_size_map_sm4[] = { + { 16, QCRYPTO_CIPHER_ALG_SM4}, + { 0, 0 }, +}; +#endif + static const QCryptoBlockLUKSCipherNameMap qcrypto_block_luks_cipher_name_map[] = { { "aes", qcrypto_block_luks_cipher_size_map_aes }, { "cast5", qcrypto_block_luks_cipher_size_map_cast5 }, { "serpent", qcrypto_block_luks_cipher_size_map_serpent }, { "twofish", qcrypto_block_luks_cipher_size_map_twofish }, +#ifdef CONFIG_CRYPTO_SM4 + { "sm4", qcrypto_block_luks_cipher_size_map_sm4}, +#endif }; QEMU_BUILD_BUG_ON(sizeof(struct QCryptoBlockLUKSKeySlot) != 48); @@ -457,12 +468,15 @@ qcrypto_block_luks_load_header(QCryptoBlock *block, * Does basic sanity checks on the LUKS header */ static int -qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks, Error **errp) +qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks, + unsigned int flags, + Error **errp) { size_t i, j; unsigned int header_sectors = QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / QCRYPTO_BLOCK_LUKS_SECTOR_SIZE; + bool detached = flags & QCRYPTO_BLOCK_OPEN_DETACHED; if (memcmp(luks->header.magic, qcrypto_block_luks_magic, QCRYPTO_BLOCK_LUKS_MAGIC_LEN) != 0) { @@ -494,7 +508,7 @@ qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks, Error **errp) return -1; } - if (luks->header.payload_offset_sector < + if (!detached && luks->header.payload_offset_sector < DIV_ROUND_UP(QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET, QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) { error_setg(errp, "LUKS payload is overlapping with the header"); @@ -543,7 +557,7 @@ qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks, Error **errp) return -1; } - if (start1 + len1 > luks->header.payload_offset_sector) { + if (!detached && start1 + len1 > luks->header.payload_offset_sector) { 
error_setg(errp, "Keyslot %zu is overlapping with the encrypted payload", i); @@ -1203,7 +1217,7 @@ qcrypto_block_luks_open(QCryptoBlock *block, goto fail; } - if (qcrypto_block_luks_check_header(luks, errp) < 0) { + if (qcrypto_block_luks_check_header(luks, flags, errp) < 0) { goto fail; } @@ -1257,6 +1271,7 @@ qcrypto_block_luks_open(QCryptoBlock *block, block->sector_size = QCRYPTO_BLOCK_LUKS_SECTOR_SIZE; block->payload_offset = luks->header.payload_offset_sector * block->sector_size; + block->detached_header = (block->payload_offset == 0) ? true : false; return 0; @@ -1301,6 +1316,7 @@ qcrypto_block_luks_create(QCryptoBlock *block, const char *hash_alg; g_autofree char *cipher_mode_spec = NULL; uint64_t iters; + uint64_t detached_header_size; memcpy(&luks_opts, &options->u.luks, sizeof(luks_opts)); if (!luks_opts.has_iter_time) { @@ -1529,19 +1545,32 @@ qcrypto_block_luks_create(QCryptoBlock *block, slot->stripes = QCRYPTO_BLOCK_LUKS_STRIPES; } - /* The total size of the LUKS headers is the partition header + key - * slot headers, rounded up to the nearest sector, combined with - * the size of each master key material region, also rounded up - * to the nearest sector */ - luks->header.payload_offset_sector = header_sectors + - QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS * split_key_sectors; + if (block->detached_header) { + /* + * For a detached LUKS header image, set the payload_offset_sector + * to 0 to specify the starting point for read/write + */ + luks->header.payload_offset_sector = 0; + } else { + /* + * The total size of the LUKS headers is the partition header + key + * slot headers, rounded up to the nearest sector, combined with + * the size of each master key material region, also rounded up + * to the nearest sector + */ + luks->header.payload_offset_sector = header_sectors + + QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS * split_key_sectors; + } block->sector_size = QCRYPTO_BLOCK_LUKS_SECTOR_SIZE; block->payload_offset = luks->header.payload_offset_sector * block->sector_size; + detached_header_size = + (header_sectors + QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS * + split_key_sectors) * block->sector_size; /* Reserve header space to match payload offset */ - initfunc(block, block->payload_offset, opaque, &local_err); + initfunc(block, detached_header_size, opaque, &local_err); if (local_err) { error_propagate(errp, local_err); goto error; @@ -1867,6 +1896,7 @@ static int qcrypto_block_luks_get_info(QCryptoBlock *block, info->u.luks.master_key_iters = luks->header.master_key_iterations; info->u.luks.uuid = g_strndup((const char *)luks->header.uuid, sizeof(luks->header.uuid)); + info->u.luks.detached_header = block->detached_header; for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { slot = g_new0(QCryptoBlockInfoLUKSSlot, 1); diff --git a/crypto/block.c b/crypto/block.c index 7bb4b74a37c..506ea1d1a31 100644 --- a/crypto/block.c +++ b/crypto/block.c @@ -87,6 +87,7 @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options, QCryptoBlockInitFunc initfunc, QCryptoBlockWriteFunc writefunc, void *opaque, + unsigned int flags, Error **errp) { QCryptoBlock *block = g_new0(QCryptoBlock, 1); @@ -102,6 +103,7 @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options, } block->driver = qcrypto_block_drivers[options->format]; + block->detached_header = flags & QCRYPTO_BLOCK_CREATE_DETACHED; if (block->driver->create(block, options, optprefix, initfunc, writefunc, opaque, errp) < 0) { @@ -146,7 +148,7 @@ qcrypto_block_calculate_payload_offset(QCryptoBlockCreateOptions *create_opts, 
qcrypto_block_create(create_opts, optprefix, qcrypto_block_headerlen_hdr_init_func, qcrypto_block_headerlen_hdr_write_func, - len, errp); + len, 0, errp); return crypto != NULL; } diff --git a/crypto/blockpriv.h b/crypto/blockpriv.h index 3c7ccea5040..836f3b47266 100644 --- a/crypto/blockpriv.h +++ b/crypto/blockpriv.h @@ -42,6 +42,8 @@ struct QCryptoBlock { size_t niv; uint64_t payload_offset; /* In bytes */ uint64_t sector_size; /* In bytes */ + + bool detached_header; /* True if disk has a detached LUKS header */ }; struct QCryptoBlockDriver { diff --git a/crypto/cipher-gcrypt.c.inc b/crypto/cipher-gcrypt.c.inc index a6a0117717f..4a8314746db 100644 --- a/crypto/cipher-gcrypt.c.inc +++ b/crypto/cipher-gcrypt.c.inc @@ -20,6 +20,56 @@ #include +static int qcrypto_cipher_alg_to_gcry_alg(QCryptoCipherAlgorithm alg) +{ + switch (alg) { + case QCRYPTO_CIPHER_ALG_DES: + return GCRY_CIPHER_DES; + case QCRYPTO_CIPHER_ALG_3DES: + return GCRY_CIPHER_3DES; + case QCRYPTO_CIPHER_ALG_AES_128: + return GCRY_CIPHER_AES128; + case QCRYPTO_CIPHER_ALG_AES_192: + return GCRY_CIPHER_AES192; + case QCRYPTO_CIPHER_ALG_AES_256: + return GCRY_CIPHER_AES256; + case QCRYPTO_CIPHER_ALG_CAST5_128: + return GCRY_CIPHER_CAST5; + case QCRYPTO_CIPHER_ALG_SERPENT_128: + return GCRY_CIPHER_SERPENT128; + case QCRYPTO_CIPHER_ALG_SERPENT_192: + return GCRY_CIPHER_SERPENT192; + case QCRYPTO_CIPHER_ALG_SERPENT_256: + return GCRY_CIPHER_SERPENT256; + case QCRYPTO_CIPHER_ALG_TWOFISH_128: + return GCRY_CIPHER_TWOFISH128; + case QCRYPTO_CIPHER_ALG_TWOFISH_256: + return GCRY_CIPHER_TWOFISH; +#ifdef CONFIG_CRYPTO_SM4 + case QCRYPTO_CIPHER_ALG_SM4: + return GCRY_CIPHER_SM4; +#endif + default: + return GCRY_CIPHER_NONE; + } +} + +static int qcrypto_cipher_mode_to_gcry_mode(QCryptoCipherMode mode) +{ + switch (mode) { + case QCRYPTO_CIPHER_MODE_ECB: + return GCRY_CIPHER_MODE_ECB; + case QCRYPTO_CIPHER_MODE_XTS: + return GCRY_CIPHER_MODE_XTS; + case QCRYPTO_CIPHER_MODE_CBC: + return GCRY_CIPHER_MODE_CBC; + case QCRYPTO_CIPHER_MODE_CTR: + return GCRY_CIPHER_MODE_CTR; + default: + return GCRY_CIPHER_MODE_NONE; + } +} + bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, QCryptoCipherMode mode) { @@ -35,11 +85,19 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, case QCRYPTO_CIPHER_ALG_SERPENT_256: case QCRYPTO_CIPHER_ALG_TWOFISH_128: case QCRYPTO_CIPHER_ALG_TWOFISH_256: +#ifdef CONFIG_CRYPTO_SM4 + case QCRYPTO_CIPHER_ALG_SM4: +#endif break; default: return false; } + if (gcry_cipher_algo_info(qcrypto_cipher_alg_to_gcry_alg(alg), + GCRYCTL_TEST_ALGO, NULL, NULL) != 0) { + return false; + } + switch (mode) { case QCRYPTO_CIPHER_MODE_ECB: case QCRYPTO_CIPHER_MODE_CBC: @@ -185,67 +243,26 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, return NULL; } - switch (alg) { - case QCRYPTO_CIPHER_ALG_DES: - gcryalg = GCRY_CIPHER_DES; - break; - case QCRYPTO_CIPHER_ALG_3DES: - gcryalg = GCRY_CIPHER_3DES; - break; - case QCRYPTO_CIPHER_ALG_AES_128: - gcryalg = GCRY_CIPHER_AES128; - break; - case QCRYPTO_CIPHER_ALG_AES_192: - gcryalg = GCRY_CIPHER_AES192; - break; - case QCRYPTO_CIPHER_ALG_AES_256: - gcryalg = GCRY_CIPHER_AES256; - break; - case QCRYPTO_CIPHER_ALG_CAST5_128: - gcryalg = GCRY_CIPHER_CAST5; - break; - case QCRYPTO_CIPHER_ALG_SERPENT_128: - gcryalg = GCRY_CIPHER_SERPENT128; - break; - case QCRYPTO_CIPHER_ALG_SERPENT_192: - gcryalg = GCRY_CIPHER_SERPENT192; - break; - case QCRYPTO_CIPHER_ALG_SERPENT_256: - gcryalg = GCRY_CIPHER_SERPENT256; - break; - case QCRYPTO_CIPHER_ALG_TWOFISH_128: - gcryalg = 
GCRY_CIPHER_TWOFISH128; - break; - case QCRYPTO_CIPHER_ALG_TWOFISH_256: - gcryalg = GCRY_CIPHER_TWOFISH; - break; - default: + gcryalg = qcrypto_cipher_alg_to_gcry_alg(alg); + if (gcryalg == GCRY_CIPHER_NONE) { error_setg(errp, "Unsupported cipher algorithm %s", QCryptoCipherAlgorithm_str(alg)); return NULL; } - drv = &qcrypto_gcrypt_driver; - switch (mode) { - case QCRYPTO_CIPHER_MODE_ECB: - gcrymode = GCRY_CIPHER_MODE_ECB; - break; - case QCRYPTO_CIPHER_MODE_XTS: - gcrymode = GCRY_CIPHER_MODE_XTS; - break; - case QCRYPTO_CIPHER_MODE_CBC: - gcrymode = GCRY_CIPHER_MODE_CBC; - break; - case QCRYPTO_CIPHER_MODE_CTR: - drv = &qcrypto_gcrypt_ctr_driver; - gcrymode = GCRY_CIPHER_MODE_CTR; - break; - default: + gcrymode = qcrypto_cipher_mode_to_gcry_mode(mode); + if (gcrymode == GCRY_CIPHER_MODE_NONE) { error_setg(errp, "Unsupported cipher mode %s", QCryptoCipherMode_str(mode)); return NULL; } + if (mode == QCRYPTO_CIPHER_MODE_CTR) { + drv = &qcrypto_gcrypt_ctr_driver; + } else { + drv = &qcrypto_gcrypt_driver; + } + ctx = g_new0(QCryptoCipherGcrypt, 1); ctx->base.driver = drv; diff --git a/crypto/cipher-nettle.c.inc b/crypto/cipher-nettle.c.inc index 24cc61f87bf..42b39e18a23 100644 --- a/crypto/cipher-nettle.c.inc +++ b/crypto/cipher-nettle.c.inc @@ -33,6 +33,9 @@ #ifndef CONFIG_QEMU_PRIVATE_XTS #include #endif +#ifdef CONFIG_CRYPTO_SM4 +#include +#endif static inline bool qcrypto_length_check(size_t len, size_t blocksize, Error **errp) @@ -426,6 +429,30 @@ DEFINE_ECB_CBC_CTR_XTS(qcrypto_nettle_twofish, QCryptoNettleTwofish, TWOFISH_BLOCK_SIZE, twofish_encrypt_native, twofish_decrypt_native) +#ifdef CONFIG_CRYPTO_SM4 +typedef struct QCryptoNettleSm4 { + QCryptoCipher base; + struct sm4_ctx key[2]; +} QCryptoNettleSm4; + +static void sm4_encrypt_native(void *ctx, size_t length, + uint8_t *dst, const uint8_t *src) +{ + struct sm4_ctx *keys = ctx; + sm4_crypt(&keys[0], length, dst, src); +} + +static void sm4_decrypt_native(void *ctx, size_t length, + uint8_t *dst, const uint8_t *src) +{ + struct sm4_ctx *keys = ctx; + sm4_crypt(&keys[1], length, dst, src); +} + +DEFINE_ECB(qcrypto_nettle_sm4, + QCryptoNettleSm4, SM4_BLOCK_SIZE, + sm4_encrypt_native, sm4_decrypt_native) +#endif bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, QCryptoCipherMode mode) @@ -443,6 +470,9 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, case QCRYPTO_CIPHER_ALG_TWOFISH_128: case QCRYPTO_CIPHER_ALG_TWOFISH_192: case QCRYPTO_CIPHER_ALG_TWOFISH_256: +#ifdef CONFIG_CRYPTO_SM4 + case QCRYPTO_CIPHER_ALG_SM4: +#endif break; default: return false; @@ -701,6 +731,25 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, return &ctx->base; } +#ifdef CONFIG_CRYPTO_SM4 + case QCRYPTO_CIPHER_ALG_SM4: + { + QCryptoNettleSm4 *ctx = g_new0(QCryptoNettleSm4, 1); + + switch (mode) { + case QCRYPTO_CIPHER_MODE_ECB: + ctx->base.driver = &qcrypto_nettle_sm4_driver_ecb; + break; + default: + goto bad_cipher_mode; + } + + sm4_set_encrypt_key(&ctx->key[0], key); + sm4_set_decrypt_key(&ctx->key[1], key); + + return &ctx->base; + } +#endif default: error_setg(errp, "Unsupported cipher algorithm %s", diff --git a/crypto/cipher.c b/crypto/cipher.c index 74b09a5b261..5f512768ea3 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -38,6 +38,9 @@ static const size_t alg_key_len[QCRYPTO_CIPHER_ALG__MAX] = { [QCRYPTO_CIPHER_ALG_TWOFISH_128] = 16, [QCRYPTO_CIPHER_ALG_TWOFISH_192] = 24, [QCRYPTO_CIPHER_ALG_TWOFISH_256] = 32, +#ifdef CONFIG_CRYPTO_SM4 + [QCRYPTO_CIPHER_ALG_SM4] = 16, +#endif }; static const size_t 
alg_block_len[QCRYPTO_CIPHER_ALG__MAX] = { @@ -53,6 +56,9 @@ static const size_t alg_block_len[QCRYPTO_CIPHER_ALG__MAX] = { [QCRYPTO_CIPHER_ALG_TWOFISH_128] = 16, [QCRYPTO_CIPHER_ALG_TWOFISH_192] = 16, [QCRYPTO_CIPHER_ALG_TWOFISH_256] = 16, +#ifdef CONFIG_CRYPTO_SM4 + [QCRYPTO_CIPHER_ALG_SM4] = 16, +#endif }; static const bool mode_need_iv[QCRYPTO_CIPHER_MODE__MAX] = { diff --git a/disas/disas-mon.c b/disas/disas-mon.c index 48ac492c6ca..5d6d9aa02d0 100644 --- a/disas/disas-mon.c +++ b/disas/disas-mon.c @@ -34,6 +34,7 @@ void monitor_disas(Monitor *mon, CPUState *cpu, uint64_t pc, disas_initialize_debug_target(&s, cpu); s.info.fprintf_func = disas_gstring_printf; s.info.stream = (FILE *)ds; /* abuse this slot */ + s.info.show_opcodes = true; if (is_physical) { s.info.read_memory_func = physical_read_memory; diff --git a/disas/disas.c b/disas/disas.c index 0d2d06c2ecc..7e3b0bb46c5 100644 --- a/disas/disas.c +++ b/disas/disas.c @@ -211,6 +211,7 @@ void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size) s.info.stream = out; s.info.buffer_vma = code; s.info.buffer_length = size; + s.info.show_opcodes = true; if (s.info.cap_arch >= 0 && cap_disas_target(&s.info, code, size)) { return; @@ -299,6 +300,7 @@ void disas(FILE *out, const void *code, size_t size) s.info.buffer = code; s.info.buffer_vma = (uintptr_t)code; s.info.buffer_length = size; + s.info.show_opcodes = true; if (s.info.cap_arch >= 0 && cap_disas_host(&s.info, code, size)) { return; diff --git a/disas/hppa.c b/disas/hppa.c index cce4f4aa374..49e2231ae62 100644 --- a/disas/hppa.c +++ b/disas/hppa.c @@ -1609,6 +1609,10 @@ static const struct pa_opcode pa_opcodes[] = { "call", 0xe800a000, 0xffe0e000, "nW", pa10, FLAG_STRICT}, { "ret", 0xe840d000, 0xfffffffd, "n", pa20, FLAG_STRICT}, +/* Opcodes assigned to QEMU, used by SeaBIOS firmware and Linux kernel */ +{ "HALT QEMU", 0xfffdead0, 0xfffffffd, "n", pa10, FLAG_STRICT}, +{ "RESET QEMU", 0xfffdead1, 0xfffffffd, "n", pa10, FLAG_STRICT}, +{ "RESTORE SHR",0xfffdead2, 0xfffffffd, "n", pa10, FLAG_STRICT}, }; #define NUMOPCODES ((sizeof pa_opcodes)/(sizeof pa_opcodes[0])) @@ -1968,9 +1972,11 @@ print_insn_hppa (bfd_vma memaddr, disassemble_info *info) insn = bfd_getb32 (buffer); - info->fprintf_func(info->stream, " %02x %02x %02x %02x ", - (insn >> 24) & 0xff, (insn >> 16) & 0xff, - (insn >> 8) & 0xff, insn & 0xff); + if (info->show_opcodes) { + info->fprintf_func(info->stream, " %02x %02x %02x %02x ", + (insn >> 24) & 0xff, (insn >> 16) & 0xff, + (insn >> 8) & 0xff, insn & 0xff); + } for (i = 0; i < NUMOPCODES; ++i) { diff --git a/disas/nanomips.c b/disas/nanomips.c index a0253598dd6..db0c297b8dc 100644 --- a/disas/nanomips.c +++ b/disas/nanomips.c @@ -36,35 +36,6 @@ typedef uint32_t uint32; typedef uint16_t uint16; typedef uint64_t img_address; -typedef enum { - instruction, - call_instruction, - branch_instruction, - return_instruction, - reserved_block, - pool, -} TABLE_ENTRY_TYPE; - -typedef enum { - MIPS64_ = 0x00000001, - XNP_ = 0x00000002, - XMMS_ = 0x00000004, - EVA_ = 0x00000008, - DSP_ = 0x00000010, - MT_ = 0x00000020, - EJTAG_ = 0x00000040, - TLBINV_ = 0x00000080, - CP0_ = 0x00000100, - CP1_ = 0x00000200, - CP2_ = 0x00000400, - UDI_ = 0x00000800, - MCU_ = 0x00001000, - VZ_ = 0x00002000, - TLB_ = 0x00004000, - MVH_ = 0x00008000, - ALL_ATTRIBUTES = 0xffffffffull, -} TABLE_ATTRIBUTE_TYPE; - typedef struct Dis_info { img_address m_pc; fprintf_function fprintf_func; @@ -72,22 +43,6 @@ typedef struct Dis_info { sigjmp_buf buf; } Dis_info; -typedef bool 
(*conditional_function)(uint64 instruction); -typedef char * (*disassembly_function)(uint64 instruction, - Dis_info *info); - -typedef struct Pool { - TABLE_ENTRY_TYPE type; - const struct Pool *next_table; - int next_table_size; - int instructions_size; - uint64 mask; - uint64 value; - disassembly_function disassembly; - conditional_function condition; - uint64 attributes; -} Pool; - #define IMGASSERTONCE(test) @@ -544,58 +499,6 @@ static uint64 extract_op_code_value(const uint16 *data, int size) } -/* - * Recurse through tables until the instruction is found then return - * the string and size - * - * inputs: - * pointer to a word stream, - * disassember table and size - * returns: - * instruction size - negative is error - * disassembly string - on error will constain error string - */ -static int Disassemble(const uint16 *data, char **dis, - TABLE_ENTRY_TYPE *type, const Pool *table, - int table_size, Dis_info *info) -{ - for (int i = 0; i < table_size; i++) { - uint64 op_code = extract_op_code_value(data, - table[i].instructions_size); - if ((op_code & table[i].mask) == table[i].value) { - /* possible match */ - conditional_function cond = table[i].condition; - if ((cond == NULL) || cond(op_code)) { - if (table[i].type == pool) { - return Disassemble(data, dis, type, - table[i].next_table, - table[i].next_table_size, - info); - } else if ((table[i].type == instruction) || - (table[i].type == call_instruction) || - (table[i].type == branch_instruction) || - (table[i].type == return_instruction)) { - disassembly_function dis_fn = table[i].disassembly; - if (dis_fn == 0) { - *dis = g_strdup( - "disassembler failure - bad table entry"); - return -6; - } - *type = table[i].type; - *dis = dis_fn(op_code, info); - return table[i].instructions_size; - } else { - *dis = g_strdup("reserved instruction"); - return -2; - } - } - } - } - *dis = g_strdup("failed to disassemble"); - return -1; /* failed to disassemble */ -} - - static uint64 extract_code_18_to_0(uint64 instruction) { uint64 value = 0; @@ -16213,6 +16116,51 @@ static char *YIELD(uint64 instruction, Dis_info *info) * */ +typedef enum { + instruction, + call_instruction, + branch_instruction, + return_instruction, + reserved_block, + pool, +} TABLE_ENTRY_TYPE; + +typedef enum { + MIPS64_ = 0x00000001, + XNP_ = 0x00000002, + XMMS_ = 0x00000004, + EVA_ = 0x00000008, + DSP_ = 0x00000010, + MT_ = 0x00000020, + EJTAG_ = 0x00000040, + TLBINV_ = 0x00000080, + CP0_ = 0x00000100, + CP1_ = 0x00000200, + CP2_ = 0x00000400, + UDI_ = 0x00000800, + MCU_ = 0x00001000, + VZ_ = 0x00002000, + TLB_ = 0x00004000, + MVH_ = 0x00008000, + ALL_ATTRIBUTES = 0xffffffffull, +} TABLE_ATTRIBUTE_TYPE; + +typedef bool (*conditional_function)(uint64 instruction); +typedef char * (*disassembly_function)(uint64 instruction, + Dis_info *info); + +typedef struct Pool { + TABLE_ENTRY_TYPE type; + const struct Pool *next_table; + int next_table_size; + int instructions_size; + uint64 mask; + uint64 value; + disassembly_function disassembly; + conditional_function condition; + uint64 attributes; +} Pool; + static const Pool P_SYSCALL[2] = { { instruction , 0 , 0 , 32, 0xfffc0000, 0x00080000, &SYSCALL_32_ , 0, @@ -21907,6 +21855,58 @@ static const Pool MAJOR[2] = { 0x0 }, /* P16 */ }; +/* + * Recurse through tables until the instruction is found then return + * the string and size + * + * inputs: + * pointer to a word stream, + * disassember table and size + * returns: + * instruction size - negative is error + * disassembly string - on error will constain error string + */ 
+static int Disassemble(const uint16 *data, char **dis, + TABLE_ENTRY_TYPE *type, const Pool *table, + int table_size, Dis_info *info) +{ + for (int i = 0; i < table_size; i++) { + uint64 op_code = extract_op_code_value(data, + table[i].instructions_size); + if ((op_code & table[i].mask) == table[i].value) { + /* possible match */ + conditional_function cond = table[i].condition; + if ((cond == NULL) || cond(op_code)) { + if (table[i].type == pool) { + return Disassemble(data, dis, type, + table[i].next_table, + table[i].next_table_size, + info); + } else if ((table[i].type == instruction) || + (table[i].type == call_instruction) || + (table[i].type == branch_instruction) || + (table[i].type == return_instruction)) { + disassembly_function dis_fn = table[i].disassembly; + if (dis_fn == 0) { + *dis = g_strdup( + "disassembler failure - bad table entry"); + return -6; + } + *type = table[i].type; + *dis = dis_fn(op_code, info); + return table[i].instructions_size; + } else { + *dis = g_strdup("reserved instruction"); + return -2; + } + } + } + } + *dis = g_strdup("failed to disassemble"); + return -1; /* failed to disassemble */ +} + + static bool nanomips_dis(const uint16_t *data, char **buf, Dis_info *info) { TABLE_ENTRY_TYPE type; diff --git a/disas/riscv-xthead.c b/disas/riscv-xthead.c index 99da679d16c..fcca326d1c3 100644 --- a/disas/riscv-xthead.c +++ b/disas/riscv-xthead.c @@ -4,6 +4,7 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ +#include "qemu/osdep.h" #include "disas/riscv.h" #include "disas/riscv-xthead.h" diff --git a/disas/riscv-xventana.c b/disas/riscv-xventana.c index a0224d1fb31..cd694f15f32 100644 --- a/disas/riscv-xventana.c +++ b/disas/riscv-xventana.c @@ -4,6 +4,7 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ +#include "qemu/osdep.h" #include "disas/riscv.h" #include "disas/riscv-xventana.h" diff --git a/disas/riscv.c b/disas/riscv.c index e9458e574b9..e236c8b5b7c 100644 --- a/disas/riscv.c +++ b/disas/riscv.c @@ -903,6 +903,9 @@ typedef enum { rv_op_vwsll_vv = 872, rv_op_vwsll_vx = 873, rv_op_vwsll_vi = 874, + rv_op_amocas_w = 875, + rv_op_amocas_d = 876, + rv_op_amocas_q = 877, } rv_op; /* register names */ @@ -2090,6 +2093,9 @@ const rv_opcode_data rvi_opcode_data[] = { { "vwsll.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, 0, 0, 0 }, { "vwsll.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, 0, 0, 0 }, { "vwsll.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, 0, 0, 0 }, + { "amocas.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amocas.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amocas.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, }; /* CSR names */ @@ -2841,6 +2847,9 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 34: op = rv_op_amoxor_w; break; case 35: op = rv_op_amoxor_d; break; case 36: op = rv_op_amoxor_q; break; + case 42: op = rv_op_amocas_w; break; + case 43: op = rv_op_amocas_d; break; + case 44: op = rv_op_amocas_q; break; case 66: op = rv_op_amoor_w; break; case 67: op = rv_op_amoor_d; break; case 68: op = rv_op_amoor_q; break; @@ -5183,19 +5192,21 @@ print_insn_riscv(bfd_vma memaddr, struct disassemble_info *info, rv_isa isa) } } - switch (len) { - case 2: - (*info->fprintf_func)(info->stream, INST_FMT_2, inst); - break; - case 4: - (*info->fprintf_func)(info->stream, INST_FMT_4, inst); - break; - case 6: - (*info->fprintf_func)(info->stream, INST_FMT_6, inst); - break; - default: - (*info->fprintf_func)(info->stream, INST_FMT_8, inst); - break; + if (info->show_opcodes) { + switch 
(len) { + case 2: + (*info->fprintf_func)(info->stream, INST_FMT_2, inst); + break; + case 4: + (*info->fprintf_func)(info->stream, INST_FMT_4, inst); + break; + case 6: + (*info->fprintf_func)(info->stream, INST_FMT_6, inst); + break; + default: + (*info->fprintf_func)(info->stream, INST_FMT_8, inst); + break; + } } disasm_inst(buf, sizeof(buf), isa, memaddr, inst, diff --git a/disas/riscv.h b/disas/riscv.h index 19e5ed2ce63..16a08e4895c 100644 --- a/disas/riscv.h +++ b/disas/riscv.h @@ -7,7 +7,6 @@ #ifndef DISAS_RISCV_H #define DISAS_RISCV_H -#include "qemu/osdep.h" #include "target/riscv/cpu_cfg.h" /* types */ diff --git a/docs/about/build-platforms.rst b/docs/about/build-platforms.rst index f2a7aec56ff..8fd7da140a3 100644 --- a/docs/about/build-platforms.rst +++ b/docs/about/build-platforms.rst @@ -139,6 +139,8 @@ unprivileged accounts can create symlinks if Developer Mode is enabled. When Developer Mode is not available/enabled, the SeCreateSymbolicLinkPrivilege privilege is required, or the process must be run as an administrator. +Only 64-bit Windows is supported. + .. _Homebrew: https://brew.sh/ .. _MacPorts: https://www.macports.org/ .. _MSYS2: https://www.msys2.org/ diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index 2e150402465..7b548519b5a 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -36,22 +36,6 @@ and will cause a warning. The replacement for the ``nodelay`` short-form boolean option is ``nodelay=on`` rather than ``delay=off``. -``-smp`` ("parameter=0" SMP configurations) (since 6.2) -''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -Specified CPU topology parameters must be greater than zero. - -In the SMP configuration, users should either provide a CPU topology -parameter with a reasonable value (greater than zero) or just omit it -and QEMU will compute the missing value. - -However, historically it was implicitly allowed for users to provide -a parameter with zero value, which is meaningless and could also possibly -cause unexpected results in the -smp parsing. So support for this kind of -configurations (e.g. -smp 8,sockets=0) is deprecated since 6.2 and will -be removed in the near future, users have to ensure that all the topology -members described with -smp are greater than zero. - Plugin argument passing through ``arg=`` (since 6.1) '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' @@ -63,44 +47,29 @@ as short-form boolean values, and passed to plugins as ``arg_name=on``. However, short-form booleans are deprecated and full explicit ``arg_name=on`` form is preferred. -``-no-hpet`` (since 8.0) -'''''''''''''''''''''''' - -The HPET setting has been turned into a machine property. -Use ``-machine hpet=off`` instead. - -``-no-acpi`` (since 8.0) -'''''''''''''''''''''''' +``-smp`` (Unsupported "parameter=1" SMP configurations) (since 9.0) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' -The ``-no-acpi`` setting has been turned into a machine property. -Use ``-machine acpi=off`` instead. +Specified CPU topology parameters must be supported by the machine. -``-async-teardown`` (since 8.1) -''''''''''''''''''''''''''''''' +In the SMP configuration, users should provide the CPU topology parameters that +are supported by the target machine. -Use ``-run-with async-teardown=on`` instead. - -``-chroot`` (since 8.1) -''''''''''''''''''''''' - -Use ``-run-with chroot=dir`` instead. 
-
-``-singlestep`` (since 8.1)
-'''''''''''''''''''''''''''
-
-The ``-singlestep`` option has been turned into an accelerator property,
-and given a name that better reflects what it actually does.
-Use ``-accel tcg,one-insn-per-tb=on`` instead.
+However, historically it was allowed for users to specify the unsupported
+topology parameter as "1", which is meaningless. So support for this kind of
+configuration (e.g. -smp drawers=1,books=1,clusters=1 for the x86 PC machine) is
+marked deprecated since 9.0; users have to ensure that all the topology members
+described with -smp are supported by the target machine.
 
 User-mode emulator command line arguments
 -----------------------------------------
 
-``-singlestep`` (since 8.1)
-'''''''''''''''''''''''''''
+``-p`` (since 9.0)
+''''''''''''''''''
 
-The ``-singlestep`` option has been given a name that better reflects
-what it actually does. For both linux-user and bsd-user, use the
-new ``-one-insn-per-tb`` option instead.
+The ``-p`` option pretends to control the host page size. However,
+it is not possible to change the host page size, and using the
+option only causes failures.
 
 QEMU Machine Protocol (QMP) commands
 ------------------------------------
 
@@ -173,20 +142,6 @@ accepted incorrect commands will return an error. Users should make sure that
 all arguments passed to ``device_add`` are consistent with the documented
 property types.
 
-``StatusInfo`` member ``singlestep`` (since 8.1)
-''''''''''''''''''''''''''''''''''''''''''''''''
-
-The ``singlestep`` member of the ``StatusInfo`` returned from the
-``query-status`` command is deprecated. This member has a confusing
-name and it never did what the documentation claimed or what its name
-suggests. We do not believe that anybody is actually using the
-information provided in this member.
-
-The information it reports is whether the TCG JIT is in "one
-instruction per translated block" mode (which can be set on the
-command line or via the HMP, but not via QMP). The information remains
-available via the HMP 'info jit' command.
-
 QEMU Machine Protocol (QMP) events
 ----------------------------------
 
@@ -203,15 +158,6 @@ points was removed in 7.0. However QMP still exposed the vcpu
 parameter. This argument has now been deprecated and the remaining
 remaining trace points that used it are selected just by name.
 
-Human Monitor Protocol (HMP) commands
--------------------------------------
-
-``singlestep`` (since 8.1)
-''''''''''''''''''''''''''
-
-The ``singlestep`` command has been replaced by the ``one-insn-per-tb``
-command, which has the same behaviour but a less misleading name.
-
 Host Architectures
 ------------------
 
@@ -245,6 +191,22 @@ Nios II CPU (since 8.2)
 The Nios II architecture is orphan. The ``nios2`` guest CPU support is
 deprecated and will be removed in a future version of QEMU.
 
+``power5+`` and ``power7+`` CPU names (since 9.0)
+'''''''''''''''''''''''''''''''''''''''''''''''''
+
+The character "+" in device (and thus also CPU) names is not allowed
+in the QEMU object model anymore. ``power5+``, ``power5+_v2.1``,
+``power7+`` and ``power7+_v2.1`` are currently still supported via
+an alias, but for consistency these will get removed in a future
+release, too. Use ``power5p_v2.1`` and ``power7p_v2.1`` instead.
+
+CRIS CPU architecture (since 9.0)
+'''''''''''''''''''''''''''''''''
+
+The CRIS architecture was pulled from Linux in 4.17 and the compiler
+is no longer packaged in any distro, making it harder to run the
+``check-tcg`` tests.
Unless we can improve the testing situation there +is a chance the code will bitrot without anyone noticing. System emulator machines ------------------------ @@ -269,6 +231,34 @@ Nios II ``10m50-ghrd`` and ``nios2-generic-nommu`` machines (since 8.2) The Nios II architecture is orphan. +``shix`` (since 9.0) +'''''''''''''''''''' + +The machine is no longer in existence and has been long unmaintained +in QEMU. This also holds for the TC51828 16MiB flash that it uses. + +``pseries-2.1`` up to ``pseries-2.12`` (since 9.0) +'''''''''''''''''''''''''''''''''''''''''''''''''' + +Older pseries machines before version 3.0 have undergone many changes +to correct issues, mostly regarding migration compatibility. These are +no longer maintained and removing them will make the code easier to +read and maintain. Use versions 3.0 and above as a replacement. + +Arm machines ``akita``, ``borzoi``, ``cheetah``, ``connex``, ``mainstone``, ``n800``, ``n810``, ``spitz``, ``terrier``, ``tosa``, ``verdex``, ``z2`` (since 9.0) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +QEMU includes models of some machine types where the QEMU code that +emulates their SoCs is very old and unmaintained. This code is now +blocking our ability to move forward with various changes across +the codebase, and over many years nobody has been interested in +trying to modernise it. We don't expect any of these machines to have +a large number of users, because they're all modelling hardware that +has now passed away into history. We are therefore dropping support +for all machine types using the PXA2xx and OMAP2 SoCs. We are also +dropping the ``cheetah`` OMAP1 board, because we don't have any +test images for it and don't know of anybody who does; the ``sx1`` +and ``sx1-v1`` OMAP1 machines remain supported for now. Backend options --------------- @@ -428,6 +418,14 @@ Specifying the iSCSI password in plain text on the command line using the used instead, to refer to a ``--object secret...`` instance that provides a password via a file, or encrypted. +Character device options +'''''''''''''''''''''''' + +Backend ``memory`` (since 9.0) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``memory`` is a deprecated synonym for ``ringbuf``. + CPU device properties ''''''''''''''''''''' diff --git a/docs/about/removed-features.rst b/docs/about/removed-features.rst index f04036987b1..f9cf874f7b1 100644 --- a/docs/about/removed-features.rst +++ b/docs/about/removed-features.rst @@ -460,6 +460,62 @@ in this case. Note that the default audio backend must be configured on the command line if the ``-nodefaults`` options is used. +``-no-hpet`` (removed in 9.0) +''''''''''''''''''''''''''''' + +The HPET setting has been turned into a machine property. +Use ``-machine hpet=off`` instead. + +``-no-acpi`` (removed in 9.0) +''''''''''''''''''''''''''''' + +The ``-no-acpi`` setting has been turned into a machine property. +Use ``-machine acpi=off`` instead. + +``-async-teardown`` (removed in 9.0) +'''''''''''''''''''''''''''''''''''' + +Use ``-run-with async-teardown=on`` instead. + +``-chroot`` (removed in 9.0) +'''''''''''''''''''''''''''' + +Use ``-run-with chroot=dir`` instead. + +``-singlestep`` (removed in 9.0) +'''''''''''''''''''''''''''''''' + +The ``-singlestep`` option has been turned into an accelerator property, +and given a name that better reflects what it actually does. +Use ``-accel tcg,one-insn-per-tb=on`` instead. 
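As a purely illustrative recap of the replacements named in the entries above (the chroot directory is only an example path), an old-style invocation and its current equivalent look like this::

   # pre-9.0 options, now removed
   qemu-system-x86_64 -no-hpet -no-acpi -singlestep -chroot /srv/qemu-jail

   # current equivalents
   qemu-system-x86_64 -machine hpet=off,acpi=off \
                      -accel tcg,one-insn-per-tb=on \
                      -run-with chroot=/srv/qemu-jail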
+
+``-smp`` ("parameter=0" SMP configurations) (removed in 9.0)
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Specified CPU topology parameters must be greater than zero.
+
+In the SMP configuration, users should either provide a CPU topology
+parameter with a reasonable value (greater than zero) or just omit it
+and QEMU will compute the missing value.
+
+However, historically it was implicitly allowed for users to provide
+a parameter with zero value, which is meaningless and could also possibly
+cause unexpected results in the -smp parsing. So support for this kind of
+configuration (e.g. -smp 8,sockets=0) was removed in 9.0; users have
+to ensure that all the topology members described with -smp are greater
+than zero.
+
+User-mode emulator command line arguments
+-----------------------------------------
+
+``-singlestep`` (removed in 9.0)
+''''''''''''''''''''''''''''''''
+
+The ``-singlestep`` option has been given a name that better reflects
+what it actually does. For both linux-user and bsd-user, use the
+``-one-insn-per-tb`` option instead.
+
+
 QEMU Machine Protocol (QMP) commands
 ------------------------------------
 
@@ -612,6 +668,27 @@ Use ``migrate-set-parameters`` instead.
 This command didn't produce any output already. Removed with no
 replacement.
 
+``singlestep`` (removed in 9.0)
+'''''''''''''''''''''''''''''''
+
+The ``singlestep`` command has been replaced by the ``one-insn-per-tb``
+command, which has the same behaviour but a less misleading name.
+
+Host Architectures
+------------------
+
+System emulation on 32-bit Windows hosts (removed in 9.0)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Windows 11 has no support for 32-bit host installs, and Windows 10 did
+not support new 32-bit installs, only upgrades. 32-bit Windows support
+has now been dropped by the MSYS2 project. QEMU is also deprecating
+and dropping support for 32-bit x86 host deployments in
+general. 32-bit Windows is therefore no longer a supported host for
+QEMU. Since all recent x86 hardware from the past >10 years is
+capable of the 64-bit x86 extensions, a corresponding 64-bit OS should
+be used instead.
+
 Guest Emulator ISAs
 -------------------
 
diff --git a/docs/colo-proxy.txt b/docs/colo-proxy.txt
index 1fc38aed1b2..e712c883dba 100644
--- a/docs/colo-proxy.txt
+++ b/docs/colo-proxy.txt
@@ -162,7 +162,7 @@ Here is an example using demonstration IP and port addresses to more clearly
 describe the usage.
 
Primary(ip:3.3.3.3): --netdev tap,id=hn0,vhost=off,script=/etc/qemu-ifup,downscript=/etc/qemu-ifdown +-netdev tap,id=hn0,vhost=off -device e1000,id=e0,netdev=hn0,mac=52:a4:00:12:78:66 -chardev socket,id=mirror0,host=3.3.3.3,port=9003,server=on,wait=off -chardev socket,id=compare1,host=3.3.3.3,port=9004,server=on,wait=off @@ -177,7 +177,7 @@ Primary(ip:3.3.3.3): -object colo-compare,id=comp0,primary_in=compare0-0,secondary_in=compare1,outdev=compare_out0,iothread=iothread1 Secondary(ip:3.3.3.8): --netdev tap,id=hn0,vhost=off,script=/etc/qemu-ifup,down script=/etc/qemu-ifdown +-netdev tap,id=hn0,vhost=off -device e1000,netdev=hn0,mac=52:a4:00:12:78:66 -chardev socket,id=red0,host=3.3.3.3,port=9003 -chardev socket,id=red1,host=3.3.3.3,port=9004 @@ -202,7 +202,7 @@ Primary(ip:3.3.3.3): -object colo-compare,id=comp0,primary_in=compare0-0,secondary_in=compare1,outdev=compare_out0,vnet_hdr_support Secondary(ip:3.3.3.8): --netdev tap,id=hn0,vhost=off,script=/etc/qemu-ifup,down script=/etc/qemu-ifdown +-netdev tap,id=hn0,vhost=off -device e1000,netdev=hn0,mac=52:a4:00:12:78:66 -chardev socket,id=red0,host=3.3.3.3,port=9003 -chardev socket,id=red1,host=3.3.3.3,port=9004 diff --git a/docs/conf.py b/docs/conf.py index e84a95e71ce..aae0304ac6e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -29,7 +29,6 @@ import os import sys import sphinx -from distutils.version import LooseVersion from sphinx.errors import ConfigError # The per-manual conf.py will set qemu_docdir for a single-manual build; @@ -89,7 +88,7 @@ # General information about the project. project = u'QEMU' -copyright = u'2023, The QEMU Project Developers' +copyright = u'2024, The QEMU Project Developers' author = u'The QEMU Project Developers' # The version info for the project you're documenting, acts as replacement for @@ -165,11 +164,10 @@ # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -if LooseVersion(sphinx_rtd_theme.__version__) >= LooseVersion("0.4.3"): - html_theme_options = { - "style_nav_header_background": "#802400", - "navigation_with_keys": True, - } +html_theme_options = { + "style_nav_header_background": "#802400", + "navigation_with_keys": True, +} html_logo = os.path.join(qemu_docdir, "../ui/icons/qemu_128x128.png") diff --git a/docs/devel/acpi-bits.rst b/docs/devel/acpi-bits.rst index 9677b0098f4..1ec394f5fb3 100644 --- a/docs/devel/acpi-bits.rst +++ b/docs/devel/acpi-bits.rst @@ -1,26 +1,48 @@ ============================================================================= ACPI/SMBIOS avocado tests using biosbits ============================================================================= - +************ +Introduction +************ Biosbits is a software written by Josh Triplett that can be downloaded from https://biosbits.org/. The github codebase can be found -`here `__. It is a software that executes -the bios components such as acpi and smbios tables directly through acpica -bios interpreter (a freely available C based library written by Intel, +`here `__. It is a software that +executes the bios components such as acpi and smbios tables directly through +acpica bios interpreter (a freely available C based library written by Intel, downloadable from https://acpica.org/ and is included with biosbits) without an -operating system getting involved in between. +operating system getting involved in between. 
Bios-bits has python integration
+with grub so the actual routines that execute bios components can be written in
+python instead of bash-ish (grub's native scripting language).
 
 There are several advantages to directly testing the bios in a real physical
-machine or VM as opposed to indirectly discovering bios issues through the
-operating system. For one thing, the OSes tend to hide bios problems from the
-end user. The other is that we have more control of what we wanted to test
-and how by directly using acpica interpreter on top of the bios on a running
-system. More details on the inspiration for developing biosbits and its real
-life uses can be found in [#a]_ and [#b]_.
+machine or in a VM as opposed to indirectly discovering bios issues through the
+operating system (the OS). Operating systems tend to bypass bios problems and
+hide them from the end user. We have more control over what we want to test and
+how by being as close to the bios on a running system as possible without a
+complicated software component such as an operating system coming in between.
+Another issue is that we cannot exercise bios components such as ACPI and
+SMBIOS without being in the highest hardware privilege level (ring 0 in the
+case of x86). Since the OS executes from ring 0 whereas normal user land
+software resides in unprivileged ring 3, the operating system must be modified
+in order to write our test routines that exercise and test the bios. This is
+not possible in all cases. Lastly, test frameworks and routines are preferably
+written using a high level scripting language such as python. OSes and
+OS modules are generally written using low level languages such as C and
+low level assembly machine language. Writing test routines in a low level
+language makes things more cumbersome. These and other reasons make using
+bios-bits very attractive for testing bioses. More details on the inspiration
+for developing biosbits and its real life uses can be found in [#a]_ and [#b]_.
+
 For QEMU, we maintain a fork of bios bits in gitlab along with all the
-dependent submodules here: https://gitlab.com/qemu-project/biosbits-bits
+dependent submodules `here <https://gitlab.com/qemu-project/biosbits-bits>`__.
 This fork contains numerous fixes, a newer acpica and changes specific to
 running this avocado QEMU tests using bits. The author of this document
-is the sole maintainer of the QEMU fork of bios bits repo.
+is the sole maintainer of the QEMU fork of bios bits repository. For more
+information, please see the author's `FOSDEM talk on this bios-bits based test
+framework <https://fosdem.org/2024/schedule/event/fosdem-2024-2262-exercising-qemu-generated-acpi-smbios-tables-using-biosbits-from-within-a-guest-vm-/>`__.
+
+*********************************
+Description of the test framework
+*********************************
 
 Under the directory ``tests/avocado/``, ``acpi-bits.py`` is a QEMU avocado
 test that drives all this.
@@ -120,8 +142,9 @@ Under ``tests/avocado/`` as the root we have:
    (b) Add a SPDX license header.
    (c) Perform modifications to the test.
 
-   Commits (a), (b) and (c) should go under separate commits so that the original
-   test script and the changes we have made are separated and clear.
+   Commits (a), (b) and (c) should preferably go into separate commits so that
+   the original test script and the changes we have made are separated and
+   clear. (a) and (b) can sometimes be combined into a single step.
 
    The test framework will then use your modified test script to run the test.
    No further changes would be needed. Please check the logs to make sure that
@@ -141,4 +164,4 @@ References:
 -----------
 
 ..
[#a] https://blog.linuxplumbersconf.org/2011/ocw/system/presentations/867/original/bits.pdf .. [#b] https://www.youtube.com/watch?v=36QIepyUuhg - +.. [#c] https://fosdem.org/2024/schedule/event/fosdem-2024-2262-exercising-qemu-generated-acpi-smbios-tables-using-biosbits-from-within-a-guest-vm-/ diff --git a/docs/devel/atomics.rst b/docs/devel/atomics.rst index ff9b5ee30c8..b77c6e13e18 100644 --- a/docs/devel/atomics.rst +++ b/docs/devel/atomics.rst @@ -119,7 +119,7 @@ The only guarantees that you can rely upon in this case are: ordinary accesses instead cause data races if they are concurrent with other accesses of which at least one is a write. In order to ensure this, the compiler will not optimize accesses out of existence, create unsolicited - accesses, or perform other similar optimzations. + accesses, or perform other similar optimizations. - acquire operations will appear to happen, with respect to the other components of the system, before all the LOAD or STORE operations diff --git a/docs/devel/build-system.rst b/docs/devel/build-system.rst index 43d6005881e..09caf2f8e19 100644 --- a/docs/devel/build-system.rst +++ b/docs/devel/build-system.rst @@ -256,21 +256,6 @@ Target-independent emulator sourcesets: ``system_ss`` only in system emulators, ``user_ss`` only in user-mode emulators. - Target-independent sourcesets must exercise particular care when using - ``if_false`` rules. The ``if_false`` rule will be used correctly when linking - emulator binaries; however, when *compiling* target-independent files - into .o files, Meson may need to pick *both* the ``if_true`` and - ``if_false`` sides to cater for targets that want either side. To - achieve that, you can add a special rule using the ``CONFIG_ALL`` - symbol:: - - # Some targets have CONFIG_ACPI, some don't, so this is not enough - system_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi.c'), - if_false: files('acpi-stub.c')) - - # This is required as well: - system_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-stub.c')) - Target-dependent emulator sourcesets: In the target-dependent set lives CPU emulation, some device emulation and much glue code. This sometimes also has to be compiled multiple times, diff --git a/docs/devel/ci-jobs.rst.inc b/docs/devel/ci-jobs.rst.inc index 4c39cdb2d92..be063222792 100644 --- a/docs/devel/ci-jobs.rst.inc +++ b/docs/devel/ci-jobs.rst.inc @@ -115,7 +115,7 @@ CI pipeline. QEMU_JOB_SKIPPED ~~~~~~~~~~~~~~~~ -The job is not reliably successsful in general, so is not +The job is not reliably successful in general, so is not currently suitable to be run by default. Ideally this should be a temporary marker until the problems can be addressed, or the job permanently removed. @@ -147,7 +147,7 @@ Set this variable to 1 to create the pipelines, but leave all the jobs to be manually started from the UI Set this variable to 2 to create the pipelines and run all -the jobs immediately, as was historicaly behaviour +the jobs immediately, as was the historical behaviour QEMU_CI_AVOCADO_TESTING ~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/devel/clocks.rst b/docs/devel/clocks.rst index 675fbeb6abe..177ee1c90d7 100644 --- a/docs/devel/clocks.rst +++ b/docs/devel/clocks.rst @@ -279,6 +279,10 @@ You can change the multiplier and divider of a clock at runtime, so you can use this to model clock controller devices which have guest-programmable frequency multipliers or dividers. 
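As a minimal sketch of this runtime usage (the device type, field and function names are hypothetical; ``clock_set_mul_div()`` and ``clock_propagate()`` are the real API), a guest-programmable divider could be handled like this, using the return value discussed in the paragraph that follows to skip redundant propagation:

.. code:: c

   /* Hypothetical device callback: the guest programmed a new divider. */
   static void mydev_set_divider(MyDeviceState *s, uint32_t div)
   {
       /* Fixed multiplier of 1; only the divider is guest-programmable. */
       if (clock_set_mul_div(s->clk_out, 1, div)) {
           /* The value really changed, so push it out to users of clk_out. */
           clock_propagate(s->clk_out);
       }
   }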
+Similarly to ``clock_set()``, ``clock_set_mul_div()`` returns ``true`` if
+the clock state was modified; that is, if the multiplier or the divider
+or both were changed by the call.
+
 Note that ``clock_set_mul_div()`` does not automatically call
 ``clock_propagate()``. If you make a runtime change to the multiplier or
 divider you must call clock_propagate() yourself.
@@ -502,7 +506,7 @@ This is typically used to migrate an input clock state. For example:
 
     VMStateDescription my_device_vmstate = {
         .name = "my_device",
-        .fields = (VMStateField[]) {
+        .fields = (const VMStateField[]) {
             [...], /* other migrated fields */
             VMSTATE_CLOCK(clk, MyDeviceState),
             VMSTATE_END_OF_LIST()
diff --git a/docs/devel/docs.rst b/docs/devel/docs.rst
new file mode 100644
index 00000000000..a7768b53117
--- /dev/null
+++ b/docs/devel/docs.rst
@@ -0,0 +1,68 @@
+
+==================
+QEMU Documentation
+==================
+
+QEMU's documentation is written in reStructuredText format and
+built using the Sphinx documentation generator. We generate both
+the HTML manual and the manpages from the same documentation sources.
+
+hxtool and .hx files
+--------------------
+
+The documentation for QEMU command line options and Human Monitor Protocol
+(HMP) commands is written in files with the ``.hx`` suffix. These
+are processed in two ways:
+
+ * ``scripts/hxtool`` creates C header files from them, which are included
+   in QEMU to do things like handle the ``--help`` option output
+ * a Sphinx extension in ``docs/sphinx/hxtool.py`` generates rST output
+   to be included in the HTML or manpage documentation
+
+The syntax of these ``.hx`` files is simple. It is broadly an
+alternation of C code put into the C output and rST format text
+put into the documentation. A few special directives are recognised;
+these are all-caps and must be at the beginning of the line.
+
+``HXCOMM`` is the comment marker. The line, including any arbitrary
+text after the marker, is discarded and appears neither in the C output
+nor the documentation output.
+
+``SRST`` starts a reStructuredText section. Following lines
+are put into the documentation verbatim, and discarded from the C output.
+The alternative form ``SRST()`` is used to define a label which can be
+referenced from elsewhere in the rST documentation. The label will take
+the form ``<DOCNAME>-<HXFILE>-<LABEL>``, where ``DOCNAME`` is the name of the
+top level rST file, ``HXFILE`` is the filename of the .hx file without
+the ``.hx`` extension, and ``LABEL`` is the text provided within the
+``SRST()`` directive. For example,
+````.
+
+``ERST`` ends the documentation section started with ``SRST``,
+and switches back to a C code section.
+
+``DEFHEADING()`` defines a heading that should appear in both the
+``--help`` output and in the documentation. This directive should
+be in the C code block. If there is a string inside the brackets,
+this is the heading to use. If this string is empty, it produces
+a blank line in the ``--help`` output and is ignored for the rST
+output.
+
+``ARCHHEADING()`` is a variant of ``DEFHEADING()`` which produces
+the heading only if the specified guest architecture was compiled
+into QEMU. This should be avoided in new documentation.
+
+Within C code sections, you should check the comments at the top
+of the file to see what the expected usage is, because this
+varies between files.
For instance in ``qemu-options.hx`` we use +the ``DEF()`` macro to define each option and specify its ``--help`` +text, but in ``hmp-commands.hx`` the C code sections are elements +of an array of structs of type ``HMPCommand`` which define the +name, behaviour and help text for each monitor command. + +In the file ``qemu-options.hx``, do not try to explicitly define a +reStructuredText label within a documentation section. This file +is included into two separate Sphinx documents, and some +versions of Sphinx will complain about the duplicate label +that results. Use the ``SRST()`` directive documented above, to +emit an unambiguous label. diff --git a/docs/devel/index-build.rst b/docs/devel/index-build.rst index 57e8d39d985..90b406ca0ed 100644 --- a/docs/devel/index-build.rst +++ b/docs/devel/index-build.rst @@ -10,6 +10,7 @@ the basics if you are adding new files and targets to the build. build-system kconfig + docs testing acpi-bits qtest diff --git a/docs/devel/index-internals.rst b/docs/devel/index-internals.rst index 6f81df92bca..5636e9cf1d7 100644 --- a/docs/devel/index-internals.rst +++ b/docs/devel/index-internals.rst @@ -11,12 +11,12 @@ Details about QEMU's various subsystems including how to add features to them. block-coroutine-wrapper clocks ebpf_rss - migration + migration/index multi-process reset s390-cpu-topology s390-dasd-ipl tracing - vfio-migration + vfio-iommufd writing-monitor-commands virtio-backends diff --git a/docs/devel/kconfig.rst b/docs/devel/kconfig.rst index 73f52de1067..ccb9a46bd77 100644 --- a/docs/devel/kconfig.rst +++ b/docs/devel/kconfig.rst @@ -316,6 +316,6 @@ variable:: host_kconfig = \ (have_tpm ? ['CONFIG_TPM=y'] : []) + \ - (targetos == 'linux' ? ['CONFIG_LINUX=y'] : []) + \ + (host_os == 'linux' ? ['CONFIG_LINUX=y'] : []) + \ (have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \ ... diff --git a/docs/devel/migration.rst b/docs/devel/migration.rst deleted file mode 100644 index ec55089b253..00000000000 --- a/docs/devel/migration.rst +++ /dev/null @@ -1,1514 +0,0 @@ -========= -Migration -========= - -QEMU has code to load/save the state of the guest that it is running. -These are two complementary operations. Saving the state just does -that, saves the state for each device that the guest is running. -Restoring a guest is just the opposite operation: we need to load the -state of each device. - -For this to work, QEMU has to be launched with the same arguments the -two times. I.e. it can only restore the state in one guest that has -the same devices that the one it was saved (this last requirement can -be relaxed a bit, but for now we can consider that configuration has -to be exactly the same). - -Once that we are able to save/restore a guest, a new functionality is -requested: migration. This means that QEMU is able to start in one -machine and being "migrated" to another machine. I.e. being moved to -another machine. - -Next was the "live migration" functionality. This is important -because some guests run with a lot of state (specially RAM), and it -can take a while to move all state from one machine to another. Live -migration allows the guest to continue running while the state is -transferred. Only while the last part of the state is transferred has -the guest to be stopped. Typically the time that the guest is -unresponsive during live migration is the low hundred of milliseconds -(notice that this depends on a lot of things). - -.. 
contents:: - -Transports -========== - -The migration stream is normally just a byte stream that can be passed -over any transport. - -- tcp migration: do the migration using tcp sockets -- unix migration: do the migration using unix sockets -- exec migration: do the migration using the stdin/stdout through a process. -- fd migration: do the migration using a file descriptor that is - passed to QEMU. QEMU doesn't care how this file descriptor is opened. - -In addition, support is included for migration using RDMA, which -transports the page data using ``RDMA``, where the hardware takes care of -transporting the pages, and the load on the CPU is much lower. While the -internals of RDMA migration are a bit different, this isn't really visible -outside the RAM migration code. - -All these migration protocols use the same infrastructure to -save/restore state devices. This infrastructure is shared with the -savevm/loadvm functionality. - -Debugging -========= - -The migration stream can be analyzed thanks to ``scripts/analyze-migration.py``. - -Example usage: - -.. code-block:: shell - - $ qemu-system-x86_64 -display none -monitor stdio - (qemu) migrate "exec:cat > mig" - (qemu) q - $ ./scripts/analyze-migration.py -f mig - { - "ram (3)": { - "section sizes": { - "pc.ram": "0x0000000008000000", - ... - -See also ``analyze-migration.py -h`` help for more options. - -Common infrastructure -===================== - -The files, sockets or fd's that carry the migration stream are abstracted by -the ``QEMUFile`` type (see ``migration/qemu-file.h``). In most cases this -is connected to a subtype of ``QIOChannel`` (see ``io/``). - - -Saving the state of one device -============================== - -For most devices, the state is saved in a single call to the migration -infrastructure; these are *non-iterative* devices. The data for these -devices is sent at the end of precopy migration, when the CPUs are paused. -There are also *iterative* devices, which contain a very large amount of -data (e.g. RAM or large tables). See the iterative device section below. - -General advice for device developers ------------------------------------- - -- The migration state saved should reflect the device being modelled rather - than the way your implementation works. That way if you change the implementation - later the migration stream will stay compatible. That model may include - internal state that's not directly visible in a register. - -- When saving a migration stream the device code may walk and check - the state of the device. These checks might fail in various ways (e.g. - discovering internal state is corrupt or that the guest has done something bad). - Consider carefully before asserting/aborting at this point, since the - normal response from users is that *migration broke their VM* since it had - apparently been running fine until then. In these error cases, the device - should log a message indicating the cause of error, and should consider - putting the device into an error state, allowing the rest of the VM to - continue execution. - -- The migration might happen at an inconvenient point, - e.g. right in the middle of the guest reprogramming the device, during - guest reboot or shutdown or while the device is waiting for external IO. - It's strongly preferred that migrations do not fail in this situation, - since in the cloud environment migrations might happen automatically to - VMs that the administrator doesn't directly control. 
- -- If you do need to fail a migration, ensure that sufficient information - is logged to identify what went wrong. - -- The destination should treat an incoming migration stream as hostile - (which we do to varying degrees in the existing code). Check that offsets - into buffers and the like can't cause overruns. Fail the incoming migration - in the case of a corrupted stream like this. - -- Take care with internal device state or behaviour that might become - migration version dependent. For example, the order of PCI capabilities - is required to stay constant across migration. Another example would - be that a special case handled by subsections (see below) might become - much more common if a default behaviour is changed. - -- The state of the source should not be changed or destroyed by the - outgoing migration. Migrations timing out or being failed by - higher levels of management, or failures of the destination host are - not unusual, and in that case the VM is restarted on the source. - Note that the management layer can validly revert the migration - even though the QEMU level of migration has succeeded as long as it - does it before starting execution on the destination. - -- Buses and devices should be able to explicitly specify addresses when - instantiated, and management tools should use those. For example, - when hot adding USB devices it's important to specify the ports - and addresses, since implicit ordering based on the command line order - may be different on the destination. This can result in the - device state being loaded into the wrong device. - -VMState -------- - -Most device data can be described using the ``VMSTATE`` macros (mostly defined -in ``include/migration/vmstate.h``). - -An example (from hw/input/pckbd.c) - -.. code:: c - - static const VMStateDescription vmstate_kbd = { - .name = "pckbd", - .version_id = 3, - .minimum_version_id = 3, - .fields = (VMStateField[]) { - VMSTATE_UINT8(write_cmd, KBDState), - VMSTATE_UINT8(status, KBDState), - VMSTATE_UINT8(mode, KBDState), - VMSTATE_UINT8(pending, KBDState), - VMSTATE_END_OF_LIST() - } - }; - -We are declaring the state with name "pckbd". The ``version_id`` is -3, and there are 4 uint8_t fields in the KBDState structure. We -registered this ``VMSTATEDescription`` with one of the following -functions. The first one will generate a device ``instance_id`` -different for each registration. Use the second one if you already -have an id that is different for each instance of the device: - -.. code:: c - - vmstate_register_any(NULL, &vmstate_kbd, s); - vmstate_register(NULL, instance_id, &vmstate_kbd, s); - -For devices that are ``qdev`` based, we can register the device in the class -init function: - -.. code:: c - - dc->vmsd = &vmstate_kbd_isa; - -The VMState macros take care of ensuring that the device data section -is formatted portably (normally big endian) and make some compile time checks -against the types of the fields in the structures. - -VMState macros can include other VMStateDescriptions to store substructures -(see ``VMSTATE_STRUCT_``), arrays (``VMSTATE_ARRAY_``) and variable length -arrays (``VMSTATE_VARRAY_``). Various other macros exist for special -cases. - -Note that the format on the wire is still very raw; i.e. a VMSTATE_UINT32 -ends up with a 4 byte bigendian representation on the wire; in the future -it might be possible to use a more structured format. 
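To make the macro families listed above concrete, here is a small illustrative sketch; the device, its fields and the sub-structure are hypothetical, while ``VMSTATE_UINT32_ARRAY()``, ``VMSTATE_STRUCT()`` and ``VMSTATE_END_OF_LIST()`` are the standard macros:

.. code:: c

    typedef struct MyTimerState {
        uint64_t expiry_ns;
    } MyTimerState;

    typedef struct MyDevState {
        uint32_t regs[8];
        MyTimerState timer;
    } MyDevState;

    static const VMStateDescription vmstate_mytimer = {
        .name = "mydev/timer",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT64(expiry_ns, MyTimerState),
            VMSTATE_END_OF_LIST()
        }
    };

    static const VMStateDescription vmstate_mydev = {
        .name = "mydev",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32_ARRAY(regs, MyDevState, 8),
            VMSTATE_STRUCT(timer, MyDevState, 1, vmstate_mytimer, MyTimerState),
            VMSTATE_END_OF_LIST()
        }
    };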
- -Legacy way ----------- - -This way is going to disappear as soon as all current users are ported to VMSTATE; -although converting existing code can be tricky, and thus 'soon' is relative. - -Each device has to register two functions, one to save the state and -another to load the state back. - -.. code:: c - - int register_savevm_live(const char *idstr, - int instance_id, - int version_id, - SaveVMHandlers *ops, - void *opaque); - -Two functions in the ``ops`` structure are the ``save_state`` -and ``load_state`` functions. Notice that ``load_state`` receives a version_id -parameter to know what state format is receiving. ``save_state`` doesn't -have a version_id parameter because it always uses the latest version. - -Note that because the VMState macros still save the data in a raw -format, in many cases it's possible to replace legacy code -with a carefully constructed VMState description that matches the -byte layout of the existing code. - -Changing migration data structures ----------------------------------- - -When we migrate a device, we save/load the state as a series -of fields. Sometimes, due to bugs or new functionality, we need to -change the state to store more/different information. Changing the migration -state saved for a device can break migration compatibility unless -care is taken to use the appropriate techniques. In general QEMU tries -to maintain forward migration compatibility (i.e. migrating from -QEMU n->n+1) and there are users who benefit from backward compatibility -as well. - -Subsections ------------ - -The most common structure change is adding new data, e.g. when adding -a newer form of device, or adding that state that you previously -forgot to migrate. This is best solved using a subsection. - -A subsection is "like" a device vmstate, but with a particularity, it -has a Boolean function that tells if that values are needed to be sent -or not. If this functions returns false, the subsection is not sent. -Subsections have a unique name, that is looked for on the receiving -side. - -On the receiving side, if we found a subsection for a device that we -don't understand, we just fail the migration. If we understand all -the subsections, then we load the state with success. There's no check -that a subsection is loaded, so a newer QEMU that knows about a subsection -can (with care) load a stream from an older QEMU that didn't send -the subsection. - -If the new data is only needed in a rare case, then the subsection -can be made conditional on that case and the migration will still -succeed to older QEMUs in most cases. This is OK for data that's -critical, but in some use cases it's preferred that the migration -should succeed even with the data missing. To support this the -subsection can be connected to a device property and from there -to a versioned machine type. - -The 'pre_load' and 'post_load' functions on subsections are only -called if the subsection is loaded. - -One important note is that the outer post_load() function is called "after" -loading all subsections, because a newer subsection could change the same -value that it uses. A flag, and the combination of outer pre_load and -post_load can be used to detect whether a subsection was loaded, and to -fall back on default behaviour when the subsection isn't present. - -Example: - -.. 
code:: c - - static bool ide_drive_pio_state_needed(void *opaque) - { - IDEState *s = opaque; - - return ((s->status & DRQ_STAT) != 0) - || (s->bus->error_status & BM_STATUS_PIO_RETRY); - } - - const VMStateDescription vmstate_ide_drive_pio_state = { - .name = "ide_drive/pio_state", - .version_id = 1, - .minimum_version_id = 1, - .pre_save = ide_drive_pio_pre_save, - .post_load = ide_drive_pio_post_load, - .needed = ide_drive_pio_state_needed, - .fields = (VMStateField[]) { - VMSTATE_INT32(req_nb_sectors, IDEState), - VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1, - vmstate_info_uint8, uint8_t), - VMSTATE_INT32(cur_io_buffer_offset, IDEState), - VMSTATE_INT32(cur_io_buffer_len, IDEState), - VMSTATE_UINT8(end_transfer_fn_idx, IDEState), - VMSTATE_INT32(elementary_transfer_size, IDEState), - VMSTATE_INT32(packet_transfer_size, IDEState), - VMSTATE_END_OF_LIST() - } - }; - - const VMStateDescription vmstate_ide_drive = { - .name = "ide_drive", - .version_id = 3, - .minimum_version_id = 0, - .post_load = ide_drive_post_load, - .fields = (VMStateField[]) { - .... several fields .... - VMSTATE_END_OF_LIST() - }, - .subsections = (const VMStateDescription*[]) { - &vmstate_ide_drive_pio_state, - NULL - } - }; - -Here we have a subsection for the pio state. We only need to -save/send this state when we are in the middle of a pio operation -(that is what ``ide_drive_pio_state_needed()`` checks). If DRQ_STAT is -not enabled, the values on that fields are garbage and don't need to -be sent. - -Connecting subsections to properties ------------------------------------- - -Using a condition function that checks a 'property' to determine whether -to send a subsection allows backward migration compatibility when -new subsections are added, especially when combined with versioned -machine types. - -For example: - - a) Add a new property using ``DEFINE_PROP_BOOL`` - e.g. support-foo and - default it to true. - b) Add an entry to the ``hw_compat_`` for the previous version that sets - the property to false. - c) Add a static bool support_foo function that tests the property. - d) Add a subsection with a .needed set to the support_foo function - e) (potentially) Add an outer pre_load that sets up a default value - for 'foo' to be used if the subsection isn't loaded. - -Now that subsection will not be generated when using an older -machine type and the migration stream will be accepted by older -QEMU versions. - -Not sending existing elements ------------------------------ - -Sometimes members of the VMState are no longer needed: - - - removing them will break migration compatibility - - - making them version dependent and bumping the version will break backward migration - compatibility. - -Adding a dummy field into the migration stream is normally the best way to preserve -compatibility. - -If the field really does need to be removed then: - - a) Add a new property/compatibility/function in the same way for subsections above. - b) replace the VMSTATE macro with the _TEST version of the macro, e.g.: - - ``VMSTATE_UINT32(foo, barstruct)`` - - becomes - - ``VMSTATE_UINT32_TEST(foo, barstruct, pre_version_baz)`` - - Sometime in the future when we no longer care about the ancient versions these can be killed off. - Note that for backward compatibility it's important to fill in the structure with - data that the destination will understand. 
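A short illustrative sketch of steps a) and b) above; the ``_TEST`` macro and the predicate signature are the standard mechanism, while the property, field and function names here are made up:

.. code:: c

    /* a) Property, default off; an hw_compat_* entry turns it back on for
     *    old machine types that still expect the field on the wire. */
    DEFINE_PROP_BOOL("x-migrate-foo", MyDevState, migrate_foo, false),

    /* Predicate used by the _TEST macro below: send the field only when
     * the compatibility property is enabled. */
    static bool pre_version_baz(void *opaque, int version_id)
    {
        MyDevState *s = opaque;

        return s->migrate_foo;
    }

    /* b) In the .fields list, instead of VMSTATE_UINT32(foo, MyDevState): */
    VMSTATE_UINT32_TEST(foo, MyDevState, pre_version_baz),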
- -Any difference in the predicates on the source and destination will end up -with different fields being enabled and data being loaded into the wrong -fields; for this reason conditional fields like this are very fragile. - -Versions --------- - -Version numbers are intended for major incompatible changes to the -migration of a device, and using them breaks backward-migration -compatibility; in general most changes can be made by adding Subsections -(see above) or _TEST macros (see above) which won't break compatibility. - -Each version is associated with a series of fields saved. The ``save_state`` always saves -the state as the newer version. But ``load_state`` sometimes is able to -load state from an older version. - -You can see that there are two version fields: - -- ``version_id``: the maximum version_id supported by VMState for that device. -- ``minimum_version_id``: the minimum version_id that VMState is able to understand - for that device. - -VMState is able to read versions from minimum_version_id to version_id. - -There are *_V* forms of many ``VMSTATE_`` macros to load fields for version dependent fields, -e.g. - -.. code:: c - - VMSTATE_UINT16_V(ip_id, Slirp, 2), - -only loads that field for versions 2 and newer. - -Saving state will always create a section with the 'version_id' value -and thus can't be loaded by any older QEMU. - -Massaging functions -------------------- - -Sometimes, it is not enough to be able to save the state directly -from one structure, we need to fill the correct values there. One -example is when we are using kvm. Before saving the cpu state, we -need to ask kvm to copy to QEMU the state that it is using. And the -opposite when we are loading the state, we need a way to tell kvm to -load the state for the cpu that we have just loaded from the QEMUFile. - -The functions to do that are inside a vmstate definition, and are called: - -- ``int (*pre_load)(void *opaque);`` - - This function is called before we load the state of one device. - -- ``int (*post_load)(void *opaque, int version_id);`` - - This function is called after we load the state of one device. - -- ``int (*pre_save)(void *opaque);`` - - This function is called before we save the state of one device. - -- ``int (*post_save)(void *opaque);`` - - This function is called after we save the state of one device - (even upon failure, unless the call to pre_save returned an error). - -Example: You can look at hpet.c, that uses the first three functions -to massage the state that is transferred. - -The ``VMSTATE_WITH_TMP`` macro may be useful when the migration -data doesn't match the stored device data well; it allows an -intermediate temporary structure to be populated with migration -data and then transferred to the main structure. - -If you use memory API functions that update memory layout outside -initialization (i.e., in response to a guest action), this is a strong -indication that you need to call these functions in a ``post_load`` callback. -Examples of such memory API functions are: - - - memory_region_add_subregion() - - memory_region_del_subregion() - - memory_region_set_readonly() - - memory_region_set_nonvolatile() - - memory_region_set_enabled() - - memory_region_set_address() - - memory_region_set_alias_offset() - -Iterative device migration --------------------------- - -Some devices, such as RAM, Block storage or certain platform devices, -have large amounts of data that would mean that the CPUs would be -paused for too long if they were sent in one section. 
For these -devices an *iterative* approach is taken. - -The iterative devices generally don't use VMState macros -(although it may be possible in some cases) and instead use -qemu_put_*/qemu_get_* macros to read/write data to the stream. Specialist -versions exist for high bandwidth IO. - - -An iterative device must provide: - - - A ``save_setup`` function that initialises the data structures and - transmits a first section containing information on the device. In the - case of RAM this transmits a list of RAMBlocks and sizes. - - - A ``load_setup`` function that initialises the data structures on the - destination. - - - A ``state_pending_exact`` function that indicates how much more - data we must save. The core migration code will use this to - determine when to pause the CPUs and complete the migration. - - - A ``state_pending_estimate`` function that indicates how much more - data we must save. When the estimated amount is smaller than the - threshold, we call ``state_pending_exact``. - - - A ``save_live_iterate`` function should send a chunk of data until - the point that stream bandwidth limits tell it to stop. Each call - generates one section. - - - A ``save_live_complete_precopy`` function that must transmit the - last section for the device containing any remaining data. - - - A ``load_state`` function used to load sections generated by - any of the save functions that generate sections. - - - ``cleanup`` functions for both save and load that are called - at the end of migration. - -Note that the contents of the sections for iterative migration tend -to be open-coded by the devices; care should be taken in parsing -the results and structuring the stream to make them easy to validate. - -Device ordering ---------------- - -There are cases in which the ordering of device loading matters; for -example in some systems where a device may assert an interrupt during loading, -if the interrupt controller is loaded later then it might lose the state. - -Some ordering is implicitly provided by the order in which the machine -definition creates devices, however this is somewhat fragile. - -The ``MigrationPriority`` enum provides a means of explicitly enforcing -ordering. Numerically higher priorities are loaded earlier. -The priority is set by setting the ``priority`` field of the top level -``VMStateDescription`` for the device. - -Stream structure -================ - -The stream tries to be word and endian agnostic, allowing migration between hosts -of different characteristics running the same VM. - - - Header - - - Magic - - Version - - VM configuration section - - - Machine type - - Target page bits - - List of sections - Each section contains a device, or one iteration of a device save. - - - section type - - section id - - ID string (First section of each device) - - instance id (First section of each device) - - version id (First section of each device) - - - - Footer mark - - EOF mark - - VM Description structure - Consisting of a JSON description of the contents for analysis only - -The ``device data`` in each section consists of the data produced -by the code described above. For non-iterative devices they have a single -section; iterative devices have an initial and last section and a set -of parts in between. -Note that there is very little checking by the common code of the integrity -of the ``device data`` contents, that's up to the devices themselves. 
-The ``footer mark`` provides a little bit of protection for the case where -the receiving side reads more or less data than expected. - -The ``ID string`` is normally unique, having been formed from a bus name -and device address, PCI devices and storage devices hung off PCI controllers -fit this pattern well. Some devices are fixed single instances (e.g. "pc-ram"). -Others (especially either older devices or system devices which for -some reason don't have a bus concept) make use of the ``instance id`` -for otherwise identically named devices. - -Return path ------------ - -Only a unidirectional stream is required for normal migration, however a -``return path`` can be created when bidirectional communication is desired. -This is primarily used by postcopy, but is also used to return a success -flag to the source at the end of migration. - -``qemu_file_get_return_path(QEMUFile* fwdpath)`` gives the QEMUFile* for the return -path. - - Source side - - Forward path - written by migration thread - Return path - opened by main thread, read by return-path thread - - Destination side - - Forward path - read by main thread - Return path - opened by main thread, written by main thread AND postcopy - thread (protected by rp_mutex) - -Dirty limit -===================== -The dirty limit, short for dirty page rate upper limit, is a new capability -introduced in the 8.1 QEMU release that uses a new algorithm based on the KVM -dirty ring to throttle down the guest during live migration. - -The algorithm framework is as follows: - -:: - - ------------------------------------------------------------------------------ - main --------------> throttle thread ------------> PREPARE(1) <-------- - thread \ | | - \ | | - \ V | - -\ CALCULATE(2) | - \ | | - \ | | - \ V | - \ SET PENALTY(3) ----- - -\ | - \ | - \ V - -> virtual CPU thread -------> ACCEPT PENALTY(4) - ------------------------------------------------------------------------------ - -When the qmp command qmp_set_vcpu_dirty_limit is called for the first time, -the QEMU main thread starts the throttle thread. The throttle thread, once -launched, executes the loop, which consists of three steps: - - - PREPARE (1) - - The entire work of PREPARE (1) is preparation for the second stage, - CALCULATE(2), as the name implies. It involves preparing the dirty - page rate value and the corresponding upper limit of the VM: - The dirty page rate is calculated via the KVM dirty ring mechanism, - which tells QEMU how many dirty pages a virtual CPU has had since the - last KVM_EXIT_DIRTY_RING_FULL exception; The dirty page rate upper - limit is specified by caller, therefore fetch it directly. - - - CALCULATE (2) - - Calculate a suitable sleep period for each virtual CPU, which will be - used to determine the penalty for the target virtual CPU. The - computation must be done carefully in order to reduce the dirty page - rate progressively down to the upper limit without oscillation. To - achieve this, two strategies are provided: the first is to add or - subtract sleep time based on the ratio of the current dirty page rate - to the limit, which is used when the current dirty page rate is far - from the limit; the second is to add or subtract a fixed time when - the current dirty page rate is close to the limit. - - - SET PENALTY (3) - - Set the sleep time for each virtual CPU that should be penalized based - on the results of the calculation supplied by step CALCULATE (2). 
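Purely as an illustration of the two strategies described under CALCULATE (2), here is a simplified sketch of how a per-vCPU sleep penalty could be adjusted; this is not QEMU's actual implementation, and the threshold, step and quantum values are invented:

.. code:: c

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: derive the next sleep penalty (in microseconds) for one vCPU
     * from its current dirty page rate and the configured upper limit. */
    static int64_t next_penalty_us(int64_t penalty_us,
                                   int64_t dirty_rate, int64_t limit)
    {
        const int64_t base_us = 1000;  /* arbitrary proportional quantum */
        const int64_t step_us = 100;   /* arbitrary fixed step near the limit */
        bool near_limit;

        if (limit <= 0) {
            return penalty_us;         /* no limit configured */
        }

        /* treat "close to the limit" as within 10% either way */
        near_limit = dirty_rate > limit * 9 / 10 &&
                     dirty_rate < limit * 11 / 10;

        if (near_limit) {
            /* near the limit: small fixed adjustment, to avoid oscillation */
            penalty_us += dirty_rate > limit ? step_us : -step_us;
        } else {
            /* far from the limit: adjust in proportion to the rate/limit gap */
            penalty_us += base_us * (dirty_rate - limit) / limit;
        }

        return penalty_us > 0 ? penalty_us : 0;
    }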
- -After completing the three above stages, the throttle thread loops back -to step PREPARE (1) until the dirty limit is reached. - -On the other hand, each virtual CPU thread reads the sleep duration and -sleeps in the path of the KVM_EXIT_DIRTY_RING_FULL exception handler, that -is ACCEPT PENALTY (4). Virtual CPUs tied with writing processes will -obviously exit to the path and get penalized, whereas virtual CPUs involved -with read processes will not. - -In summary, thanks to the KVM dirty ring technology, the dirty limit -algorithm will restrict virtual CPUs as needed to keep their dirty page -rate inside the limit. This leads to more steady reading performance during -live migration and can aid in improving large guest responsiveness. - -Postcopy -======== - -'Postcopy' migration is a way to deal with migrations that refuse to converge -(or take too long to converge) its plus side is that there is an upper bound on -the amount of migration traffic and time it takes, the down side is that during -the postcopy phase, a failure of *either* side causes the guest to be lost. - -In postcopy the destination CPUs are started before all the memory has been -transferred, and accesses to pages that are yet to be transferred cause -a fault that's translated by QEMU into a request to the source QEMU. - -Postcopy can be combined with precopy (i.e. normal migration) so that if precopy -doesn't finish in a given time the switch is made to postcopy. - -Enabling postcopy ------------------ - -To enable postcopy, issue this command on the monitor (both source and -destination) prior to the start of migration: - -``migrate_set_capability postcopy-ram on`` - -The normal commands are then used to start a migration, which is still -started in precopy mode. Issuing: - -``migrate_start_postcopy`` - -will now cause the transition from precopy to postcopy. -It can be issued immediately after migration is started or any -time later on. Issuing it after the end of a migration is harmless. - -Blocktime is a postcopy live migration metric, intended to show how -long the vCPU was in state of interruptible sleep due to pagefault. -That metric is calculated both for all vCPUs as overlapped value, and -separately for each vCPU. These values are calculated on destination -side. To enable postcopy blocktime calculation, enter following -command on destination monitor: - -``migrate_set_capability postcopy-blocktime on`` - -Postcopy blocktime can be retrieved by query-migrate qmp command. -postcopy-blocktime value of qmp command will show overlapped blocking -time for all vCPU, postcopy-vcpu-blocktime will show list of blocking -time per vCPU. - -.. note:: - During the postcopy phase, the bandwidth limits set using - ``migrate_set_parameter`` is ignored (to avoid delaying requested pages that - the destination is waiting for). - -Postcopy device transfer ------------------------- - -Loading of device data may cause the device emulation to access guest RAM -that may trigger faults that have to be resolved by the source, as such -the migration stream has to be able to respond with page data *during* the -device load, and hence the device data has to be read from the stream completely -before the device load begins to free the stream up. This is achieved by -'packaging' the device data into a blob that's read in one go. 
- -Source behaviour ----------------- - -Until postcopy is entered the migration stream is identical to normal -precopy, except for the addition of a 'postcopy advise' command at -the beginning, to tell the destination that postcopy might happen. -When postcopy starts the source sends the page discard data and then -forms the 'package' containing: - - - Command: 'postcopy listen' - - The device state - - A series of sections, identical to the precopy streams device state stream - containing everything except postcopiable devices (i.e. RAM) - - Command: 'postcopy run' - -The 'package' is sent as the data part of a Command: ``CMD_PACKAGED``, and the -contents are formatted in the same way as the main migration stream. - -During postcopy the source scans the list of dirty pages and sends them -to the destination without being requested (in much the same way as precopy), -however when a page request is received from the destination, the dirty page -scanning restarts from the requested location. This causes requested pages -to be sent quickly, and also causes pages directly after the requested page -to be sent quickly in the hope that those pages are likely to be used -by the destination soon. - -Destination behaviour ---------------------- - -Initially the destination looks the same as precopy, with a single thread -reading the migration stream; the 'postcopy advise' and 'discard' commands -are processed to change the way RAM is managed, but don't affect the stream -processing. - -:: - - ------------------------------------------------------------------------------ - 1 2 3 4 5 6 7 - main -----DISCARD-CMD_PACKAGED ( LISTEN DEVICE DEVICE DEVICE RUN ) - thread | | - | (page request) - | \___ - v \ - listen thread: --- page -- page -- page -- page -- page -- - - a b c - ------------------------------------------------------------------------------ - -- On receipt of ``CMD_PACKAGED`` (1) - - All the data associated with the package - the ( ... ) section in the diagram - - is read into memory, and the main thread recurses into qemu_loadvm_state_main - to process the contents of the package (2) which contains commands (3,6) and - devices (4...) - -- On receipt of 'postcopy listen' - 3 -(i.e. the 1st command in the package) - - a new thread (a) is started that takes over servicing the migration stream, - while the main thread carries on loading the package. It loads normal - background page data (b) but if during a device load a fault happens (5) - the returned page (c) is loaded by the listen thread allowing the main - threads device load to carry on. - -- The last thing in the ``CMD_PACKAGED`` is a 'RUN' command (6) - - letting the destination CPUs start running. At the end of the - ``CMD_PACKAGED`` (7) the main thread returns to normal running behaviour and - is no longer used by migration, while the listen thread carries on servicing - page data until the end of migration. - -Postcopy Recovery ------------------ - -Comparing to precopy, postcopy is special on error handlings. When any -error happens (in this case, mostly network errors), QEMU cannot easily -fail a migration because VM data resides in both source and destination -QEMU instances. On the other hand, when issue happens QEMU on both sides -will go into a paused state. It'll need a recovery phase to continue a -paused postcopy migration. 
- -The recovery phase normally contains a few steps: - - - When network issue occurs, both QEMU will go into PAUSED state - - - When the network is recovered (or a new network is provided), the admin - can setup the new channel for migration using QMP command - 'migrate-recover' on destination node, preparing for a resume. - - - On source host, the admin can continue the interrupted postcopy - migration using QMP command 'migrate' with resume=true flag set. - - - After the connection is re-established, QEMU will continue the postcopy - migration on both sides. - -During a paused postcopy migration, the VM can logically still continue -running, and it will not be impacted from any page access to pages that -were already migrated to destination VM before the interruption happens. -However, if any of the missing pages got accessed on destination VM, the VM -thread will be halted waiting for the page to be migrated, it means it can -be halted until the recovery is complete. - -The impact of accessing missing pages can be relevant to different -configurations of the guest. For example, when with async page fault -enabled, logically the guest can proactively schedule out the threads -accessing missing pages. - -Postcopy states ---------------- - -Postcopy moves through a series of states (see postcopy_state) from -ADVISE->DISCARD->LISTEN->RUNNING->END - - - Advise - - Set at the start of migration if postcopy is enabled, even - if it hasn't had the start command; here the destination - checks that its OS has the support needed for postcopy, and performs - setup to ensure the RAM mappings are suitable for later postcopy. - The destination will fail early in migration at this point if the - required OS support is not present. - (Triggered by reception of POSTCOPY_ADVISE command) - - - Discard - - Entered on receipt of the first 'discard' command; prior to - the first Discard being performed, hugepages are switched off - (using madvise) to ensure that no new huge pages are created - during the postcopy phase, and to cause any huge pages that - have discards on them to be broken. - - - Listen - - The first command in the package, POSTCOPY_LISTEN, switches - the destination state to Listen, and starts a new thread - (the 'listen thread') which takes over the job of receiving - pages off the migration stream, while the main thread carries - on processing the blob. With this thread able to process page - reception, the destination now 'sensitises' the RAM to detect - any access to missing pages (on Linux using the 'userfault' - system). - - - Running - - POSTCOPY_RUN causes the destination to synchronise all - state and start the CPUs and IO devices running. The main - thread now finishes processing the migration package and - now carries on as it would for normal precopy migration - (although it can't do the cleanup it would do as it - finishes a normal migration). - - - Paused - - Postcopy can run into a paused state (normally on both sides when - happens), where all threads will be temporarily halted mostly due to - network errors. When reaching paused state, migration will make sure - the qemu binary on both sides maintain the data without corrupting - the VM. To continue the migration, the admin needs to fix the - migration channel using the QMP command 'migrate-recover' on the - destination node, then resume the migration using QMP command 'migrate' - again on source node, with resume=true flag set. 
- - - End - - The listen thread can now quit, and perform the cleanup of migration - state, the migration is now complete. - -Source side page map --------------------- - -The 'migration bitmap' in postcopy is basically the same as in the precopy, -where each of the bit to indicate that page is 'dirty' - i.e. needs -sending. During the precopy phase this is updated as the CPU dirties -pages, however during postcopy the CPUs are stopped and nothing should -dirty anything any more. Instead, dirty bits are cleared when the relevant -pages are sent during postcopy. - -Postcopy with hugepages ------------------------ - -Postcopy now works with hugetlbfs backed memory: - - a) The linux kernel on the destination must support userfault on hugepages. - b) The huge-page configuration on the source and destination VMs must be - identical; i.e. RAMBlocks on both sides must use the same page size. - c) Note that ``-mem-path /dev/hugepages`` will fall back to allocating normal - RAM if it doesn't have enough hugepages, triggering (b) to fail. - Using ``-mem-prealloc`` enforces the allocation using hugepages. - d) Care should be taken with the size of hugepage used; postcopy with 2MB - hugepages works well, however 1GB hugepages are likely to be problematic - since it takes ~1 second to transfer a 1GB hugepage across a 10Gbps link, - and until the full page is transferred the destination thread is blocked. - -Postcopy with shared memory ---------------------------- - -Postcopy migration with shared memory needs explicit support from the other -processes that share memory and from QEMU. There are restrictions on the type of -memory that userfault can support shared. - -The Linux kernel userfault support works on ``/dev/shm`` memory and on ``hugetlbfs`` -(although the kernel doesn't provide an equivalent to ``madvise(MADV_DONTNEED)`` -for hugetlbfs which may be a problem in some configurations). - -The vhost-user code in QEMU supports clients that have Postcopy support, -and the ``vhost-user-bridge`` (in ``tests/``) and the DPDK package have changes -to support postcopy. - -The client needs to open a userfaultfd and register the areas -of memory that it maps with userfault. The client must then pass the -userfaultfd back to QEMU together with a mapping table that allows -fault addresses in the clients address space to be converted back to -RAMBlock/offsets. The client's userfaultfd is added to the postcopy -fault-thread and page requests are made on behalf of the client by QEMU. -QEMU performs 'wake' operations on the client's userfaultfd to allow it -to continue after a page has arrived. - -.. note:: - There are two future improvements that would be nice: - a) Some way to make QEMU ignorant of the addresses in the clients - address space - b) Avoiding the need for QEMU to perform ufd-wake calls after the - pages have arrived - -Retro-fitting postcopy to existing clients is possible: - a) A mechanism is needed for the registration with userfault as above, - and the registration needs to be coordinated with the phases of - postcopy. In vhost-user extra messages are added to the existing - control channel. - b) Any thread that can block due to guest memory accesses must be - identified and the implication understood; for example if the - guest memory access is made while holding a lock then all other - threads waiting for that lock will also be blocked. 
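-
-Returning to the recovery flow described in the "Postcopy Recovery" section
-above, a sketch of the QMP side of it (URIs are illustrative; see
-qapi/migration.json for the authoritative definitions of ``migrate-recover``
-and the ``resume`` flag of ``migrate``)::
-
-    # On the destination, once a usable channel exists again:
-    -> { "execute": "migrate-recover",
-         "arguments": { "uri": "tcp:0:5555" } }
-    <- { "return": {} }
-
-    # On the source, resume the interrupted migration:
-    -> { "execute": "migrate",
-         "arguments": { "uri": "tcp:destination.example.com:5555",
-                        "resume": true } }
-    <- { "return": {} }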
- -Postcopy Preemption Mode ------------------------- - -Postcopy preempt is a new capability introduced in 8.0 QEMU release, it -allows urgent pages (those got page fault requested from destination QEMU -explicitly) to be sent in a separate preempt channel, rather than queued in -the background migration channel. Anyone who cares about latencies of page -faults during a postcopy migration should enable this feature. By default, -it's not enabled. - -Firmware -======== - -Migration migrates the copies of RAM and ROM, and thus when running -on the destination it includes the firmware from the source. Even after -resetting a VM, the old firmware is used. Only once QEMU has been restarted -is the new firmware in use. - -- Changes in firmware size can cause changes in the required RAMBlock size - to hold the firmware and thus migration can fail. In practice it's best - to pad firmware images to convenient powers of 2 with plenty of space - for growth. - -- Care should be taken with device emulation code so that newer - emulation code can work with older firmware to allow forward migration. - -- Care should be taken with newer firmware so that backward migration - to older systems with older device emulation code will work. - -In some cases it may be best to tie specific firmware versions to specific -versioned machine types to cut down on the combinations that will need -support. This is also useful when newer versions of firmware outgrow -the padding. - - -Backwards compatibility -======================= - -How backwards compatibility works ---------------------------------- - -When we do migration, we have two QEMU processes: the source and the -target. There are two cases, they are the same version or they are -different versions. The easy case is when they are the same version. -The difficult one is when they are different versions. - -There are two things that are different, but they have very similar -names and sometimes get confused: - -- QEMU version -- machine type version - -Let's start with a practical example, we start with: - -- qemu-system-x86_64 (v5.2), from now on qemu-5.2. -- qemu-system-x86_64 (v5.1), from now on qemu-5.1. - -Related to this are the "latest" machine types defined on each of -them: - -- pc-q35-5.2 (newer one in qemu-5.2) from now on pc-5.2 -- pc-q35-5.1 (newer one in qemu-5.1) from now on pc-5.1 - -First of all, migration is only supposed to work if you use the same -machine type in both source and destination. The QEMU hardware -configuration needs to be the same also on source and destination. -Most aspects of the backend configuration can be changed at will, -except for a few cases where the backend features influence frontend -device feature exposure. But that is not relevant for this section. - -I am going to list the number of combinations that we can have. Let's -start with the trivial ones, QEMU is the same on source and -destination: - -1 - qemu-5.2 -M pc-5.2 -> migrates to -> qemu-5.2 -M pc-5.2 - - This is the latest QEMU with the latest machine type. - This have to work, and if it doesn't work it is a bug. - -2 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1 - - Exactly the same case than the previous one, but for 5.1. - Nothing to see here either. - -This are the easiest ones, we will not talk more about them in this -section. - -Now we start with the more interesting cases. 
Consider the case where -we have the same QEMU version in both sides (qemu-5.2) but we are using -the latest machine type for that version (pc-5.2) but one of an older -QEMU version, in this case pc-5.1. - -3 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 - - It needs to use the definition of pc-5.1 and the devices as they - were configured on 5.1, but this should be easy in the sense that - both sides are the same QEMU and both sides have exactly the same - idea of what the pc-5.1 machine is. - -4 - qemu-5.1 -M pc-5.2 -> migrates to -> qemu-5.1 -M pc-5.2 - - This combination is not possible as the qemu-5.1 doesn't understand - pc-5.2 machine type. So nothing to worry here. - -Now it comes the interesting ones, when both QEMU processes are -different. Notice also that the machine type needs to be pc-5.1, -because we have the limitation than qemu-5.1 doesn't know pc-5.2. So -the possible cases are: - -5 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1 - - This migration is known as newer to older. We need to make sure - when we are developing 5.2 we need to take care about not to break - migration to qemu-5.1. Notice that we can't make updates to - qemu-5.1 to understand whatever qemu-5.2 decides to change, so it is - in qemu-5.2 side to make the relevant changes. - -6 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 - - This migration is known as older to newer. We need to make sure - than we are able to receive migrations from qemu-5.1. The problem is - similar to the previous one. - -If qemu-5.1 and qemu-5.2 were the same, there will not be any -compatibility problems. But the reason that we create qemu-5.2 is to -get new features, devices, defaults, etc. - -If we get a device that has a new feature, or change a default value, -we have a problem when we try to migrate between different QEMU -versions. - -So we need a way to tell qemu-5.2 that when we are using machine type -pc-5.1, it needs to **not** use the feature, to be able to migrate to -real qemu-5.1. - -And the equivalent part when migrating from qemu-5.1 to qemu-5.2. -qemu-5.2 has to expect that it is not going to get data for the new -feature, because qemu-5.1 doesn't know about it. - -How do we tell QEMU about these device feature changes? In -hw/core/machine.c:hw_compat_X_Y arrays. - -If we change a default value, we need to put back the old value on -that array. And the device, during initialization needs to look at -that array to see what value it needs to get for that feature. And -what are we going to put in that array, the value of a property. - -To create a property for a device, we need to use one of the -DEFINE_PROP_*() macros. See include/hw/qdev-properties.h to find the -macros that exist. With it, we set the default value for that -property, and that is what it is going to get in the latest released -version. But if we want a different value for a previous version, we -can change that in the hw_compat_X_Y arrays. - -hw_compat_X_Y is an array of registers that have the format: - -- name_device -- name_property -- value - -Let's see a practical example. - -In qemu-5.2 virtio-blk-device got multi queue support. This is a -change that is not backward compatible. In qemu-5.1 it has one -queue. In qemu-5.2 it has the same number of queues as the number of -cpus in the system. - -When we are doing migration, if we migrate from a device that has 4 -queues to a device that have only one queue, we don't know where to -put the extra information for the other 3 queues, and we fail -migration. 
- -Similar problem when we migrate from qemu-5.1 that has only one queue -to qemu-5.2, we only sent information for one queue, but destination -has 4, and we have 3 queues that are not properly initialized and -anything can happen. - -So, how can we address this problem. Easy, just convince qemu-5.2 -that when it is running pc-5.1, it needs to set the number of queues -for virtio-blk-devices to 1. - -That way we fix the cases 5 and 6. - -5 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1 - - qemu-5.2 -M pc-5.1 sets number of queues to be 1. - qemu-5.1 -M pc-5.1 expects number of queues to be 1. - - correct. migration works. - -6 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 - - qemu-5.1 -M pc-5.1 sets number of queues to be 1. - qemu-5.2 -M pc-5.1 expects number of queues to be 1. - - correct. migration works. - -And now the other interesting case, case 3. In this case we have: - -3 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 - - Here we have the same QEMU in both sides. So it doesn't matter a - lot if we have set the number of queues to 1 or not, because - they are the same. - - WRONG! - - Think what happens if we do one of this double migrations: - - A -> migrates -> B -> migrates -> C - - where: - - A: qemu-5.1 -M pc-5.1 - B: qemu-5.2 -M pc-5.1 - C: qemu-5.2 -M pc-5.1 - - migration A -> B is case 6, so number of queues needs to be 1. - - migration B -> C is case 3, so we don't care. But actually we - care because we haven't started the guest in qemu-5.2, it came - migrated from qemu-5.1. So to be in the safe place, we need to - always use number of queues 1 when we are using pc-5.1. - -Now, how was this done in reality? The following commit shows how it -was done:: - - commit 9445e1e15e66c19e42bea942ba810db28052cd05 - Author: Stefan Hajnoczi - Date: Tue Aug 18 15:33:47 2020 +0100 - - virtio-blk-pci: default num_queues to -smp N - -The relevant parts for migration are:: - - @@ -1281,7 +1284,8 @@ static Property virtio_blk_properties[] = { - #endif - DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0, - true), - - DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1), - + DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, - + VIRTIO_BLK_AUTO_NUM_QUEUES), - DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256), - -It changes the default value of num_queues. But it fishes it for old -machine types to have the right value:: - - @@ -31,6 +31,7 @@ - GlobalProperty hw_compat_5_1[] = { - ... - + { "virtio-blk-device", "num-queues", "1"}, - ... - }; - -A device with different features on both sides ----------------------------------------------- - -Let's assume that we are using the same QEMU binary on both sides, -just to make the things easier. But we have a device that has -different features on both sides of the migration. That can be -because the devices are different, because the kernel driver of both -devices have different features, whatever. - -How can we get this to work with migration. The way to do that is -"theoretically" easy. You have to get the features that the device -has in the source of the migration. The features that the device has -on the target of the migration, you get the intersection of the -features of both sides, and that is the way that you should launch -QEMU. - -Notice that this is not completely related to QEMU. The most -important thing here is that this should be handled by the managing -application that launches QEMU. 
If QEMU is configured correctly, the -migration will succeed. - -That said, actually doing it is complicated. Almost all devices are -bad at being able to be launched with only some features enabled. -With one big exception: cpus. - -You can read the documentation for QEMU x86 cpu models here: - -https://qemu-project.gitlab.io/qemu/system/qemu-cpu-models.html - -See when they talk about migration they recommend that one chooses the -newest cpu model that is supported for all cpus. - -Let's say that we have: - -Host A: - -Device X has the feature Y - -Host B: - -Device X has not the feature Y - -If we try to migrate without any care from host A to host B, it will -fail because when migration tries to load the feature Y on -destination, it will find that the hardware is not there. - -Doing this would be the equivalent of doing with cpus: - -Host A: - -$ qemu-system-x86_64 -cpu host - -Host B: - -$ qemu-system-x86_64 -cpu host - -When both hosts have different cpu features this is guaranteed to -fail. Especially if Host B has less features than host A. If host A -has less features than host B, sometimes it works. Important word of -last sentence is "sometimes". - -So, forgetting about cpu models and continuing with the -cpu host -example, let's see that the differences of the cpus is that Host A and -B have the following features: - -Features: 'pcid' 'stibp' 'taa-no' -Host A: X X -Host B: X - -And we want to migrate between them, the way configure both QEMU cpu -will be: - -Host A: - -$ qemu-system-x86_64 -cpu host,pcid=off,stibp=off - -Host B: - -$ qemu-system-x86_64 -cpu host,taa-no=off - -And you would be able to migrate between them. It is responsibility -of the management application or of the user to make sure that the -configuration is correct. QEMU doesn't know how to look at this kind -of features in general. - -Notice that we don't recommend to use -cpu host for migration. It is -used in this example because it makes the example simpler. - -Other devices have worse control about individual features. If they -want to be able to migrate between hosts that show different features, -the device needs a way to configure which ones it is going to use. - -In this section we have considered that we are using the same QEMU -binary in both sides of the migration. If we use different QEMU -versions process, then we need to have into account all other -differences and the examples become even more complicated. - -How to mitigate when we have a backward compatibility error ------------------------------------------------------------ - -We broke migration for old machine types continuously during -development. But as soon as we find that there is a problem, we fix -it. The problem is what happens when we detect after we have done a -release that something has gone wrong. - -Let see how it worked with one example. - -After the release of qemu-8.0 we found a problem when doing migration -of the machine type pc-7.2. - -- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2 - - This migration works - -- $ qemu-8.0 -M pc-7.2 -> qemu-8.0 -M pc-7.2 - - This migration works - -- $ qemu-8.0 -M pc-7.2 -> qemu-7.2 -M pc-7.2 - - This migration fails - -- $ qemu-7.2 -M pc-7.2 -> qemu-8.0 -M pc-7.2 - - This migration fails - -So clearly something fails when migration between qemu-7.2 and -qemu-8.0 with machine type pc-7.2. The error messages, and git bisect -pointed to this commit. 
- -In qemu-8.0 we got this commit:: - - commit 010746ae1db7f52700cb2e2c46eb94f299cfa0d2 - Author: Jonathan Cameron - Date: Thu Mar 2 13:37:02 2023 +0000 - - hw/pci/aer: Implement PCI_ERR_UNCOR_MASK register - - -The relevant bits of the commit for our example are this ones:: - - --- a/hw/pci/pcie_aer.c - +++ b/hw/pci/pcie_aer.c - @@ -112,6 +112,10 @@ int pcie_aer_init(PCIDevice *dev, - - pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS, - PCI_ERR_UNC_SUPPORTED); - + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, - + PCI_ERR_UNC_MASK_DEFAULT); - + pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, - + PCI_ERR_UNC_SUPPORTED); - - pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER, - PCI_ERR_UNC_SEVERITY_DEFAULT); - -The patch changes how we configure PCI space for AER. But QEMU fails -when the PCI space configuration is different between source and -destination. - -The following commit shows how this got fixed:: - - commit 5ed3dabe57dd9f4c007404345e5f5bf0e347317f - Author: Leonardo Bras - Date: Tue May 2 21:27:02 2023 -0300 - - hw/pci: Disable PCI_ERR_UNCOR_MASK register for machine type < 8.0 - - [...] - -The relevant parts of the fix in QEMU are as follow: - -First, we create a new property for the device to be able to configure -the old behaviour or the new behaviour:: - - diff --git a/hw/pci/pci.c b/hw/pci/pci.c - index 8a87ccc8b0..5153ad63d6 100644 - --- a/hw/pci/pci.c - +++ b/hw/pci/pci.c - @@ -79,6 +79,8 @@ static Property pci_props[] = { - DEFINE_PROP_STRING("failover_pair_id", PCIDevice, - failover_pair_id), - DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0), - + DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present, - + QEMU_PCIE_ERR_UNC_MASK_BITNR, true), - DEFINE_PROP_END_OF_LIST() - }; - -Notice that we enable the feature for new machine types. - -Now we see how the fix is done. This is going to depend on what kind -of breakage happens, but in this case it is quite simple:: - - diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c - index 103667c368..374d593ead 100644 - --- a/hw/pci/pcie_aer.c - +++ b/hw/pci/pcie_aer.c - @@ -112,10 +112,13 @@ int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, - uint16_t offset, - - pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS, - PCI_ERR_UNC_SUPPORTED); - - pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, - - PCI_ERR_UNC_MASK_DEFAULT); - - pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, - - PCI_ERR_UNC_SUPPORTED); - + - + if (dev->cap_present & QEMU_PCIE_ERR_UNC_MASK) { - + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, - + PCI_ERR_UNC_MASK_DEFAULT); - + pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, - + PCI_ERR_UNC_SUPPORTED); - + } - - pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER, - PCI_ERR_UNC_SEVERITY_DEFAULT); - -I.e. If the property bit is enabled, we configure it as we did for -qemu-8.0. If the property bit is not set, we configure it as it was in 7.2. 
- -And now, everything that is missing is disabling the feature for old -machine types:: - - diff --git a/hw/core/machine.c b/hw/core/machine.c - index 47a34841a5..07f763eb2e 100644 - --- a/hw/core/machine.c - +++ b/hw/core/machine.c - @@ -48,6 +48,7 @@ GlobalProperty hw_compat_7_2[] = { - { "e1000e", "migrate-timadj", "off" }, - { "virtio-mem", "x-early-migration", "false" }, - { "migration", "x-preempt-pre-7-2", "true" }, - + { TYPE_PCI_DEVICE, "x-pcie-err-unc-mask", "off" }, - }; - const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2); - -And now, when qemu-8.0.1 is released with this fix, all combinations -are going to work as supposed. - -- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2 (works) -- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 (works) -- $ qemu-8.0.1 -M pc-7.2 -> qemu-7.2 -M pc-7.2 (works) -- $ qemu-7.2 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 (works) - -So the normality has been restored and everything is ok, no? - -Not really, now our matrix is much bigger. We started with the easy -cases, migration from the same version to the same version always -works: - -- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2 -- $ qemu-8.0 -M pc-7.2 -> qemu-8.0 -M pc-7.2 -- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 - -Now the interesting ones. When the QEMU processes versions are -different. For the 1st set, their fail and we can do nothing, both -versions are released and we can't change anything. - -- $ qemu-7.2 -M pc-7.2 -> qemu-8.0 -M pc-7.2 -- $ qemu-8.0 -M pc-7.2 -> qemu-7.2 -M pc-7.2 - -This two are the ones that work. The whole point of making the -change in qemu-8.0.1 release was to fix this issue: - -- $ qemu-7.2 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 -- $ qemu-8.0.1 -M pc-7.2 -> qemu-7.2 -M pc-7.2 - -But now we found that qemu-8.0 neither can migrate to qemu-7.2 not -qemu-8.0.1. - -- $ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 -- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0 -M pc-7.2 - -So, if we start a pc-7.2 machine in qemu-8.0 we can't migrate it to -anything except to qemu-8.0. - -Can we do better? - -Yeap. If we know that we are going to do this migration: - -- $ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 - -We can launch the appropriate devices with:: - - --device...,x-pci-e-err-unc-mask=on - -And now we can receive a migration from 8.0. And from now on, we can -do that migration to new machine types if we remember to enable that -property for pc-7.2. Notice that we need to remember, it is not -enough to know that the source of the migration is qemu-8.0. Think of -this example: - -$ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 -> qemu-8.2 -M pc-7.2 - -In the second migration, the source is not qemu-8.0, but we still have -that "problem" and have that property enabled. Notice that we need to -continue having this mark/property until we have this machine -rebooted. But it is not a normal reboot (that don't reload QEMU) we -need the machine to poweroff/poweron on a fixed QEMU. And from now -on we can use the proper real machine. diff --git a/docs/devel/migration/CPR.rst b/docs/devel/migration/CPR.rst new file mode 100644 index 00000000000..63c36470cf6 --- /dev/null +++ b/docs/devel/migration/CPR.rst @@ -0,0 +1,147 @@ +CheckPoint and Restart (CPR) +============================ + +CPR is the umbrella name for a set of migration modes in which the +VM is migrated to a new QEMU instance on the same host. It is +intended for use when the goal is to update host software components +that run the VM, such as QEMU or even the host kernel. At this time, +cpr-reboot is the only available mode. 
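+
+The mode is selected with the ``mode`` migration parameter.  The examples
+later in this document use the HMP monitor; a sketch of the equivalent QMP
+exchange (see qapi/migration.json for the authoritative definition)::
+
+    -> { "execute": "migrate-set-parameters",
+         "arguments": { "mode": "cpr-reboot" } }
+    <- { "return": {} }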
+ +Because QEMU is restarted on the same host, with access to the same +local devices, CPR is allowed in certain cases where normal migration +would be blocked. However, the user must not modify the contents of +guest block devices between quitting old QEMU and starting new QEMU. + +CPR unconditionally stops VM execution before memory is saved, and +thus does not depend on any form of dirty page tracking. + +cpr-reboot mode +--------------- + +In this mode, QEMU stops the VM, and writes VM state to the migration +URI, which will typically be a file. After quitting QEMU, the user +resumes by running QEMU with the ``-incoming`` option. Because the +old and new QEMU instances are not active concurrently, the URI cannot +be a type that streams data from one instance to the other. + +Guest RAM can be saved in place if backed by shared memory, or can be +copied to a file. The former is more efficient and is therefore +preferred. + +After state and memory are saved, the user may update userland host +software before restarting QEMU and resuming the VM. Further, if +the RAM is backed by persistent shared memory, such as a DAX device, +then the user may reboot to a new host kernel before restarting QEMU. + +This mode supports VFIO devices provided the user first puts the +guest in the suspended runstate, such as by issuing the +``guest-suspend-ram`` command to the QEMU guest agent. The agent +must be pre-installed in the guest, and the guest must support +suspend to RAM. Beware that suspension can take a few seconds, so +the user should poll to see the suspended state before proceeding +with the CPR operation. + +Usage +^^^^^ + +It is recommended that guest RAM be backed with some type of shared +memory, such as ``memory-backend-file,share=on``, and that the +``x-ignore-shared`` capability be set. This combination allows memory +to be saved in place. Otherwise, after QEMU stops the VM, all guest +RAM is copied to the migration URI. + +Outgoing: + * Set the migration mode parameter to ``cpr-reboot``. + * Set the ``x-ignore-shared`` capability if desired. + * Issue the ``migrate`` command. It is recommended the the URI be a + ``file`` type, but one can use other types such as ``exec``, + provided the command captures all the data from the outgoing side, + and provides all the data to the incoming side. + * Quit when QEMU reaches the postmigrate state. + +Incoming: + * Start QEMU with the ``-incoming defer`` option. + * Set the migration mode parameter to ``cpr-reboot``. + * Set the ``x-ignore-shared`` capability if desired. + * Issue the ``migrate-incoming`` command. + * If the VM was running when the outgoing ``migrate`` command was + issued, then QEMU automatically resumes VM execution. + +Example 1 +^^^^^^^^^ +:: + + # qemu-kvm -monitor stdio + -object memory-backend-file,id=ram0,size=4G,mem-path=/dev/dax0.0,align=2M,share=on -m 4G + ... + + (qemu) info status + VM status: running + (qemu) migrate_set_parameter mode cpr-reboot + (qemu) migrate_set_capability x-ignore-shared on + (qemu) migrate -d file:vm.state + (qemu) info status + VM status: paused (postmigrate) + (qemu) quit + + ### optionally update kernel and reboot + # systemctl kexec + kexec_core: Starting new kernel + ... + + # qemu-kvm ... 
-incoming defer + (qemu) info status + VM status: paused (inmigrate) + (qemu) migrate_set_parameter mode cpr-reboot + (qemu) migrate_set_capability x-ignore-shared on + (qemu) migrate_incoming file:vm.state + (qemu) info status + VM status: running + +Example 2: VFIO +^^^^^^^^^^^^^^^ +:: + + # qemu-kvm -monitor stdio + -object memory-backend-file,id=ram0,size=4G,mem-path=/dev/dax0.0,align=2M,share=on -m 4G + -device vfio-pci, ... + -chardev socket,id=qga0,path=qga.sock,server=on,wait=off + -device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0 + ... + + (qemu) info status + VM status: running + + # echo '{"execute":"guest-suspend-ram"}' | ncat --send-only -U qga.sock + + (qemu) info status + VM status: paused (suspended) + (qemu) migrate_set_parameter mode cpr-reboot + (qemu) migrate_set_capability x-ignore-shared on + (qemu) migrate -d file:vm.state + (qemu) info status + VM status: paused (postmigrate) + (qemu) quit + + ### optionally update kernel and reboot + # systemctl kexec + kexec_core: Starting new kernel + ... + + # qemu-kvm ... -incoming defer + (qemu) info status + VM status: paused (inmigrate) + (qemu) migrate_set_parameter mode cpr-reboot + (qemu) migrate_set_capability x-ignore-shared on + (qemu) migrate_incoming file:vm.state + (qemu) info status + VM status: paused (suspended) + (qemu) system_wakeup + (qemu) info status + VM status: running + +Caveats +^^^^^^^ + +cpr-reboot mode may not be used with postcopy, background-snapshot, +or COLO. diff --git a/docs/devel/migration/best-practices.rst b/docs/devel/migration/best-practices.rst new file mode 100644 index 00000000000..d7c34a30149 --- /dev/null +++ b/docs/devel/migration/best-practices.rst @@ -0,0 +1,48 @@ +============== +Best practices +============== + +Debugging +========= + +The migration stream can be analyzed thanks to ``scripts/analyze-migration.py``. + +Example usage: + +.. code-block:: shell + + $ qemu-system-x86_64 -display none -monitor stdio + (qemu) migrate "exec:cat > mig" + (qemu) q + $ ./scripts/analyze-migration.py -f mig + { + "ram (3)": { + "section sizes": { + "pc.ram": "0x0000000008000000", + ... + +See also ``analyze-migration.py -h`` help for more options. + +Firmware +======== + +Migration migrates the copies of RAM and ROM, and thus when running +on the destination it includes the firmware from the source. Even after +resetting a VM, the old firmware is used. Only once QEMU has been restarted +is the new firmware in use. + +- Changes in firmware size can cause changes in the required RAMBlock size + to hold the firmware and thus migration can fail. In practice it's best + to pad firmware images to convenient powers of 2 with plenty of space + for growth. + +- Care should be taken with device emulation code so that newer + emulation code can work with older firmware to allow forward migration. + +- Care should be taken with newer firmware so that backward migration + to older systems with older device emulation code will work. + +In some cases it may be best to tie specific firmware versions to specific +versioned machine types to cut down on the combinations that will need +support. This is also useful when newer versions of firmware outgrow +the padding. 
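+
+As a sketch of what padding to a power of 2 can look like in practice (the
+file names and the 4 MiB size are purely illustrative):
+
+.. code-block:: shell
+
+    # Pad a copy of the image with zeroes up to 4 MiB, leaving room for the
+    # firmware to grow without changing the RAMBlock size.
+    $ cp firmware.bin firmware-padded.bin
+    $ truncate -s 4M firmware-padded.bin
+    $ qemu-system-x86_64 -drive if=pflash,format=raw,file=firmware-padded.bin ...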
diff --git a/docs/devel/migration/compatibility.rst b/docs/devel/migration/compatibility.rst new file mode 100644 index 00000000000..5a5417ef069 --- /dev/null +++ b/docs/devel/migration/compatibility.rst @@ -0,0 +1,517 @@ +Backwards compatibility +======================= + +How backwards compatibility works +--------------------------------- + +When we do migration, we have two QEMU processes: the source and the +target. There are two cases, they are the same version or they are +different versions. The easy case is when they are the same version. +The difficult one is when they are different versions. + +There are two things that are different, but they have very similar +names and sometimes get confused: + +- QEMU version +- machine type version + +Let's start with a practical example, we start with: + +- qemu-system-x86_64 (v5.2), from now on qemu-5.2. +- qemu-system-x86_64 (v5.1), from now on qemu-5.1. + +Related to this are the "latest" machine types defined on each of +them: + +- pc-q35-5.2 (newer one in qemu-5.2) from now on pc-5.2 +- pc-q35-5.1 (newer one in qemu-5.1) from now on pc-5.1 + +First of all, migration is only supposed to work if you use the same +machine type in both source and destination. The QEMU hardware +configuration needs to be the same also on source and destination. +Most aspects of the backend configuration can be changed at will, +except for a few cases where the backend features influence frontend +device feature exposure. But that is not relevant for this section. + +I am going to list the number of combinations that we can have. Let's +start with the trivial ones, QEMU is the same on source and +destination: + +1 - qemu-5.2 -M pc-5.2 -> migrates to -> qemu-5.2 -M pc-5.2 + + This is the latest QEMU with the latest machine type. + This have to work, and if it doesn't work it is a bug. + +2 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1 + + Exactly the same case than the previous one, but for 5.1. + Nothing to see here either. + +This are the easiest ones, we will not talk more about them in this +section. + +Now we start with the more interesting cases. Consider the case where +we have the same QEMU version in both sides (qemu-5.2) but we are using +the latest machine type for that version (pc-5.2) but one of an older +QEMU version, in this case pc-5.1. + +3 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 + + It needs to use the definition of pc-5.1 and the devices as they + were configured on 5.1, but this should be easy in the sense that + both sides are the same QEMU and both sides have exactly the same + idea of what the pc-5.1 machine is. + +4 - qemu-5.1 -M pc-5.2 -> migrates to -> qemu-5.1 -M pc-5.2 + + This combination is not possible as the qemu-5.1 doesn't understand + pc-5.2 machine type. So nothing to worry here. + +Now it comes the interesting ones, when both QEMU processes are +different. Notice also that the machine type needs to be pc-5.1, +because we have the limitation than qemu-5.1 doesn't know pc-5.2. So +the possible cases are: + +5 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1 + + This migration is known as newer to older. We need to make sure + when we are developing 5.2 we need to take care about not to break + migration to qemu-5.1. Notice that we can't make updates to + qemu-5.1 to understand whatever qemu-5.2 decides to change, so it is + in qemu-5.2 side to make the relevant changes. + +6 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 + + This migration is known as older to newer. 
We need to make sure + than we are able to receive migrations from qemu-5.1. The problem is + similar to the previous one. + +If qemu-5.1 and qemu-5.2 were the same, there will not be any +compatibility problems. But the reason that we create qemu-5.2 is to +get new features, devices, defaults, etc. + +If we get a device that has a new feature, or change a default value, +we have a problem when we try to migrate between different QEMU +versions. + +So we need a way to tell qemu-5.2 that when we are using machine type +pc-5.1, it needs to **not** use the feature, to be able to migrate to +real qemu-5.1. + +And the equivalent part when migrating from qemu-5.1 to qemu-5.2. +qemu-5.2 has to expect that it is not going to get data for the new +feature, because qemu-5.1 doesn't know about it. + +How do we tell QEMU about these device feature changes? In +hw/core/machine.c:hw_compat_X_Y arrays. + +If we change a default value, we need to put back the old value on +that array. And the device, during initialization needs to look at +that array to see what value it needs to get for that feature. And +what are we going to put in that array, the value of a property. + +To create a property for a device, we need to use one of the +DEFINE_PROP_*() macros. See include/hw/qdev-properties.h to find the +macros that exist. With it, we set the default value for that +property, and that is what it is going to get in the latest released +version. But if we want a different value for a previous version, we +can change that in the hw_compat_X_Y arrays. + +hw_compat_X_Y is an array of registers that have the format: + +- name_device +- name_property +- value + +Let's see a practical example. + +In qemu-5.2 virtio-blk-device got multi queue support. This is a +change that is not backward compatible. In qemu-5.1 it has one +queue. In qemu-5.2 it has the same number of queues as the number of +cpus in the system. + +When we are doing migration, if we migrate from a device that has 4 +queues to a device that have only one queue, we don't know where to +put the extra information for the other 3 queues, and we fail +migration. + +Similar problem when we migrate from qemu-5.1 that has only one queue +to qemu-5.2, we only sent information for one queue, but destination +has 4, and we have 3 queues that are not properly initialized and +anything can happen. + +So, how can we address this problem. Easy, just convince qemu-5.2 +that when it is running pc-5.1, it needs to set the number of queues +for virtio-blk-devices to 1. + +That way we fix the cases 5 and 6. + +5 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1 + + qemu-5.2 -M pc-5.1 sets number of queues to be 1. + qemu-5.1 -M pc-5.1 expects number of queues to be 1. + + correct. migration works. + +6 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 + + qemu-5.1 -M pc-5.1 sets number of queues to be 1. + qemu-5.2 -M pc-5.1 expects number of queues to be 1. + + correct. migration works. + +And now the other interesting case, case 3. In this case we have: + +3 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 + + Here we have the same QEMU in both sides. So it doesn't matter a + lot if we have set the number of queues to 1 or not, because + they are the same. + + WRONG! + + Think what happens if we do one of this double migrations: + + A -> migrates -> B -> migrates -> C + + where: + + A: qemu-5.1 -M pc-5.1 + B: qemu-5.2 -M pc-5.1 + C: qemu-5.2 -M pc-5.1 + + migration A -> B is case 6, so number of queues needs to be 1. 
+ + migration B -> C is case 3, so we don't care. But actually we + care because we haven't started the guest in qemu-5.2, it came + migrated from qemu-5.1. So to be in the safe place, we need to + always use number of queues 1 when we are using pc-5.1. + +Now, how was this done in reality? The following commit shows how it +was done:: + + commit 9445e1e15e66c19e42bea942ba810db28052cd05 + Author: Stefan Hajnoczi + Date: Tue Aug 18 15:33:47 2020 +0100 + + virtio-blk-pci: default num_queues to -smp N + +The relevant parts for migration are:: + + @@ -1281,7 +1284,8 @@ static Property virtio_blk_properties[] = { + #endif + DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0, + true), + - DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1), + + DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, + + VIRTIO_BLK_AUTO_NUM_QUEUES), + DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256), + +It changes the default value of num_queues. But it fishes it for old +machine types to have the right value:: + + @@ -31,6 +31,7 @@ + GlobalProperty hw_compat_5_1[] = { + ... + + { "virtio-blk-device", "num-queues", "1"}, + ... + }; + +A device with different features on both sides +---------------------------------------------- + +Let's assume that we are using the same QEMU binary on both sides, +just to make the things easier. But we have a device that has +different features on both sides of the migration. That can be +because the devices are different, because the kernel driver of both +devices have different features, whatever. + +How can we get this to work with migration. The way to do that is +"theoretically" easy. You have to get the features that the device +has in the source of the migration. The features that the device has +on the target of the migration, you get the intersection of the +features of both sides, and that is the way that you should launch +QEMU. + +Notice that this is not completely related to QEMU. The most +important thing here is that this should be handled by the managing +application that launches QEMU. If QEMU is configured correctly, the +migration will succeed. + +That said, actually doing it is complicated. Almost all devices are +bad at being able to be launched with only some features enabled. +With one big exception: cpus. + +You can read the documentation for QEMU x86 cpu models here: + +https://qemu-project.gitlab.io/qemu/system/qemu-cpu-models.html + +See when they talk about migration they recommend that one chooses the +newest cpu model that is supported for all cpus. + +Let's say that we have: + +Host A: + +Device X has the feature Y + +Host B: + +Device X has not the feature Y + +If we try to migrate without any care from host A to host B, it will +fail because when migration tries to load the feature Y on +destination, it will find that the hardware is not there. + +Doing this would be the equivalent of doing with cpus: + +Host A: + +$ qemu-system-x86_64 -cpu host + +Host B: + +$ qemu-system-x86_64 -cpu host + +When both hosts have different cpu features this is guaranteed to +fail. Especially if Host B has less features than host A. If host A +has less features than host B, sometimes it works. Important word of +last sentence is "sometimes". 
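+
+One way for the management application to see what ``-cpu host`` actually
+contains on each host, and therefore what to intersect, is the
+``query-cpu-model-expansion`` QMP command (a sketch; not every target
+implements it, and the reply is host-specific)::
+
+    -> { "execute": "query-cpu-model-expansion",
+         "arguments": { "type": "full", "model": { "name": "host" } } }
+
+The reply describes an expanded model whose ``props`` dictionary lists the
+individual features, which can then be compared across hosts.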
+ +So, forgetting about cpu models and continuing with the -cpu host +example, let's see that the differences of the cpus is that Host A and +B have the following features: + +Features: 'pcid' 'stibp' 'taa-no' +Host A: X X +Host B: X + +And we want to migrate between them, the way configure both QEMU cpu +will be: + +Host A: + +$ qemu-system-x86_64 -cpu host,pcid=off,stibp=off + +Host B: + +$ qemu-system-x86_64 -cpu host,taa-no=off + +And you would be able to migrate between them. It is responsibility +of the management application or of the user to make sure that the +configuration is correct. QEMU doesn't know how to look at this kind +of features in general. + +Notice that we don't recommend to use -cpu host for migration. It is +used in this example because it makes the example simpler. + +Other devices have worse control about individual features. If they +want to be able to migrate between hosts that show different features, +the device needs a way to configure which ones it is going to use. + +In this section we have considered that we are using the same QEMU +binary in both sides of the migration. If we use different QEMU +versions process, then we need to have into account all other +differences and the examples become even more complicated. + +How to mitigate when we have a backward compatibility error +----------------------------------------------------------- + +We broke migration for old machine types continuously during +development. But as soon as we find that there is a problem, we fix +it. The problem is what happens when we detect after we have done a +release that something has gone wrong. + +Let see how it worked with one example. + +After the release of qemu-8.0 we found a problem when doing migration +of the machine type pc-7.2. + +- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2 + + This migration works + +- $ qemu-8.0 -M pc-7.2 -> qemu-8.0 -M pc-7.2 + + This migration works + +- $ qemu-8.0 -M pc-7.2 -> qemu-7.2 -M pc-7.2 + + This migration fails + +- $ qemu-7.2 -M pc-7.2 -> qemu-8.0 -M pc-7.2 + + This migration fails + +So clearly something fails when migration between qemu-7.2 and +qemu-8.0 with machine type pc-7.2. The error messages, and git bisect +pointed to this commit. + +In qemu-8.0 we got this commit:: + + commit 010746ae1db7f52700cb2e2c46eb94f299cfa0d2 + Author: Jonathan Cameron + Date: Thu Mar 2 13:37:02 2023 +0000 + + hw/pci/aer: Implement PCI_ERR_UNCOR_MASK register + + +The relevant bits of the commit for our example are this ones:: + + --- a/hw/pci/pcie_aer.c + +++ b/hw/pci/pcie_aer.c + @@ -112,6 +112,10 @@ int pcie_aer_init(PCIDevice *dev, + + pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS, + PCI_ERR_UNC_SUPPORTED); + + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, + + PCI_ERR_UNC_MASK_DEFAULT); + + pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, + + PCI_ERR_UNC_SUPPORTED); + + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER, + PCI_ERR_UNC_SEVERITY_DEFAULT); + +The patch changes how we configure PCI space for AER. But QEMU fails +when the PCI space configuration is different between source and +destination. + +The following commit shows how this got fixed:: + + commit 5ed3dabe57dd9f4c007404345e5f5bf0e347317f + Author: Leonardo Bras + Date: Tue May 2 21:27:02 2023 -0300 + + hw/pci: Disable PCI_ERR_UNCOR_MASK register for machine type < 8.0 + + [...] 
+ +The relevant parts of the fix in QEMU are as follow: + +First, we create a new property for the device to be able to configure +the old behaviour or the new behaviour:: + + diff --git a/hw/pci/pci.c b/hw/pci/pci.c + index 8a87ccc8b0..5153ad63d6 100644 + --- a/hw/pci/pci.c + +++ b/hw/pci/pci.c + @@ -79,6 +79,8 @@ static Property pci_props[] = { + DEFINE_PROP_STRING("failover_pair_id", PCIDevice, + failover_pair_id), + DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0), + + DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present, + + QEMU_PCIE_ERR_UNC_MASK_BITNR, true), + DEFINE_PROP_END_OF_LIST() + }; + +Notice that we enable the feature for new machine types. + +Now we see how the fix is done. This is going to depend on what kind +of breakage happens, but in this case it is quite simple:: + + diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c + index 103667c368..374d593ead 100644 + --- a/hw/pci/pcie_aer.c + +++ b/hw/pci/pcie_aer.c + @@ -112,10 +112,13 @@ int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, + uint16_t offset, + + pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS, + PCI_ERR_UNC_SUPPORTED); + - pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, + - PCI_ERR_UNC_MASK_DEFAULT); + - pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, + - PCI_ERR_UNC_SUPPORTED); + + + + if (dev->cap_present & QEMU_PCIE_ERR_UNC_MASK) { + + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, + + PCI_ERR_UNC_MASK_DEFAULT); + + pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, + + PCI_ERR_UNC_SUPPORTED); + + } + + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER, + PCI_ERR_UNC_SEVERITY_DEFAULT); + +I.e. If the property bit is enabled, we configure it as we did for +qemu-8.0. If the property bit is not set, we configure it as it was in 7.2. + +And now, everything that is missing is disabling the feature for old +machine types:: + + diff --git a/hw/core/machine.c b/hw/core/machine.c + index 47a34841a5..07f763eb2e 100644 + --- a/hw/core/machine.c + +++ b/hw/core/machine.c + @@ -48,6 +48,7 @@ GlobalProperty hw_compat_7_2[] = { + { "e1000e", "migrate-timadj", "off" }, + { "virtio-mem", "x-early-migration", "false" }, + { "migration", "x-preempt-pre-7-2", "true" }, + + { TYPE_PCI_DEVICE, "x-pcie-err-unc-mask", "off" }, + }; + const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2); + +And now, when qemu-8.0.1 is released with this fix, all combinations +are going to work as supposed. + +- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2 (works) +- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 (works) +- $ qemu-8.0.1 -M pc-7.2 -> qemu-7.2 -M pc-7.2 (works) +- $ qemu-7.2 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 (works) + +So the normality has been restored and everything is ok, no? + +Not really, now our matrix is much bigger. We started with the easy +cases, migration from the same version to the same version always +works: + +- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2 +- $ qemu-8.0 -M pc-7.2 -> qemu-8.0 -M pc-7.2 +- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 + +Now the interesting ones. When the QEMU processes versions are +different. For the 1st set, their fail and we can do nothing, both +versions are released and we can't change anything. + +- $ qemu-7.2 -M pc-7.2 -> qemu-8.0 -M pc-7.2 +- $ qemu-8.0 -M pc-7.2 -> qemu-7.2 -M pc-7.2 + +This two are the ones that work. 
The whole point of making the +change in qemu-8.0.1 release was to fix this issue: + +- $ qemu-7.2 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 +- $ qemu-8.0.1 -M pc-7.2 -> qemu-7.2 -M pc-7.2 + +But now we found that qemu-8.0 neither can migrate to qemu-7.2 not +qemu-8.0.1. + +- $ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 +- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0 -M pc-7.2 + +So, if we start a pc-7.2 machine in qemu-8.0 we can't migrate it to +anything except to qemu-8.0. + +Can we do better? + +Yeap. If we know that we are going to do this migration: + +- $ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 + +We can launch the appropriate devices with:: + + --device...,x-pci-e-err-unc-mask=on + +And now we can receive a migration from 8.0. And from now on, we can +do that migration to new machine types if we remember to enable that +property for pc-7.2. Notice that we need to remember, it is not +enough to know that the source of the migration is qemu-8.0. Think of +this example: + +$ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 -> qemu-8.2 -M pc-7.2 + +In the second migration, the source is not qemu-8.0, but we still have +that "problem" and have that property enabled. Notice that we need to +continue having this mark/property until we have this machine +rebooted. But it is not a normal reboot (that don't reload QEMU) we +need the machine to poweroff/poweron on a fixed QEMU. And from now +on we can use the proper real machine. diff --git a/docs/devel/migration/dirty-limit.rst b/docs/devel/migration/dirty-limit.rst new file mode 100644 index 00000000000..8f32329d5fd --- /dev/null +++ b/docs/devel/migration/dirty-limit.rst @@ -0,0 +1,71 @@ +Dirty limit +=========== + +The dirty limit, short for dirty page rate upper limit, is a new capability +introduced in the 8.1 QEMU release that uses a new algorithm based on the KVM +dirty ring to throttle down the guest during live migration. + +The algorithm framework is as follows: + +:: + + ------------------------------------------------------------------------------ + main --------------> throttle thread ------------> PREPARE(1) <-------- + thread \ | | + \ | | + \ V | + -\ CALCULATE(2) | + \ | | + \ | | + \ V | + \ SET PENALTY(3) ----- + -\ | + \ | + \ V + -> virtual CPU thread -------> ACCEPT PENALTY(4) + ------------------------------------------------------------------------------ + +When the qmp command qmp_set_vcpu_dirty_limit is called for the first time, +the QEMU main thread starts the throttle thread. The throttle thread, once +launched, executes the loop, which consists of three steps: + + - PREPARE (1) + + The entire work of PREPARE (1) is preparation for the second stage, + CALCULATE(2), as the name implies. It involves preparing the dirty + page rate value and the corresponding upper limit of the VM: + The dirty page rate is calculated via the KVM dirty ring mechanism, + which tells QEMU how many dirty pages a virtual CPU has had since the + last KVM_EXIT_DIRTY_RING_FULL exception; The dirty page rate upper + limit is specified by caller, therefore fetch it directly. + + - CALCULATE (2) + + Calculate a suitable sleep period for each virtual CPU, which will be + used to determine the penalty for the target virtual CPU. The + computation must be done carefully in order to reduce the dirty page + rate progressively down to the upper limit without oscillation. 
To + achieve this, two strategies are provided: the first is to add or + subtract sleep time based on the ratio of the current dirty page rate + to the limit, which is used when the current dirty page rate is far + from the limit; the second is to add or subtract a fixed time when + the current dirty page rate is close to the limit. + + - SET PENALTY (3) + + Set the sleep time for each virtual CPU that should be penalized based + on the results of the calculation supplied by step CALCULATE (2). + +After completing the three above stages, the throttle thread loops back +to step PREPARE (1) until the dirty limit is reached. + +On the other hand, each virtual CPU thread reads the sleep duration and +sleeps in the path of the KVM_EXIT_DIRTY_RING_FULL exception handler, that +is ACCEPT PENALTY (4). Virtual CPUs tied with writing processes will +obviously exit to the path and get penalized, whereas virtual CPUs involved +with read processes will not. + +In summary, thanks to the KVM dirty ring technology, the dirty limit +algorithm will restrict virtual CPUs as needed to keep their dirty page +rate inside the limit. This leads to more steady reading performance during +live migration and can aid in improving large guest responsiveness. diff --git a/docs/devel/migration/features.rst b/docs/devel/migration/features.rst new file mode 100644 index 00000000000..d5ca7b86d5d --- /dev/null +++ b/docs/devel/migration/features.rst @@ -0,0 +1,14 @@ +Migration features +================== + +Migration has plenty of features to support different use cases. + +.. toctree:: + :maxdepth: 2 + + postcopy + dirty-limit + vfio + virtio + mapped-ram + CPR diff --git a/docs/devel/migration/index.rst b/docs/devel/migration/index.rst new file mode 100644 index 00000000000..2aa294d6314 --- /dev/null +++ b/docs/devel/migration/index.rst @@ -0,0 +1,13 @@ +Migration +========= + +This is the main entry for QEMU migration documentations. It explains how +QEMU live migration works. + +.. toctree:: + :maxdepth: 2 + + main + features + compatibility + best-practices diff --git a/docs/devel/migration/main.rst b/docs/devel/migration/main.rst new file mode 100644 index 00000000000..54385a23e51 --- /dev/null +++ b/docs/devel/migration/main.rst @@ -0,0 +1,582 @@ +=================== +Migration framework +=================== + +QEMU has code to load/save the state of the guest that it is running. +These are two complementary operations. Saving the state just does +that, saves the state for each device that the guest is running. +Restoring a guest is just the opposite operation: we need to load the +state of each device. + +For this to work, QEMU has to be launched with the same arguments the +two times. I.e. it can only restore the state in one guest that has +the same devices that the one it was saved (this last requirement can +be relaxed a bit, but for now we can consider that configuration has +to be exactly the same). + +Once that we are able to save/restore a guest, a new functionality is +requested: migration. This means that QEMU is able to start in one +machine and being "migrated" to another machine. I.e. being moved to +another machine. + +Next was the "live migration" functionality. This is important +because some guests run with a lot of state (specially RAM), and it +can take a while to move all state from one machine to another. Live +migration allows the guest to continue running while the state is +transferred. Only while the last part of the state is transferred has +the guest to be stopped. 
Typically the time that the guest is +unresponsive during live migration is the low hundred of milliseconds +(notice that this depends on a lot of things). + +.. contents:: + +Transports +========== + +The migration stream is normally just a byte stream that can be passed +over any transport. + +- tcp migration: do the migration using tcp sockets +- unix migration: do the migration using unix sockets +- exec migration: do the migration using the stdin/stdout through a process. +- fd migration: do the migration using a file descriptor that is + passed to QEMU. QEMU doesn't care how this file descriptor is opened. +- file migration: do the migration using a file that is passed to QEMU + by path. A file offset option is supported to allow a management + application to add its own metadata to the start of the file without + QEMU interference. Note that QEMU does not flush cached file + data/metadata at the end of migration. + +In addition, support is included for migration using RDMA, which +transports the page data using ``RDMA``, where the hardware takes care of +transporting the pages, and the load on the CPU is much lower. While the +internals of RDMA migration are a bit different, this isn't really visible +outside the RAM migration code. + +All these migration protocols use the same infrastructure to +save/restore state devices. This infrastructure is shared with the +savevm/loadvm functionality. + +Common infrastructure +===================== + +The files, sockets or fd's that carry the migration stream are abstracted by +the ``QEMUFile`` type (see ``migration/qemu-file.h``). In most cases this +is connected to a subtype of ``QIOChannel`` (see ``io/``). + + +Saving the state of one device +============================== + +For most devices, the state is saved in a single call to the migration +infrastructure; these are *non-iterative* devices. The data for these +devices is sent at the end of precopy migration, when the CPUs are paused. +There are also *iterative* devices, which contain a very large amount of +data (e.g. RAM or large tables). See the iterative device section below. + +General advice for device developers +------------------------------------ + +- The migration state saved should reflect the device being modelled rather + than the way your implementation works. That way if you change the implementation + later the migration stream will stay compatible. That model may include + internal state that's not directly visible in a register. + +- When saving a migration stream the device code may walk and check + the state of the device. These checks might fail in various ways (e.g. + discovering internal state is corrupt or that the guest has done something bad). + Consider carefully before asserting/aborting at this point, since the + normal response from users is that *migration broke their VM* since it had + apparently been running fine until then. In these error cases, the device + should log a message indicating the cause of error, and should consider + putting the device into an error state, allowing the rest of the VM to + continue execution. + +- The migration might happen at an inconvenient point, + e.g. right in the middle of the guest reprogramming the device, during + guest reboot or shutdown or while the device is waiting for external IO. + It's strongly preferred that migrations do not fail in this situation, + since in the cloud environment migrations might happen automatically to + VMs that the administrator doesn't directly control. 
+ +- If you do need to fail a migration, ensure that sufficient information + is logged to identify what went wrong. + +- The destination should treat an incoming migration stream as hostile + (which we do to varying degrees in the existing code). Check that offsets + into buffers and the like can't cause overruns. Fail the incoming migration + in the case of a corrupted stream like this. + +- Take care with internal device state or behaviour that might become + migration version dependent. For example, the order of PCI capabilities + is required to stay constant across migration. Another example would + be that a special case handled by subsections (see below) might become + much more common if a default behaviour is changed. + +- The state of the source should not be changed or destroyed by the + outgoing migration. Migrations timing out or being failed by + higher levels of management, or failures of the destination host are + not unusual, and in that case the VM is restarted on the source. + Note that the management layer can validly revert the migration + even though the QEMU level of migration has succeeded as long as it + does it before starting execution on the destination. + +- Buses and devices should be able to explicitly specify addresses when + instantiated, and management tools should use those. For example, + when hot adding USB devices it's important to specify the ports + and addresses, since implicit ordering based on the command line order + may be different on the destination. This can result in the + device state being loaded into the wrong device. + +VMState +------- + +Most device data can be described using the ``VMSTATE`` macros (mostly defined +in ``include/migration/vmstate.h``). + +An example (from hw/input/pckbd.c) + +.. code:: c + + static const VMStateDescription vmstate_kbd = { + .name = "pckbd", + .version_id = 3, + .minimum_version_id = 3, + .fields = (const VMStateField[]) { + VMSTATE_UINT8(write_cmd, KBDState), + VMSTATE_UINT8(status, KBDState), + VMSTATE_UINT8(mode, KBDState), + VMSTATE_UINT8(pending, KBDState), + VMSTATE_END_OF_LIST() + } + }; + +We are declaring the state with name "pckbd". The ``version_id`` is +3, and there are 4 uint8_t fields in the KBDState structure. We +registered this ``VMSTATEDescription`` with one of the following +functions. The first one will generate a device ``instance_id`` +different for each registration. Use the second one if you already +have an id that is different for each instance of the device: + +.. code:: c + + vmstate_register_any(NULL, &vmstate_kbd, s); + vmstate_register(NULL, instance_id, &vmstate_kbd, s); + +For devices that are ``qdev`` based, we can register the device in the class +init function: + +.. code:: c + + dc->vmsd = &vmstate_kbd_isa; + +The VMState macros take care of ensuring that the device data section +is formatted portably (normally big endian) and make some compile time checks +against the types of the fields in the structures. + +VMState macros can include other VMStateDescriptions to store substructures +(see ``VMSTATE_STRUCT_``), arrays (``VMSTATE_ARRAY_``) and variable length +arrays (``VMSTATE_VARRAY_``). Various other macros exist for special +cases. + +Note that the format on the wire is still very raw; i.e. a VMSTATE_UINT32 +ends up with a 4 byte bigendian representation on the wire; in the future +it might be possible to use a more structured format. 
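+
+As an illustration of the composite macros, a hypothetical device that
+carries a register array and an embedded sub-structure could be described
+as below. This is only a sketch: ``MyDeviceState``, ``MyTimerState``,
+``vmstate_my_timer`` and the field names are invented for the example.
+
+.. code:: c
+
+  static const VMStateDescription vmstate_my_device = {
+      .name = "my-device",
+      .version_id = 1,
+      .minimum_version_id = 1,
+      .fields = (const VMStateField[]) {
+          /* a plain scalar field */
+          VMSTATE_UINT32(ctrl, MyDeviceState),
+          /* a fixed-size array of eight uint32_t registers */
+          VMSTATE_UINT32_ARRAY(regs, MyDeviceState, 8),
+          /* an embedded struct described by its own VMStateDescription */
+          VMSTATE_STRUCT(timer, MyDeviceState, 1,
+                         vmstate_my_timer, MyTimerState),
+          VMSTATE_END_OF_LIST()
+      }
+  };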
+ +Legacy way +---------- + +This way is going to disappear as soon as all current users are ported to VMSTATE; +although converting existing code can be tricky, and thus 'soon' is relative. + +Each device has to register two functions, one to save the state and +another to load the state back. + +.. code:: c + + int register_savevm_live(const char *idstr, + int instance_id, + int version_id, + SaveVMHandlers *ops, + void *opaque); + +Two functions in the ``ops`` structure are the ``save_state`` +and ``load_state`` functions. Notice that ``load_state`` receives a version_id +parameter to know what state format is receiving. ``save_state`` doesn't +have a version_id parameter because it always uses the latest version. + +Note that because the VMState macros still save the data in a raw +format, in many cases it's possible to replace legacy code +with a carefully constructed VMState description that matches the +byte layout of the existing code. + +Changing migration data structures +---------------------------------- + +When we migrate a device, we save/load the state as a series +of fields. Sometimes, due to bugs or new functionality, we need to +change the state to store more/different information. Changing the migration +state saved for a device can break migration compatibility unless +care is taken to use the appropriate techniques. In general QEMU tries +to maintain forward migration compatibility (i.e. migrating from +QEMU n->n+1) and there are users who benefit from backward compatibility +as well. + +Subsections +----------- + +The most common structure change is adding new data, e.g. when adding +a newer form of device, or adding that state that you previously +forgot to migrate. This is best solved using a subsection. + +A subsection is "like" a device vmstate, but with a particularity, it +has a Boolean function that tells if that values are needed to be sent +or not. If this functions returns false, the subsection is not sent. +Subsections have a unique name, that is looked for on the receiving +side. + +On the receiving side, if we found a subsection for a device that we +don't understand, we just fail the migration. If we understand all +the subsections, then we load the state with success. There's no check +that a subsection is loaded, so a newer QEMU that knows about a subsection +can (with care) load a stream from an older QEMU that didn't send +the subsection. + +If the new data is only needed in a rare case, then the subsection +can be made conditional on that case and the migration will still +succeed to older QEMUs in most cases. This is OK for data that's +critical, but in some use cases it's preferred that the migration +should succeed even with the data missing. To support this the +subsection can be connected to a device property and from there +to a versioned machine type. + +The 'pre_load' and 'post_load' functions on subsections are only +called if the subsection is loaded. + +One important note is that the outer post_load() function is called "after" +loading all subsections, because a newer subsection could change the same +value that it uses. A flag, and the combination of outer pre_load and +post_load can be used to detect whether a subsection was loaded, and to +fall back on default behaviour when the subsection isn't present. + +Example: + +.. 
code:: c + + static bool ide_drive_pio_state_needed(void *opaque) + { + IDEState *s = opaque; + + return ((s->status & DRQ_STAT) != 0) + || (s->bus->error_status & BM_STATUS_PIO_RETRY); + } + + const VMStateDescription vmstate_ide_drive_pio_state = { + .name = "ide_drive/pio_state", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = ide_drive_pio_pre_save, + .post_load = ide_drive_pio_post_load, + .needed = ide_drive_pio_state_needed, + .fields = (const VMStateField[]) { + VMSTATE_INT32(req_nb_sectors, IDEState), + VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1, + vmstate_info_uint8, uint8_t), + VMSTATE_INT32(cur_io_buffer_offset, IDEState), + VMSTATE_INT32(cur_io_buffer_len, IDEState), + VMSTATE_UINT8(end_transfer_fn_idx, IDEState), + VMSTATE_INT32(elementary_transfer_size, IDEState), + VMSTATE_INT32(packet_transfer_size, IDEState), + VMSTATE_END_OF_LIST() + } + }; + + const VMStateDescription vmstate_ide_drive = { + .name = "ide_drive", + .version_id = 3, + .minimum_version_id = 0, + .post_load = ide_drive_post_load, + .fields = (const VMStateField[]) { + .... several fields .... + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * const []) { + &vmstate_ide_drive_pio_state, + NULL + } + }; + +Here we have a subsection for the pio state. We only need to +save/send this state when we are in the middle of a pio operation +(that is what ``ide_drive_pio_state_needed()`` checks). If DRQ_STAT is +not enabled, the values on that fields are garbage and don't need to +be sent. + +Connecting subsections to properties +------------------------------------ + +Using a condition function that checks a 'property' to determine whether +to send a subsection allows backward migration compatibility when +new subsections are added, especially when combined with versioned +machine types. + +For example: + + a) Add a new property using ``DEFINE_PROP_BOOL`` - e.g. support-foo and + default it to true. + b) Add an entry to the ``hw_compat_`` for the previous version that sets + the property to false. + c) Add a static bool support_foo function that tests the property. + d) Add a subsection with a .needed set to the support_foo function + e) (potentially) Add an outer pre_load that sets up a default value + for 'foo' to be used if the subsection isn't loaded. + +Now that subsection will not be generated when using an older +machine type and the migration stream will be accepted by older +QEMU versions. + +Not sending existing elements +----------------------------- + +Sometimes members of the VMState are no longer needed: + + - removing them will break migration compatibility + + - making them version dependent and bumping the version will break backward migration + compatibility. + +Adding a dummy field into the migration stream is normally the best way to preserve +compatibility. + +If the field really does need to be removed then: + + a) Add a new property/compatibility/function in the same way for subsections above. + b) replace the VMSTATE macro with the _TEST version of the macro, e.g.: + + ``VMSTATE_UINT32(foo, barstruct)`` + + becomes + + ``VMSTATE_UINT32_TEST(foo, barstruct, pre_version_baz)`` + + Sometime in the future when we no longer care about the ancient versions these can be killed off. + Note that for backward compatibility it's important to fill in the structure with + data that the destination will understand. 
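+
+Putting those steps together, the pattern looks roughly like the sketch
+below, reusing the ``foo``/``barstruct``/``pre_version_baz`` names from
+the example above; the ``x-migrate-foo`` property name and the
+``migrate_foo`` field are invented for illustration:
+
+.. code:: c
+
+  /* a) a property controlling whether the field is still migrated.
+   * The default is the new behaviour; an hw_compat_* entry turns it
+   * back on for older machine types so they keep sending the field. */
+  DEFINE_PROP_BOOL("x-migrate-foo", barstruct, migrate_foo, false),
+
+  /* the predicate consulted by the _TEST macro */
+  static bool pre_version_baz(void *opaque, int version_id)
+  {
+      barstruct *s = opaque;
+
+      return s->migrate_foo;
+  }
+
+  /* b) the field is only put on the wire when the predicate says so */
+  VMSTATE_UINT32_TEST(foo, barstruct, pre_version_baz),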
+ +Any difference in the predicates on the source and destination will end up +with different fields being enabled and data being loaded into the wrong +fields; for this reason conditional fields like this are very fragile. + +Versions +-------- + +Version numbers are intended for major incompatible changes to the +migration of a device, and using them breaks backward-migration +compatibility; in general most changes can be made by adding Subsections +(see above) or _TEST macros (see above) which won't break compatibility. + +Each version is associated with a series of fields saved. The ``save_state`` always saves +the state as the newer version. But ``load_state`` sometimes is able to +load state from an older version. + +You can see that there are two version fields: + +- ``version_id``: the maximum version_id supported by VMState for that device. +- ``minimum_version_id``: the minimum version_id that VMState is able to understand + for that device. + +VMState is able to read versions from minimum_version_id to version_id. + +There are *_V* forms of many ``VMSTATE_`` macros to load fields for version dependent fields, +e.g. + +.. code:: c + + VMSTATE_UINT16_V(ip_id, Slirp, 2), + +only loads that field for versions 2 and newer. + +Saving state will always create a section with the 'version_id' value +and thus can't be loaded by any older QEMU. + +Massaging functions +------------------- + +Sometimes, it is not enough to be able to save the state directly +from one structure, we need to fill the correct values there. One +example is when we are using kvm. Before saving the cpu state, we +need to ask kvm to copy to QEMU the state that it is using. And the +opposite when we are loading the state, we need a way to tell kvm to +load the state for the cpu that we have just loaded from the QEMUFile. + +The functions to do that are inside a vmstate definition, and are called: + +- ``int (*pre_load)(void *opaque);`` + + This function is called before we load the state of one device. + +- ``int (*post_load)(void *opaque, int version_id);`` + + This function is called after we load the state of one device. + +- ``int (*pre_save)(void *opaque);`` + + This function is called before we save the state of one device. + +- ``int (*post_save)(void *opaque);`` + + This function is called after we save the state of one device + (even upon failure, unless the call to pre_save returned an error). + +Example: You can look at hpet.c, that uses the first three functions +to massage the state that is transferred. + +The ``VMSTATE_WITH_TMP`` macro may be useful when the migration +data doesn't match the stored device data well; it allows an +intermediate temporary structure to be populated with migration +data and then transferred to the main structure. + +If you use memory or portio_list API functions that update memory layout outside +initialization (i.e., in response to a guest action), this is a strong +indication that you need to call these functions in a ``post_load`` callback. 
+Examples of such API functions are: + + - memory_region_add_subregion() + - memory_region_del_subregion() + - memory_region_set_readonly() + - memory_region_set_nonvolatile() + - memory_region_set_enabled() + - memory_region_set_address() + - memory_region_set_alias_offset() + - portio_list_set_address() + - portio_list_set_enabled() + +Iterative device migration +-------------------------- + +Some devices, such as RAM, Block storage or certain platform devices, +have large amounts of data that would mean that the CPUs would be +paused for too long if they were sent in one section. For these +devices an *iterative* approach is taken. + +The iterative devices generally don't use VMState macros +(although it may be possible in some cases) and instead use +qemu_put_*/qemu_get_* macros to read/write data to the stream. Specialist +versions exist for high bandwidth IO. + + +An iterative device must provide: + + - A ``save_setup`` function that initialises the data structures and + transmits a first section containing information on the device. In the + case of RAM this transmits a list of RAMBlocks and sizes. + + - A ``load_setup`` function that initialises the data structures on the + destination. + + - A ``state_pending_exact`` function that indicates how much more + data we must save. The core migration code will use this to + determine when to pause the CPUs and complete the migration. + + - A ``state_pending_estimate`` function that indicates how much more + data we must save. When the estimated amount is smaller than the + threshold, we call ``state_pending_exact``. + + - A ``save_live_iterate`` function should send a chunk of data until + the point that stream bandwidth limits tell it to stop. Each call + generates one section. + + - A ``save_live_complete_precopy`` function that must transmit the + last section for the device containing any remaining data. + + - A ``load_state`` function used to load sections generated by + any of the save functions that generate sections. + + - ``cleanup`` functions for both save and load that are called + at the end of migration. + +Note that the contents of the sections for iterative migration tend +to be open-coded by the devices; care should be taken in parsing +the results and structuring the stream to make them easy to validate. + +Device ordering +--------------- + +There are cases in which the ordering of device loading matters; for +example in some systems where a device may assert an interrupt during loading, +if the interrupt controller is loaded later then it might lose the state. + +Some ordering is implicitly provided by the order in which the machine +definition creates devices, however this is somewhat fragile. + +The ``MigrationPriority`` enum provides a means of explicitly enforcing +ordering. Numerically higher priorities are loaded earlier. +The priority is set by setting the ``priority`` field of the top level +``VMStateDescription`` for the device. + +Stream structure +================ + +The stream tries to be word and endian agnostic, allowing migration between hosts +of different characteristics running the same VM. + + - Header + + - Magic + - Version + - VM configuration section + + - Machine type + - Target page bits + - List of sections + Each section contains a device, or one iteration of a device save. 
+ + - section type + - section id + - ID string (First section of each device) + - instance id (First section of each device) + - version id (First section of each device) + - + - Footer mark + - EOF mark + - VM Description structure + Consisting of a JSON description of the contents for analysis only + +The ``device data`` in each section consists of the data produced +by the code described above. For non-iterative devices they have a single +section; iterative devices have an initial and last section and a set +of parts in between. +Note that there is very little checking by the common code of the integrity +of the ``device data`` contents, that's up to the devices themselves. +The ``footer mark`` provides a little bit of protection for the case where +the receiving side reads more or less data than expected. + +The ``ID string`` is normally unique, having been formed from a bus name +and device address, PCI devices and storage devices hung off PCI controllers +fit this pattern well. Some devices are fixed single instances (e.g. "pc-ram"). +Others (especially either older devices or system devices which for +some reason don't have a bus concept) make use of the ``instance id`` +for otherwise identically named devices. + +Return path +----------- + +Only a unidirectional stream is required for normal migration, however a +``return path`` can be created when bidirectional communication is desired. +This is primarily used by postcopy, but is also used to return a success +flag to the source at the end of migration. + +``qemu_file_get_return_path(QEMUFile* fwdpath)`` gives the QEMUFile* for the return +path. + + Source side + + Forward path - written by migration thread + Return path - opened by main thread, read by return-path thread + + Destination side + + Forward path - read by main thread + Return path - opened by main thread, written by main thread AND postcopy + thread (protected by rp_mutex) + diff --git a/docs/devel/migration/mapped-ram.rst b/docs/devel/migration/mapped-ram.rst new file mode 100644 index 00000000000..fa4cefd9fcf --- /dev/null +++ b/docs/devel/migration/mapped-ram.rst @@ -0,0 +1,138 @@ +Mapped-ram +========== + +Mapped-ram is a new stream format for the RAM section designed to +supplement the existing ``file:`` migration and make it compatible +with ``multifd``. This enables parallel migration of a guest's RAM to +a file. + +The core of the feature is to ensure that RAM pages are mapped +directly to offsets in the resulting migration file. This enables the +``multifd`` threads to write exclusively to those offsets even if the +guest is constantly dirtying pages (i.e. live migration). Another +benefit is that the resulting file will have a bounded size, since +pages which are dirtied multiple times will always go to a fixed +location in the file, rather than constantly being added to a +sequential stream. Having the pages at fixed offsets also allows the +usage of O_DIRECT for save/restore of the migration stream as the +pages are ensured to be written respecting O_DIRECT alignment +restrictions (direct-io support not yet implemented). + +Usage +----- + +On both source and destination, enable the ``multifd`` and +``mapped-ram`` capabilities: + + ``migrate_set_capability multifd on`` + + ``migrate_set_capability mapped-ram on`` + +Use a ``file:`` URL for migration: + + ``migrate file:/path/to/migration/file`` + +Mapped-ram migration is best done non-live, i.e. by stopping the VM on +the source side before migrating. 
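+
+For a management application driving QMP directly, an equivalent
+sequence might look like the following sketch (the file path is only
+an example)::
+
+  { "execute": "migrate-set-capabilities",
+    "arguments": { "capabilities": [
+      { "capability": "multifd",    "state": true },
+      { "capability": "mapped-ram", "state": true } ] } }
+
+  { "execute": "migrate",
+    "arguments": { "uri": "file:/path/to/migration/file" } }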
+ +Use-cases +--------- + +The mapped-ram feature was designed for use cases where the migration +stream will be directed to a file in the filesystem and not +immediately restored on the destination VM [#]_. These could be +thought of as snapshots. We can further categorize them into live and +non-live. + +- Non-live snapshot + +If the use case requires a VM to be stopped before taking a snapshot, +that's the ideal scenario for mapped-ram migration. Not having to +track dirty pages, the migration will write the RAM pages to the disk +as fast as it can. + +Note: if a snapshot is taken of a running VM, but the VM will be +stopped after the snapshot by the admin, then consider stopping it +right before the snapshot to take benefit of the performance gains +mentioned above. + +- Live snapshot + +If the use case requires that the VM keeps running during and after +the snapshot operation, then mapped-ram migration can still be used, +but will be less performant. Other strategies such as +background-snapshot should be evaluated as well. One benefit of +mapped-ram in this scenario is portability since background-snapshot +depends on async dirty tracking (KVM_GET_DIRTY_LOG) which is not +supported outside of Linux. + +.. [#] While this same effect could be obtained with the usage of + snapshots or the ``file:`` migration alone, mapped-ram provides + a performance increase for VMs with larger RAM sizes (10s to + 100s of GiBs), specially if the VM has been stopped beforehand. + +RAM section format +------------------ + +Instead of having a sequential stream of pages that follow the +RAMBlock headers, the dirty pages for a RAMBlock follow its header +instead. This ensures that each RAM page has a fixed offset in the +resulting migration file. + +A bitmap is introduced to track which pages have been written in the +migration file. Pages are written at a fixed location for every +ramblock. Zero pages are ignored as they'd be zero in the destination +migration as well. + +:: + + Without mapped-ram: With mapped-ram: + + --------------------- -------------------------------- + | ramblock 1 header | | ramblock 1 header | + --------------------- -------------------------------- + | ramblock 2 header | | ramblock 1 mapped-ram header | + --------------------- -------------------------------- + | ... | | padding to next 1MB boundary | + --------------------- | ... | + | ramblock n header | -------------------------------- + --------------------- | ramblock 1 pages | + | RAM_SAVE_FLAG_EOS | | ... | + --------------------- -------------------------------- + | stream of pages | | ramblock 2 header | + | (iter 1) | -------------------------------- + | ... | | ramblock 2 mapped-ram header | + --------------------- -------------------------------- + | RAM_SAVE_FLAG_EOS | | padding to next 1MB boundary | + --------------------- | ... | + | stream of pages | -------------------------------- + | (iter 2) | | ramblock 2 pages | + | ... | | ... | + --------------------- -------------------------------- + | ... | | ... | + --------------------- -------------------------------- + | RAM_SAVE_FLAG_EOS | + -------------------------------- + | ... | + -------------------------------- + +where: + - ramblock header: the generic information for a ramblock, such as + idstr, used_len, etc. + + - ramblock mapped-ram header: the information added by this feature: + bitmap of pages written, bitmap size and offset of pages in the + migration file. 
+ +Restrictions +------------ + +Since pages are written to their relative offsets and out of order +(due to the memory dirtying patterns), streaming channels such as +sockets are not supported. A seekable channel such as a file is +required. This can be verified in the QIOChannel by the presence of +the QIO_CHANNEL_FEATURE_SEEKABLE. + +The improvements brought by this feature apply only to guest physical +RAM. Other types of memory such as VRAM are migrated as part of device +states. diff --git a/docs/devel/migration/postcopy.rst b/docs/devel/migration/postcopy.rst new file mode 100644 index 00000000000..6c51e96d798 --- /dev/null +++ b/docs/devel/migration/postcopy.rst @@ -0,0 +1,313 @@ +======== +Postcopy +======== + +.. contents:: + +'Postcopy' migration is a way to deal with migrations that refuse to converge +(or take too long to converge) its plus side is that there is an upper bound on +the amount of migration traffic and time it takes, the down side is that during +the postcopy phase, a failure of *either* side causes the guest to be lost. + +In postcopy the destination CPUs are started before all the memory has been +transferred, and accesses to pages that are yet to be transferred cause +a fault that's translated by QEMU into a request to the source QEMU. + +Postcopy can be combined with precopy (i.e. normal migration) so that if precopy +doesn't finish in a given time the switch is made to postcopy. + +Enabling postcopy +================= + +To enable postcopy, issue this command on the monitor (both source and +destination) prior to the start of migration: + +``migrate_set_capability postcopy-ram on`` + +The normal commands are then used to start a migration, which is still +started in precopy mode. Issuing: + +``migrate_start_postcopy`` + +will now cause the transition from precopy to postcopy. +It can be issued immediately after migration is started or any +time later on. Issuing it after the end of a migration is harmless. + +Blocktime is a postcopy live migration metric, intended to show how +long the vCPU was in state of interruptible sleep due to pagefault. +That metric is calculated both for all vCPUs as overlapped value, and +separately for each vCPU. These values are calculated on destination +side. To enable postcopy blocktime calculation, enter following +command on destination monitor: + +``migrate_set_capability postcopy-blocktime on`` + +Postcopy blocktime can be retrieved by query-migrate qmp command. +postcopy-blocktime value of qmp command will show overlapped blocking +time for all vCPU, postcopy-vcpu-blocktime will show list of blocking +time per vCPU. + +.. note:: + During the postcopy phase, the bandwidth limits set using + ``migrate_set_parameter`` is ignored (to avoid delaying requested pages that + the destination is waiting for). + +Postcopy internals +================== + +State machine +------------- + +Postcopy moves through a series of states (see postcopy_state) from +ADVISE->DISCARD->LISTEN->RUNNING->END + + - Advise + + Set at the start of migration if postcopy is enabled, even + if it hasn't had the start command; here the destination + checks that its OS has the support needed for postcopy, and performs + setup to ensure the RAM mappings are suitable for later postcopy. + The destination will fail early in migration at this point if the + required OS support is not present. 
+ (Triggered by reception of POSTCOPY_ADVISE command) + + - Discard + + Entered on receipt of the first 'discard' command; prior to + the first Discard being performed, hugepages are switched off + (using madvise) to ensure that no new huge pages are created + during the postcopy phase, and to cause any huge pages that + have discards on them to be broken. + + - Listen + + The first command in the package, POSTCOPY_LISTEN, switches + the destination state to Listen, and starts a new thread + (the 'listen thread') which takes over the job of receiving + pages off the migration stream, while the main thread carries + on processing the blob. With this thread able to process page + reception, the destination now 'sensitises' the RAM to detect + any access to missing pages (on Linux using the 'userfault' + system). + + - Running + + POSTCOPY_RUN causes the destination to synchronise all + state and start the CPUs and IO devices running. The main + thread now finishes processing the migration package and + now carries on as it would for normal precopy migration + (although it can't do the cleanup it would do as it + finishes a normal migration). + + - Paused + + Postcopy can run into a paused state (normally on both sides when + happens), where all threads will be temporarily halted mostly due to + network errors. When reaching paused state, migration will make sure + the qemu binary on both sides maintain the data without corrupting + the VM. To continue the migration, the admin needs to fix the + migration channel using the QMP command 'migrate-recover' on the + destination node, then resume the migration using QMP command 'migrate' + again on source node, with resume=true flag set. + + - End + + The listen thread can now quit, and perform the cleanup of migration + state, the migration is now complete. + +Device transfer +--------------- + +Loading of device data may cause the device emulation to access guest RAM +that may trigger faults that have to be resolved by the source, as such +the migration stream has to be able to respond with page data *during* the +device load, and hence the device data has to be read from the stream completely +before the device load begins to free the stream up. This is achieved by +'packaging' the device data into a blob that's read in one go. + +Source behaviour +---------------- + +Until postcopy is entered the migration stream is identical to normal +precopy, except for the addition of a 'postcopy advise' command at +the beginning, to tell the destination that postcopy might happen. +When postcopy starts the source sends the page discard data and then +forms the 'package' containing: + + - Command: 'postcopy listen' + - The device state + + A series of sections, identical to the precopy streams device state stream + containing everything except postcopiable devices (i.e. RAM) + - Command: 'postcopy run' + +The 'package' is sent as the data part of a Command: ``CMD_PACKAGED``, and the +contents are formatted in the same way as the main migration stream. + +During postcopy the source scans the list of dirty pages and sends them +to the destination without being requested (in much the same way as precopy), +however when a page request is received from the destination, the dirty page +scanning restarts from the requested location. This causes requested pages +to be sent quickly, and also causes pages directly after the requested page +to be sent quickly in the hope that those pages are likely to be used +by the destination soon. 
+ +Destination behaviour +--------------------- + +Initially the destination looks the same as precopy, with a single thread +reading the migration stream; the 'postcopy advise' and 'discard' commands +are processed to change the way RAM is managed, but don't affect the stream +processing. + +:: + + ------------------------------------------------------------------------------ + 1 2 3 4 5 6 7 + main -----DISCARD-CMD_PACKAGED ( LISTEN DEVICE DEVICE DEVICE RUN ) + thread | | + | (page request) + | \___ + v \ + listen thread: --- page -- page -- page -- page -- page -- + + a b c + ------------------------------------------------------------------------------ + +- On receipt of ``CMD_PACKAGED`` (1) + + All the data associated with the package - the ( ... ) section in the diagram - + is read into memory, and the main thread recurses into qemu_loadvm_state_main + to process the contents of the package (2) which contains commands (3,6) and + devices (4...) + +- On receipt of 'postcopy listen' - 3 -(i.e. the 1st command in the package) + + a new thread (a) is started that takes over servicing the migration stream, + while the main thread carries on loading the package. It loads normal + background page data (b) but if during a device load a fault happens (5) + the returned page (c) is loaded by the listen thread allowing the main + threads device load to carry on. + +- The last thing in the ``CMD_PACKAGED`` is a 'RUN' command (6) + + letting the destination CPUs start running. At the end of the + ``CMD_PACKAGED`` (7) the main thread returns to normal running behaviour and + is no longer used by migration, while the listen thread carries on servicing + page data until the end of migration. + +Source side page bitmap +----------------------- + +The 'migration bitmap' in postcopy is basically the same as in the precopy, +where each of the bit to indicate that page is 'dirty' - i.e. needs +sending. During the precopy phase this is updated as the CPU dirties +pages, however during postcopy the CPUs are stopped and nothing should +dirty anything any more. Instead, dirty bits are cleared when the relevant +pages are sent during postcopy. + +Postcopy features +================= + +Postcopy recovery +----------------- + +Comparing to precopy, postcopy is special on error handlings. When any +error happens (in this case, mostly network errors), QEMU cannot easily +fail a migration because VM data resides in both source and destination +QEMU instances. On the other hand, when issue happens QEMU on both sides +will go into a paused state. It'll need a recovery phase to continue a +paused postcopy migration. + +The recovery phase normally contains a few steps: + + - When network issue occurs, both QEMU will go into PAUSED state + + - When the network is recovered (or a new network is provided), the admin + can setup the new channel for migration using QMP command + 'migrate-recover' on destination node, preparing for a resume. + + - On source host, the admin can continue the interrupted postcopy + migration using QMP command 'migrate' with resume=true flag set. + + - After the connection is re-established, QEMU will continue the postcopy + migration on both sides. + +During a paused postcopy migration, the VM can logically still continue +running, and it will not be impacted from any page access to pages that +were already migrated to destination VM before the interruption happens. 
+However, if any of the missing pages got accessed on destination VM, the VM +thread will be halted waiting for the page to be migrated, it means it can +be halted until the recovery is complete. + +The impact of accessing missing pages can be relevant to different +configurations of the guest. For example, when with async page fault +enabled, logically the guest can proactively schedule out the threads +accessing missing pages. + +Postcopy with hugepages +----------------------- + +Postcopy now works with hugetlbfs backed memory: + + a) The linux kernel on the destination must support userfault on hugepages. + b) The huge-page configuration on the source and destination VMs must be + identical; i.e. RAMBlocks on both sides must use the same page size. + c) Note that ``-mem-path /dev/hugepages`` will fall back to allocating normal + RAM if it doesn't have enough hugepages, triggering (b) to fail. + Using ``-mem-prealloc`` enforces the allocation using hugepages. + d) Care should be taken with the size of hugepage used; postcopy with 2MB + hugepages works well, however 1GB hugepages are likely to be problematic + since it takes ~1 second to transfer a 1GB hugepage across a 10Gbps link, + and until the full page is transferred the destination thread is blocked. + +Postcopy with shared memory +--------------------------- + +Postcopy migration with shared memory needs explicit support from the other +processes that share memory and from QEMU. There are restrictions on the type of +memory that userfault can support shared. + +The Linux kernel userfault support works on ``/dev/shm`` memory and on ``hugetlbfs`` +(although the kernel doesn't provide an equivalent to ``madvise(MADV_DONTNEED)`` +for hugetlbfs which may be a problem in some configurations). + +The vhost-user code in QEMU supports clients that have Postcopy support, +and the ``vhost-user-bridge`` (in ``tests/``) and the DPDK package have changes +to support postcopy. + +The client needs to open a userfaultfd and register the areas +of memory that it maps with userfault. The client must then pass the +userfaultfd back to QEMU together with a mapping table that allows +fault addresses in the clients address space to be converted back to +RAMBlock/offsets. The client's userfaultfd is added to the postcopy +fault-thread and page requests are made on behalf of the client by QEMU. +QEMU performs 'wake' operations on the client's userfaultfd to allow it +to continue after a page has arrived. + +.. note:: + There are two future improvements that would be nice: + a) Some way to make QEMU ignorant of the addresses in the clients + address space + b) Avoiding the need for QEMU to perform ufd-wake calls after the + pages have arrived + +Retro-fitting postcopy to existing clients is possible: + a) A mechanism is needed for the registration with userfault as above, + and the registration needs to be coordinated with the phases of + postcopy. In vhost-user extra messages are added to the existing + control channel. + b) Any thread that can block due to guest memory accesses must be + identified and the implication understood; for example if the + guest memory access is made while holding a lock then all other + threads waiting for that lock will also be blocked. 
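+
+As a rough illustration of the registration step described above, the
+client side boils down to something like the sketch below (error
+handling is omitted, the region address and length are placeholders,
+and the mapping table plus fd passing are protocol specific, e.g.
+vhost-user messages):
+
+.. code:: c
+
+  #include <fcntl.h>
+  #include <stddef.h>
+  #include <stdint.h>
+  #include <sys/ioctl.h>
+  #include <sys/syscall.h>
+  #include <unistd.h>
+  #include <linux/userfaultfd.h>
+
+  static int register_region_with_userfault(void *addr, size_t len)
+  {
+      int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+      struct uffdio_api api = { .api = UFFD_API };
+      struct uffdio_register reg = {
+          .range = { .start = (uintptr_t)addr, .len = len },
+          .mode = UFFDIO_REGISTER_MODE_MISSING,
+      };
+
+      /* handshake with the kernel, then register the shared region */
+      ioctl(uffd, UFFDIO_API, &api);
+      ioctl(uffd, UFFDIO_REGISTER, &reg);
+
+      /* the fd is then handed to QEMU together with the mapping table */
+      return uffd;
+  }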
+ +Postcopy preemption mode +------------------------ + +Postcopy preempt is a new capability introduced in 8.0 QEMU release, it +allows urgent pages (those got page fault requested from destination QEMU +explicitly) to be sent in a separate preempt channel, rather than queued in +the background migration channel. Anyone who cares about latencies of page +faults during a postcopy migration should enable this feature. By default, +it's not enabled. diff --git a/docs/devel/vfio-migration.rst b/docs/devel/migration/vfio.rst similarity index 99% rename from docs/devel/vfio-migration.rst rename to docs/devel/migration/vfio.rst index 605fe60e969..c49482eab66 100644 --- a/docs/devel/vfio-migration.rst +++ b/docs/devel/migration/vfio.rst @@ -1,5 +1,5 @@ ===================== -VFIO device Migration +VFIO device migration ===================== Migration of virtual machine involves saving the state for each device that diff --git a/docs/devel/migration/virtio.rst b/docs/devel/migration/virtio.rst new file mode 100644 index 00000000000..611a18b8215 --- /dev/null +++ b/docs/devel/migration/virtio.rst @@ -0,0 +1,115 @@ +======================= +Virtio device migration +======================= + +Copyright 2015 IBM Corp. + +This work is licensed under the terms of the GNU GPL, version 2 or later. See +the COPYING file in the top-level directory. + +Saving and restoring the state of virtio devices is a bit of a twisty maze, +for several reasons: + +- state is distributed between several parts: + + - virtio core, for common fields like features, number of queues, ... + + - virtio transport (pci, ccw, ...), for the different proxy devices and + transport specific state (msix vectors, indicators, ...) + + - virtio device (net, blk, ...), for the different device types and their + state (mac address, request queue, ...) + +- most fields are saved via the stream interface; subsequently, subsections + have been added to make cross-version migration possible + +This file attempts to document the current procedure and point out some +caveats. 
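+
+Concretely, the "VMState wrapper on device class" that appears in the
+diagrams below is normally set up with the ``VMSTATE_VIRTIO_DEVICE``
+field, which hands control to the virtio core's ``virtio_save()`` and
+``virtio_load()``. A sketch of the usual pattern (the ``my-virtio-dev``
+name is illustrative):
+
+.. code:: c
+
+  static const VMStateDescription vmstate_my_virtio_dev = {
+      .name = "my-virtio-dev",
+      .version_id = 1,
+      .minimum_version_id = 1,
+      .fields = (const VMStateField[]) {
+          /* delegates the whole save/load to the virtio core */
+          VMSTATE_VIRTIO_DEVICE,
+          VMSTATE_END_OF_LIST()
+      },
+  };
+
+  /* in the device's class_init() */
+  dc->vmsd = &vmstate_my_virtio_dev;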
+ +Save state procedure +==================== + +:: + + virtio core virtio transport virtio device + ----------- ---------------- ------------- + + save() function registered + via VMState wrapper on + device class + virtio_save() <---------- + ------> save_config() + - save proxy device + - save transport-specific + device fields + - save common device + fields + - save common virtqueue + fields + ------> save_queue() + - save transport-specific + virtqueue fields + ------> save_device() + - save device-specific + fields + - save subsections + - device endianness, + if changed from + default endianness + - 64 bit features, if + any high feature bit + is set + - virtio-1 virtqueue + fields, if VERSION_1 + is set + +Load state procedure +==================== + +:: + + virtio core virtio transport virtio device + ----------- ---------------- ------------- + + load() function registered + via VMState wrapper on + device class + virtio_load() <---------- + ------> load_config() + - load proxy device + - load transport-specific + device fields + - load common device + fields + - load common virtqueue + fields + ------> load_queue() + - load transport-specific + virtqueue fields + - notify guest + ------> load_device() + - load device-specific + fields + - load subsections + - device endianness + - 64 bit features + - virtio-1 virtqueue + fields + - sanitize endianness + - sanitize features + - virtqueue index sanity + check + - feature-dependent setup + +Implications of this setup +========================== + +Devices need to be careful in their state processing during load: The +load_device() procedure is invoked by the core before subsections have +been loaded. Any code that depends on information transmitted in subsections +therefore has to be invoked in the device's load() function _after_ +virtio_load() returned (like e.g. code depending on features). + +Any extension of the state being migrated should be done in subsections +added to the core for compatibility reasons. If transport or device specific +state is added, core needs to invoke a callback from the new subsection. diff --git a/docs/devel/multi-thread-tcg.rst b/docs/devel/multi-thread-tcg.rst index c9541a7b20a..1420789fff3 100644 --- a/docs/devel/multi-thread-tcg.rst +++ b/docs/devel/multi-thread-tcg.rst @@ -109,6 +109,7 @@ including: - debugging operations (breakpoint insertion/removal) - some CPU helper functions - linux-user spawning its first thread + - operations related to TCG Plugins This is done with the async_safe_run_on_cpu() mechanism to ensure all vCPUs are quiescent when changes are being made to shared global @@ -226,10 +227,9 @@ instruction. This could be a future optimisation. Emulated hardware state ----------------------- -Currently thanks to KVM work any access to IO memory is automatically -protected by the global iothread mutex, also known as the BQL (Big -QEMU Lock). Any IO region that doesn't use global mutex is expected to -do its own locking. +Currently thanks to KVM work any access to IO memory is automatically protected +by the BQL (Big QEMU Lock). Any IO region that doesn't use the BQL is expected +to do its own locking. However IO memory isn't the only way emulated hardware state can be modified. 
Some architectures have model specific registers that diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt index a3e949f6b3a..de85767b124 100644 --- a/docs/devel/multiple-iothreads.txt +++ b/docs/devel/multiple-iothreads.txt @@ -5,7 +5,7 @@ the COPYING file in the top-level directory. This document explains the IOThread feature and how to write code that runs -outside the QEMU global mutex. +outside the BQL. The main loop and IOThreads --------------------------- @@ -29,13 +29,13 @@ scalability bottleneck on hosts with many CPUs. Work can be spread across several IOThreads instead of just one main loop. When set up correctly this can improve I/O latency and reduce jitter seen by the guest. -The main loop is also deeply associated with the QEMU global mutex, which is a -scalability bottleneck in itself. vCPU threads and the main loop use the QEMU -global mutex to serialize execution of QEMU code. This mutex is necessary -because a lot of QEMU's code historically was not thread-safe. +The main loop is also deeply associated with the BQL, which is a +scalability bottleneck in itself. vCPU threads and the main loop use the BQL +to serialize execution of QEMU code. This mutex is necessary because a lot of +QEMU's code historically was not thread-safe. The fact that all I/O processing is done in a single main loop and that the -QEMU global mutex is contended by all vCPU threads and the main loop explain +BQL is contended by all vCPU threads and the main loop explain why it is desirable to place work into IOThreads. The experimental virtio-blk data-plane implementation has been benchmarked and @@ -66,7 +66,7 @@ There are several old APIs that use the main loop AioContext: Since they implicitly work on the main loop they cannot be used in code that runs in an IOThread. They might cause a crash or deadlock if called from an -IOThread since the QEMU global mutex is not held. +IOThread since the BQL is not held. Instead, use the AioContext functions directly (see include/block/aio.h): * aio_set_fd_handler() - monitor a file descriptor @@ -88,27 +88,18 @@ loop, depending on which AioContext instance the caller passes in. How to synchronize with an IOThread ----------------------------------- -AioContext is not thread-safe so some rules must be followed when using file -descriptors, event notifiers, timers, or BHs across threads: +Variables that can be accessed by multiple threads require some form of +synchronization such as qemu_mutex_lock(), rcu_read_lock(), etc. -1. AioContext functions can always be called safely. They handle their -own locking internally. - -2. Other threads wishing to access the AioContext must use -aio_context_acquire()/aio_context_release() for mutual exclusion. Once the -context is acquired no other thread can access it or run event loop iterations -in this AioContext. - -Legacy code sometimes nests aio_context_acquire()/aio_context_release() calls. -Do not use nesting anymore, it is incompatible with the BDRV_POLL_WHILE() macro -used in the block layer and can lead to hangs. - -There is currently no lock ordering rule if a thread needs to acquire multiple -AioContexts simultaneously. Therefore, it is only safe for code holding the -QEMU global mutex to acquire other AioContexts. +AioContext functions like aio_set_fd_handler(), aio_set_event_notifier(), +aio_bh_new(), and aio_timer_new() are thread-safe. They can be used to trigger +activity in an IOThread. 
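+
+As a sketch (the IOThread pointer, callback, request type, and handler are
+hypothetical), another thread can trigger work in an IOThread like this:
+
+  static void process_request_bh(void *opaque)
+  {
+      MyRequest *req = opaque;
+
+      /* runs in the IOThread that owns the AioContext */
+      handle_request(req);
+  }
+
+  /* from any other thread, for example the main loop: */
+  aio_bh_schedule_oneshot(iothread_get_aio_context(iothread),
+                          process_request_bh, req);
+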
Side note: the best way to schedule a function call across threads is to call -aio_bh_schedule_oneshot(). No acquire/release or locking is needed. +aio_bh_schedule_oneshot(). + +The main loop thread can wait synchronously for a condition using +AIO_WAIT_WHILE(). AioContext and the block layer ------------------------------ @@ -124,22 +115,16 @@ Block layer code must therefore expect to run in an IOThread and avoid using old APIs that implicitly use the main loop. See the "How to program for IOThreads" above for information on how to do that. -If main loop code such as a QMP function wishes to access a BlockDriverState -it must first call aio_context_acquire(bdrv_get_aio_context(bs)) to ensure -that callbacks in the IOThread do not run in parallel. - Code running in the monitor typically needs to ensure that past requests from the guest are completed. When a block device is running in an IOThread, the IOThread can also process requests from the guest (via ioeventfd). To achieve both objects, wrap the code between bdrv_drained_begin() and bdrv_drained_end(), thus creating a "drained -section". The functions must be called between aio_context_acquire() -and aio_context_release(). You can freely release and re-acquire the -AioContext within a drained section. - -Long-running jobs (usually in the form of coroutines) are best scheduled in -the BlockDriverState's AioContext to avoid the need to acquire/release around -each bdrv_*() call. The functions bdrv_add/remove_aio_context_notifier, -or alternatively blk_add/remove_aio_context_notifier if you use BlockBackends, -can be used to get a notification whenever bdrv_try_change_aio_context() moves a +section". + +Long-running jobs (usually in the form of coroutines) are often scheduled in +the BlockDriverState's AioContext. The functions +bdrv_add/remove_aio_context_notifier, or alternatively +blk_add/remove_aio_context_notifier if you use BlockBackends, can be used to +get a notification whenever bdrv_try_change_aio_context() moves a BlockDriverState to a different AioContext. diff --git a/docs/devel/nested-papr.txt b/docs/devel/nested-papr.txt new file mode 100644 index 00000000000..90943650db9 --- /dev/null +++ b/docs/devel/nested-papr.txt @@ -0,0 +1,119 @@ +Nested PAPR API (aka KVM on PowerVM) +==================================== + +This API aims at providing support to enable nested virtualization with +KVM on PowerVM. While the existing support for nested KVM on PowerNV was +introduced with cap-nested-hv option, however, with a slight design change, +to enable this on papr/pseries, a new cap-nested-papr option is added. eg: + + qemu-system-ppc64 -cpu POWER10 -machine pseries,cap-nested-papr=true ... + +Work by: + Michael Neuling + Vaibhav Jain + Jordan Niethe + Harsh Prateek Bora + Shivaprasad G Bhat + Kautuk Consul + +Below taken from the kernel documentation: + +Introduction +============ + +This document explains how a guest operating system can act as a +hypervisor and run nested guests through the use of hypercalls, if the +hypervisor has implemented them. The terms L0, L1, and L2 are used to +refer to different software entities. L0 is the hypervisor mode entity +that would normally be called the "host" or "hypervisor". L1 is a +guest virtual machine that is directly run under L0 and is initiated +and controlled by L0. L2 is a guest virtual machine that is initiated +and controlled by L1 acting as a hypervisor. A significant design change +wrt existing API is that now the entire L2 state is maintained within L0. 
+ +Existing Nested-HV API +====================== + +Linux/KVM has had support for Nesting as an L0 or L1 since 2018 + +The L0 code was added:: + + commit 8e3f5fc1045dc49fd175b978c5457f5f51e7a2ce + Author: Paul Mackerras + Date: Mon Oct 8 16:31:03 2018 +1100 + KVM: PPC: Book3S HV: Framework and hcall stubs for nested virtualization + +The L1 code was added:: + + commit 360cae313702cdd0b90f82c261a8302fecef030a + Author: Paul Mackerras + Date: Mon Oct 8 16:31:04 2018 +1100 + KVM: PPC: Book3S HV: Nested guest entry via hypercall + +This API works primarily using a signal hcall h_enter_nested(). This +call made by the L1 to tell the L0 to start an L2 vCPU with the given +state. The L0 then starts this L2 and runs until an L2 exit condition +is reached. Once the L2 exits, the state of the L2 is given back to +the L1 by the L0. The full L2 vCPU state is always transferred from +and to L1 when the L2 is run. The L0 doesn't keep any state on the L2 +vCPU (except in the short sequence in the L0 on L1 -> L2 entry and L2 +-> L1 exit). + +The only state kept by the L0 is the partition table. The L1 registers +it's partition table using the h_set_partition_table() hcall. All +other state held by the L0 about the L2s is cached state (such as +shadow page tables). + +The L1 may run any L2 or vCPU without first informing the L0. It +simply starts the vCPU using h_enter_nested(). The creation of L2s and +vCPUs is done implicitly whenever h_enter_nested() is called. + +In this document, we call this existing API the v1 API. + +New PAPR API +=============== + +The new PAPR API changes from the v1 API such that the creating L2 and +associated vCPUs is explicit. In this document, we call this the v2 +API. + +h_enter_nested() is replaced with H_GUEST_VCPU_RUN(). Before this can +be called the L1 must explicitly create the L2 using h_guest_create() +and any associated vCPUs() created with h_guest_create_vCPU(). Getting +and setting vCPU state can also be performed using h_guest_{g|s}et +hcall. + +The basic execution flow is for an L1 to create an L2, run it, and +delete it is: + +- L1 and L0 negotiate capabilities with H_GUEST_{G,S}ET_CAPABILITIES() + (normally at L1 boot time). + +- L1 requests the L0 to create an L2 with H_GUEST_CREATE() and receives a token + +- L1 requests the L0 to create an L2 vCPU with H_GUEST_CREATE_VCPU() + +- L1 and L0 communicate the vCPU state using the H_GUEST_{G,S}ET() hcall + +- L1 requests the L0 to run the vCPU using H_GUEST_RUN_VCPU() hcall + +- L1 deletes L2 with H_GUEST_DELETE() + +For more details, please refer: + +[1] Linux Kernel documentation (upstream documentation commit): + +commit 476652297f94a2e5e5ef29e734b0da37ade94110 +Author: Michael Neuling +Date: Thu Sep 14 13:06:00 2023 +1000 + + docs: powerpc: Document nested KVM on POWER + + Document support for nested KVM on POWER using the existing API as well + as the new PAPR API. This includes the new HCALL interface and how it + used by KVM. + + Signed-off-by: Michael Neuling + Signed-off-by: Jordan Niethe + Signed-off-by: Michael Ellerman + Link: https://msgid.link/20230914030600.16993-12-jniethe5@gmail.com diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst index 7f78183cd48..f453bd35465 100644 --- a/docs/devel/qapi-code-gen.rst +++ b/docs/devel/qapi-code-gen.rst @@ -167,6 +167,7 @@ Syntax:: '*doc-required': BOOL, '*command-name-exceptions': [ STRING, ... ], '*command-returns-exceptions': [ STRING, ... ], + '*documentation-exceptions': [ STRING, ... ], '*member-name-exceptions': [ STRING, ... 
] } } The pragma directive lets you control optional generator behavior. @@ -183,6 +184,10 @@ may contain ``"_"`` instead of ``"-"``. Default is none. Pragma 'command-returns-exceptions' takes a list of commands that may violate the rules on permitted return types. Default is none. +Pragma 'documentation-exceptions' takes a list of types, commands, and +events whose members / arguments need not be documented. Default is +none. + Pragma 'member-name-exceptions' takes a list of types whose member names may contain uppercase letters, and ``"_"`` instead of ``"-"``. Default is none. @@ -594,7 +599,7 @@ blocking the guest and other background operations. Coroutine safety can be hard to prove, similar to thread safety. Common pitfalls are: -- The global mutex isn't held across ``qemu_coroutine_yield()``, so +- The BQL isn't held across ``qemu_coroutine_yield()``, so operations that used to assume that they execute atomically may have to be more careful to protect against changes in the global state. @@ -737,9 +742,8 @@ Types, commands, and events share a common namespace. Therefore, generally speaking, type definitions should always use CamelCase for user-defined type names, while built-in types are lowercase. -Type names ending with ``Kind`` or ``List`` are reserved for the -generator, which uses them for implicit union enums and array types, -respectively. +Type names ending with ``List`` are reserved for the generator, which +uses them for array types. Command names, member names within a type, and feature names should be all lower case with words separated by a hyphen. However, some @@ -969,7 +973,7 @@ commands and events), member (for structs and unions), branch (for alternates), or value (for enums), a description of each feature (if any), and finally optional tagged sections. -Descriptions start with '\@name:'. The description text should be +Descriptions start with '\@name:'. The description text must be indented like this:: # @name: Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed @@ -982,19 +986,21 @@ indented like this:: Extensions added after the definition was first released carry a "(since x.y.z)" comment. -The feature descriptions must be preceded by a line "Features:", like -this:: +The feature descriptions must be preceded by a blank line and then a +line "Features:", like this:: + # # Features: # # @feature: Description text -A tagged section starts with one of the following words: -"Note:"/"Notes:", "Since:", "Example"/"Examples", "Returns:", "TODO:". -The section ends with the start of a new section. +A tagged section begins with a paragraph that starts with one of the +following words: "Note:"/"Notes:", "Since:", "Example:"/"Examples:", +"Returns:", "Errors:", "TODO:". It ends with the start of a new +section. -The second and subsequent lines of sections other than -"Example"/"Examples" should be indented like this:: +The second and subsequent lines of tagged sections must be indented +like this:: # Note: Ut enim ad minim veniam, quis nostrud exercitation ullamco # laboris nisi ut aliquip ex ea commodo consequat. @@ -1002,6 +1008,9 @@ The second and subsequent lines of sections other than # Duis aute irure dolor in reprehenderit in voluptate velit esse # cillum dolore eu fugiat nulla pariatur. +"Returns" and "Errors" sections are only valid for commands. They +document the success and the error response, respectively. + A "Since: x.y.z" tagged section lists the release that introduced the definition. 
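To illustrate the rules above, here is a sketch of a complete doc comment for a hypothetical command (the command name, its argument and its feature are invented purely for illustration); note the blank line required before "Features:" and before each tagged section::

    ##
    # @frobnicate-widget:
    #
    # Frobnicate the given widget.
    #
    # @widget-id: name of the widget to frobnicate
    #
    # Features:
    #
    # @unstable: This command is experimental.
    #
    # Errors:
    #     - If @widget-id does not name a widget, DeviceNotFound
    #
    # Since: 9.0
    ##
    { 'command': 'frobnicate-widget',
      'data': { 'widget-id': 'str' },
      'features': [ 'unstable' ] }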
@@ -1020,11 +1029,11 @@ For example:: # @device: If the stats are for a virtual block device, the name # corresponding to the virtual block device. # - # @node-name: The node name of the device. (since 2.3) + # @node-name: The node name of the device. (Since 2.3) # # ... more members ... # - # Since: 0.14.0 + # Since: 0.14 ## { 'struct': 'BlockStats', 'data': {'*device': 'str', '*node-name': 'str', @@ -1036,19 +1045,19 @@ For example:: # Query the @BlockStats for all virtual block devices. # # @query-nodes: If true, the command will query all the block nodes - # ... explain, explain ... (since 2.3) + # ... explain, explain ... + # (Since 2.3) # # Returns: A list of @BlockStats for each virtual block devices. # - # Since: 0.14.0 + # Since: 0.14 # # Example: # - # -> { "execute": "query-blockstats" } - # <- { - # ... lots of output ... - # } - # + # -> { "execute": "query-blockstats" } + # <- { + # ... lots of output ... + # } ## { 'command': 'query-blockstats', 'data': { '*query-nodes': 'bool' }, @@ -1082,8 +1091,10 @@ need to line up with each other, like this:: # or cache associativity unknown) # (since 5.0) -Section tags are case-sensitive and end with a colon. Good example:: +Section tags are case-sensitive and end with a colon. They are only +recognized after a blank line. Good example:: + # # Since: 7.1 Bad examples (all ordinary paragraphs):: diff --git a/docs/devel/qom.rst b/docs/devel/qom.rst index 9918fac7f21..0889ca949c1 100644 --- a/docs/devel/qom.rst +++ b/docs/devel/qom.rst @@ -348,12 +348,14 @@ used. This does the same as OBJECT_DECLARE_SIMPLE_TYPE(), but without the 'struct MyDeviceClass' definition. To implement the type, the OBJECT_DEFINE macro family is available. -In the simple case the OBJECT_DEFINE_TYPE macro is suitable: +For the simplest case of a leaf class which doesn't need any of its +own virtual functions (i.e. which was declared with OBJECT_DECLARE_SIMPLE_TYPE) +the OBJECT_DEFINE_SIMPLE_TYPE macro is suitable: .. code-block:: c :caption: Defining a simple type - OBJECT_DEFINE_TYPE(MyDevice, my_device, MY_DEVICE, DEVICE) + OBJECT_DEFINE_SIMPLE_TYPE(MyDevice, my_device, MY_DEVICE, DEVICE) This is equivalent to the following: @@ -370,7 +372,6 @@ This is equivalent to the following: .instance_size = sizeof(MyDevice), .instance_init = my_device_init, .instance_finalize = my_device_finalize, - .class_size = sizeof(MyDeviceClass), .class_init = my_device_class_init, }; @@ -385,13 +386,36 @@ This is sufficient to get the type registered with the type system, and the three standard methods now need to be implemented along with any other logic required for the type. +If the class needs its own virtual methods, or has some other +per-class state it needs to store in its own class struct, +then you can use the OBJECT_DEFINE_TYPE macro. This does the +same thing as OBJECT_DEFINE_SIMPLE_TYPE, but it also sets the +class_size of the type to the size of the class struct. + +.. code-block:: c + :caption: Defining a type which needs a class struct + + OBJECT_DEFINE_TYPE(MyDevice, my_device, MY_DEVICE, DEVICE) + If the type needs to implement one or more interfaces, then the -OBJECT_DEFINE_TYPE_WITH_INTERFACES() macro can be used instead. -This accepts an array of interface type names. +OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES() and +OBJECT_DEFINE_TYPE_WITH_INTERFACES() macros can be used instead. +These accept an array of interface type names. 
The difference between +them is that the former is for simple leaf classes that don't need +a class struct, and the latter is for when you will be defining +a class struct. .. code-block:: c :caption: Defining a simple type implementing interfaces + OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(MyDevice, my_device, + MY_DEVICE, DEVICE, + { TYPE_USER_CREATABLE }, + { NULL }) + +.. code-block:: c + :caption: Defining a type implementing interfaces + OBJECT_DEFINE_TYPE_WITH_INTERFACES(MyDevice, my_device, MY_DEVICE, DEVICE, { TYPE_USER_CREATABLE }, diff --git a/docs/devel/replay.rst b/docs/devel/replay.rst index 0244be8b9c4..effd856f0c6 100644 --- a/docs/devel/replay.rst +++ b/docs/devel/replay.rst @@ -184,7 +184,7 @@ modes. Reading and writing requests are created by CPU thread of QEMU. Later these requests proceed to block layer which creates "bottom halves". Bottom halves consist of callback and its parameters. They are processed when -main loop locks the global mutex. These locks are not synchronized with +main loop locks the BQL. These locks are not synchronized with replaying process because main loop also processes the events that do not affect the virtual machine state (like user interaction with monitor). diff --git a/docs/devel/reset.rst b/docs/devel/reset.rst index 38ed1790f7c..2ea85e7779b 100644 --- a/docs/devel/reset.rst +++ b/docs/devel/reset.rst @@ -11,15 +11,15 @@ whole group can be reset consistently. Each individual member object does not have to care about others; in particular, problems of order (which object is reset first) are addressed. -As of now DeviceClass and BusClass implement this interface. - +The main object types which implement this interface are DeviceClass +and BusClass. Triggering reset ---------------- This section documents the APIs which "users" of a resettable object should use to control it. All resettable control functions must be called while holding -the iothread lock. +the BQL. You can apply a reset to an object using ``resettable_assert_reset()``. You need to call ``resettable_release_reset()`` to release the object from reset. To @@ -288,3 +288,43 @@ There is currently 2 cases where this function is used: 2. *hot bus change*; it means an existing live device is added, moved or removed in the bus hierarchy. At the moment, it occurs only in the raspi machines for changing the sdbus used by sd card. + +Reset of the complete system +---------------------------- + +Reset of the complete system is a little complicated. The typical +flow is: + +1. Code which wishes to reset the entire system does so by calling + ``qemu_system_reset_request()``. This schedules a reset, but the + reset will happen asynchronously after the function returns. + That makes this safe to call from, for example, device models. + +2. The function which is called to make the reset happen is + ``qemu_system_reset()``. Generally only core system code should + call this directly. + +3. ``qemu_system_reset()`` calls the ``MachineClass::reset`` method of + the current machine, if it has one. That method must call + ``qemu_devices_reset()``. If the machine has no reset method, + ``qemu_system_reset()`` calls ``qemu_devices_reset()`` directly. + +4. ``qemu_devices_reset()`` performs a reset of the system, using + the three-phase mechanism listed above. It resets all objects + that were registered with it using ``qemu_register_resettable()``. + It also calls all the functions registered with it using + ``qemu_register_reset()``. 
Those functions are called during the + "hold" phase of this reset. + +5. The most important object that this reset resets is the + 'sysbus' bus. The sysbus bus is the root of the qbus tree. This + means that all devices on the sysbus are reset, and all their + child buses, and all the devices on those child buses. + +6. Devices which are not on the qbus tree are *not* automatically + reset! (The most obvious example of this is CPU objects, but + anything that directly inherits from ``TYPE_OBJECT`` or ``TYPE_DEVICE`` + rather than from ``TYPE_SYS_BUS_DEVICE`` or some other plugs-into-a-bus + type will be in this category.) You therefore need to arrange for these + to be reset in some other way (e.g. using ``qemu_register_resettable()`` + or ``qemu_register_reset()``). diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst index 8ae59ea02b7..d46b625e0e8 100644 --- a/docs/devel/tcg-ops.rst +++ b/docs/devel/tcg-ops.rst @@ -253,6 +253,8 @@ Jumps/Labels | ``TCG_COND_GEU /* unsigned */`` | ``TCG_COND_LEU /* unsigned */`` | ``TCG_COND_GTU /* unsigned */`` + | ``TCG_COND_TSTEQ /* t1 & t2 == 0 */`` + | ``TCG_COND_TSTNE /* t1 & t2 != 0 */`` Arithmetic ---------- diff --git a/docs/devel/tcg-plugins.rst b/docs/devel/tcg-plugins.rst index 81dcd43a612..9cc09d8c3da 100644 --- a/docs/devel/tcg-plugins.rst +++ b/docs/devel/tcg-plugins.rst @@ -112,6 +112,55 @@ details are opaque to plugins. The plugin is able to query select details of instructions and system configuration only through the exported *qemu_plugin* functions. +However, the following assumptions can be made: + +Translation Blocks +++++++++++++++++++ + +All code will go through a translation phase although not all +translations will necessarily be executed. You need to instrument +actual executions to track what is happening. + +It is quite normal to see the same address translated multiple times. +If you want to track the code in system emulation you should examine +the underlying physical address (``qemu_plugin_insn_haddr``) to take +into account the effects of virtual memory, although if the system does +paging this will change too. + +Not all instructions in a block will always execute, so if it's +important to track individual instruction execution you need to +instrument them directly. However, asynchronous interrupts will not +change control flow mid-block. + +Instructions +++++++++++++ + +Instruction instrumentation runs before the instruction executes. You +can be sure the instruction will be dispatched, but you can't +be sure it will complete. Generally this will be because of a +synchronous exception (e.g. SIGILL) triggered by the instruction +attempting to execute. If you want to be sure you will need to +instrument the next instruction as well. See the ``execlog.c`` plugin +for examples of how to track this and finalise details after execution. + +Memory Accesses ++++++++++++++++ + +Memory callbacks are called after a successful load or store. +Unsuccessful operations (i.e. faults) will not be visible to memory +instrumentation although the execution side effects can be observed +(e.g. entering an exception handler). + +System Idle and Resume States ++++++++++++++++++++++++++++++ + +The ``qemu_plugin_register_vcpu_idle_cb`` and +``qemu_plugin_register_vcpu_resume_cb`` functions can be used to track +when CPUs go into and return from sleep states when waiting for +external I/O.
Be aware though that these may occur less frequently +than in real HW because the overhead of emulation gives the CPU less +chance to idle. + Internals --------- @@ -143,7 +192,7 @@ requested. The plugin isn't completely uninstalled until the safe work has executed while all vCPUs are quiescent. Example Plugins ---------------- +=============== There are a number of plugins included with QEMU and you are encouraged to contribute your own plugins plugins upstream. There is a @@ -497,6 +546,22 @@ arguments if required:: $ qemu-system-arm $(QEMU_ARGS) \ -plugin ./contrib/plugins/libexeclog.so,ifilter=st1w,afilter=0x40001808 -d plugin +This plugin can also dump registers when they change value. Specify the name of the +registers with multiple ``reg`` options. You can also use glob style matching if you wish:: + + $ qemu-system-arm $(QEMU_ARGS) \ + -plugin ./contrib/plugins/libexeclog.so,reg=\*_el2,reg=sp -d plugin + +Be aware that each additional register to check will slow down +execution quite considerably. You can optimise the number of register +checks done by using the rdisas option. This will only instrument +instructions that mention the registers in question in disassembly. +This is not foolproof as some instructions implicitly change +registers without mentioning them in the disassembly. You can use the ifilter option to catch these cases:: + + $ qemu-system-arm $(QEMU_ARGS) \ + -plugin ./contrib/plugins/libexeclog.so,ifilter=msr,ifilter=blr,reg=x30,reg=\*_el1,rdisas=on + - contrib/plugins/cache.c Cache modelling plugin that measures the performance of a given L1 cache @@ -575,12 +640,11 @@ The plugin has a number of arguments, all of them are optional: configuration arguments implies ``l2=on``. (default: N = 2097152 (2MB), B = 64, A = 16) -API ---- +Plugin API +========== The following API is generated from the inline documentation in ``include/qemu/qemu-plugin.h``. Please ensure any updates to the API include the full kernel-doc annotations. .. kernel-doc:: include/qemu/qemu-plugin.h - diff --git a/docs/devel/testing.rst b/docs/devel/testing.rst index bd132306c1d..fa28e3ecb2c 100644 --- a/docs/devel/testing.rst +++ b/docs/devel/testing.rst @@ -728,7 +728,7 @@ For example to setup the HPPA ports builds of Debian:: EXECUTABLE=(pwd)/qemu-hppa V=1 The ``DEB_`` variables are substitutions used by -``debian-boostrap.pre`` which is called to do the initial debootstrap +``debian-bootstrap.pre`` which is called to do the initial debootstrap of the rootfs before it is copied into the container. The second stage is run as part of the build. The final image will be tagged as ``qemu/debian-sid-hppa``. @@ -1346,6 +1346,17 @@ the environment. The definition of *large* is a bit arbitrary here, but it usually means an asset which occupies at least 1GB of size on disk when uncompressed. +SPEED +^^^^^ +Tests which have a long runtime will not be run unless ``SPEED=slow`` is +exported on the environment. + +The definition of *long* is a bit arbitrary here, and it depends on the +usefulness of the test too. A unique test is worth spending more time on, +small variations on existing tests perhaps less so. As a rough guide, +a test or set of similar tests which takes more than 100 seconds to +complete should be treated as long.
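For instance, a slow run of the avocado tests could be started from the build directory with something like the following (a sketch only; the exact targets available depend on your build configuration)::

    $ cd build
    $ SPEED=slow make check-avocado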
+ AVOCADO_ALLOW_UNTRUSTED_CODE ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ There are tests which will boot a kernel image or firmware that can be diff --git a/docs/devel/tracing.rst b/docs/devel/tracing.rst index d288480db11..043bed7fd0f 100644 --- a/docs/devel/tracing.rst +++ b/docs/devel/tracing.rst @@ -357,8 +357,7 @@ probes:: scripts/tracetool.py --backends=dtrace --format=stap \ --binary path/to/qemu-binary \ - --target-type system \ - --target-name x86_64 \ + --probe-prefix qemu.system.x86_64 \ --group=all \ trace-events-all \ qemu.stp diff --git a/docs/devel/vfio-iommufd.rst b/docs/devel/vfio-iommufd.rst new file mode 100644 index 00000000000..3d1c11f175e --- /dev/null +++ b/docs/devel/vfio-iommufd.rst @@ -0,0 +1,166 @@ +=============================== +IOMMUFD BACKEND usage with VFIO +=============================== + +(Same meaning for backend/container/BE) + +With the introduction of iommufd, the Linux kernel provides a generic +interface for user space drivers to propagate their DMA mappings to kernel +for assigned devices. While the legacy kernel interface is group-centric, +the new iommufd interface is device-centric, relying on device fd and iommufd. + +To support both interfaces in the QEMU VFIO device, introduce a base container +to abstract the common part of VFIO legacy and iommufd container. So that the +generic VFIO code can use either container. + +The base container implements generic functions such as memory_listener and +address space management whereas the derived container implements callbacks +specific to either legacy or iommufd. Each container has its own way to setup +secure context and dma management interface. The below diagram shows how it +looks like with both containers. + +:: + + VFIO AddressSpace/Memory + +-------+ +----------+ +-----+ +-----+ + | pci | | platform | | ap | | ccw | + +---+---+ +----+-----+ +--+--+ +--+--+ +----------------------+ + | | | | | AddressSpace | + | | | | +------------+---------+ + +---V-----------V-----------V--------V----+ / + | VFIOAddressSpace | <------------+ + | | | MemoryListener + | VFIOContainerBase list | + +-------+----------------------------+----+ + | | + | | + +-------V------+ +--------V----------+ + | iommufd | | vfio legacy | + | container | | container | + +-------+------+ +--------+----------+ + | | + | /dev/iommu | /dev/vfio/vfio + | /dev/vfio/devices/vfioX | /dev/vfio/$group_id + Userspace | | + ============+============================+=========================== + Kernel | device fd | + +---------------+ | group/container fd + | (BIND_IOMMUFD | | (SET_CONTAINER/SET_IOMMU) + | ATTACH_IOAS) | | device fd + | | | + | +-------V------------V-----------------+ + iommufd | | vfio | + (map/unmap | +---------+--------------------+-------+ + ioas_copy) | | | map/unmap + | | | + +------V------+ +-----V------+ +------V--------+ + | iommfd core | | device | | vfio iommu | + +-------------+ +------------+ +---------------+ + +* Secure Context setup + + - iommufd BE: uses device fd and iommufd to setup secure context + (bind_iommufd, attach_ioas) + - vfio legacy BE: uses group fd and container fd to setup secure context + (set_container, set_iommu) + +* Device access + + - iommufd BE: device fd is opened through ``/dev/vfio/devices/vfioX`` + - vfio legacy BE: device fd is retrieved from group fd ioctl + +* DMA Mapping flow + + 1. VFIOAddressSpace receives MemoryRegion add/del via MemoryListener + 2. 
VFIO populates DMA map/unmap via the container BEs + * iommufd BE: uses iommufd + * vfio legacy BE: uses container fd + +Example configuration +===================== + +Step 1: configure the host device +--------------------------------- + +It's exactly the same as for a VFIO device using the legacy VFIO container. + +Step 2: configure QEMU +---------------------- + +Interactions with ``/dev/iommu`` are abstracted by a new iommufd +object (compiled in with the ``CONFIG_IOMMUFD`` option). + +Any QEMU device (e.g. VFIO device) wishing to use ``/dev/iommu`` must +be linked with an iommufd object. It gets a new optional property +named iommufd which allows an iommufd object to be passed. Take the ``vfio-pci`` +device as an example: + +.. code-block:: bash + + -object iommufd,id=iommufd0 + -device vfio-pci,host=0000:02:00.0,iommufd=iommufd0 + +Note that ``/dev/iommu`` and the VFIO cdev can be opened externally by a +management layer. In that case the fd is passed in; the ``fd`` property accepts +either a string naming the fd or a number, for example: + +.. code-block:: bash + + -object iommufd,id=iommufd0,fd=22 + -device vfio-pci,iommufd=iommufd0,fd=23 + +If the ``fd`` property is not passed, the fd is opened by QEMU. + +If no ``iommufd`` object is passed to the ``vfio-pci`` device, iommufd +is not used and the user gets the behavior of the legacy VFIO +container: + +.. code-block:: bash + + -device vfio-pci,host=0000:02:00.0 + +Supported platform +================== + +x86, ARM and s390x are currently supported. + +Caveats +======= + +Dirty page sync +--------------- + +Dirty page sync is not yet supported with the iommufd backend, so live migration is +disabled by default. It can be force-enabled as shown below, although this is +inefficient. + +.. code-block:: bash + + -object iommufd,id=iommufd0 + -device vfio-pci,host=0000:02:00.0,iommufd=iommufd0,enable-migration=on + +P2P DMA +------- + +PCI p2p DMA is unsupported because IOMMUFD doesn't support mapping hardware PCI +BAR regions yet. The warning below is shown for an assigned PCI device; it is not a bug. + +.. code-block:: none + + qemu-system-x86_64: warning: IOMMU_IOAS_MAP failed: Bad address, PCI BAR? + qemu-system-x86_64: vfio_container_dma_map(0x560cb6cb1620, 0xe000000021000, 0x3000, 0x7f32ed55c000) = -14 (Bad address) + +FD passing with mdev +-------------------- + +The ``vfio-pci`` device checks the sysfsdev property to decide whether the backend is a mdev. +If FD passing is used, there is no way to know that, and the mdev is treated +like a real PCI device. The error below is reported if the user tries to enable +RAM discarding for a mdev. + +.. code-block:: none + + qemu-system-x86_64: -device vfio-pci,iommufd=iommufd0,x-balloon-allowed=on,fd=9: vfio VFIO_FD9: x-balloon-allowed only potentially compatible with mdev devices + +The ``vfio-ap`` and ``vfio-ccw`` devices don't have the same issue, as their backend +devices are always mdevs and RAM discarding is force enabled. diff --git a/docs/devel/virtio-migration.txt b/docs/devel/virtio-migration.txt deleted file mode 100644 index 98a6b0ffb57..00000000000 --- a/docs/devel/virtio-migration.txt +++ /dev/null @@ -1,108 +0,0 @@ -Virtio devices and migration -============================ - -Copyright 2015 IBM Corp. - -This work is licensed under the terms of the GNU GPL, version 2 or later. See -the COPYING file in the top-level directory. - -Saving and restoring the state of virtio devices is a bit of a twisty maze, -for several reasons: -- state is distributed between several parts: - - virtio core, for common fields like features, number of queues, ...
- - virtio transport (pci, ccw, ...), for the different proxy devices and - transport specific state (msix vectors, indicators, ...) - - virtio device (net, blk, ...), for the different device types and their - state (mac address, request queue, ...) -- most fields are saved via the stream interface; subsequently, subsections - have been added to make cross-version migration possible - -This file attempts to document the current procedure and point out some -caveats. - - -Save state procedure -==================== - -virtio core virtio transport virtio device ------------ ---------------- ------------- - - save() function registered - via VMState wrapper on - device class -virtio_save() <---------- - ------> save_config() - - save proxy device - - save transport-specific - device fields -- save common device - fields -- save common virtqueue - fields - ------> save_queue() - - save transport-specific - virtqueue fields - ------> save_device() - - save device-specific - fields -- save subsections - - device endianness, - if changed from - default endianness - - 64 bit features, if - any high feature bit - is set - - virtio-1 virtqueue - fields, if VERSION_1 - is set - - -Load state procedure -==================== - -virtio core virtio transport virtio device ------------ ---------------- ------------- - - load() function registered - via VMState wrapper on - device class -virtio_load() <---------- - ------> load_config() - - load proxy device - - load transport-specific - device fields -- load common device - fields -- load common virtqueue - fields - ------> load_queue() - - load transport-specific - virtqueue fields -- notify guest - ------> load_device() - - load device-specific - fields -- load subsections - - device endianness - - 64 bit features - - virtio-1 virtqueue - fields -- sanitize endianness -- sanitize features -- virtqueue index sanity - check - - feature-dependent setup - - -Implications of this setup -========================== - -Devices need to be careful in their state processing during load: The -load_device() procedure is invoked by the core before subsections have -been loaded. Any code that depends on information transmitted in subsections -therefore has to be invoked in the device's load() function _after_ -virtio_load() returned (like e.g. code depending on features). - -Any extension of the state being migrated should be done in subsections -added to the core for compatibility reasons. If transport or device specific -state is added, core needs to invoke a callback from the new subsection. diff --git a/docs/devel/writing-monitor-commands.rst b/docs/devel/writing-monitor-commands.rst index 2c11e716651..930da5cd068 100644 --- a/docs/devel/writing-monitor-commands.rst +++ b/docs/devel/writing-monitor-commands.rst @@ -8,8 +8,8 @@ This document doesn't discuss QMP protocol level details, nor does it dive into the QAPI framework implementation. For an in-depth introduction to the QAPI framework, please refer to -docs/devel/qapi-code-gen.txt. For documentation about the QMP protocol, -start with docs/interop/qmp-intro.txt. +:doc:`qapi-code-gen`. For the QMP protocol, see the +:doc:`/interop/qmp-spec`. New commands may be implemented in QMP only. New HMP commands should be implemented on top of QMP. The typical HMP command wraps around an @@ -66,12 +66,13 @@ Then, in a different terminal:: "version": { "qemu": { "micro": 50, - "minor": 15, - "major": 0 + "minor": 2, + "major": 8 }, - "package": "" + "package": ... 
}, "capabilities": [ + "oob" ] } } @@ -107,10 +108,14 @@ The first step is defining the command in the appropriate QAPI schema module. We pick module qapi/misc.json, and add the following line at the bottom:: + ## + # @hello-world: + # + # Since: 9.0 + ## { 'command': 'hello-world' } -The "command" keyword defines a new QMP command. It's an JSON object. All -schema entries are JSON objects. The line above will instruct the QAPI to +The "command" keyword defines a new QMP command. It instructs QAPI to generate any prototypes and the necessary code to marshal and unmarshal protocol data. @@ -132,57 +137,70 @@ There are a few things to be noticed: 3. It takes an "Error \*\*" argument. This is required. Later we will see how to return errors and take additional arguments. The Error argument should not be touched if the command doesn't return errors -4. We won't add the function's prototype. That's automatically done by the QAPI +4. We won't add the function's prototype. That's automatically done by QAPI 5. Printing to the terminal is discouraged for QMP commands, we do it here because it's the easiest way to demonstrate a QMP command -You're done. Now build qemu, run it as suggested in the "Testing" section, +You're done. Now build QEMU, run it as suggested in the "Testing" section, and then type the following QMP command:: { "execute": "hello-world" } -Then check the terminal running qemu and look for the "Hello, world" string. If +Then check the terminal running QEMU and look for the "Hello, world" string. If you don't see it then something went wrong. Arguments ~~~~~~~~~ -Let's add an argument called "message" to our "hello-world" command. The new -argument will contain the string to be printed to stdout. It's an optional -argument, if it's not present we print our default "Hello, World" string. +Let's add arguments to our "hello-world" command. The first change we have to do is to modify the command specification in the schema file to the following:: - { 'command': 'hello-world', 'data': { '*message': 'str' } } + ## + # @hello-world: + # + # @message: message to be printed (default: "Hello, world!") + # + # @times: how many times to print the message (default: 1) + # + # Since: 9.0 + ## + { 'command': 'hello-world', + 'data': { '*message': 'str', '*times': 'int' } } -Notice the new 'data' member in the schema. It's an JSON object whose each -element is an argument to the command in question. Also notice the asterisk, -it's used to mark the argument optional (that means that you shouldn't use it -for mandatory arguments). Finally, 'str' is the argument's type, which -stands for "string". The QAPI also supports integers, booleans, enumerations -and user defined types. +Notice the new 'data' member in the schema. It specifies an argument +'message' of QAPI type 'str', and an argument 'times' of QAPI type +'int'. Also notice the asterisk, it's used to mark the argument +optional. Now, let's update our C implementation in monitor/qmp-cmds.c:: - void qmp_hello_world(const char *message, Error **errp) + void qmp_hello_world(const char *message, bool has_times, int64_t times, + Error **errp) { - if (message) { + if (!message) { + message = "Hello, world"; + } + if (!has_times) { + times = 1; + } + + for (int i = 0; i < times; i++) { printf("%s\n", message); - } else { - printf("Hello, world\n"); } } There are two important details to be noticed: -1. All optional arguments are accompanied by a 'has\_' boolean, which is set - if the optional argument is present or false otherwise +1. 
Optional arguments other than pointers are accompanied by a 'has\_' + boolean, which is set if the optional argument is present or false + otherwise 2. The C implementation signature must follow the schema's argument ordering, which is defined by the "data" member -Time to test our new version of the "hello-world" command. Build qemu, run it as +Time to test our new version of the "hello-world" command. Build QEMU, run it as described in the "Testing" section and then send two commands:: { "execute": "hello-world" } @@ -191,13 +209,13 @@ described in the "Testing" section and then send two commands:: } } - { "execute": "hello-world", "arguments": { "message": "We love qemu" } } + { "execute": "hello-world", "arguments": { "message": "We love QEMU" } } { "return": { } } -You should see "Hello, world" and "We love qemu" in the terminal running qemu, +You should see "Hello, world" and "We love QEMU" in the terminal running QEMU, if you don't see these strings, then something went wrong. @@ -227,7 +245,7 @@ The first argument to the error_setg() function is the Error pointer to pointer, which is passed to all QMP functions. The next argument is a human description of the error, this is a free-form printf-like string. -Let's test the example above. Build qemu, run it as defined in the "Testing" +Let's test the example above. Build QEMU, run it as defined in the "Testing" section, and then issue the following command:: { "execute": "hello-world", "arguments": { "message": "all you need is love" } } @@ -254,44 +272,14 @@ If the failure you want to report falls into one of the two cases above, use error_set() with a second argument of an ErrorClass value. -Command Documentation -~~~~~~~~~~~~~~~~~~~~~ - -There's only one step missing to make "hello-world"'s implementation complete, -and that's its documentation in the schema file. - -There are many examples of such documentation in the schema file already, but -here goes "hello-world"'s new entry for qapi/misc.json:: - - ## - # @hello-world: - # - # Print a client provided string to the standard output stream. - # - # @message: string to be printed - # - # Returns: Nothing on success. - # - # Notes: if @message is not provided, the "Hello, world" string will - # be printed instead - # - # Since: - ## - { 'command': 'hello-world', 'data': { '*message': 'str' } } - -Please, note that the "Returns" clause is optional if a command doesn't return -any data nor any errors. - - Implementing the HMP command ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Now that the QMP command is in place, we can also make it available in the human monitor (HMP). -With the introduction of the QAPI, HMP commands make QMP calls. Most of the -time HMP commands are simple wrappers. All HMP commands implementation exist in -the monitor/hmp-cmds.c file. +With the introduction of QAPI, HMP commands make QMP calls. Most of the +time HMP commands are simple wrappers. Here's the implementation of the "hello-world" HMP command:: @@ -306,18 +294,20 @@ Here's the implementation of the "hello-world" HMP command:: } } -Also, you have to add the function's prototype to the hmp.h file. +Add it to monitor/hmp-cmds.c. Also, add its prototype to +include/monitor/hmp.h. -There are three important points to be noticed: +There are four important points to be noticed: 1. The "mon" and "qdict" arguments are mandatory for all HMP functions. The former is the monitor object. The latter is how the monitor passes arguments entered by the user to the command implementation -2. hmp_hello_world() performs error checking. 
In this example we just call +2. We chose not to support the "times" argument in HMP +3. hmp_hello_world() performs error checking. In this example we just call hmp_handle_error() which prints a message to the user, but we could do more, like taking different actions depending on the error qmp_hello_world() returns -3. The "err" variable must be initialized to NULL before performing the +4. The "err" variable must be initialized to NULL before performing the QMP call There's one last step to actually make the command available to monitor users, @@ -340,17 +330,17 @@ To test this you have to open a user monitor and issue the "hello-world" command. It might be instructive to check the command's documentation with HMP's "help" command. -Please, check the "-monitor" command-line option to know how to open a user +Please check the "-monitor" command-line option to know how to open a user monitor. Writing more complex commands ----------------------------- -A QMP command is capable of returning any data the QAPI supports like integers, +A QMP command is capable of returning any data QAPI supports like integers, strings, booleans, enumerations and user defined types. -In this section we will focus on user defined types. Please, check the QAPI +In this section we will focus on user defined types. Please check the QAPI documentation for information about the other types. @@ -372,7 +362,7 @@ data, it is not expected that machines will need to parse the result. The overhead of defining a fine grained QAPI type for the data may not be justified by the potential benefit. In such cases, it is permitted to have a command return a simple string that contains formatted data, -however, it is mandatory for the command to use the 'x-' name prefix. +however, it is mandatory for the command to be marked unstable. This indicates that the command is not guaranteed to be long term stable / liable to change in future and is not following QAPI design best practices. An example where this approach is taken is the QMP @@ -386,302 +376,207 @@ an illustration. User Defined Types ~~~~~~~~~~~~~~~~~~ -FIXME This example needs to be redone after commit 6d32717 +For this example we will write the query-option-roms command, which +returns information about ROMs loaded into the option ROM space. For +more information about it, please check the "-option-rom" command-line +option. -For this example we will write the query-alarm-clock command, which returns -information about QEMU's timer alarm. For more information about it, please -check the "-clock" command-line option. - -We want to return two pieces of information. The first one is the alarm clock's -name. The second one is when the next alarm will fire. The former information is -returned as a string, the latter is an integer in nanoseconds (which is not -very useful in practice, as the timer has probably already fired when the -information reaches the client). - -The best way to return that data is to create a new QAPI type, as shown below:: +For each option ROM, we want to return two pieces of information: the +ROM image's file name, and its bootindex, if any. We need to create a +new QAPI type for that, as shown below:: ## - # @QemuAlarmClock - # - # QEMU alarm clock information. + # @OptionRomInfo: # - # @clock-name: The alarm clock method's name. + # @filename: option ROM image file name # - # @next-deadline: The time (in nanoseconds) the next alarm will fire. 
+ # @bootindex: option ROM's bootindex # - # Since: 1.0 + # Since: 9.0 ## - { 'type': 'QemuAlarmClock', - 'data': { 'clock-name': 'str', '*next-deadline': 'int' } } + { 'struct': 'OptionRomInfo', + 'data': { 'filename': 'str', '*bootindex': 'int' } } -The "type" keyword defines a new QAPI type. Its "data" member contains the -type's members. In this example our members are the "clock-name" and the -"next-deadline" one, which is optional. +The "struct" keyword defines a new QAPI type. Its "data" member +contains the type's members. In this example our members are +"filename" and "bootindex". The latter is optional. -Now let's define the query-alarm-clock command:: +Now let's define the query-option-roms command:: ## - # @query-alarm-clock + # @query-option-roms: # - # Return information about QEMU's alarm clock. + # Query information on ROMs loaded into the option ROM space. # - # Returns a @QemuAlarmClock instance describing the alarm clock method - # being currently used by QEMU (this is usually set by the '-clock' - # command-line option). + # Returns: OptionRomInfo # - # Since: 1.0 + # Since: 9.0 ## - { 'command': 'query-alarm-clock', 'returns': 'QemuAlarmClock' } + { 'command': 'query-option-roms', + 'returns': ['OptionRomInfo'] } Notice the "returns" keyword. As its name suggests, it's used to define the data returned by a command. -It's time to implement the qmp_query_alarm_clock() function, you can put it -in the qemu-timer.c file:: +Notice the syntax ['OptionRomInfo']". This should be read as "returns +a list of OptionRomInfo". - QemuAlarmClock *qmp_query_alarm_clock(Error **errp) - { - QemuAlarmClock *clock; - int64_t deadline; - - clock = g_malloc0(sizeof(*clock)); +It's time to implement the qmp_query_option_roms() function. Add to +monitor/qmp-cmds.c:: - deadline = qemu_next_alarm_deadline(); - if (deadline > 0) { - clock->has_next_deadline = true; - clock->next_deadline = deadline; + OptionRomInfoList *qmp_query_option_roms(Error **errp) + { + OptionRomInfoList *info_list = NULL; + OptionRomInfoList **tailp = &info_list; + OptionRomInfo *info; + + for (int i = 0; i < nb_option_roms; i++) { + info = g_malloc0(sizeof(*info)); + info->filename = g_strdup(option_rom[i].name); + info->has_bootindex = option_rom[i].bootindex >= 0; + if (info->has_bootindex) { + info->bootindex = option_rom[i].bootindex; + } + QAPI_LIST_APPEND(tailp, info); } - clock->clock_name = g_strdup(alarm_timer->name); - return clock; + return info_list; } There are a number of things to be noticed: -1. The QemuAlarmClock type is automatically generated by the QAPI framework, - its members correspond to the type's specification in the schema file -2. As specified in the schema file, the function returns a QemuAlarmClock - instance and takes no arguments (besides the "errp" one, which is mandatory - for all QMP functions) -3. The "clock" variable (which will point to our QAPI type instance) is - allocated by the regular g_malloc0() function. Note that we chose to - initialize the memory to zero. This is recommended for all QAPI types, as - it helps avoiding bad surprises (specially with booleans) -4. Remember that "next_deadline" is optional? Non-pointer optional - members have a 'has_TYPE_NAME' member that should be properly set +1. Type OptionRomInfo is automatically generated by the QAPI framework, + its members correspond to the type's specification in the schema + file +2. Type OptionRomInfoList is also generated. It's a singly linked + list. +3. 
As specified in the schema file, the function returns a + OptionRomInfoList, and takes no arguments (besides the "errp" one, + which is mandatory for all QMP functions) +4. The returned object is dynamically allocated +5. All strings are dynamically allocated. This is so because QAPI also + generates a function to free its types and it cannot distinguish + between dynamically or statically allocated strings +6. Remember that "bootindex" is optional? As a non-pointer optional + member, it comes with a 'has_bootindex' member that needs to be set by the implementation, as shown above -5. Even static strings, such as "alarm_timer->name", should be dynamically - allocated by the implementation. This is so because the QAPI also generates - a function to free its types and it cannot distinguish between dynamically - or statically allocated strings -6. You have to include "qapi/qapi-commands-misc.h" in qemu-timer.c -Time to test the new command. Build qemu, run it as described in the "Testing" +Time to test the new command. Build QEMU, run it as described in the "Testing" section and try this:: - { "execute": "query-alarm-clock" } + { "execute": "query-option-rom" } { - "return": { - "next-deadline": 2368219, - "clock-name": "dynticks" - } + "return": [ + { + "filename": "kvmvapic.bin" + } + ] } The HMP command ~~~~~~~~~~~~~~~ -Here's the HMP counterpart of the query-alarm-clock command:: +Here's the HMP counterpart of the query-option-roms command:: - void hmp_info_alarm_clock(Monitor *mon) + void hmp_info_option_roms(Monitor *mon, const QDict *qdict) { - QemuAlarmClock *clock; Error *err = NULL; + OptionRomInfoList *info_list, *tail; + OptionRomInfo *info; - clock = qmp_query_alarm_clock(&err); + info_list = qmp_query_option_roms(&err); if (hmp_handle_error(mon, err)) { return; } - monitor_printf(mon, "Alarm clock method in use: '%s'\n", clock->clock_name); - if (clock->has_next_deadline) { - monitor_printf(mon, "Next alarm will fire in %" PRId64 " nanoseconds\n", - clock->next_deadline); + for (tail = info_list; tail; tail = tail->next) { + info = tail->value; + monitor_printf(mon, "%s", info->filename); + if (info->has_bootindex) { + monitor_printf(mon, " %" PRId64, info->bootindex); + } + monitor_printf(mon, "\n"); } - qapi_free_QemuAlarmClock(clock); + qapi_free_OptionRomInfoList(info_list); } -It's important to notice that hmp_info_alarm_clock() calls -qapi_free_QemuAlarmClock() to free the data returned by qmp_query_alarm_clock(). -For user defined types, the QAPI will generate a qapi_free_QAPI_TYPE_NAME() -function and that's what you have to use to free the types you define and -qapi_free_QAPI_TYPE_NAMEList() for list types (explained in the next section). -If the QMP call returns a string, then you should g_free() to free it. - -Also note that hmp_info_alarm_clock() performs error handling. That's not -strictly required if you're sure the QMP function doesn't return errors, but -it's good practice to always check for errors. - -Another important detail is that HMP's "info" commands don't go into the -hmp-commands.hx. Instead, they go into the info_cmds[] table, which is defined -in the monitor/misc.c file. The entry for the "info alarmclock" follows:: - - { - .name = "alarmclock", - .args_type = "", - .params = "", - .help = "show information about the alarm clock", - .cmd = hmp_info_alarm_clock, - }, +It's important to notice that hmp_info_option_roms() calls +qapi_free_OptionRomInfoList() to free the data returned by +qmp_query_option_roms(). 
For user defined types, QAPI will generate a +qapi_free_QAPI_TYPE_NAME() function, and that's what you have to use to +free the types you define and qapi_free_QAPI_TYPE_NAMEList() for list +types (explained in the next section). If the QMP function returns a +string, then you should g_free() to free it. + +Also note that hmp_info_option_roms() performs error handling. That's +not strictly required when you're sure the QMP function doesn't return +errors; you could instead pass it &error_abort then. + +Another important detail is that HMP's "info" commands go into +hmp-commands-info.hx, not hmp-commands.hx. The entry for the "info +option-roms" follows:: + + { + .name = "option-roms", + .args_type = "", + .params = "", + .help = "show roms", + .cmd = hmp_info_option_roms, + }, + SRST + ``info option-roms`` + Show the option ROMs. + ERST -To test this, run qemu and type "info alarmclock" in the user monitor. +To test this, run QEMU and type "info option-roms" in the user monitor. -Returning Lists -~~~~~~~~~~~~~~~ +Writing a debugging aid returning unstructured text +--------------------------------------------------- -For this example, we're going to return all available methods for the timer -alarm, which is pretty much what the command-line option "-clock ?" does, -except that we're also going to inform which method is in use. +As discussed in section `Modelling data in QAPI`_, it is required that +commands expecting machine usage be using fine-grained QAPI data types. +The exception to this rule applies when the command is solely intended +as a debugging aid and allows for returning unstructured text, such as +a query command that report aspects of QEMU's internal state that are +useful only to human operators. -This first step is to define a new type:: +In this example we will consider the existing QMP command +``x-query-roms`` in qapi/machine.json. It has no parameters and +returns a ``HumanReadableText``:: ## - # @TimerAlarmMethod - # - # Timer alarm method information. + # @x-query-roms: # - # @method-name: The method's name. + # Query information on the registered ROMS # - # @current: true if this alarm method is currently in use, false otherwise + # Features: # - # Since: 1.0 - ## - { 'type': 'TimerAlarmMethod', - 'data': { 'method-name': 'str', 'current': 'bool' } } - -The command will be called "query-alarm-methods", here is its schema -specification:: - - ## - # @query-alarm-methods + # @unstable: This command is meant for debugging. # - # Returns information about available alarm methods. + # Returns: registered ROMs # - # Returns: a list of @TimerAlarmMethod for each method - # - # Since: 1.0 + # Since: 6.2 ## - { 'command': 'query-alarm-methods', 'returns': ['TimerAlarmMethod'] } - -Notice the syntax for returning lists "'returns': ['TimerAlarmMethod']", this -should be read as "returns a list of TimerAlarmMethod instances". - -The C implementation follows:: - - TimerAlarmMethodList *qmp_query_alarm_methods(Error **errp) - { - TimerAlarmMethodList *method_list = NULL; - const struct qemu_alarm_timer *p; - bool current = true; - - for (p = alarm_timers; p->name; p++) { - TimerAlarmMethod *value = g_malloc0(*value); - value->method_name = g_strdup(p->name); - value->current = current; - QAPI_LIST_PREPEND(method_list, value); - current = false; - } - - return method_list; - } - -The most important difference from the previous examples is the -TimerAlarmMethodList type, which is automatically generated by the QAPI from -the TimerAlarmMethod type. 
- -Each list node is represented by a TimerAlarmMethodList instance. We have to -allocate it, and that's done inside the for loop: the "info" pointer points to -an allocated node. We also have to allocate the node's contents, which is -stored in its "value" member. In our example, the "value" member is a pointer -to an TimerAlarmMethod instance. - -Notice that the "current" variable is used as "true" only in the first -iteration of the loop. That's because the alarm timer method in use is the -first element of the alarm_timers array. Also notice that QAPI lists are handled -by hand and we return the head of the list. - -Now Build qemu, run it as explained in the "Testing" section and try our new -command:: - - { "execute": "query-alarm-methods" } - { - "return": [ - { - "current": false, - "method-name": "unix" - }, - { - "current": true, - "method-name": "dynticks" - } - ] - } - -The HMP counterpart is a bit more complex than previous examples because it -has to traverse the list, it's shown below for reference:: - - void hmp_info_alarm_methods(Monitor *mon) - { - TimerAlarmMethodList *method_list, *method; - Error *err = NULL; - - method_list = qmp_query_alarm_methods(&err); - if (hmp_handle_error(mon, err)) { - return; - } - - for (method = method_list; method; method = method->next) { - monitor_printf(mon, "%c %s\n", method->value->current ? '*' : ' ', - method->value->method_name); - } - - qapi_free_TimerAlarmMethodList(method_list); - } - -Writing a debugging aid returning unstructured text ---------------------------------------------------- - -As discussed in section `Modelling data in QAPI`_, it is required that -commands expecting machine usage be using fine-grained QAPI data types. -The exception to this rule applies when the command is solely intended -as a debugging aid and allows for returning unstructured text. This is -commonly needed for query commands that report aspects of QEMU's -internal state that are useful to human operators. - -In this example we will consider a simplified variant of the HMP -command ``info roms``. Following the earlier rules, this command will -need to live under the ``x-`` name prefix, so its QMP implementation -will be called ``x-query-roms``. It will have no parameters and will -return a single text string:: - - { 'struct': 'HumanReadableText', - 'data': { 'human-readable-text': 'str' } } - { 'command': 'x-query-roms', - 'returns': 'HumanReadableText' } + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } -The ``HumanReadableText`` struct is intended to be used for all -commands, under the ``x-`` name prefix that are returning unstructured -text targeted at humans. It should never be used for commands outside -the ``x-`` name prefix, as those should be using structured QAPI types. +The ``HumanReadableText`` struct is defined in qapi/common.json as a +struct with a string member. It is intended to be used for all +commands that are returning unstructured text targeted at +humans. These should all have feature 'unstable'. Note that the +feature's documentation states why the command is unstable. We +commonly use a ``x-`` command name prefix to make lack of stability +obvious to human users. 
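On the wire, such a command returns nothing but a single string member carrying the formatted text. A sketch of what an exchange might look like (the text shown in the reply is made up for illustration)::

    -> { "execute": "x-query-roms" }
    <- { "return": { "human-readable-text": "...one line per ROM region...\n" } }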
Implementing the QMP command ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The QMP implementation will typically involve creating a ``GString`` -object and printing formatted data into it:: +object and printing formatted data into it, like this:: HumanReadableText *qmp_x_query_roms(Error **errp) { @@ -698,6 +593,9 @@ object and printing formatted data into it:: return human_readable_text_from_str(buf); } +The actual implementation emits more information. You can find it in +hw/core/loader.c. + Implementing the HMP command ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -706,7 +604,7 @@ Now that the QMP command is in place, we can also make it available in the human monitor (HMP) as shown in previous examples. The HMP implementations will all look fairly similar, as all they need do is invoke the QMP command and then print the resulting text or error -message. Here's the implementation of the "info roms" HMP command:: +message. Here's an implementation of the "info roms" HMP command:: void hmp_info_roms(Monitor *mon, const QDict *qdict) { @@ -746,3 +644,5 @@ field NULL:: .help = "show roms", .cmd_info_hrt = qmp_x_query_roms, }, + +This is how the actual HMP command is done. diff --git a/docs/interop/bitmaps.rst b/docs/interop/bitmaps.rst index 1de46febdc5..ddf8947d548 100644 --- a/docs/interop/bitmaps.rst +++ b/docs/interop/bitmaps.rst @@ -166,9 +166,9 @@ Basic QMP Usage --------------- The primary interface to manipulating bitmap objects is via the QMP -interface. If you are not familiar, see docs/interop/qmp-intro.txt for a broad -overview, and `qemu-qmp-ref `_ for a full reference of all -QMP commands. +interface. If you are not familiar, see the :doc:`qmp-spec` for the +protocol, and :doc:`qemu-qmp-ref` for a full reference of all QMP +commands. Supported Commands ~~~~~~~~~~~~~~~~~~ diff --git a/docs/interop/firmware.json b/docs/interop/firmware.json index cc8f8691868..54a1fc6c104 100644 --- a/docs/interop/firmware.json +++ b/docs/interop/firmware.json @@ -223,7 +223,7 @@ ## -# @FirmwareFlashType: +# @FirmwareFlashMode: # # Describes how the firmware build handles code versus variable # persistence. 
@@ -435,203 +435,203 @@ # # Examples: # -# { -# "description": "SeaBIOS", -# "interface-types": [ -# "bios" -# ], -# "mapping": { -# "device": "memory", -# "filename": "/usr/share/seabios/bios-256k.bin" -# }, -# "targets": [ -# { -# "architecture": "i386", -# "machines": [ -# "pc-i440fx-*", -# "pc-q35-*" -# ] +# { +# "description": "SeaBIOS", +# "interface-types": [ +# "bios" +# ], +# "mapping": { +# "device": "memory", +# "filename": "/usr/share/seabios/bios-256k.bin" # }, -# { -# "architecture": "x86_64", -# "machines": [ -# "pc-i440fx-*", -# "pc-q35-*" -# ] -# } -# ], -# "features": [ -# "acpi-s3", -# "acpi-s4" -# ], -# "tags": [ -# "CONFIG_BOOTSPLASH=n", -# "CONFIG_ROM_SIZE=256", -# "CONFIG_USE_SMM=n" -# ] -# } -# -# { -# "description": "OVMF with SB+SMM, empty varstore", -# "interface-types": [ -# "uefi" -# ], -# "mapping": { -# "device": "flash", -# "executable": { -# "filename": "/usr/share/OVMF/OVMF_CODE.secboot.fd", -# "format": "raw" +# "targets": [ +# { +# "architecture": "i386", +# "machines": [ +# "pc-i440fx-*", +# "pc-q35-*" +# ] +# }, +# { +# "architecture": "x86_64", +# "machines": [ +# "pc-i440fx-*", +# "pc-q35-*" +# ] +# } +# ], +# "features": [ +# "acpi-s3", +# "acpi-s4" +# ], +# "tags": [ +# "CONFIG_BOOTSPLASH=n", +# "CONFIG_ROM_SIZE=256", +# "CONFIG_USE_SMM=n" +# ] +# } +# +# { +# "description": "OVMF with SB+SMM, empty varstore", +# "interface-types": [ +# "uefi" +# ], +# "mapping": { +# "device": "flash", +# "executable": { +# "filename": "/usr/share/OVMF/OVMF_CODE.secboot.fd", +# "format": "raw" +# }, +# "nvram-template": { +# "filename": "/usr/share/OVMF/OVMF_VARS.fd", +# "format": "raw" +# } # }, -# "nvram-template": { -# "filename": "/usr/share/OVMF/OVMF_VARS.fd", -# "format": "raw" -# } -# }, -# "targets": [ -# { -# "architecture": "x86_64", -# "machines": [ -# "pc-q35-*" -# ] -# } -# ], -# "features": [ -# "acpi-s3", -# "amd-sev", -# "requires-smm", -# "secure-boot", -# "verbose-dynamic" -# ], -# "tags": [ -# "-a IA32", -# "-a X64", -# "-p OvmfPkg/OvmfPkgIa32X64.dsc", -# "-t GCC48", -# "-b DEBUG", -# "-D SMM_REQUIRE", -# "-D SECURE_BOOT_ENABLE", -# "-D FD_SIZE_4MB" -# ] -# } -# -# { -# "description": "OVMF with SB+SMM, SB enabled, MS certs enrolled", -# "interface-types": [ -# "uefi" -# ], -# "mapping": { -# "device": "flash", -# "executable": { -# "filename": "/usr/share/OVMF/OVMF_CODE.secboot.fd", -# "format": "raw" +# "targets": [ +# { +# "architecture": "x86_64", +# "machines": [ +# "pc-q35-*" +# ] +# } +# ], +# "features": [ +# "acpi-s3", +# "amd-sev", +# "requires-smm", +# "secure-boot", +# "verbose-dynamic" +# ], +# "tags": [ +# "-a IA32", +# "-a X64", +# "-p OvmfPkg/OvmfPkgIa32X64.dsc", +# "-t GCC48", +# "-b DEBUG", +# "-D SMM_REQUIRE", +# "-D SECURE_BOOT_ENABLE", +# "-D FD_SIZE_4MB" +# ] +# } +# +# { +# "description": "OVMF with SB+SMM, SB enabled, MS certs enrolled", +# "interface-types": [ +# "uefi" +# ], +# "mapping": { +# "device": "flash", +# "executable": { +# "filename": "/usr/share/OVMF/OVMF_CODE.secboot.fd", +# "format": "raw" +# }, +# "nvram-template": { +# "filename": "/usr/share/OVMF/OVMF_VARS.secboot.fd", +# "format": "raw" +# } # }, -# "nvram-template": { -# "filename": "/usr/share/OVMF/OVMF_VARS.secboot.fd", -# "format": "raw" -# } -# }, -# "targets": [ -# { -# "architecture": "x86_64", -# "machines": [ -# "pc-q35-*" -# ] -# } -# ], -# "features": [ -# "acpi-s3", -# "amd-sev", -# "enrolled-keys", -# "requires-smm", -# "secure-boot", -# "verbose-dynamic" -# ], -# "tags": [ -# "-a IA32", -# "-a X64", -# "-p OvmfPkg/OvmfPkgIa32X64.dsc", -# 
"-t GCC48", -# "-b DEBUG", -# "-D SMM_REQUIRE", -# "-D SECURE_BOOT_ENABLE", -# "-D FD_SIZE_4MB" -# ] -# } -# -# { -# "description": "OVMF with SEV-ES support", -# "interface-types": [ -# "uefi" -# ], -# "mapping": { -# "device": "flash", -# "executable": { -# "filename": "/usr/share/OVMF/OVMF_CODE.fd", -# "format": "raw" +# "targets": [ +# { +# "architecture": "x86_64", +# "machines": [ +# "pc-q35-*" +# ] +# } +# ], +# "features": [ +# "acpi-s3", +# "amd-sev", +# "enrolled-keys", +# "requires-smm", +# "secure-boot", +# "verbose-dynamic" +# ], +# "tags": [ +# "-a IA32", +# "-a X64", +# "-p OvmfPkg/OvmfPkgIa32X64.dsc", +# "-t GCC48", +# "-b DEBUG", +# "-D SMM_REQUIRE", +# "-D SECURE_BOOT_ENABLE", +# "-D FD_SIZE_4MB" +# ] +# } +# +# { +# "description": "OVMF with SEV-ES support", +# "interface-types": [ +# "uefi" +# ], +# "mapping": { +# "device": "flash", +# "executable": { +# "filename": "/usr/share/OVMF/OVMF_CODE.fd", +# "format": "raw" +# }, +# "nvram-template": { +# "filename": "/usr/share/OVMF/OVMF_VARS.fd", +# "format": "raw" +# } # }, -# "nvram-template": { -# "filename": "/usr/share/OVMF/OVMF_VARS.fd", -# "format": "raw" -# } -# }, -# "targets": [ -# { -# "architecture": "x86_64", -# "machines": [ -# "pc-q35-*" -# ] -# } -# ], -# "features": [ -# "acpi-s3", -# "amd-sev", -# "amd-sev-es", -# "verbose-dynamic" -# ], -# "tags": [ -# "-a X64", -# "-p OvmfPkg/OvmfPkgX64.dsc", -# "-t GCC48", -# "-b DEBUG", -# "-D FD_SIZE_4MB" -# ] -# } -# -# { -# "description": "UEFI firmware for ARM64 virtual machines", -# "interface-types": [ -# "uefi" -# ], -# "mapping": { -# "device": "flash", -# "executable": { -# "filename": "/usr/share/AAVMF/AAVMF_CODE.fd", -# "format": "raw" +# "targets": [ +# { +# "architecture": "x86_64", +# "machines": [ +# "pc-q35-*" +# ] +# } +# ], +# "features": [ +# "acpi-s3", +# "amd-sev", +# "amd-sev-es", +# "verbose-dynamic" +# ], +# "tags": [ +# "-a X64", +# "-p OvmfPkg/OvmfPkgX64.dsc", +# "-t GCC48", +# "-b DEBUG", +# "-D FD_SIZE_4MB" +# ] +# } +# +# { +# "description": "UEFI firmware for ARM64 virtual machines", +# "interface-types": [ +# "uefi" +# ], +# "mapping": { +# "device": "flash", +# "executable": { +# "filename": "/usr/share/AAVMF/AAVMF_CODE.fd", +# "format": "raw" +# }, +# "nvram-template": { +# "filename": "/usr/share/AAVMF/AAVMF_VARS.fd", +# "format": "raw" +# } # }, -# "nvram-template": { -# "filename": "/usr/share/AAVMF/AAVMF_VARS.fd", -# "format": "raw" -# } -# }, -# "targets": [ -# { -# "architecture": "aarch64", -# "machines": [ -# "virt-*" -# ] -# } -# ], -# "features": [ -# -# ], -# "tags": [ -# "-a AARCH64", -# "-p ArmVirtPkg/ArmVirtQemu.dsc", -# "-t GCC48", -# "-b DEBUG", -# "-D DEBUG_PRINT_ERROR_LEVEL=0x80000000" -# ] -# } +# "targets": [ +# { +# "architecture": "aarch64", +# "machines": [ +# "virt-*" +# ] +# } +# ], +# "features": [ +# +# ], +# "tags": [ +# "-a AARCH64", +# "-p ArmVirtPkg/ArmVirtQemu.dsc", +# "-t GCC48", +# "-b DEBUG", +# "-D DEBUG_PRINT_ERROR_LEVEL=0x80000000" +# ] +# } ## { 'struct' : 'Firmware', 'data' : { 'description' : 'str', diff --git a/docs/interop/prl-xml.txt b/docs/interop/prl-xml.txt index 7031f8752cc..cf9b3fba265 100644 --- a/docs/interop/prl-xml.txt +++ b/docs/interop/prl-xml.txt @@ -122,7 +122,7 @@ Each Image element has following child elements: * Type - image type of the element. It can be: "Plain" for raw files. "Compressed" for expanding disks. - * File - path to image file. Path can be relative to DiskDecriptor.xml or + * File - path to image file. Path can be relative to DiskDescriptor.xml or absolute. 
== Snapshots element == diff --git a/docs/interop/qemu-ga.rst b/docs/interop/qemu-ga.rst index 461c5a35ee1..72fb75a6f55 100644 --- a/docs/interop/qemu-ga.rst +++ b/docs/interop/qemu-ga.rst @@ -81,13 +81,13 @@ Options .. option:: -b, --block-rpcs=LIST - Comma-separated list of RPCs to disable (no spaces, use ``help`` to - list available RPCs). + Comma-separated list of RPCs to disable (no spaces, use ``--block-rpcs=help`` + to list available RPCs). .. option:: -a, --allow-rpcs=LIST - Comma-separated list of RPCs to enable (no spaces, use ``help`` to - list available RPCs). + Comma-separated list of RPCs to enable (no spaces, use ``--allow-rpcs=help`` + to list available RPCs). .. option:: -D, --dump-conf diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst index ad6e142f233..d8419fd2f17 100644 --- a/docs/interop/vhost-user.rst +++ b/docs/interop/vhost-user.rst @@ -989,7 +989,7 @@ When reconnecting: #. If ``d.flags`` is not equal to the calculated flags value (means back-end has submitted the buffer to guest driver before crash, so - it has to commit the in-progres update), set ``old_free_head``, + it has to commit the in-progress update), set ``old_free_head``, ``old_used_idx``, ``old_used_wrap_counter`` to ``free_head``, ``used_idx``, ``used_wrap_counter`` @@ -1839,7 +1839,9 @@ is sent by the front-end. When the ``VHOST_USER_PROTOCOL_F_SHARED_OBJECT`` protocol feature has been successfully negotiated, this message can be submitted by the backend to remove themselves from to the virtio-dmabuf shared - table API. The shared table will remove the back-end device associated with + table API. Only the back-end owning the entry (i.e., the one that first added + it) will have permission to remove it. Otherwise, the message is ignored. + The shared table will remove the back-end device associated with the UUID. If ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` is negotiated, and the back-end sets the ``VHOST_USER_NEED_REPLY`` flag, the front-end must respond with zero when operation is successfully completed, or non-zero otherwise. diff --git a/docs/specs/fsi.rst b/docs/specs/fsi.rst new file mode 100644 index 00000000000..af878225315 --- /dev/null +++ b/docs/specs/fsi.rst @@ -0,0 +1,122 @@ +====================================== +IBM's Flexible Service Interface (FSI) +====================================== + +The QEMU FSI emulation implements hardware interfaces between ASPEED SOC, FSI +master/slave and the end engine. + +FSI is a point-to-point two wire interface which is capable of supporting +distances of up to 4 meters. FSI interfaces have been used successfully for +many years in IBM servers to attach IBM Flexible Support Processors(FSP) to +CPUs and IBM ASICs. + +FSI allows a service processor access to the internal buses of a host POWER +processor to perform configuration or debugging. FSI has long existed in POWER +processes and so comes with some baggage, including how it has been integrated +into the ASPEED SoC. + +Working backwards from the POWER processor, the fundamental pieces of interest +for the implementation are: (see the `FSI specification`_ for more details) + +1. The Common FRU Access Macro (CFAM), an address space containing various + "engines" that drive accesses on buses internal and external to the POWER + chip. Examples include the SBEFIFO and I2C masters. The engines hang off of + an internal Local Bus (LBUS) which is described by the CFAM configuration + block. + +2. 
The FSI slave: The slave is the terminal point of the FSI bus for FSI + symbols addressed to it. Slaves can be cascaded off of one another. The + slave's configuration registers appear in address space of the CFAM to + which it is attached. + +3. The FSI master: A controller in the platform service processor (e.g. BMC) + driving CFAM engine accesses into the POWER chip. At the hardware level + FSI is a bit-based protocol supporting synchronous and DMA-driven accesses + of engines in a CFAM. + +4. The On-Chip Peripheral Bus (OPB): A low-speed bus typically found in POWER + processors. This now makes an appearance in the ASPEED SoC due to tight + integration of the FSI master IP with the OPB, mainly the existence of an + MMIO-mapping of the CFAM address straight onto a sub-region of the OPB + address space. + +5. An APB-to-OPB bridge enabling access to the OPB from the ARM core in the + AST2600. Hardware limitations prevent the OPB from being directly mapped + into APB, so all accesses are indirect through the bridge. + +The LBUS is modelled to maintain the qdev bus hierarchy and to take advantages +of the object model to automatically generate the CFAM configuration block. +The configuration block presents engines in the order they are attached to the +CFAM's LBUS. Engine implementations should subclass the LBusDevice and set the +'config' member of LBusDeviceClass to match the engine's type. + +CFAM designs offer a lot of flexibility, for instance it is possible for a +CFAM to be simultaneously driven from multiple FSI links. The modeling is not +so complete; it's assumed that each CFAM is attached to a single FSI slave (as +a consequence the CFAM subclasses the FSI slave). + +As for FSI, its symbols and wire-protocol are not modelled at all. This is not +necessary to get FSI off the ground thanks to the mapping of the CFAM address +space onto the OPB address space - the models follow this directly and map the +CFAM memory region into the OPB's memory region. + +The following commands start the ``rainier-bmc`` machine with built-in FSI +model. There are no model specific arguments. Please check this document to +learn more about Aspeed ``rainier-bmc`` machine: (:doc:`../../system/arm/aspeed`) + +.. code-block:: console + + qemu-system-arm -M rainier-bmc -nographic \ + -kernel fitImage-linux.bin \ + -dtb aspeed-bmc-ibm-rainier.dtb \ + -initrd obmc-phosphor-initramfs.rootfs.cpio.xz \ + -drive file=obmc-phosphor-image.rootfs.wic.qcow2,if=sd,index=2 \ + -append "rootwait console=ttyS4,115200n8 root=PARTLABEL=rofs-a" + +The implementation appears as following in the qemu device tree: + +.. code-block:: console + + (qemu) info qtree + bus: main-system-bus + type System + ... + dev: aspeed.apb2opb, id "" + gpio-out "sysbus-irq" 1 + mmio 000000001e79b000/0000000000001000 + bus: opb.1 + type opb + dev: fsi.master, id "" + bus: fsi.bus.1 + type fsi.bus + dev: cfam.config, id "" + dev: cfam, id "" + bus: lbus.1 + type lbus + dev: scratchpad, id "" + address = 0 (0x0) + bus: opb.0 + type opb + dev: fsi.master, id "" + bus: fsi.bus.0 + type fsi.bus + dev: cfam.config, id "" + dev: cfam, id "" + bus: lbus.0 + type lbus + dev: scratchpad, id "" + address = 0 (0x0) + +pdbg is a simple application to allow debugging of the host POWER processors +from the BMC. (see the `pdbg source repository`_ for more details) + +.. code-block:: console + + root@p10bmc:~# pdbg -a getcfam 0x0 + p0: 0x0 = 0xc0022d15 + +.. _FSI specification: + https://openpowerfoundation.org/specifications/fsi/ + +.. 
_pdbg source repository: + https://github.com/open-power/pdbg diff --git a/docs/specs/index.rst b/docs/specs/index.rst index b3f482b0aa5..1484e3e7607 100644 --- a/docs/specs/index.rst +++ b/docs/specs/index.rst @@ -24,6 +24,7 @@ guest hardware that is specific to QEMU. acpi_erst sev-guest-firmware fw_cfg + fsi vmw_pvscsi-spec edu ivshmem-spec diff --git a/docs/specs/pvpanic.rst b/docs/specs/pvpanic.rst index f894bc19555..b0f27860ec3 100644 --- a/docs/specs/pvpanic.rst +++ b/docs/specs/pvpanic.rst @@ -29,6 +29,8 @@ bit 1 a guest panic has happened and will be handled by the guest; the host should record it or report it, but should not affect the execution of the guest. +bit 2 (to be implemented) + a regular guest shutdown has happened and should be processed by the host PCI Interface ------------- diff --git a/docs/specs/tpm.rst b/docs/specs/tpm.rst index c96776a369d..68cb8cf7e65 100644 --- a/docs/specs/tpm.rst +++ b/docs/specs/tpm.rst @@ -343,9 +343,9 @@ In case an Arm virt machine is emulated, use the following command line: .. code-block:: console - qemu-system-aarch64 -machine virt,gic-version=3,accel=kvm \ + qemu-system-aarch64 -machine virt,gic-version=3,acpi=off \ -cpu host -m 4G \ - -nographic -no-acpi \ + -nographic -accel kvm \ -chardev socket,id=chrtpm,path=/tmp/mytpm1/swtpm-sock \ -tpmdev emulator,id=tpm0,chardev=chrtpm \ -device tpm-tis-device,tpmdev=tpm0 \ diff --git a/docs/sphinx/hxtool.py b/docs/sphinx/hxtool.py index 9f6b9d87dcc..3729084a36c 100644 --- a/docs/sphinx/hxtool.py +++ b/docs/sphinx/hxtool.py @@ -78,6 +78,14 @@ def parse_archheading(file, lnum, line): serror(file, lnum, "Invalid ARCHHEADING line") return match.group(1) +def parse_srst(file, lnum, line): + """Handle an SRST directive""" + # The input should be either "SRST", or "SRST(label)". + match = re.match(r'SRST(\((.*?)\))?', line) + if match is None: + serror(file, lnum, "Invalid SRST line") + return match.group(2) + class HxtoolDocDirective(Directive): """Extract rST fragments from the specified .hx file""" required_argument = 1 @@ -113,6 +121,14 @@ def run(self): serror(hxfile, lnum, 'expected ERST, found SRST') else: state = HxState.RST + label = parse_srst(hxfile, lnum, line) + if label: + rstlist.append("", hxfile, lnum - 1) + # Build label as _DOCNAME-HXNAME-LABEL + hx = os.path.splitext(os.path.basename(hxfile))[0] + refline = ".. 
_" + env.docname + "-" + hx + \ + "-" + label + ":" + rstlist.append(refline, hxfile, lnum - 1) elif directive == 'ERST': if state == HxState.CTEXT: serror(hxfile, lnum, 'expected SRST, found ERST') diff --git a/docs/sphinx/qapidoc.py b/docs/sphinx/qapidoc.py index 658c288f8fe..8d428c64b0c 100644 --- a/docs/sphinx/qapidoc.py +++ b/docs/sphinx/qapidoc.py @@ -168,12 +168,6 @@ def _nodes_for_members(self, doc, what, base=None, variants=None): # TODO drop fallbacks when undocumented members are outlawed if section.text: defn = section.text - elif (variants and variants.tag_member == section.member - and not section.member.type.doc_type()): - values = section.member.type.member_names() - defn = [nodes.Text('One of ')] - defn.extend(intersperse([nodes.literal('', v) for v in values], - nodes.Text(', '))) else: defn = [nodes.Text('Not documented')] @@ -186,17 +180,13 @@ def _nodes_for_members(self, doc, what, base=None, variants=None): if variants: for v in variants.variants: - if v.type.is_implicit(): - assert not v.type.base and not v.type.variants - for m in v.type.local_members: - term = self._nodes_for_one_member(m) - term.extend(self._nodes_for_variant_when(variants, v)) - dlnode += self._make_dlitem(term, None) - else: - term = [nodes.Text('The members of '), - nodes.literal('', v.type.doc_type())] - term.extend(self._nodes_for_variant_when(variants, v)) - dlnode += self._make_dlitem(term, None) + if v.type.name == 'q_empty': + continue + assert not v.type.is_implicit() + term = [nodes.Text('The members of '), + nodes.literal('', v.type.doc_type())] + term.extend(self._nodes_for_variant_when(variants, v)) + dlnode += self._make_dlitem(term, None) if not dlnode.children: return [] @@ -249,8 +239,8 @@ def _nodes_for_features(self, doc): seen_item = False dlnode = nodes.definition_list() for section in doc.features.values(): - dlnode += self._make_dlitem([nodes.literal('', section.name)], - section.text) + dlnode += self._make_dlitem( + [nodes.literal('', section.member.name)], section.text) seen_item = True if not seen_item: @@ -268,11 +258,11 @@ def _nodes_for_sections(self, doc): """Return list of doctree nodes for additional sections""" nodelist = [] for section in doc.sections: - if section.name and section.name == 'TODO': + if section.tag and section.tag == 'TODO': # Hide TODO: sections continue - snode = self._make_section(section.name) - if section.name and section.name.startswith('Example'): + snode = self._make_section(section.tag) + if section.tag and section.tag.startswith('Example'): snode += self._nodes_for_example(section.text) else: self._parse_text_into_node(section.text, snode) diff --git a/docs/system/arm/b-l475e-iot01a.rst b/docs/system/arm/b-l475e-iot01a.rst new file mode 100644 index 00000000000..0afef8e4f45 --- /dev/null +++ b/docs/system/arm/b-l475e-iot01a.rst @@ -0,0 +1,45 @@ +B-L475E-IOT01A IoT Node (``b-l475e-iot01a``) +============================================ + +The B-L475E-IOT01A IoT Node uses the STM32L475VG SoC which is based on +ARM Cortex-M4F core. It is part of STMicroelectronics +:doc:`STM32 boards ` and more specifically the STM32L4 +ultra-low power series. The STM32L4x5 chip runs at up to 80 MHz and +integrates 128 KiB of SRAM and up to 1MiB of Flash. The B-L475E-IOT01A board +namely features 64 Mibit QSPI Flash, BT, WiFi and RF connectivity, +USART, I2C, SPI, CAN and USB OTG, as well as a variety of sensors. 
+
+Supported devices
+"""""""""""""""""
+
+Currently the B-L475E-IOT01A machine only supports the following devices:
+
+- Cortex-M4F based STM32L4x5 SoC
+- STM32L4x5 EXTI (Extended interrupts and events controller)
+- STM32L4x5 SYSCFG (System configuration controller)
+- STM32L4x5 RCC (Reset and clock control)
+- STM32L4x5 GPIOs (General-purpose I/Os)
+
+Missing devices
+"""""""""""""""
+
+The B-L475E-IOT01A does *not* support the following devices:
+
+- Serial ports (UART)
+- Analog to Digital Converter (ADC)
+- SPI controller
+- Timer controller (TIMER)
+
+See the complete list of unimplemented peripheral devices
+in the STM32L4x5 module: ``./hw/arm/stm32l4x5_soc.c``
+
+Boot options
+""""""""""""
+
+The B-L475E-IOT01A machine can be started using the ``-kernel``
+option to load a firmware. Example:
+
+.. code-block:: bash
+
+  $ qemu-system-arm -M b-l475e-iot01a -kernel firmware.bin
+
diff --git a/docs/system/arm/bananapi_m2u.rst b/docs/system/arm/bananapi_m2u.rst
index b09ba5c5486..587b4886553 100644
--- a/docs/system/arm/bananapi_m2u.rst
+++ b/docs/system/arm/bananapi_m2u.rst
@@ -22,7 +22,10 @@ The Banana Pi M2U machine supports the following devices:
  * EMAC ethernet
  * GMAC ethernet
  * Clock Control Unit
+ * SATA
  * TWI (I2C)
+ * USB 2.0
+ * Hardware Watchdog
 
 Limitations
 """""""""""
@@ -31,9 +34,7 @@ Currently, Banana Pi M2U does *not* support the following features:
 
 - Graphical output via HDMI, GPU and/or the Display Engine
 - Audio output
-- Hardware Watchdog
 - Real Time Clock
-- USB 2.0 interfaces
 
 Also see the 'unimplemented' array in the Allwinner R40 SoC module
 for a complete list of unimplemented I/O devices: ``./hw/arm/allwinner-r40.c``
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index 0b604f90059..2a7bbb82dc4 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -28,6 +28,7 @@ the following architecture extensions:
 - FEAT_DotProd (Advanced SIMD dot product instructions)
 - FEAT_DoubleFault (Double Fault Extension)
 - FEAT_E0PD (Preventing EL0 access to halves of address maps)
+- FEAT_ECV (Enhanced Counter Virtualization)
 - FEAT_EPAC (Enhanced pointer authentication)
 - FEAT_ETS (Enhanced Translation Synchronization)
 - FEAT_EVT (Enhanced Virtualization Traps)
@@ -63,6 +64,8 @@ the following architecture extensions:
 - FEAT_MTE (Memory Tagging Extension)
 - FEAT_MTE2 (Memory Tagging Extension)
 - FEAT_MTE3 (MTE Asymmetric Fault Handling)
+- FEAT_NV (Nested Virtualization)
+- FEAT_NV2 (Enhanced nested virtualization support)
 - FEAT_PACIMP (Pointer authentication - IMPLEMENTATION DEFINED algorithm)
 - FEAT_PACQARMA3 (Pointer authentication - QARMA3 algorithm)
 - FEAT_PACQARMA5 (Pointer authentication - QARMA5 algorithm)
diff --git a/docs/system/arm/mps2.rst b/docs/system/arm/mps2.rst
index 8a75beb3a08..a305935cc49 100644
--- a/docs/system/arm/mps2.rst
+++ b/docs/system/arm/mps2.rst
@@ -1,7 +1,7 @@
-Arm MPS2 and MPS3 boards (``mps2-an385``, ``mps2-an386``, ``mps2-an500``, ``mps2-an505``, ``mps2-an511``, ``mps2-an521``, ``mps3-an524``, ``mps3-an547``)
-=========================================================================================================================================================
+Arm MPS2 and MPS3 boards (``mps2-an385``, ``mps2-an386``, ``mps2-an500``, ``mps2-an505``, ``mps2-an511``, ``mps2-an521``, ``mps3-an524``, ``mps3-an536``, ``mps3-an547``)
+=========================================================================================================================================================================
-These board models all use Arm M-profile CPUs. +These board models use Arm M-profile or R-profile CPUs. The Arm MPS2, MPS2+ and MPS3 dev boards are FPGA based (the 2+ has a bigger FPGA but is otherwise the same as the 2; the 3 has a bigger @@ -13,6 +13,8 @@ FPGA image. QEMU models the following FPGA images: +FPGA images using M-profile CPUs: + ``mps2-an385`` Cortex-M3 as documented in Arm Application Note AN385 ``mps2-an386`` @@ -30,6 +32,11 @@ QEMU models the following FPGA images: ``mps3-an547`` Cortex-M55 on an MPS3, as documented in Arm Application Note AN547 +FPGA images using R-profile CPUs: + +``mps3-an536`` + Dual Cortex-R52 on an MPS3, as documented in Arm Application Note AN536 + Differences between QEMU and real hardware: - AN385/AN386 remapping of low 16K of memory to either ZBT SSRAM1 or to @@ -45,6 +52,30 @@ Differences between QEMU and real hardware: flash, but only as simple ROM, so attempting to rewrite the flash from the guest will fail - QEMU does not model the USB controller in MPS3 boards +- AN536 does not support runtime control of CPU reset and halt via + the SCC CFG_REG0 register. +- AN536 does not support enabling or disabling the flash and ATCM + interfaces via the SCC CFG_REG1 register. +- AN536 does not support setting of the initial vector table + base address via the SCC CFG_REG6 and CFG_REG7 register config, + and does not provide a mechanism for specifying these values at + startup, so all guest images must be built to start from TCM + (i.e. to expect the interrupt vector base at 0 from reset). +- AN536 defaults to only creating a single CPU; this is the equivalent + of the way the real FPGA image usually runs with the second Cortex-R52 + held in halt via the initial SCC CFG_REG0 register setting. You can + create the second CPU with ``-smp 2``; both CPUs will then start + execution immediately on startup. + +Note that for the AN536 the first UART is accessible only by +CPU0, and the second UART is accessible only by CPU1. The +first UART accessible shared between both CPUs is the third +UART. Guest software might therefore be built to use either +the first UART or the third UART; if you don't see any output +from the UART you are looking at, try one of the others. +(Even if the AN536 machine is started with a single CPU and so +no "CPU1-only UART", the UART numbering remains the same, +with the third UART being the first of the shared ones.) 
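As a minimal invocation sketch only (it assumes the three UARTs are bound to successive
``-serial`` back-ends in creation order, and ``guest.elf`` is a placeholder for a suitable
TCM-resident image), both Cortex-R52 cores can be started with the shared third UART routed
to the terminal:

.. code-block:: console

  $ qemu-system-arm -M mps3-an536 -smp 2 -kernel guest.elf \
        -serial null -serial null -serial stdio

With the two per-CPU UARTs sent to ``null``, any output on the shared UART appears on stdio.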
Machine-specific options """""""""""""""""""""""" diff --git a/docs/system/arm/palm.rst b/docs/system/arm/palm.rst index 47ff9b36d46..61bc8d34f40 100644 --- a/docs/system/arm/palm.rst +++ b/docs/system/arm/palm.rst @@ -14,7 +14,7 @@ following elements: - On-chip Real Time Clock - TI TSC2102i touchscreen controller / analog-digital converter / - Audio CODEC, connected through MicroWire and |I2S| busses + Audio CODEC, connected through MicroWire and |I2S| buses - GPIO-connected matrix keypad diff --git a/docs/system/arm/raspi.rst b/docs/system/arm/raspi.rst index 922fe375a67..fbec1da6a1e 100644 --- a/docs/system/arm/raspi.rst +++ b/docs/system/arm/raspi.rst @@ -1,5 +1,5 @@ -Raspberry Pi boards (``raspi0``, ``raspi1ap``, ``raspi2b``, ``raspi3ap``, ``raspi3b``) -====================================================================================== +Raspberry Pi boards (``raspi0``, ``raspi1ap``, ``raspi2b``, ``raspi3ap``, ``raspi3b``, ``raspi4b``) +=================================================================================================== QEMU provides models of the following Raspberry Pi boards: @@ -12,12 +12,13 @@ QEMU provides models of the following Raspberry Pi boards: Cortex-A53 (4 cores), 512 MiB of RAM ``raspi3b`` Cortex-A53 (4 cores), 1 GiB of RAM - +``raspi4b`` + Cortex-A72 (4 cores), 2 GiB of RAM Implemented devices ------------------- - * ARM1176JZF-S, Cortex-A7 or Cortex-A53 CPU + * ARM1176JZF-S, Cortex-A7, Cortex-A53 or Cortex-A72 CPU * Interrupt controller * DMA controller * Clock and reset controller (CPRMAN) @@ -33,11 +34,13 @@ Implemented devices * USB2 host controller (DWC2 and MPHI) * MailBox controller (MBOX) * VideoCore firmware (property) - + * Peripheral SPI controller (SPI) + * Broadcom Serial Controller (I2C) Missing devices --------------- - * Peripheral SPI controller (SPI) * Analog to Digital Converter (ADC) * Pulse Width Modulation (PWM) + * PCIE Root Port (raspi4b) + * GENET Ethernet Controller (raspi4b) diff --git a/docs/system/arm/sbsa.rst b/docs/system/arm/sbsa.rst index bca61608ff8..2bf22a1d0b0 100644 --- a/docs/system/arm/sbsa.rst +++ b/docs/system/arm/sbsa.rst @@ -1,12 +1,16 @@ Arm Server Base System Architecture Reference board (``sbsa-ref``) ================================================================== -While the ``virt`` board is a generic board platform that doesn't match -any real hardware the ``sbsa-ref`` board intends to look like real -hardware. The `Server Base System Architecture -`_ defines a -minimum base line of hardware support and importantly how the firmware -reports that to any operating system. +The ``sbsa-ref`` board intends to look like real hardware (while the ``virt`` +board is a generic board platform that doesn't match any real hardware). + +The hardware part is defined by two specifications: + + - `Base System Architecture `__ (BSA) + - `Server Base System Architecture `__ (SBSA) + +The `Arm Base Boot Requirements `__ (BBR) +specification defines how the firmware reports that to any operating system. It is intended to be a machine for developing firmware and testing standards compliance with operating systems. @@ -35,16 +39,29 @@ includes both internal hardware and parts affected by the qemu command line (i.e. CPUs and memory). As a result it must have a firmware specifically built to expect a certain hardware layout (as you would in a real machine). +Note +'''' + +QEMU provides the guest EL3 firmware with minimal information about hardware +platform using minimalistic devicetree. This is not a Linux devicetree. 
It is +not even a firmware devicetree. + +It is information passed from QEMU to describe the information a hardware +platform would have other mechanisms to discover at runtime, that are affected +by the QEMU command line. + +Ultimately this devicetree may be replaced by IPC calls to an emulated SCP. + DeviceTree information '''''''''''''''''''''' -The devicetree provided by the board model to the firmware is not intended -to be a complete compliant DT. It currently reports: +The devicetree reports: - CPUs - memory - platform version - GIC addresses + - NUMA node id for CPUs and memory Platform version '''''''''''''''' @@ -70,4 +87,4 @@ Platform version changes: GIC ITS information is present in devicetree. 0.3 - The USB controller is an XHCI device, not EHCI + The USB controller is an XHCI device, not EHCI. diff --git a/docs/system/arm/stm32.rst b/docs/system/arm/stm32.rst index d7265b763d4..3b640f3ee07 100644 --- a/docs/system/arm/stm32.rst +++ b/docs/system/arm/stm32.rst @@ -16,11 +16,13 @@ based on this chip : - ``netduino2`` Netduino 2 board with STM32F205RFT6 microcontroller -The STM32F4 series is based on ARM Cortex-M4F core. This series is pin-to-pin -compatible with STM32F2 series. The following machines are based on this chip : +The STM32F4 series is based on ARM Cortex-M4F core, as well as the STM32L4 +ultra-low-power series. The STM32F4 series is pin-to-pin compatible with STM32F2 series. +The following machines are based on this ARM Cortex-M4F chip : - ``netduinoplus2`` Netduino Plus 2 board with STM32F405RGT6 microcontroller - ``olimex-stm32-h405`` Olimex STM32 H405 board with STM32F405RGT6 microcontroller +- ``b-l475e-iot01a`` :doc:`B-L475E-IOT01A IoT Node ` board with STM32L475VG microcontroller There are many other STM32 series that are currently not supported by QEMU. diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst index 7c4c80180c6..26fcba00b76 100644 --- a/docs/system/arm/virt.rst +++ b/docs/system/arm/virt.rst @@ -69,6 +69,19 @@ Supported guest CPU types: Note that the default is ``cortex-a15``, so for an AArch64 guest you must specify a CPU type. +Also, please note that passing ``max`` CPU (i.e. ``-cpu max``) won't +enable all the CPU features for a given ``virt`` machine. Where a CPU +architectural feature requires support in both the CPU itself and in the +wider system (e.g. the MTE feature), it may not be enabled by default, +but instead requires a machine option to enable it. + +For example, MTE support must be enabled with ``-machine virt,mte=on``, +as well as by selecting an MTE-capable CPU (e.g., ``max``) with the +``-cpu`` option. + +See the machine-specific options below, or check them for a given machine +by passing the ``help`` suboption, like: ``-machine virt-9.0,help``. + Graphics output is available, but unlike the x86 PC machine types there is no default display device enabled: you should select one from the Display devices section of "-device help". The recommended option @@ -96,7 +109,13 @@ mte highmem Set ``on``/``off`` to enable/disable placing devices and RAM in physical address space above 32 bits. The default is ``on`` for machine types - later than ``virt-2.12``. + later than ``virt-2.12`` when the CPU supports an address space + bigger than 32 bits (i.e. 64-bit CPUs, and 32-bit CPUs with the + Large Physical Address Extension (LPAE) feature). 
If you want to + boot a 32-bit kernel which does not have ``CONFIG_LPAE`` enabled on + a CPU type which implements LPAE, you will need to manually set + this to ``off``; otherwise some devices, such as the PCI controller, + will not be accessible. compact-highmem Set ``on``/``off`` to enable/disable the compact layout for high memory regions. diff --git a/docs/system/arm/xlnx-versal-virt.rst b/docs/system/arm/xlnx-versal-virt.rst index d2d1b266926..0bafc76469d 100644 --- a/docs/system/arm/xlnx-versal-virt.rst +++ b/docs/system/arm/xlnx-versal-virt.rst @@ -194,7 +194,7 @@ To use a different index value, N, from default of 0, add: .. code-block:: bash - -global xlnx,bbram-ctrl.drive-index=N + -global driver=xlnx.bbram-ctrl,property=drive-index,value=N eFUSE File Backend """""""""""""""""" @@ -212,7 +212,7 @@ To use a different index value, N, from default of 1, add: .. code-block:: bash - -global xlnx,efuse.drive-index=N + -global xlnx-efuse.drive-index=N .. warning:: In actual physical Versal, BBRAM and eFUSE contain sensitive data. diff --git a/docs/system/arm/xscale.rst b/docs/system/arm/xscale.rst index d2d5949e102..e239136c3c7 100644 --- a/docs/system/arm/xscale.rst +++ b/docs/system/arm/xscale.rst @@ -32,4 +32,4 @@ The clamshell PDA models emulation includes the following peripherals: - Three on-chip UARTs -- WM8750 audio CODEC on |I2C| and |I2S| busses +- WM8750 audio CODEC on |I2C| and |I2S| buses diff --git a/docs/system/cpu-models-x86-abi.csv b/docs/system/cpu-models-x86-abi.csv index f3f3b60be10..38b9bae3102 100644 --- a/docs/system/cpu-models-x86-abi.csv +++ b/docs/system/cpu-models-x86-abi.csv @@ -8,27 +8,37 @@ Cascadelake-Server-v1,✅,✅,✅,✅ Cascadelake-Server-v2,✅,✅,✅,✅ Cascadelake-Server-v3,✅,✅,✅,✅ Cascadelake-Server-v4,✅,✅,✅,✅ +Cascadelake-Server-v5,✅,✅,✅,✅ Conroe-v1,✅,,, Cooperlake-v1,✅,✅,✅,✅ +Cooperlake-v2,✅,✅,✅,✅ Denverton-v1,✅,✅,, Denverton-v2,✅,✅,, +Denverton-v3,✅,✅,, Dhyana-v1,✅,✅,✅, +Dhyana-v2,✅,✅,✅, +EPYC-Genoa-v1,✅,✅,✅,✅ EPYC-Milan-v1,✅,✅,✅, +EPYC-Milan-v2,✅,✅,✅, EPYC-Rome-v1,✅,✅,✅, EPYC-Rome-v2,✅,✅,✅, +EPYC-Rome-v3,✅,✅,✅, +EPYC-Rome-v4,✅,✅,✅, EPYC-v1,✅,✅,✅, EPYC-v2,✅,✅,✅, EPYC-v3,✅,✅,✅, +EPYC-v4,✅,✅,✅, +GraniteRapids-v1,✅,✅,✅,✅ Haswell-v1,✅,✅,✅, Haswell-v2,✅,✅,✅, Haswell-v3,✅,✅,✅, Haswell-v4,✅,✅,✅, -Icelake-Client-v1,✅,✅,✅, -Icelake-Client-v2,✅,✅,✅, Icelake-Server-v1,✅,✅,✅,✅ Icelake-Server-v2,✅,✅,✅,✅ Icelake-Server-v3,✅,✅,✅,✅ Icelake-Server-v4,✅,✅,✅,✅ +Icelake-Server-v5,✅,✅,✅,✅ +Icelake-Server-v6,✅,✅,✅,✅ IvyBridge-v1,✅,✅,, IvyBridge-v2,✅,✅,, KnightsMill-v1,✅,✅,✅, @@ -42,15 +52,21 @@ Opteron_G5-v1,✅,✅,, Penryn-v1,✅,,, SandyBridge-v1,✅,✅,, SandyBridge-v2,✅,✅,, +SapphireRapids-v1,✅,✅,✅,✅ +SapphireRapids-v2,✅,✅,✅,✅ Skylake-Client-v1,✅,✅,✅, Skylake-Client-v2,✅,✅,✅, Skylake-Client-v3,✅,✅,✅, +Skylake-Client-v4,✅,✅,✅, Skylake-Server-v1,✅,✅,✅,✅ Skylake-Server-v2,✅,✅,✅,✅ Skylake-Server-v3,✅,✅,✅,✅ Skylake-Server-v4,✅,✅,✅,✅ +Skylake-Server-v5,✅,✅,✅,✅ Snowridge-v1,✅,✅,, Snowridge-v2,✅,✅,, +Snowridge-v3,✅,✅,, +Snowridge-v4,✅,✅,, Westmere-v1,✅,✅,, Westmere-v2,✅,✅,, athlon-v1,,,, diff --git a/docs/system/cpu-models-x86.rst.inc b/docs/system/cpu-models-x86.rst.inc index 7f6368f999b..ba27b5683fb 100644 --- a/docs/system/cpu-models-x86.rst.inc +++ b/docs/system/cpu-models-x86.rst.inc @@ -58,7 +58,7 @@ depending on the machine type is in use. .. 
csv-table:: x86-64 ABI compatibility levels :file: cpu-models-x86-abi.csv :widths: 40,15,15,15,15 - :header-rows: 2 + :header-rows: 1 Preferred CPU models for Intel x86 hosts diff --git a/docs/system/device-emulation.rst b/docs/system/device-emulation.rst index d1f3277cb02..f19777411cd 100644 --- a/docs/system/device-emulation.rst +++ b/docs/system/device-emulation.rst @@ -94,6 +94,7 @@ Emulated Devices devices/virtio-gpu.rst devices/virtio-pmem.rst devices/virtio-snd.rst + devices/vhost-user-input.rst devices/vhost-user-rng.rst devices/canokey.rst devices/usb-u2f.rst diff --git a/docs/system/devices/can.rst b/docs/system/devices/can.rst index 0af3d9912a6..09121836fdb 100644 --- a/docs/system/devices/can.rst +++ b/docs/system/devices/can.rst @@ -1,12 +1,12 @@ CAN Bus Emulation Support ========================= The CAN bus emulation provides mechanism to connect multiple -emulated CAN controller chips together by one or multiple CAN busses -(the controller device "canbus" parameter). The individual busses +emulated CAN controller chips together by one or multiple CAN buses +(the controller device "canbus" parameter). The individual buses can be connected to host system CAN API (at this time only Linux SocketCAN is supported). -The concept of busses is generic and different CAN controllers +The concept of buses is generic and different CAN controllers can be implemented. The initial submission implemented SJA1000 controller which diff --git a/docs/system/devices/canokey.rst b/docs/system/devices/canokey.rst index cfa6186e483..7f3664963f3 100644 --- a/docs/system/devices/canokey.rst +++ b/docs/system/devices/canokey.rst @@ -14,7 +14,7 @@ CanoKey [1]_ is an open-source secure key with supports of All these platform-independent features are in canokey-core [3]_. For different platforms, CanoKey has different implementations, -including both hardware implementions and virtual cards: +including both hardware implementations and virtual cards: * CanoKey STM32 [4]_ * CanoKey Pigeon [5]_ diff --git a/docs/system/devices/cxl.rst b/docs/system/devices/cxl.rst index 6ab5f724737..10a0e9bc9ff 100644 --- a/docs/system/devices/cxl.rst +++ b/docs/system/devices/cxl.rst @@ -411,5 +411,4 @@ References - Consortium website for specifications etc: http://www.computeexpresslink.org - - Compute Express link Revision 2 specification, October 2020 - - CEDT CFMWS & QTG _DSM ECN May 2021 + - Compute Express Link (CXL) Specification, Revision 3.1, August 2023 diff --git a/docs/system/devices/nvme.rst b/docs/system/devices/nvme.rst index 4ea957baed1..d2b1ca96455 100644 --- a/docs/system/devices/nvme.rst +++ b/docs/system/devices/nvme.rst @@ -81,6 +81,13 @@ There are a number of parameters available: Set the UUID of the namespace. This will be reported as a "Namespace UUID" descriptor in the Namespace Identification Descriptor List. +``nguid`` + Set the NGUID of the namespace. This will be reported as a "Namespace Globally + Unique Identifier" descriptor in the Namespace Identification Descriptor List. + It is specified as a string of hexadecimal digits containing exactly 16 bytes + or "auto" for a random value. An optional '-' separator could be used to group + bytes. If not specified the NGUID will remain all zeros. + ``eui64`` Set the EUI-64 of the namespace. This will be reported as a "IEEE Extended Unique Identifier" descriptor in the Namespace Identification Descriptor List. 
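As a hedged illustration of the ``nguid`` namespace parameter described above (the drive id,
image file and NGUID value are placeholders; the namespace is attached to a controller the
same way as in the earlier examples in this file):

.. code-block:: console

   -drive file=nvm.img,if=none,id=nvm-1
   -device nvme-ns,drive=nvm-1,nguid=00112233-4455-6677-8899-aabbccddeeff

Passing ``nguid=auto`` instead requests a random value; leaving the option out keeps the
NGUID all zeros.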
diff --git a/docs/system/devices/vhost-user-input.rst b/docs/system/devices/vhost-user-input.rst
new file mode 100644
index 00000000000..118eb78101c
--- /dev/null
+++ b/docs/system/devices/vhost-user-input.rst
@@ -0,0 +1,45 @@
+.. _vhost_user_input:
+
+QEMU vhost-user-input - Input emulation
+=======================================
+
+This document describes the setup and usage of the Virtio input device.
+The Virtio input device is a paravirtualized device for input events.
+
+Description
+-----------
+
+The vhost-user-input device implementation is designed to work with a daemon
+that polls input devices and passes the input events to the guest.
+
+QEMU provides a backend implementation in contrib/vhost-user-input.
+
+Linux kernel support
+--------------------
+
+Virtio input requires a guest Linux kernel built with the
+``CONFIG_VIRTIO_INPUT`` option.
+
+Examples
+--------
+
+The backend daemon should be started first:
+
+::
+
+  host# vhost-user-input --socket-path=input.sock \
+      --evdev-path=/dev/input/event17
+
+The QEMU invocation needs to create a chardev socket to communicate with the
+backend daemon and access the VirtIO queues with the guest over the
+:ref:`shared memory <shared_memory_object>`.
+
+::
+
+  host# qemu-system \
+      -chardev socket,path=/tmp/input.sock,id=mouse0 \
+      -device vhost-user-input-pci,chardev=mouse0 \
+      -m 4096 \
+      -object memory-backend-file,id=mem,size=4G,mem-path=/dev/shm,share=on \
+      -numa node,memdev=mem \
+      ...
diff --git a/docs/system/devices/vhost-user-rng.rst b/docs/system/devices/vhost-user-rng.rst
index a145d4105c1..ead14053264 100644
--- a/docs/system/devices/vhost-user-rng.rst
+++ b/docs/system/devices/vhost-user-rng.rst
@@ -1,3 +1,5 @@
+.. _vhost_user_rng:
+
 QEMU vhost-user-rng - RNG emulation
 ===================================
 
diff --git a/docs/system/devices/vhost-user.rst b/docs/system/devices/vhost-user.rst
index a80e95a48ae..9b2da106cec 100644
--- a/docs/system/devices/vhost-user.rst
+++ b/docs/system/devices/vhost-user.rst
@@ -8,13 +8,81 @@ outside of QEMU itself. To do this there are a number of things
 required.
 
 vhost-user device
-===================
+=================
 
 These are simple stub devices that ensure the VirtIO device is visible
 to the guest. The code is mostly boilerplate although each device has
 a ``chardev`` option which specifies the ID of the ``--chardev``
 device that connects via a socket to the vhost-user *daemon*.
 
+Each device will have a virtio-mmio and virtio-pci variant. See your
+platform details for what sort of virtio bus to use.
+
+..
list-table:: vhost-user devices + :widths: 20 20 60 + :header-rows: 1 + + * - Device + - Type + - Notes + * - vhost-user-blk + - Block storage + - See contrib/vhost-user-blk + * - vhost-user-fs + - File based storage driver + - See https://gitlab.com/virtio-fs/virtiofsd + * - vhost-user-gpio + - Proxy gpio pins to host + - See https://github.com/rust-vmm/vhost-device + * - vhost-user-gpu + - GPU driver + - See contrib/vhost-user-gpu + * - vhost-user-i2c + - Proxy i2c devices to host + - See https://github.com/rust-vmm/vhost-device + * - vhost-user-input + - Generic input driver + - :ref:`vhost_user_input` + * - vhost-user-rng + - Entropy driver + - :ref:`vhost_user_rng` + * - vhost-user-scmi + - System Control and Management Interface + - See https://github.com/rust-vmm/vhost-device + * - vhost-user-snd + - Audio device + - See https://github.com/rust-vmm/vhost-device/staging + * - vhost-user-scsi + - SCSI based storage + - See contrib/vhost-user-scsi + * - vhost-user-vsock + - Socket based communication + - See https://github.com/rust-vmm/vhost-device + +The referenced *daemons* are not exhaustive, any conforming backend +implementing the device and using the vhost-user protocol should work. + +vhost-user-device +^^^^^^^^^^^^^^^^^ + +The vhost-user-device is a generic development device intended for +expert use while developing new backends. The user needs to specify +all the required parameters including: + + - Device ``virtio-id`` + - The ``num_vqs`` it needs and their ``vq_size`` + - The ``config_size`` if needed + +.. note:: + To prevent user confusion you cannot currently instantiate + vhost-user-device without first patching out:: + + /* Reason: stop inexperienced users confusing themselves */ + dc->user_creatable = false; + + in ``vhost-user-device.c`` and ``vhost-user-device-pci.c`` file and + rebuilding. + vhost-user daemon ================= @@ -23,6 +91,8 @@ following the :ref:`vhost_user_proto`. There are a number of daemons that can be built when enabled by the project although any daemon that meets the specification for a given device can be used. +.. _shared_memory_object: + Shared memory object ==================== diff --git a/docs/system/i386/sgx.rst b/docs/system/i386/sgx.rst index 0f0a73f7587..ab58b293923 100644 --- a/docs/system/i386/sgx.rst +++ b/docs/system/i386/sgx.rst @@ -6,7 +6,7 @@ Overview Intel Software Guard eXtensions (SGX) is a set of instructions and mechanisms for memory accesses in order to provide security accesses for sensitive -applications and data. SGX allows an application to use it's pariticular +applications and data. SGX allows an application to use its particular address space as an *enclave*, which is a protected area provides confidentiality and integrity even in the presence of privileged malware. Accesses to the enclave memory area from any software not resident in the enclave are prevented, diff --git a/docs/system/i386/xen.rst b/docs/system/i386/xen.rst index 81898768baa..46db5f34c1d 100644 --- a/docs/system/i386/xen.rst +++ b/docs/system/i386/xen.rst @@ -132,7 +132,8 @@ The example above provides the guest kernel command line after a separator (" ``--`` ") on the Xen command line, and does not provide the guest kernel with an actual initramfs, which would need to listed as a second multiboot module. For more complicated alternatives, see the command line -documentation for the ``-initrd`` option. +:ref:`documentation ` for the +``-initrd`` option. 
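As a rough, hedged sketch of that second multiboot module (the file names and the Xen and
guest arguments are placeholders; the authoritative syntax is the ``-initrd`` documentation
referenced above), the initramfs is appended to the guest kernel in the comma-separated
module list:

.. code-block:: console

   -kernel xen \
   -append "loglvl=all -- console=hvc0 root=/dev/xvda1" \
   -initrd "vmlinuz,initrd.img"

Here ``vmlinuz`` is the first multiboot module (the guest kernel) and ``initrd.img`` the
second (its initramfs), while the guest kernel command line still follows the ``--``
separator as described above.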
Host OS requirements -------------------- diff --git a/docs/system/introduction.rst b/docs/system/introduction.rst index 51ac132d6cb..746707eb00e 100644 --- a/docs/system/introduction.rst +++ b/docs/system/introduction.rst @@ -1,6 +1,8 @@ Introduction ============ +.. _Accelerators: + Virtualisation Accelerators --------------------------- diff --git a/docs/system/invocation.rst b/docs/system/invocation.rst index 4ba38fc23d2..14b7db1c102 100644 --- a/docs/system/invocation.rst +++ b/docs/system/invocation.rst @@ -10,6 +10,11 @@ Invocation disk_image is a raw hard disk image for IDE hard disk 0. Some targets do not need a disk image. +When dealing with options parameters as arbitrary strings containing +commas, such as in "file=my,file" and "string=a,b", it's necessary to +double the commas. For instance,"-fw_cfg name=z,string=a,,b" will be +parsed as "-fw_cfg name=z,string=a,b". + .. hxtool-doc:: qemu-options.hx Device URL Syntax diff --git a/docs/system/ppc/amigang.rst b/docs/system/ppc/amigang.rst new file mode 100644 index 00000000000..e2c9cb74b7f --- /dev/null +++ b/docs/system/ppc/amigang.rst @@ -0,0 +1,161 @@ +========================================================= +AmigaNG boards (``amigaone``, ``pegasos2``, ``sam460ex``) +========================================================= + +These PowerPC machines emulate boards that are primarily used for +running Amiga like OSes (AmigaOS 4, MorphOS and AROS) but these can +also run Linux which is what this section documents. + +Eyetech AmigaOne/Mai Logic Teron (``amigaone``) +=============================================== + +The ``amigaone`` machine emulates an AmigaOne XE mainboard by Eyetech +which is a rebranded Mai Logic Teron board with modified U-Boot +firmware to support AmigaOS 4. + +Emulated devices +---------------- + + * PowerPC 7457 CPU (can also use ``-cpu g3, 750cxe, 750fx`` or ``750gx``) + * Articia S north bridge + * VIA VT82C686B south bridge + * PCI VGA compatible card (guests may need other card instead) + * PS/2 keyboard and mouse + +Firmware +-------- + +A firmware binary is necessary for the boot process. It is a modified +U-Boot under GPL but its source is lost so it cannot be included in +QEMU. A binary is available at +https://www.hyperion-entertainment.com/index.php/downloads?view=files&parent=28. +The ROM image is in the last 512kB which can be extracted with the +following command: + +.. code-block:: bash + + $ tail -c 524288 updater.image > u-boot-amigaone.bin + +The BIOS emulator in the firmware is unable to run QEMU‘s standard +vgabios so ``VGABIOS-lgpl-latest.bin`` is needed instead which can be +downloaded from http://www.nongnu.org/vgabios. + +Running Linux +------------- + +There are some Linux images under the following link that work on the +``amigaone`` machine: +https://sourceforge.net/projects/amigaone-linux/files/debian-installer/. +To boot the system run: + +.. code-block:: bash + + $ qemu-system-ppc -machine amigaone -bios u-boot-amigaone.bin \ + -cdrom "A1 Linux Net Installer.iso" \ + -device ati-vga,model=rv100,romfile=VGABIOS-lgpl-latest.bin + +From the firmware menu that appears select ``Boot sequence`` → +``Amiga Multiboot Options`` and set ``Boot device 1`` to +``Onboard VIA IDE CDROM``. Then hit escape until the main screen appears again, +hit escape once more and from the exit menu that appears select either +``Save settings and exit`` or ``Use settings for this session only``. 
It may +take a long time loading the kernel into memory but eventually it boots and the +installer becomes visible. The ``ati-vga`` RV100 emulation is not +complete yet so only frame buffer works, DRM and 3D is not available. + +Genesi/bPlan Pegasos II (``pegasos2``) +====================================== + +The ``pegasos2`` machine emulates the Pegasos II sold by Genesi and +designed by bPlan. Its schematics are available at +https://www.powerdeveloper.org/platforms/pegasos/schematics. + +Emulated devices +---------------- + + * PowerPC 7457 CPU (can also use ``-cpu g3`` or ``750cxe``) + * Marvell MV64361 Discovery II north bridge + * VIA VT8231 south bridge + * PCI VGA compatible card (guests may need other card instead) + * PS/2 keyboard and mouse + +Firmware +-------- + +The Pegasos II board has an Open Firmware compliant ROM based on +SmartFirmware with some changes that are not open-sourced therefore +the ROM binary cannot be included in QEMU. An updater was available +from bPlan, it can be found in the `Internet Archive +`_. +The ROM image can be extracted from it with the following command: + +.. code-block:: bash + + $ tail -c +85581 up050404 | head -c 524288 > pegasos2.rom + +Running Linux +------------- + +The PowerPC version of Debian 8.11 supported Pegasos II. The BIOS +emulator in the firmware binary is unable to run QEMU‘s standard +vgabios so it needs to be disabled. To boot the system run: + +.. code-block:: bash + + $ qemu-system-ppc -machine pegasos2 -bios pegasos2.rom \ + -cdrom debian-8.11.0-powerpc-netinst.iso \ + -device VGA,romfile="" -serial stdio + +At the firmware ``ok`` prompt enter ``boot cd install/pegasos``. + +Alternatively, it is possible to boot the kernel directly without +firmware ROM using the QEMU built-in minimal Virtual Open Firmware +(VOF) emulation which is also supported on ``pegasos2``. For this, +extract the kernel ``install/powerpc/vmlinuz-chrp.initrd`` from the CD +image, then run: + +.. code-block:: bash + + $ qemu-system-ppc -machine pegasos2 -serial stdio \ + -kernel vmlinuz-chrp.initrd -append "---" \ + -cdrom debian-8.11.0-powerpc-netinst.iso + +aCube Sam460ex (``sam460ex``) +============================= + +The ``sam460ex`` machine emulates the Sam460ex board by aCube which is +based on the AMCC PowerPC 460EX SoC (that despite its name has a +PPC440 CPU core). + +Firmware +-------- + +The board has a firmware based on an older U-Boot version with +modifications to support booting AmigaOS 4. The firmware ROM is +included with QEMU. + +Emulated devices +---------------- + + * PowerPC 460EX SoC + * M41T80 serial RTC chip + * Silicon Motion SM501 display parts (identical to SM502 on real board) + * Silicon Image SiI3112 2 port SATA controller + * USB keyboard and mouse + +Running Linux +------------- + +The only Linux distro that supported Sam460ex out of box was CruxPPC +2.x. It can be booted by running: + +.. code-block:: bash + + $ qemu-system-ppc -machine sam460ex -serial stdio \ + -drive if=none,id=cd,format=raw,file=crux-ppc-2.7a.iso \ + -device ide-cd,drive=cd,bus=ide.1 + +There are some other kernels and instructions for booting other +distros on aCube's product page at +https://www.acube-systems.biz/index.php?page=hardware&pid=5 +but those are untested. diff --git a/docs/system/qemu-manpage.rst b/docs/system/qemu-manpage.rst index c47a4127582..3ade4ee45b5 100644 --- a/docs/system/qemu-manpage.rst +++ b/docs/system/qemu-manpage.rst @@ -31,6 +31,11 @@ Options disk_image is a raw hard disk image for IDE hard disk 0. 
Some targets do not need a disk image. +When dealing with options parameters as arbitrary strings containing +commas, such as in "file=my,file" and "string=a,b", it's necessary to +double the commas. For instance,"-fw_cfg name=z,string=a,,b" will be +parsed as "-fw_cfg name=z,string=a,b". + .. hxtool-doc:: qemu-options.hx .. include:: keys.rst.inc diff --git a/docs/system/riscv/sifive_u.rst b/docs/system/riscv/sifive_u.rst index 7b166567f97..8f55ae8e313 100644 --- a/docs/system/riscv/sifive_u.rst +++ b/docs/system/riscv/sifive_u.rst @@ -210,7 +210,7 @@ command line options with ``qemu-system-riscv32``. Running U-Boot -------------- -U-Boot mainline v2021.07 release is tested at the time of writing. To build a +U-Boot mainline v2024.01 release is tested at the time of writing. To build a U-Boot mainline bootloader that can be booted by the ``sifive_u`` machine, use the sifive_unleashed_defconfig with similar commands as described above for Linux: @@ -325,15 +325,10 @@ configuration of U-Boot: $ export CROSS_COMPILE=riscv64-linux- $ make sifive_unleashed_defconfig - $ make menuconfig - -then manually select the following configuration: - - * Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB - -and unselect the following configuration: - - * Library routines ---> Allow access to binman information in the device tree + $ ./scripts/config --enable OF_BOARD + $ ./scripts/config --disable BINMAN_FDT + $ ./scripts/config --disable SPL + $ make olddefconfig This changes U-Boot to use the QEMU generated device tree blob, and bypass running the U-Boot SPL stage. @@ -352,17 +347,13 @@ It's possible to create a 32-bit U-Boot S-mode image as well. $ export CROSS_COMPILE=riscv64-linux- $ make sifive_unleashed_defconfig - $ make menuconfig - -then manually update the following configuration in U-Boot: - - * Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB - * RISC-V architecture ---> Base ISA ---> RV32I - * Boot options ---> Boot images ---> Text Base ---> 0x80400000 - -and unselect the following configuration: - - * Library routines ---> Allow access to binman information in the device tree + $ ./scripts/config --disable ARCH_RV64I + $ ./scripts/config --enable ARCH_RV32I + $ ./scripts/config --set-val TEXT_BASE 0x80400000 + $ ./scripts/config --enable OF_BOARD + $ ./scripts/config --disable BINMAN_FDT + $ ./scripts/config --disable SPL + $ make olddefconfig Use the same command line options to boot the 32-bit U-Boot S-mode image: diff --git a/docs/system/riscv/virt.rst b/docs/system/riscv/virt.rst index f5fa7b8b29e..9a06f95a344 100644 --- a/docs/system/riscv/virt.rst +++ b/docs/system/riscv/virt.rst @@ -95,6 +95,11 @@ The following machine-specific options are supported: SiFive CLINT. When not specified, this option is assumed to be "off". This option is restricted to the TCG accelerator. +- acpi=[on|off|auto] + + When this option is "on" (which is the default), ACPI tables are generated and + exposed as firmware tables etc/acpi/rsdp and etc/acpi/tables. + - aia=[none|aplic|aplic-imsic] This option allows selecting interrupt controller defined by the AIA diff --git a/docs/system/s390x/cpu-topology.rst b/docs/system/s390x/cpu-topology.rst index 5133fdc3623..d5b506ee5c9 100644 --- a/docs/system/s390x/cpu-topology.rst +++ b/docs/system/s390x/cpu-topology.rst @@ -25,17 +25,19 @@ monitor polarization changes, see ``docs/devel/s390-cpu-topology.rst``. 
Prerequisites ------------- -To use the CPU topology, you need to run with KVM on a s390x host that -uses the Linux kernel v6.0 or newer (which provide the so-called +To use the CPU topology, you currently need to choose the KVM accelerator. +See :ref:`Accelerators` for more details about accelerators and how to select them. + +The s390x host needs to use a Linux kernel v6.0 or newer (which provides the so-called ``KVM_CAP_S390_CPU_TOPOLOGY`` capability that allows QEMU to signal the CPU topology facility via the so-called STFLE bit 11 to the VM). Enabling CPU topology --------------------- -Currently, CPU topology is only enabled in the host model by default. +Currently, CPU topology is enabled by default only in the "host" CPU model. -Enabling CPU topology in a CPU model is done by setting the CPU flag +Enabling CPU topology in another CPU model is done by setting the CPU flag ``ctop`` to ``on`` as in: .. code-block:: bash @@ -132,7 +134,7 @@ In the following machine we define 8 sockets with 4 cores each. .. code-block:: bash - $ qemu-system-s390x -m 2G \ + $ qemu-system-s390x -accel kvm -m 2G \ -cpu gen16b,ctop=on \ -smp cpus=5,sockets=8,cores=4,maxcpus=32 \ -device host-s390x-cpu,core-id=14 \ @@ -227,7 +229,7 @@ with vertical high entitlement. .. code-block:: bash - $ qemu-system-s390x -m 2G \ + $ qemu-system-s390x -accel kvm -m 2G \ -cpu gen16b,ctop=on \ -smp cpus=1,sockets=8,cores=4,maxcpus=32 \ \ diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst index 790ac1b8a2b..c9d7c0dda7e 100644 --- a/docs/system/target-arm.rst +++ b/docs/system/target-arm.rst @@ -84,6 +84,7 @@ undocumented; you can get a complete list by running arm/vexpress arm/aspeed arm/bananapi_m2u.rst + arm/b-l475e-iot01a.rst arm/sabrelite arm/digic arm/cubieboard diff --git a/docs/system/target-ppc.rst b/docs/system/target-ppc.rst index 4f6eb93b177..87bf412ce5c 100644 --- a/docs/system/target-ppc.rst +++ b/docs/system/target-ppc.rst @@ -17,6 +17,7 @@ help``. .. toctree:: :maxdepth: 1 + ppc/amigang ppc/embedded ppc/powermac ppc/powernv diff --git a/docs/tools/qemu-img.rst b/docs/tools/qemu-img.rst index 4459c065f19..3653adb963e 100644 --- a/docs/tools/qemu-img.rst +++ b/docs/tools/qemu-img.rst @@ -406,7 +406,7 @@ Command description: Compare exits with ``0`` in case the images are equal and with ``1`` in case the images differ. Other exit codes mean an error occurred during execution and standard error output should contain an error message. - The following table sumarizes all exit codes of the compare subcommand: + The following table summarizes all exit codes of the compare subcommand: 0 Images are identical (or requested help was printed) diff --git a/docs/user/main.rst b/docs/user/main.rst index f4786353965..d5fbb78d3c8 100644 --- a/docs/user/main.rst +++ b/docs/user/main.rst @@ -87,9 +87,6 @@ Debug options: Activate logging of the specified items (use '-d help' for a list of log items) -``-p pagesize`` - Act as if the host page size was 'pagesize' bytes - ``-g port`` Wait gdb connection to port @@ -98,9 +95,6 @@ Debug options: This slows down emulation a lot, but can be useful in some situations, such as when trying to analyse the logs produced by the ``-d`` option. -``-singlestep`` - This is a deprecated synonym for the ``-one-insn-per-tb`` option. - Environment variables: QEMU_STRACE @@ -251,6 +245,3 @@ Debug options: Run the emulation with one guest instruction per translation block. 
This slows down emulation a lot, but can be useful in some situations, such as when trying to analyse the logs produced by the ``-d`` option. - -``-singlestep`` - This is a deprecated synonym for the ``-one-insn-per-tb`` option. diff --git a/dump/dump-hmp-cmds.c b/dump/dump-hmp-cmds.c index b428ec33df6..d9340427c30 100644 --- a/dump/dump-hmp-cmds.c +++ b/dump/dump-hmp-cmds.c @@ -41,7 +41,7 @@ void hmp_dump_guest_memory(Monitor *mon, const QDict *qdict) dump_format = DUMP_GUEST_MEMORY_FORMAT_WIN_DMP; } - if (zlib && raw) { + if (zlib) { if (raw) { dump_format = DUMP_GUEST_MEMORY_FORMAT_KDUMP_RAW_ZLIB; } else { diff --git a/dump/dump.c b/dump/dump.c index 48190507649..84064d890d2 100644 --- a/dump/dump.c +++ b/dump/dump.c @@ -108,11 +108,11 @@ static int dump_cleanup(DumpState *s) s->guest_note = NULL; if (s->resume) { if (s->detached) { - qemu_mutex_lock_iothread(); + bql_lock(); } vm_start(); if (s->detached) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } } migrate_del_blocker(&dump_migration_blocker); diff --git a/ebpf/ebpf.c b/ebpf/ebpf.c new file mode 100644 index 00000000000..2d73beb4796 --- /dev/null +++ b/ebpf/ebpf.c @@ -0,0 +1,69 @@ +/* + * QEMU eBPF binary declaration routine. + * + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Andrew Melnychenko + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/queue.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-ebpf.h" +#include "ebpf/ebpf.h" + +typedef struct ElfBinaryDataEntry { + int id; + const void *data; + size_t datalen; + + QSLIST_ENTRY(ElfBinaryDataEntry) node; +} ElfBinaryDataEntry; + +static QSLIST_HEAD(, ElfBinaryDataEntry) ebpf_elf_obj_list = + QSLIST_HEAD_INITIALIZER(); + +void ebpf_register_binary_data(int id, const void *data, size_t datalen) +{ + struct ElfBinaryDataEntry *dataentry = NULL; + + dataentry = g_new0(struct ElfBinaryDataEntry, 1); + dataentry->data = data; + dataentry->datalen = datalen; + dataentry->id = id; + + QSLIST_INSERT_HEAD(&ebpf_elf_obj_list, dataentry, node); +} + +const void *ebpf_find_binary_by_id(int id, size_t *sz, Error **errp) +{ + struct ElfBinaryDataEntry *it = NULL; + QSLIST_FOREACH(it, &ebpf_elf_obj_list, node) { + if (id == it->id) { + *sz = it->datalen; + return it->data; + } + } + + error_setg(errp, "can't find eBPF object with id: %d", id); + + return NULL; +} + +EbpfObject *qmp_request_ebpf(EbpfProgramID id, Error **errp) +{ + EbpfObject *ret = NULL; + size_t size = 0; + const void *data = ebpf_find_binary_by_id(id, &size, errp); + if (!data) { + return NULL; + } + + ret = g_new0(EbpfObject, 1); + ret->object = g_base64_encode(data, size); + + return ret; +} diff --git a/ebpf/ebpf.h b/ebpf/ebpf.h new file mode 100644 index 00000000000..378d4e9c706 --- /dev/null +++ b/ebpf/ebpf.h @@ -0,0 +1,29 @@ +/* + * QEMU eBPF binary declaration routine. 
+ * + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Andrew Melnychenko + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef EBPF_H +#define EBPF_H + + +void ebpf_register_binary_data(int id, const void *data, + size_t datalen); +const void *ebpf_find_binary_by_id(int id, size_t *sz, + struct Error **errp); + +#define ebpf_binary_init(id, fn) \ +static void __attribute__((constructor)) ebpf_binary_init_ ## fn(void) \ +{ \ + size_t datalen = 0; \ + const void *data = fn(&datalen); \ + ebpf_register_binary_data(id, data, datalen); \ +} + +#endif /* EBPF_H */ diff --git a/ebpf/ebpf_rss-stub.c b/ebpf/ebpf_rss-stub.c index e71e229190d..8d7fae2ad92 100644 --- a/ebpf/ebpf_rss-stub.c +++ b/ebpf/ebpf_rss-stub.c @@ -28,6 +28,12 @@ bool ebpf_rss_load(struct EBPFRSSContext *ctx) return false; } +bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd, + int config_fd, int toeplitz_fd, int table_fd) +{ + return false; +} + bool ebpf_rss_set_all(struct EBPFRSSContext *ctx, struct EBPFRSSConfig *config, uint16_t *indirections_table, uint8_t *toeplitz_key) { diff --git a/ebpf/ebpf_rss.c b/ebpf/ebpf_rss.c index cee658c158b..d102f3dd092 100644 --- a/ebpf/ebpf_rss.c +++ b/ebpf/ebpf_rss.c @@ -13,6 +13,8 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" +#include "qapi/qapi-types-misc.h" +#include "qapi/qapi-commands-ebpf.h" #include #include @@ -21,38 +23,97 @@ #include "ebpf/ebpf_rss.h" #include "ebpf/rss.bpf.skeleton.h" -#include "trace.h" +#include "ebpf/ebpf.h" void ebpf_rss_init(struct EBPFRSSContext *ctx) { if (ctx != NULL) { ctx->obj = NULL; + ctx->program_fd = -1; + ctx->map_configuration = -1; + ctx->map_toeplitz_key = -1; + ctx->map_indirections_table = -1; + + ctx->mmap_configuration = NULL; + ctx->mmap_toeplitz_key = NULL; + ctx->mmap_indirections_table = NULL; } } bool ebpf_rss_is_loaded(struct EBPFRSSContext *ctx) { - return ctx != NULL && ctx->obj != NULL; + return ctx != NULL && (ctx->obj != NULL || ctx->program_fd != -1); +} + +static bool ebpf_rss_mmap(struct EBPFRSSContext *ctx) +{ + if (!ebpf_rss_is_loaded(ctx)) { + return false; + } + + ctx->mmap_configuration = mmap(NULL, qemu_real_host_page_size(), + PROT_READ | PROT_WRITE, MAP_SHARED, + ctx->map_configuration, 0); + if (ctx->mmap_configuration == MAP_FAILED) { + return false; + } + ctx->mmap_toeplitz_key = mmap(NULL, qemu_real_host_page_size(), + PROT_READ | PROT_WRITE, MAP_SHARED, + ctx->map_toeplitz_key, 0); + if (ctx->mmap_toeplitz_key == MAP_FAILED) { + goto toeplitz_fail; + } + ctx->mmap_indirections_table = mmap(NULL, qemu_real_host_page_size(), + PROT_READ | PROT_WRITE, MAP_SHARED, + ctx->map_indirections_table, 0); + if (ctx->mmap_indirections_table == MAP_FAILED) { + goto indirection_fail; + } + + return true; + +indirection_fail: + munmap(ctx->mmap_toeplitz_key, qemu_real_host_page_size()); + ctx->mmap_toeplitz_key = NULL; +toeplitz_fail: + munmap(ctx->mmap_configuration, qemu_real_host_page_size()); + ctx->mmap_configuration = NULL; + + ctx->mmap_indirections_table = NULL; + return false; +} + +static void ebpf_rss_munmap(struct EBPFRSSContext *ctx) +{ + if (!ebpf_rss_is_loaded(ctx)) { + return; + } + + munmap(ctx->mmap_indirections_table, qemu_real_host_page_size()); + munmap(ctx->mmap_toeplitz_key, qemu_real_host_page_size()); + munmap(ctx->mmap_configuration, qemu_real_host_page_size()); + + ctx->mmap_configuration = NULL; + ctx->mmap_toeplitz_key = NULL; + ctx->mmap_indirections_table = NULL; } bool ebpf_rss_load(struct EBPFRSSContext *ctx) { struct rss_bpf 
*rss_bpf_ctx; - if (ctx == NULL) { + if (ebpf_rss_is_loaded(ctx)) { return false; } rss_bpf_ctx = rss_bpf__open(); if (rss_bpf_ctx == NULL) { - trace_ebpf_error("eBPF RSS", "can not open eBPF RSS object"); goto error; } bpf_program__set_type(rss_bpf_ctx->progs.tun_rss_steering_prog, BPF_PROG_TYPE_SOCKET_FILTER); if (rss_bpf__load(rss_bpf_ctx)) { - trace_ebpf_error("eBPF RSS", "can not load RSS program"); goto error; } @@ -66,26 +127,57 @@ bool ebpf_rss_load(struct EBPFRSSContext *ctx) ctx->map_toeplitz_key = bpf_map__fd( rss_bpf_ctx->maps.tap_rss_map_toeplitz_key); + if (!ebpf_rss_mmap(ctx)) { + goto error; + } + return true; error: rss_bpf__destroy(rss_bpf_ctx); ctx->obj = NULL; + ctx->program_fd = -1; + ctx->map_configuration = -1; + ctx->map_toeplitz_key = -1; + ctx->map_indirections_table = -1; return false; } -static bool ebpf_rss_set_config(struct EBPFRSSContext *ctx, - struct EBPFRSSConfig *config) +bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd, + int config_fd, int toeplitz_fd, int table_fd) { - uint32_t map_key = 0; + if (ebpf_rss_is_loaded(ctx)) { + return false; + } - if (!ebpf_rss_is_loaded(ctx)) { + if (program_fd < 0 || config_fd < 0 || toeplitz_fd < 0 || table_fd < 0) { return false; } - if (bpf_map_update_elem(ctx->map_configuration, - &map_key, config, 0) < 0) { + + ctx->program_fd = program_fd; + ctx->map_configuration = config_fd; + ctx->map_toeplitz_key = toeplitz_fd; + ctx->map_indirections_table = table_fd; + + if (!ebpf_rss_mmap(ctx)) { + ctx->program_fd = -1; + ctx->map_configuration = -1; + ctx->map_toeplitz_key = -1; + ctx->map_indirections_table = -1; return false; } + + return true; +} + +static bool ebpf_rss_set_config(struct EBPFRSSContext *ctx, + struct EBPFRSSConfig *config) +{ + if (!ebpf_rss_is_loaded(ctx)) { + return false; + } + + memcpy(ctx->mmap_configuration, config, sizeof(*config)); return true; } @@ -93,27 +185,24 @@ static bool ebpf_rss_set_indirections_table(struct EBPFRSSContext *ctx, uint16_t *indirections_table, size_t len) { - uint32_t i = 0; + char *cursor = ctx->mmap_indirections_table; if (!ebpf_rss_is_loaded(ctx) || indirections_table == NULL || len > VIRTIO_NET_RSS_MAX_TABLE_LEN) { return false; } - for (; i < len; ++i) { - if (bpf_map_update_elem(ctx->map_indirections_table, &i, - indirections_table + i, 0) < 0) { - return false; - } + for (size_t i = 0; i < len; i++) { + *(uint16_t *)cursor = indirections_table[i]; + cursor += 8; } + return true; } static bool ebpf_rss_set_toepliz_key(struct EBPFRSSContext *ctx, uint8_t *toeplitz_key) { - uint32_t map_key = 0; - /* prepare toeplitz key */ uint8_t toe[VIRTIO_NET_RSS_MAX_KEY_SIZE] = {}; @@ -123,10 +212,7 @@ static bool ebpf_rss_set_toepliz_key(struct EBPFRSSContext *ctx, memcpy(toe, toeplitz_key, VIRTIO_NET_RSS_MAX_KEY_SIZE); *(uint32_t *)toe = ntohl(*(uint32_t *)toe); - if (bpf_map_update_elem(ctx->map_toeplitz_key, &map_key, toe, - 0) < 0) { - return false; - } + memcpy(ctx->mmap_toeplitz_key, toe, VIRTIO_NET_RSS_MAX_KEY_SIZE); return true; } @@ -160,6 +246,22 @@ void ebpf_rss_unload(struct EBPFRSSContext *ctx) return; } - rss_bpf__destroy(ctx->obj); + ebpf_rss_munmap(ctx); + + if (ctx->obj) { + rss_bpf__destroy(ctx->obj); + } else { + close(ctx->program_fd); + close(ctx->map_configuration); + close(ctx->map_toeplitz_key); + close(ctx->map_indirections_table); + } + ctx->obj = NULL; + ctx->program_fd = -1; + ctx->map_configuration = -1; + ctx->map_toeplitz_key = -1; + ctx->map_indirections_table = -1; } + +ebpf_binary_init(EBPF_PROGRAMID_RSS, rss_bpf__elf_bytes) 
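
For reference, the ebpf_binary_init() invocation above expands, per the macro added in ebpf/ebpf.h earlier in this patch, to roughly the constructor below. This is an illustrative expansion only, not extra code in the patch; all identifiers (rss_bpf__elf_bytes, EBPF_PROGRAMID_RSS, ebpf_register_binary_data) come from the patch itself:

    /* Illustrative expansion of ebpf_binary_init(EBPF_PROGRAMID_RSS, rss_bpf__elf_bytes):
     * a constructor runs at startup, asks the skeleton for its embedded ELF image,
     * and registers it so qmp_request_ebpf() can later return it base64-encoded. */
    static void __attribute__((constructor))
    ebpf_binary_init_rss_bpf__elf_bytes(void)
    {
        size_t datalen = 0;
        const void *data = rss_bpf__elf_bytes(&datalen);

        ebpf_register_binary_data(EBPF_PROGRAMID_RSS, data, datalen);
    }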
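A minimal caller-side sketch of the new fd-based path, under the assumption that the program and map descriptors were opened elsewhere (program_fd, config_fd, toeplitz_fd and table_fd are hypothetical placeholders, e.g. handed over by a management layer); the functions and signatures are the ones declared in ebpf/ebpf_rss.h in the hunk that follows:

    #include "ebpf/ebpf_rss.h"

    /* Hypothetical caller: prefer externally provided fds, fall back to
     * loading the embedded skeleton in-process. */
    static bool rss_attach(struct EBPFRSSContext *ctx, int program_fd,
                           int config_fd, int toeplitz_fd, int table_fd)
    {
        ebpf_rss_init(ctx);   /* clears obj, fds and mmap pointers */

        if (ebpf_rss_load_fds(ctx, program_fd, config_fd,
                              toeplitz_fd, table_fd)) {
            return true;      /* maps are now mmap()ed for direct access */
        }

        return ebpf_rss_load(ctx);  /* load and mmap the embedded skeleton */
    }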
diff --git a/ebpf/ebpf_rss.h b/ebpf/ebpf_rss.h index bf3f2572c7c..239242b0d26 100644 --- a/ebpf/ebpf_rss.h +++ b/ebpf/ebpf_rss.h @@ -14,12 +14,19 @@ #ifndef QEMU_EBPF_RSS_H #define QEMU_EBPF_RSS_H +#define EBPF_RSS_MAX_FDS 4 + struct EBPFRSSContext { void *obj; int program_fd; int map_configuration; int map_toeplitz_key; int map_indirections_table; + + /* mapped eBPF maps for direct access to omit bpf_map_update_elem() */ + void *mmap_configuration; + void *mmap_toeplitz_key; + void *mmap_indirections_table; }; struct EBPFRSSConfig { @@ -36,6 +43,9 @@ bool ebpf_rss_is_loaded(struct EBPFRSSContext *ctx); bool ebpf_rss_load(struct EBPFRSSContext *ctx); +bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd, + int config_fd, int toeplitz_fd, int table_fd); + bool ebpf_rss_set_all(struct EBPFRSSContext *ctx, struct EBPFRSSConfig *config, uint16_t *indirections_table, uint8_t *toeplitz_key); diff --git a/ebpf/meson.build b/ebpf/meson.build index 2f627d6c7d0..c5bf9295a20 100644 --- a/ebpf/meson.build +++ b/ebpf/meson.build @@ -1 +1 @@ -system_ss.add(when: libbpf, if_true: files('ebpf_rss.c'), if_false: files('ebpf_rss-stub.c')) +common_ss.add(when: libbpf, if_true: files('ebpf.c', 'ebpf_rss.c'), if_false: files('ebpf_rss-stub.c')) diff --git a/ebpf/rss.bpf.skeleton.h b/ebpf/rss.bpf.skeleton.h index 18eb2adb12c..aed4ef9a033 100644 --- a/ebpf/rss.bpf.skeleton.h +++ b/ebpf/rss.bpf.skeleton.h @@ -176,642 +176,647 @@ rss_bpf__create_skeleton(struct rss_bpf *obj) static inline const void *rss_bpf__elf_bytes(size_t *sz) { - *sz = 20440; + *sz = 20600; return (const void *)"\ \x7f\x45\x4c\x46\x02\x01\x01\0\0\0\0\0\0\0\0\0\x01\0\xf7\0\x01\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\x98\x4c\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\x40\0\x0d\0\ -\x01\0\xbf\x19\0\0\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\x54\xff\0\0\0\0\xbf\xa7\ -\0\0\0\0\0\0\x07\x07\0\0\x54\xff\xff\xff\x18\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\x38\x4d\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\x40\0\x0d\0\ +\x01\0\xbf\x19\0\0\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\x4c\xff\0\0\0\0\xbf\xa7\ +\0\0\0\0\0\0\x07\x07\0\0\x4c\xff\xff\xff\x18\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ \xbf\x72\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\xbf\x06\0\0\0\0\0\0\x18\x01\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\xbf\x72\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\xbf\x08\0\0\0\0\0\0\ -\x18\0\0\0\xff\xff\xff\xff\0\0\0\0\0\0\0\0\x15\x06\x67\x02\0\0\0\0\xbf\x87\0\0\ -\0\0\0\0\x15\x07\x65\x02\0\0\0\0\x71\x61\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\ -\0\x5e\x02\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xc8\xff\0\0\0\0\x7b\x1a\xc0\xff\ -\0\0\0\0\x7b\x1a\xb8\xff\0\0\0\0\x7b\x1a\xb0\xff\0\0\0\0\x7b\x1a\xa8\xff\0\0\0\ -\0\x63\x1a\xa0\xff\0\0\0\0\x7b\x1a\x98\xff\0\0\0\0\x7b\x1a\x90\xff\0\0\0\0\x7b\ -\x1a\x88\xff\0\0\0\0\x7b\x1a\x80\xff\0\0\0\0\x7b\x1a\x78\xff\0\0\0\0\x7b\x1a\ -\x70\xff\0\0\0\0\x7b\x1a\x68\xff\0\0\0\0\x7b\x1a\x60\xff\0\0\0\0\x7b\x1a\x58\ -\xff\0\0\0\0\x15\x09\x4d\x02\0\0\0\0\x6b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\ -\0\x07\x03\0\0\xd0\xff\xff\xff\xbf\x91\0\0\0\0\0\0\xb7\x02\0\0\x0c\0\0\0\xb7\ +\0\0\0\0\0\0\0\0\0\xbf\x72\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\xbf\x07\0\0\0\0\0\0\ +\x18\0\0\0\xff\xff\xff\xff\0\0\0\0\0\0\0\0\x15\x06\x61\x02\0\0\0\0\xbf\x78\0\0\ +\0\0\0\0\x15\x08\x5f\x02\0\0\0\0\x71\x61\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\ +\0\x58\x02\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xc0\xff\0\0\0\0\x7b\x1a\xb8\xff\ +\0\0\0\0\x7b\x1a\xb0\xff\0\0\0\0\x7b\x1a\xa8\xff\0\0\0\0\x7b\x1a\xa0\xff\0\0\0\ +\0\x63\x1a\x98\xff\0\0\0\0\x7b\x1a\x90\xff\0\0\0\0\x7b\x1a\x88\xff\0\0\0\0\x7b\ 
+\x1a\x80\xff\0\0\0\0\x7b\x1a\x78\xff\0\0\0\0\x7b\x1a\x70\xff\0\0\0\0\x7b\x1a\ +\x68\xff\0\0\0\0\x7b\x1a\x60\xff\0\0\0\0\x7b\x1a\x58\xff\0\0\0\0\x7b\x1a\x50\ +\xff\0\0\0\0\x15\x09\x47\x02\0\0\0\0\x6b\x1a\xc8\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\ +\0\x07\x03\0\0\xc8\xff\xff\xff\xbf\x91\0\0\0\0\0\0\xb7\x02\0\0\x0c\0\0\0\xb7\ \x04\0\0\x02\0\0\0\xb7\x05\0\0\0\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\ -\x77\0\0\0\x20\0\0\0\x55\0\x42\x02\0\0\0\0\xb7\x02\0\0\x10\0\0\0\x69\xa1\xd0\ +\x77\0\0\0\x20\0\0\0\x55\0\x3c\x02\0\0\0\0\xb7\x02\0\0\x10\0\0\0\x69\xa1\xc8\ \xff\0\0\0\0\xbf\x13\0\0\0\0\0\0\xdc\x03\0\0\x10\0\0\0\x15\x03\x02\0\0\x81\0\0\ \x55\x03\x0b\0\xa8\x88\0\0\xb7\x02\0\0\x14\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\ -\0\xd0\xff\xff\xff\xbf\x91\0\0\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\0\0\0\ -\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\x32\x02\0\ -\0\0\0\x69\xa1\xd0\xff\0\0\0\0\x15\x01\x30\x02\0\0\0\0\x7b\x7a\x38\xff\0\0\0\0\ -\x7b\x9a\x40\xff\0\0\0\0\x15\x01\x55\0\x86\xdd\0\0\x55\x01\x39\0\x08\0\0\0\xb7\ -\x07\0\0\x01\0\0\0\x73\x7a\x58\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xe0\xff\ -\0\0\0\0\x7b\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\ -\x07\x03\0\0\xd0\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\xb7\x02\0\0\0\0\0\0\xb7\ -\x04\0\0\x14\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\ -\0\x77\0\0\0\x20\0\0\0\x55\0\x1c\x02\0\0\0\0\x69\xa1\xd6\xff\0\0\0\0\x55\x01\ -\x01\0\0\0\0\0\xb7\x07\0\0\0\0\0\0\x61\xa1\xdc\xff\0\0\0\0\x63\x1a\x64\xff\0\0\ -\0\0\x61\xa1\xe0\xff\0\0\0\0\x63\x1a\x68\xff\0\0\0\0\x71\xa9\xd9\xff\0\0\0\0\ -\x73\x7a\x5e\xff\0\0\0\0\x71\xa1\xd0\xff\0\0\0\0\x67\x01\0\0\x02\0\0\0\x57\x01\ -\0\0\x3c\0\0\0\x7b\x1a\x48\xff\0\0\0\0\xbf\x91\0\0\0\0\0\0\x57\x01\0\0\xff\0\0\ -\0\x15\x01\x19\0\0\0\0\0\x57\x07\0\0\xff\0\0\0\x55\x07\x17\0\0\0\0\0\x57\x09\0\ -\0\xff\0\0\0\x15\x09\x5a\x01\x11\0\0\0\x55\x09\x14\0\x06\0\0\0\xb7\x01\0\0\x01\ -\0\0\0\x73\x1a\x5b\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xe0\xff\0\0\0\0\x7b\ -\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\ -\xd0\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\ -\x14\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\ -\0\0\x20\0\0\0\x55\0\xf7\x01\0\0\0\0\x69\xa1\xd0\xff\0\0\0\0\x6b\x1a\x60\xff\0\ -\0\0\0\x69\xa1\xd2\xff\0\0\0\0\x6b\x1a\x62\xff\0\0\0\0\x71\xa1\x58\xff\0\0\0\0\ -\x15\x01\xdb\0\0\0\0\0\x71\x62\x03\0\0\0\0\0\x67\x02\0\0\x08\0\0\0\x71\x61\x02\ -\0\0\0\0\0\x4f\x12\0\0\0\0\0\0\x71\x63\x04\0\0\0\0\0\x71\x61\x05\0\0\0\0\0\x67\ -\x01\0\0\x08\0\0\0\x4f\x31\0\0\0\0\0\0\x67\x01\0\0\x10\0\0\0\x4f\x21\0\0\0\0\0\ -\0\x71\xa2\x5b\xff\0\0\0\0\x79\xa0\x38\xff\0\0\0\0\x15\x02\x0c\x01\0\0\0\0\xbf\ -\x12\0\0\0\0\0\0\x57\x02\0\0\x02\0\0\0\x15\x02\x09\x01\0\0\0\0\x61\xa1\x64\xff\ -\0\0\0\0\x63\x1a\xa8\xff\0\0\0\0\x61\xa1\x68\xff\0\0\0\0\x63\x1a\xac\xff\0\0\0\ -\0\x69\xa1\x60\xff\0\0\0\0\x6b\x1a\xb0\xff\0\0\0\0\x69\xa1\x62\xff\0\0\0\0\x6b\ -\x1a\xb2\xff\0\0\0\0\x05\0\x6b\x01\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x59\ -\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\x1a\xf0\xff\0\0\0\0\x7b\x1a\xe8\xff\0\0\0\ -\0\x7b\x1a\xe0\xff\0\0\0\0\x7b\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\ -\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\xff\xff\xb7\x01\0\0\x28\0\0\0\x7b\x1a\x48\ +\0\xc8\xff\xff\xff\xbf\x91\0\0\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\0\0\0\ +\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\x2c\x02\0\ +\0\0\0\x69\xa1\xc8\xff\0\0\0\0\x15\x01\x2a\x02\0\0\0\0\x7b\x9a\x38\xff\0\0\0\0\ 
+\x15\x01\x56\0\x86\xdd\0\0\x55\x01\x3b\0\x08\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\ +\x1a\x50\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\ +\xff\0\0\0\0\x7b\x1a\xc8\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xc8\xff\ +\xff\xff\x79\xa1\x38\xff\0\0\0\0\xb7\x02\0\0\0\0\0\0\xb7\x04\0\0\x14\0\0\0\xb7\ +\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\ +\x55\0\x17\x02\0\0\0\0\x69\xa1\xce\xff\0\0\0\0\x57\x01\0\0\x3f\xff\0\0\xb7\x02\ +\0\0\x01\0\0\0\x55\x01\x01\0\0\0\0\0\xb7\x02\0\0\0\0\0\0\x61\xa1\xd4\xff\0\0\0\ +\0\x63\x1a\x5c\xff\0\0\0\0\x61\xa1\xd8\xff\0\0\0\0\x63\x1a\x60\xff\0\0\0\0\x71\ +\xa9\xd1\xff\0\0\0\0\x71\xa1\xc8\xff\0\0\0\0\x67\x01\0\0\x02\0\0\0\x57\x01\0\0\ +\x3c\0\0\0\x7b\x1a\x40\xff\0\0\0\0\x73\x2a\x56\xff\0\0\0\0\xbf\x91\0\0\0\0\0\0\ +\x57\x01\0\0\xff\0\0\0\x15\x01\x19\0\0\0\0\0\x57\x02\0\0\xff\0\0\0\x55\x02\x17\ +\0\0\0\0\0\x57\x09\0\0\xff\0\0\0\x15\x09\x57\x01\x11\0\0\0\x55\x09\x14\0\x06\0\ +\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x53\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\ +\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\x7b\x1a\xc8\xff\0\0\0\0\xbf\xa3\0\0\0\ +\0\0\0\x07\x03\0\0\xc8\xff\xff\xff\x79\xa1\x38\xff\0\0\0\0\x79\xa2\x40\xff\0\0\ +\0\0\xb7\x04\0\0\x14\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\ +\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\xf0\x01\0\0\0\0\x69\xa1\xc8\xff\0\0\0\0\ +\x6b\x1a\x58\xff\0\0\0\0\x69\xa1\xca\xff\0\0\0\0\x6b\x1a\x5a\xff\0\0\0\0\x71\ +\xa1\x50\xff\0\0\0\0\x15\x01\xd8\0\0\0\0\0\x71\x62\x03\0\0\0\0\0\x67\x02\0\0\ +\x08\0\0\0\x71\x61\x02\0\0\0\0\0\x4f\x12\0\0\0\0\0\0\x71\x63\x04\0\0\0\0\0\x67\ +\x03\0\0\x10\0\0\0\x71\x61\x05\0\0\0\0\0\x67\x01\0\0\x18\0\0\0\x4f\x31\0\0\0\0\ +\0\0\x4f\x21\0\0\0\0\0\0\x71\xa2\x53\xff\0\0\0\0\x15\x02\x09\x01\0\0\0\0\xbf\ +\x12\0\0\0\0\0\0\x57\x02\0\0\x02\0\0\0\x15\x02\x06\x01\0\0\0\0\x61\xa1\x5c\xff\ +\0\0\0\0\x63\x1a\xa0\xff\0\0\0\0\x61\xa1\x60\xff\0\0\0\0\x63\x1a\xa4\xff\0\0\0\ +\0\x69\xa1\x58\xff\0\0\0\0\x6b\x1a\xa8\xff\0\0\0\0\x69\xa1\x5a\xff\0\0\0\0\x6b\ +\x1a\xaa\xff\0\0\0\0\x05\0\x68\x01\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x51\ +\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\x1a\xe8\xff\0\0\0\0\x7b\x1a\xe0\xff\0\0\0\ +\0\x7b\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\x7b\x1a\xc8\xff\0\0\0\0\xbf\ +\xa3\0\0\0\0\0\0\x07\x03\0\0\xc8\xff\xff\xff\xb7\x01\0\0\x28\0\0\0\x7b\x1a\x40\ \xff\0\0\0\0\xbf\x91\0\0\0\0\0\0\xb7\x02\0\0\0\0\0\0\xb7\x04\0\0\x28\0\0\0\xb7\ \x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\ -\x55\0\xfe\0\0\0\0\0\x79\xa1\xe0\xff\0\0\0\0\x63\x1a\x6c\xff\0\0\0\0\x77\x01\0\ -\0\x20\0\0\0\x63\x1a\x70\xff\0\0\0\0\x79\xa1\xd8\xff\0\0\0\0\x63\x1a\x64\xff\0\ -\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x68\xff\0\0\0\0\x79\xa1\xe8\xff\0\0\0\0\ -\x63\x1a\x74\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x78\xff\0\0\0\0\x79\xa1\ -\xf0\xff\0\0\0\0\x63\x1a\x7c\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x80\xff\ -\0\0\0\0\x71\xa9\xd6\xff\0\0\0\0\x25\x09\x13\x01\x3c\0\0\0\xb7\x01\0\0\x01\0\0\ +\x55\0\xfc\0\0\0\0\0\x79\xa1\xd8\xff\0\0\0\0\x63\x1a\x64\xff\0\0\0\0\x77\x01\0\ +\0\x20\0\0\0\x63\x1a\x68\xff\0\0\0\0\x79\xa1\xd0\xff\0\0\0\0\x63\x1a\x5c\xff\0\ +\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x60\xff\0\0\0\0\x79\xa1\xe0\xff\0\0\0\0\ +\x63\x1a\x6c\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x70\xff\0\0\0\0\x79\xa1\ +\xe8\xff\0\0\0\0\x63\x1a\x74\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x78\xff\ +\0\0\0\0\x71\xa9\xce\xff\0\0\0\0\x25\x09\x11\x01\x3c\0\0\0\xb7\x01\0\0\x01\0\0\ \0\x6f\x91\0\0\0\0\0\0\x18\x02\0\0\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x5f\x21\0\0\0\ 
-\0\0\0\x55\x01\x01\0\0\0\0\0\x05\0\x0c\x01\0\0\0\0\xb7\x01\0\0\0\0\0\0\x6b\x1a\ -\xfe\xff\0\0\0\0\xb7\x01\0\0\x28\0\0\0\x7b\x1a\x48\xff\0\0\0\0\xbf\xa1\0\0\0\0\ -\0\0\x07\x01\0\0\x94\xff\xff\xff\x7b\x1a\x20\xff\0\0\0\0\xbf\xa1\0\0\0\0\0\0\ -\x07\x01\0\0\x84\xff\xff\xff\x7b\x1a\x18\xff\0\0\0\0\x18\x07\0\0\x01\0\0\0\0\0\ -\0\0\0\x18\0\x1c\xb7\x02\0\0\0\0\0\0\x7b\x8a\x28\xff\0\0\0\0\x7b\x2a\x30\xff\0\ -\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xfe\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\ -\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\ -\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x15\0\x01\0\0\0\0\0\x05\0\ -\x91\x01\0\0\0\0\xbf\x91\0\0\0\0\0\0\x15\x01\x26\0\x3c\0\0\0\x15\x01\x5f\0\x2c\ -\0\0\0\x55\x01\x60\0\x2b\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xf8\xff\0\0\0\0\xbf\ -\xa3\0\0\0\0\0\0\x07\x03\0\0\xf8\xff\xff\xff\x79\xa7\x40\xff\0\0\0\0\xbf\x71\0\ -\0\0\0\0\0\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\x04\0\0\0\xb7\x05\0\0\x01\0\0\0\ -\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\ -\0\0\0\x55\x01\x06\x01\0\0\0\0\x71\xa1\xfa\xff\0\0\0\0\x55\x01\x11\0\x02\0\0\0\ -\x71\xa1\xf9\xff\0\0\0\0\x55\x01\x0f\0\x02\0\0\0\x71\xa1\xfb\xff\0\0\0\0\x55\ -\x01\x0d\0\x01\0\0\0\x79\xa2\x48\xff\0\0\0\0\x07\x02\0\0\x08\0\0\0\xbf\x71\0\0\ -\0\0\0\0\x79\xa3\x20\xff\0\0\0\0\xb7\x04\0\0\x10\0\0\0\xb7\x05\0\0\x01\0\0\0\ -\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\ -\0\0\0\x55\x01\xf5\0\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x5d\xff\0\0\0\0\x18\ -\x07\0\0\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x05\0\x3c\0\0\0\0\0\xb7\x08\0\0\x02\0\0\ -\0\xb7\x07\0\0\0\0\0\0\x6b\x7a\xf8\xff\0\0\0\0\x05\0\x13\0\0\0\0\0\x0f\x81\0\0\ -\0\0\0\0\xbf\x12\0\0\0\0\0\0\x07\x02\0\0\x01\0\0\0\x71\xa3\xff\xff\0\0\0\0\x67\ -\x03\0\0\x03\0\0\0\x3d\x32\x09\0\0\0\0\0\xbf\x72\0\0\0\0\0\0\x07\x02\0\0\x01\0\ -\0\0\x67\x07\0\0\x20\0\0\0\xbf\x73\0\0\0\0\0\0\x77\x03\0\0\x20\0\0\0\xbf\x27\0\ -\0\0\0\0\0\xbf\x18\0\0\0\0\0\0\xb7\x01\0\0\x1d\0\0\0\x2d\x31\x04\0\0\0\0\0\x79\ -\xa8\x28\xff\0\0\0\0\x18\x07\0\0\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x05\0\x25\0\0\0\ -\0\0\xbf\x89\0\0\0\0\0\0\x79\xa1\x48\xff\0\0\0\0\x0f\x19\0\0\0\0\0\0\xbf\xa3\0\ -\0\0\0\0\0\x07\x03\0\0\xf8\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\xbf\x92\0\0\0\0\ -\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\0\ -\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x79\0\0\0\0\0\ -\x71\xa2\xf8\xff\0\0\0\0\x55\x02\x0e\0\xc9\0\0\0\x07\x09\0\0\x02\0\0\0\x79\xa1\ -\x40\xff\0\0\0\0\xbf\x92\0\0\0\0\0\0\x79\xa3\x18\xff\0\0\0\0\xb7\x04\0\0\x10\0\ -\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\ -\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x6c\0\0\0\0\0\xb7\x01\0\0\x01\0\0\0\ -\x73\x1a\x5c\xff\0\0\0\0\x05\0\xde\xff\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x15\x02\ -\xcd\xff\0\0\0\0\x71\xa1\xf9\xff\0\0\0\0\x07\x01\0\0\x02\0\0\0\x05\0\xca\xff\0\ -\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x5e\xff\0\0\0\0\x71\xa1\xff\xff\0\0\0\0\ -\x67\x01\0\0\x03\0\0\0\x79\xa2\x48\xff\0\0\0\0\x0f\x12\0\0\0\0\0\0\x07\x02\0\0\ -\x08\0\0\0\x7b\x2a\x48\xff\0\0\0\0\x71\xa9\xfe\xff\0\0\0\0\x79\xa2\x30\xff\0\0\ -\0\0\x25\x09\x0c\0\x3c\0\0\0\xb7\x01\0\0\x01\0\0\0\x6f\x91\0\0\0\0\0\0\x5f\x71\ -\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\0\x07\0\0\0\0\0\x07\x02\0\0\x01\0\0\0\ -\xbf\x21\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x7d\ -\xff\x0b\0\0\0\x71\xa7\x5e\xff\0\0\0\0\x05\0\x09\xff\0\0\0\0\x15\x09\xf8\xff\ -\x87\0\0\0\x05\0\xfc\xff\0\0\0\0\x71\xa1\x59\xff\0\0\0\0\x79\xa0\x38\xff\0\0\0\ 
-\0\x15\x01\x13\x01\0\0\0\0\x71\x62\x03\0\0\0\0\0\x67\x02\0\0\x08\0\0\0\x71\x61\ -\x02\0\0\0\0\0\x4f\x12\0\0\0\0\0\0\x71\x63\x04\0\0\0\0\0\x71\x61\x05\0\0\0\0\0\ -\x67\x01\0\0\x08\0\0\0\x4f\x31\0\0\0\0\0\0\x67\x01\0\0\x10\0\0\0\x4f\x21\0\0\0\ -\0\0\0\x71\xa2\x5b\xff\0\0\0\0\x15\x02\x42\0\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\ -\x02\0\0\x10\0\0\0\x15\x02\x3f\0\0\0\0\0\x57\x01\0\0\x80\0\0\0\xb7\x02\0\0\x10\ +\0\0\0\x55\x01\x01\0\0\0\0\0\x05\0\x0a\x01\0\0\0\0\xb7\x01\0\0\0\0\0\0\x6b\x1a\ +\xf8\xff\0\0\0\0\xb7\x01\0\0\x28\0\0\0\x7b\x1a\x40\xff\0\0\0\0\xbf\xa1\0\0\0\0\ +\0\0\x07\x01\0\0\x8c\xff\xff\xff\x7b\x1a\x18\xff\0\0\0\0\xbf\xa1\0\0\0\0\0\0\ +\x07\x01\0\0\x7c\xff\xff\xff\x7b\x1a\x10\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\ +\x1a\x30\xff\0\0\0\0\x7b\x7a\x28\xff\0\0\0\0\x7b\x8a\x20\xff\0\0\0\0\xbf\xa3\0\ +\0\0\0\0\0\x07\x03\0\0\xf8\xff\xff\xff\x79\xa1\x38\xff\0\0\0\0\x79\xa2\x40\xff\ +\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\ +\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\xc7\0\0\0\ +\0\0\xbf\x91\0\0\0\0\0\0\x15\x01\x24\0\x3c\0\0\0\x15\x01\x5c\0\x2c\0\0\0\x55\ +\x01\x5d\0\x2b\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xf0\xff\0\0\0\0\xbf\xa3\0\0\0\ +\0\0\0\x07\x03\0\0\xf0\xff\xff\xff\x79\xa9\x38\xff\0\0\0\0\xbf\x91\0\0\0\0\0\0\ +\x79\xa2\x40\xff\0\0\0\0\xb7\x04\0\0\x04\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\ +\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\ +\x01\x06\x01\0\0\0\0\x71\xa1\xf2\xff\0\0\0\0\x55\x01\x4d\0\x02\0\0\0\x71\xa1\ +\xf1\xff\0\0\0\0\x55\x01\x4b\0\x02\0\0\0\x71\xa1\xf3\xff\0\0\0\0\x55\x01\x49\0\ +\x01\0\0\0\x79\xa2\x40\xff\0\0\0\0\x07\x02\0\0\x08\0\0\0\xbf\x91\0\0\0\0\0\0\ +\x79\xa3\x18\xff\0\0\0\0\xb7\x04\0\0\x10\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\ +\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\ +\x01\xf5\0\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x55\xff\0\0\0\0\x05\0\x3b\0\0\ +\0\0\0\xb7\x08\0\0\x02\0\0\0\xb7\x07\0\0\0\0\0\0\x6b\x7a\xf0\xff\0\0\0\0\x05\0\ +\x12\0\0\0\0\0\x0f\x81\0\0\0\0\0\0\xbf\x12\0\0\0\0\0\0\x07\x02\0\0\x01\0\0\0\ +\x71\xa3\xf9\xff\0\0\0\0\x67\x03\0\0\x03\0\0\0\x3d\x32\x09\0\0\0\0\0\xbf\x72\0\ +\0\0\0\0\0\x07\x02\0\0\x01\0\0\0\x67\x07\0\0\x20\0\0\0\xbf\x73\0\0\0\0\0\0\x77\ +\x03\0\0\x20\0\0\0\xbf\x27\0\0\0\0\0\0\xbf\x18\0\0\0\0\0\0\xb7\x01\0\0\x1d\0\0\ +\0\x2d\x31\x03\0\0\0\0\0\x79\xa7\x28\xff\0\0\0\0\x79\xa8\x20\xff\0\0\0\0\x05\0\ +\x25\0\0\0\0\0\xbf\x89\0\0\0\0\0\0\x79\xa1\x40\xff\0\0\0\0\x0f\x19\0\0\0\0\0\0\ +\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xf0\xff\xff\xff\x79\xa1\x38\xff\0\0\0\0\xbf\ +\x92\0\0\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\ +\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x7a\ +\0\0\0\0\0\x71\xa2\xf0\xff\0\0\0\0\x55\x02\x0e\0\xc9\0\0\0\x07\x09\0\0\x02\0\0\ +\0\x79\xa1\x38\xff\0\0\0\0\xbf\x92\0\0\0\0\0\0\x79\xa3\x10\xff\0\0\0\0\xb7\x04\ +\0\0\x10\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\ +\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x6d\0\0\0\0\0\xb7\x01\0\0\ +\x01\0\0\0\x73\x1a\x54\xff\0\0\0\0\x05\0\xdf\xff\0\0\0\0\xb7\x01\0\0\x01\0\0\0\ +\x15\x02\xce\xff\0\0\0\0\x71\xa1\xf1\xff\0\0\0\0\x07\x01\0\0\x02\0\0\0\x05\0\ +\xcb\xff\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x56\xff\0\0\0\0\x71\xa1\xf9\xff\ +\0\0\0\0\x67\x01\0\0\x03\0\0\0\x79\xa2\x40\xff\0\0\0\0\x0f\x12\0\0\0\0\0\0\x07\ +\x02\0\0\x08\0\0\0\x7b\x2a\x40\xff\0\0\0\0\x71\xa9\xf8\xff\0\0\0\0\x25\x09\x0f\ +\0\x3c\0\0\0\xb7\x01\0\0\x01\0\0\0\x6f\x91\0\0\0\0\0\0\x18\x02\0\0\x01\0\0\0\0\ 
+\0\0\0\0\x18\0\x1c\x5f\x21\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\0\x08\0\0\0\0\ +\0\x79\xa1\x30\xff\0\0\0\0\x07\x01\0\0\x01\0\0\0\x7b\x1a\x30\xff\0\0\0\0\x67\ +\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x7f\xff\x0b\0\0\0\x71\xa2\x56\ +\xff\0\0\0\0\x05\0\x0c\xff\0\0\0\0\x15\x09\xf7\xff\x87\0\0\0\x05\0\xfc\xff\0\0\ +\0\0\x71\xa1\x51\xff\0\0\0\0\x15\x01\x10\x01\0\0\0\0\x71\x62\x03\0\0\0\0\0\x67\ +\x02\0\0\x08\0\0\0\x71\x61\x02\0\0\0\0\0\x4f\x12\0\0\0\0\0\0\x71\x63\x04\0\0\0\ +\0\0\x67\x03\0\0\x10\0\0\0\x71\x61\x05\0\0\0\0\0\x67\x01\0\0\x18\0\0\0\x4f\x31\ +\0\0\0\0\0\0\x4f\x21\0\0\0\0\0\0\x71\xa2\x53\xff\0\0\0\0\x15\x02\x43\0\0\0\0\0\ +\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x10\0\0\0\x15\x02\x40\0\0\0\0\0\x57\x01\0\0\ +\x80\0\0\0\xb7\x02\0\0\x10\0\0\0\xb7\x03\0\0\x10\0\0\0\x15\x01\x01\0\0\0\0\0\ +\xb7\x03\0\0\x30\0\0\0\x71\xa4\x55\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\0\ +\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\x5c\xff\xff\xff\xbf\x34\0\0\0\0\0\0\ +\x15\x01\x02\0\0\0\0\0\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\x7c\xff\xff\xff\x71\xa5\ +\x54\xff\0\0\0\0\xbf\x31\0\0\0\0\0\0\x15\x05\x01\0\0\0\0\0\xbf\x41\0\0\0\0\0\0\ +\x61\x14\x04\0\0\0\0\0\x67\x04\0\0\x20\0\0\0\x61\x15\0\0\0\0\0\0\x4f\x54\0\0\0\ +\0\0\0\x7b\x4a\xa0\xff\0\0\0\0\x61\x14\x08\0\0\0\0\0\x61\x11\x0c\0\0\0\0\0\x67\ +\x01\0\0\x20\0\0\0\x4f\x41\0\0\0\0\0\0\x7b\x1a\xa8\xff\0\0\0\0\x0f\x23\0\0\0\0\ +\0\0\x61\x31\0\0\0\0\0\0\x61\x32\x04\0\0\0\0\0\x61\x34\x08\0\0\0\0\0\x61\x33\ +\x0c\0\0\0\0\0\x69\xa5\x5a\xff\0\0\0\0\x6b\x5a\xc2\xff\0\0\0\0\x69\xa5\x58\xff\ +\0\0\0\0\x6b\x5a\xc0\xff\0\0\0\0\x67\x03\0\0\x20\0\0\0\x4f\x43\0\0\0\0\0\0\x7b\ +\x3a\xb8\xff\0\0\0\0\x67\x02\0\0\x20\0\0\0\x4f\x12\0\0\0\0\0\0\x7b\x2a\xb0\xff\ +\0\0\0\0\x05\0\x6b\0\0\0\0\0\x71\xa2\x52\xff\0\0\0\0\x15\x02\x04\0\0\0\0\0\xbf\ +\x12\0\0\0\0\0\0\x57\x02\0\0\x04\0\0\0\x15\x02\x01\0\0\0\0\0\x05\0\xf4\xfe\0\0\ +\0\0\x57\x01\0\0\x01\0\0\0\x15\x01\xcd\0\0\0\0\0\x61\xa1\x5c\xff\0\0\0\0\x63\ +\x1a\xa0\xff\0\0\0\0\x61\xa1\x60\xff\0\0\0\0\x63\x1a\xa4\xff\0\0\0\0\x05\0\x5e\ +\0\0\0\0\0\xb7\x09\0\0\x3c\0\0\0\x79\xa7\x28\xff\0\0\0\0\x79\xa8\x20\xff\0\0\0\ +\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x15\0\xac\xff\0\0\0\0\x05\0\xc1\0\0\ +\0\0\0\x71\xa2\x52\xff\0\0\0\0\x15\x02\x26\0\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\ +\x02\0\0\x20\0\0\0\x15\x02\x23\0\0\0\0\0\x57\x01\0\0\0\x01\0\0\xb7\x02\0\0\x10\ \0\0\0\xb7\x03\0\0\x10\0\0\0\x15\x01\x01\0\0\0\0\0\xb7\x03\0\0\x30\0\0\0\x71\ -\xa4\x5d\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\0\0\0\0\0\0\xbf\xa3\0\0\0\0\ -\0\0\x07\x03\0\0\x64\xff\xff\xff\xbf\x34\0\0\0\0\0\0\x15\x01\x02\0\0\0\0\0\xbf\ -\xa4\0\0\0\0\0\0\x07\x04\0\0\x84\xff\xff\xff\x71\xa5\x5c\xff\0\0\0\0\xbf\x31\0\ -\0\0\0\0\0\x15\x05\x01\0\0\0\0\0\xbf\x41\0\0\0\0\0\0\x61\x14\x04\0\0\0\0\0\x67\ -\x04\0\0\x20\0\0\0\x61\x15\0\0\0\0\0\0\x4f\x54\0\0\0\0\0\0\x7b\x4a\xa8\xff\0\0\ -\0\0\x61\x14\x08\0\0\0\0\0\x61\x11\x0c\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x4f\x41\ -\0\0\0\0\0\0\x7b\x1a\xb0\xff\0\0\0\0\x0f\x23\0\0\0\0\0\0\x61\x31\0\0\0\0\0\0\ -\x61\x32\x04\0\0\0\0\0\x61\x34\x08\0\0\0\0\0\x61\x33\x0c\0\0\0\0\0\x69\xa5\x62\ -\xff\0\0\0\0\x6b\x5a\xca\xff\0\0\0\0\x69\xa5\x60\xff\0\0\0\0\x6b\x5a\xc8\xff\0\ -\0\0\0\x67\x03\0\0\x20\0\0\0\x4f\x43\0\0\0\0\0\0\x7b\x3a\xc0\xff\0\0\0\0\x67\ -\x02\0\0\x20\0\0\0\x4f\x12\0\0\0\0\0\0\x7b\x2a\xb8\xff\0\0\0\0\x05\0\x6b\0\0\0\ -\0\0\x71\xa2\x5a\xff\0\0\0\0\x15\x02\x04\0\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\ -\0\0\x04\0\0\0\x15\x02\x01\0\0\0\0\0\x05\0\xf1\xfe\0\0\0\0\x57\x01\0\0\x01\0\0\ -\0\x15\x01\xd0\0\0\0\0\0\x61\xa1\x64\xff\0\0\0\0\x63\x1a\xa8\xff\0\0\0\0\x61\ 
-\xa1\x68\xff\0\0\0\0\x63\x1a\xac\xff\0\0\0\0\x05\0\x5e\0\0\0\0\0\xb7\x09\0\0\ -\x3c\0\0\0\x79\xa8\x28\xff\0\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x15\ -\0\xac\xff\0\0\0\0\x05\0\xc5\0\0\0\0\0\x71\xa2\x5a\xff\0\0\0\0\x15\x02\x26\0\0\ -\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x20\0\0\0\x15\x02\x23\0\0\0\0\0\x57\x01\ -\0\0\0\x01\0\0\xb7\x02\0\0\x10\0\0\0\xb7\x03\0\0\x10\0\0\0\x15\x01\x01\0\0\0\0\ -\0\xb7\x03\0\0\x30\0\0\0\x71\xa4\x5d\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\ -\0\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\x64\xff\xff\xff\xbf\x34\0\0\0\0\0\ -\0\x15\x01\x02\0\0\0\0\0\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\x84\xff\xff\xff\x71\ -\xa5\x5c\xff\0\0\0\0\xbf\x31\0\0\0\0\0\0\x15\x05\xbd\xff\0\0\0\0\x05\0\xbb\xff\ -\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x5a\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\ -\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\xff\xff\x79\xa1\ -\x40\xff\0\0\0\0\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\x08\0\0\0\xb7\x05\0\0\x01\ -\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\xa0\0\ -\0\0\0\0\x05\0\xa8\xfe\0\0\0\0\x15\x09\xf3\xfe\x87\0\0\0\x05\0\x83\xff\0\0\0\0\ -\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x08\0\0\0\x15\x02\x9a\0\0\0\0\0\x57\x01\0\0\ -\x40\0\0\0\xb7\x02\0\0\x0c\0\0\0\xb7\x03\0\0\x0c\0\0\0\x15\x01\x01\0\0\0\0\0\ -\xb7\x03\0\0\x2c\0\0\0\x71\xa4\x5c\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\0\ -\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\x58\xff\xff\xff\x0f\x23\0\0\0\0\0\0\ -\x61\x32\x04\0\0\0\0\0\x67\x02\0\0\x20\0\0\0\x61\x34\0\0\0\0\0\0\x4f\x42\0\0\0\ -\0\0\0\x7b\x2a\xa8\xff\0\0\0\0\x61\x32\x08\0\0\0\0\0\x61\x33\x0c\0\0\0\0\0\x67\ -\x03\0\0\x20\0\0\0\x4f\x23\0\0\0\0\0\0\x7b\x3a\xb0\xff\0\0\0\0\x71\xa2\x5d\xff\ -\0\0\0\0\x15\x02\x0c\0\0\0\0\0\x15\x01\x0b\0\0\0\0\0\x61\xa1\xa0\xff\0\0\0\0\ -\x67\x01\0\0\x20\0\0\0\x61\xa2\x9c\xff\0\0\0\0\x4f\x21\0\0\0\0\0\0\x7b\x1a\xc0\ -\xff\0\0\0\0\x61\xa1\x98\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\x94\xff\0\0\ -\0\0\x05\0\x0a\0\0\0\0\0\xb7\x09\0\0\x2b\0\0\0\x05\0\xae\xff\0\0\0\0\x61\xa1\ -\x80\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\x7c\xff\0\0\0\0\x4f\x21\0\0\0\0\ -\0\0\x7b\x1a\xc0\xff\0\0\0\0\x61\xa1\x78\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\ -\xa2\x74\xff\0\0\0\0\x4f\x21\0\0\0\0\0\0\x7b\x1a\xb8\xff\0\0\0\0\xb7\x02\0\0\0\ -\0\0\0\x07\x08\0\0\x04\0\0\0\x61\x03\0\0\0\0\0\0\xb7\x05\0\0\0\0\0\0\xbf\xa1\0\ -\0\0\0\0\0\x07\x01\0\0\xa8\xff\xff\xff\x0f\x21\0\0\0\0\0\0\x71\x14\0\0\0\0\0\0\ -\xbf\x41\0\0\0\0\0\0\x67\x01\0\0\x38\0\0\0\xc7\x01\0\0\x3f\0\0\0\x5f\x31\0\0\0\ -\0\0\0\xaf\x51\0\0\0\0\0\0\xbf\x85\0\0\0\0\0\0\x0f\x25\0\0\0\0\0\0\x71\x55\0\0\ -\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x07\0\0\0\x4f\x03\ -\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x39\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\ -\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x06\0\0\0\ -\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\ -\0\0\x67\0\0\0\x3a\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\ -\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x05\0\0\0\x57\0\0\0\ -\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3b\0\0\0\xc7\0\0\ -\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\ -\x50\0\0\0\0\0\0\x77\0\0\0\x04\0\0\0\x57\0\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\ -\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3c\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\ -\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x03\0\0\0\x57\0\0\0\x01\0\ +\xa4\x55\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\0\0\0\0\0\0\xbf\xa3\0\0\0\0\ 
+\0\0\x07\x03\0\0\x5c\xff\xff\xff\xbf\x34\0\0\0\0\0\0\x15\x01\x02\0\0\0\0\0\xbf\ +\xa4\0\0\0\0\0\0\x07\x04\0\0\x7c\xff\xff\xff\x71\xa5\x54\xff\0\0\0\0\xbf\x31\0\ +\0\0\0\0\0\x15\x05\xbc\xff\0\0\0\0\x05\0\xba\xff\0\0\0\0\xb7\x01\0\0\x01\0\0\0\ +\x73\x1a\x52\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\x1a\xc8\xff\0\0\0\0\xbf\xa3\0\ +\0\0\0\0\0\x07\x03\0\0\xc8\xff\xff\xff\x79\xa1\x38\xff\0\0\0\0\x79\xa2\x40\xff\ +\0\0\0\0\xb7\x04\0\0\x08\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\ +\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\x9c\0\0\0\0\0\x05\0\xab\xfe\0\0\0\0\ +\x15\x09\xf5\xfe\x87\0\0\0\x05\0\x83\xff\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\ +\0\x08\0\0\0\x15\x02\x96\0\0\0\0\0\x57\x01\0\0\x40\0\0\0\xb7\x02\0\0\x0c\0\0\0\ +\xb7\x03\0\0\x0c\0\0\0\x15\x01\x01\0\0\0\0\0\xb7\x03\0\0\x2c\0\0\0\x71\xa4\x54\ +\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\0\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\ +\x03\0\0\x50\xff\xff\xff\x0f\x23\0\0\0\0\0\0\x61\x32\x04\0\0\0\0\0\x67\x02\0\0\ +\x20\0\0\0\x61\x34\0\0\0\0\0\0\x4f\x42\0\0\0\0\0\0\x7b\x2a\xa0\xff\0\0\0\0\x61\ +\x32\x08\0\0\0\0\0\x61\x33\x0c\0\0\0\0\0\x67\x03\0\0\x20\0\0\0\x4f\x23\0\0\0\0\ +\0\0\x7b\x3a\xa8\xff\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\xb0\xff\xff\xff\ +\x71\xa3\x55\xff\0\0\0\0\x15\x03\x0b\0\0\0\0\0\x15\x01\x0a\0\0\0\0\0\x61\xa1\ +\x98\xff\0\0\0\0\x63\x12\x0c\0\0\0\0\0\x61\xa1\x94\xff\0\0\0\0\x63\x12\x08\0\0\ +\0\0\0\x61\xa1\x90\xff\0\0\0\0\x63\x12\x04\0\0\0\0\0\x61\xa1\x8c\xff\0\0\0\0\ +\x05\0\x09\0\0\0\0\0\xb7\x09\0\0\x2b\0\0\0\x05\0\xad\xff\0\0\0\0\x61\xa1\x78\ +\xff\0\0\0\0\x63\x12\x0c\0\0\0\0\0\x61\xa1\x74\xff\0\0\0\0\x63\x12\x08\0\0\0\0\ +\0\x61\xa1\x70\xff\0\0\0\0\x63\x12\x04\0\0\0\0\0\x61\xa1\x6c\xff\0\0\0\0\x63\ +\x12\0\0\0\0\0\0\xb7\x02\0\0\0\0\0\0\x07\x07\0\0\x04\0\0\0\x61\x83\0\0\0\0\0\0\ +\xb7\x05\0\0\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\xa0\xff\xff\xff\x0f\x21\0\ +\0\0\0\0\0\x71\x14\0\0\0\0\0\0\xbf\x41\0\0\0\0\0\0\x67\x01\0\0\x38\0\0\0\xc7\ +\x01\0\0\x3f\0\0\0\x5f\x31\0\0\0\0\0\0\xaf\x51\0\0\0\0\0\0\xbf\x75\0\0\0\0\0\0\ +\x0f\x25\0\0\0\0\0\0\x71\x55\0\0\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\0\ +\0\0\x77\0\0\0\x07\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x39\ +\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\ +\0\0\0\0\x77\0\0\0\x06\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\ +\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3a\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\ +\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\0\0\0\ +\x77\0\0\0\x05\0\0\0\x57\0\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\ +\0\x67\0\0\0\x3b\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\ +\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x04\0\0\0\x57\0\0\0\ +\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3c\0\0\0\xc7\0\0\ +\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\ +\0\0\x03\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\ +\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3d\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\ +\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x02\0\0\0\x57\0\0\0\x01\0\ \0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\ -\x3d\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\ -\0\0\0\0\0\0\x77\0\0\0\x02\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\ -\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3e\0\0\0\xc7\0\0\0\x3f\0\0\0\ -\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x01\0\0\ 
-\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\x57\x04\0\0\ -\x01\0\0\0\x87\x04\0\0\0\0\0\0\x5f\x34\0\0\0\0\0\0\xaf\x41\0\0\0\0\0\0\x57\x05\ -\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x53\0\0\0\0\0\0\x07\x02\0\0\x01\0\0\0\ -\xbf\x15\0\0\0\0\0\0\x15\x02\x01\0\x24\0\0\0\x05\0\xa9\xff\0\0\0\0\xbf\x12\0\0\ -\0\0\0\0\x67\x02\0\0\x20\0\0\0\x77\x02\0\0\x20\0\0\0\x15\x02\x0e\0\0\0\0\0\x71\ -\x63\x06\0\0\0\0\0\x71\x64\x07\0\0\0\0\0\x67\x04\0\0\x08\0\0\0\x4f\x34\0\0\0\0\ -\0\0\x3f\x42\0\0\0\0\0\0\x2f\x42\0\0\0\0\0\0\x1f\x21\0\0\0\0\0\0\x63\x1a\x58\ -\xff\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\x58\xff\xff\xff\x18\x01\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\x55\0\x05\0\0\0\0\0\x71\x61\x08\0\0\0\0\ -\0\x71\x60\x09\0\0\0\0\0\x67\0\0\0\x08\0\0\0\x4f\x10\0\0\0\0\0\0\x95\0\0\0\0\0\ -\0\0\x69\0\0\0\0\0\0\0\x05\0\xfd\xff\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\x3e\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\ +\0\0\0\0\0\0\x77\0\0\0\x01\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\ +\x03\0\0\0\0\0\0\x57\x04\0\0\x01\0\0\0\x87\x04\0\0\0\0\0\0\x5f\x34\0\0\0\0\0\0\ +\xaf\x41\0\0\0\0\0\0\x57\x05\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x53\0\0\0\ +\0\0\0\x07\x02\0\0\x01\0\0\0\xbf\x15\0\0\0\0\0\0\x15\x02\x01\0\x24\0\0\0\x05\0\ +\xa9\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x15\x01\x0c\0\0\0\ +\0\0\x71\x62\x06\0\0\0\0\0\x71\x63\x07\0\0\0\0\0\x67\x03\0\0\x08\0\0\0\x4f\x23\ +\0\0\0\0\0\0\x9f\x31\0\0\0\0\0\0\x63\x1a\x50\xff\0\0\0\0\xbf\xa2\0\0\0\0\0\0\ +\x07\x02\0\0\x50\xff\xff\xff\x18\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x85\0\0\0\x01\ +\0\0\0\x55\0\x05\0\0\0\0\0\x71\x61\x08\0\0\0\0\0\x71\x60\x09\0\0\0\0\0\x67\0\0\ +\0\x08\0\0\0\x4f\x10\0\0\0\0\0\0\x95\0\0\0\0\0\0\0\x69\0\0\0\0\0\0\0\x05\0\xfd\ +\xff\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\x47\x50\x4c\x20\x76\x32\0\0\x9f\xeb\x01\0\x18\0\0\0\0\0\0\0\x10\x05\0\0\x10\ -\x05\0\0\x65\x11\0\0\0\0\0\0\0\0\0\x02\x03\0\0\0\x01\0\0\0\0\0\0\x01\x04\0\0\0\ -\x20\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x02\0\0\0\x05\0\0\0\ -\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\x02\x06\0\0\0\0\0\0\0\0\0\0\x03\0\ -\0\0\0\x02\0\0\0\x04\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\x02\x08\0\0\0\0\0\0\0\0\0\0\ -\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x0a\0\0\0\0\0\0\0\0\0\0\x02\x0a\0\0\0\0\0\0\0\ -\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x01\0\0\0\0\0\0\0\x04\0\0\x04\x20\0\0\0\ -\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\x05\0\0\0\x40\0\0\0\x27\0\0\0\x07\0\0\0\ -\x80\0\0\0\x32\0\0\0\x09\0\0\0\xc0\0\0\0\x3e\0\0\0\0\0\0\x0e\x0b\0\0\0\x01\0\0\ -\0\0\0\0\0\0\0\0\x02\x0e\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\ -\x28\0\0\0\0\0\0\0\x04\0\0\x04\x20\0\0\0\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\ -\x05\0\0\0\x40\0\0\0\x27\0\0\0\x0d\0\0\0\x80\0\0\0\x32\0\0\0\x09\0\0\0\xc0\0\0\ -\0\x59\0\0\0\0\0\0\x0e\x0f\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\x02\x12\0\0\0\0\0\0\0\ -\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x80\0\0\0\0\0\0\0\x04\0\0\x04\x20\0\0\0\ -\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\x05\0\0\0\x40\0\0\0\x27\0\0\0\x01\0\0\0\ -\x80\0\0\0\x32\0\0\0\x11\0\0\0\xc0\0\0\0\x72\0\0\0\0\0\0\x0e\x13\0\0\0\x01\0\0\ -\0\0\0\0\0\0\0\0\x02\x16\0\0\0\x90\0\0\0\x22\0\0\x04\xc0\0\0\0\x9a\0\0\0\x17\0\ -\0\0\0\0\0\0\x9e\0\0\0\x17\0\0\0\x20\0\0\0\xa7\0\0\0\x17\0\0\0\x40\0\0\0\xac\0\ -\0\0\x17\0\0\0\x60\0\0\0\xba\0\0\0\x17\0\0\0\x80\0\0\0\xc3\0\0\0\x17\0\0\0\xa0\ 
-\0\0\0\xd0\0\0\0\x17\0\0\0\xc0\0\0\0\xd9\0\0\0\x17\0\0\0\xe0\0\0\0\xe4\0\0\0\ -\x17\0\0\0\0\x01\0\0\xed\0\0\0\x17\0\0\0\x20\x01\0\0\xfd\0\0\0\x17\0\0\0\x40\ -\x01\0\0\x05\x01\0\0\x17\0\0\0\x60\x01\0\0\x0e\x01\0\0\x19\0\0\0\x80\x01\0\0\ -\x11\x01\0\0\x17\0\0\0\x20\x02\0\0\x16\x01\0\0\x17\0\0\0\x40\x02\0\0\x21\x01\0\ -\0\x17\0\0\0\x60\x02\0\0\x26\x01\0\0\x17\0\0\0\x80\x02\0\0\x2f\x01\0\0\x17\0\0\ -\0\xa0\x02\0\0\x37\x01\0\0\x17\0\0\0\xc0\x02\0\0\x3e\x01\0\0\x17\0\0\0\xe0\x02\ -\0\0\x49\x01\0\0\x17\0\0\0\0\x03\0\0\x53\x01\0\0\x1a\0\0\0\x20\x03\0\0\x5e\x01\ -\0\0\x1a\0\0\0\xa0\x03\0\0\x68\x01\0\0\x17\0\0\0\x20\x04\0\0\x74\x01\0\0\x17\0\ -\0\0\x40\x04\0\0\x7f\x01\0\0\x17\0\0\0\x60\x04\0\0\0\0\0\0\x1b\0\0\0\x80\x04\0\ -\0\x89\x01\0\0\x1d\0\0\0\xc0\x04\0\0\x90\x01\0\0\x17\0\0\0\0\x05\0\0\x99\x01\0\ -\0\x17\0\0\0\x20\x05\0\0\0\0\0\0\x1f\0\0\0\x40\x05\0\0\xa2\x01\0\0\x17\0\0\0\ -\x80\x05\0\0\xab\x01\0\0\x21\0\0\0\xa0\x05\0\0\xb7\x01\0\0\x1d\0\0\0\xc0\x05\0\ -\0\xc0\x01\0\0\0\0\0\x08\x18\0\0\0\xc6\x01\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\ -\0\0\0\0\0\0\x03\0\0\0\0\x17\0\0\0\x04\0\0\0\x05\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\ -\0\x17\0\0\0\x04\0\0\0\x04\0\0\0\0\0\0\0\x01\0\0\x05\x08\0\0\0\xd3\x01\0\0\x1c\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\x2a\0\0\0\xdd\x01\0\0\0\0\0\x08\x1e\0\0\0\xe3\ -\x01\0\0\0\0\0\x01\x08\0\0\0\x40\0\0\0\0\0\0\0\x01\0\0\x05\x08\0\0\0\xf6\x01\0\ -\0\x20\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\x2b\0\0\0\xf9\x01\0\0\0\0\0\x08\x22\0\0\ -\0\xfe\x01\0\0\0\0\0\x01\x01\0\0\0\x08\0\0\0\0\0\0\0\x01\0\0\x0d\x02\0\0\0\x0c\ -\x02\0\0\x15\0\0\0\x10\x02\0\0\x01\0\0\x0c\x23\0\0\0\x32\x11\0\0\0\0\0\x01\x01\ -\0\0\0\x08\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x25\0\0\0\x04\0\0\0\x07\0\0\0\x37\ -\x11\0\0\0\0\0\x0e\x26\0\0\0\x01\0\0\0\x40\x11\0\0\x03\0\0\x0f\0\0\0\0\x0c\0\0\ -\0\0\0\0\0\x20\0\0\0\x10\0\0\0\0\0\0\0\x20\0\0\0\x14\0\0\0\0\0\0\0\x20\0\0\0\ -\x46\x11\0\0\x01\0\0\x0f\0\0\0\0\x27\0\0\0\0\0\0\0\x07\0\0\0\x4e\x11\0\0\0\0\0\ -\x07\0\0\0\0\x5c\x11\0\0\0\0\0\x07\0\0\0\0\0\x69\x6e\x74\0\x5f\x5f\x41\x52\x52\ -\x41\x59\x5f\x53\x49\x5a\x45\x5f\x54\x59\x50\x45\x5f\x5f\0\x74\x79\x70\x65\0\ -\x6b\x65\x79\x5f\x73\x69\x7a\x65\0\x76\x61\x6c\x75\x65\x5f\x73\x69\x7a\x65\0\ -\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\0\x74\x61\x70\x5f\x72\x73\x73\x5f\ -\x6d\x61\x70\x5f\x63\x6f\x6e\x66\x69\x67\x75\x72\x61\x74\x69\x6f\x6e\x73\0\x74\ -\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\ -\x5f\x6b\x65\x79\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x69\x6e\x64\ -\x69\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\x61\x62\x6c\x65\0\x5f\x5f\x73\x6b\x5f\ -\x62\x75\x66\x66\0\x6c\x65\x6e\0\x70\x6b\x74\x5f\x74\x79\x70\x65\0\x6d\x61\x72\ -\x6b\0\x71\x75\x65\x75\x65\x5f\x6d\x61\x70\x70\x69\x6e\x67\0\x70\x72\x6f\x74\ -\x6f\x63\x6f\x6c\0\x76\x6c\x61\x6e\x5f\x70\x72\x65\x73\x65\x6e\x74\0\x76\x6c\ -\x61\x6e\x5f\x74\x63\x69\0\x76\x6c\x61\x6e\x5f\x70\x72\x6f\x74\x6f\0\x70\x72\ -\x69\x6f\x72\x69\x74\x79\0\x69\x6e\x67\x72\x65\x73\x73\x5f\x69\x66\x69\x6e\x64\ -\x65\x78\0\x69\x66\x69\x6e\x64\x65\x78\0\x74\x63\x5f\x69\x6e\x64\x65\x78\0\x63\ -\x62\0\x68\x61\x73\x68\0\x74\x63\x5f\x63\x6c\x61\x73\x73\x69\x64\0\x64\x61\x74\ -\x61\0\x64\x61\x74\x61\x5f\x65\x6e\x64\0\x6e\x61\x70\x69\x5f\x69\x64\0\x66\x61\ -\x6d\x69\x6c\x79\0\x72\x65\x6d\x6f\x74\x65\x5f\x69\x70\x34\0\x6c\x6f\x63\x61\ -\x6c\x5f\x69\x70\x34\0\x72\x65\x6d\x6f\x74\x65\x5f\x69\x70\x36\0\x6c\x6f\x63\ -\x61\x6c\x5f\x69\x70\x36\0\x72\x65\x6d\x6f\x74\x65\x5f\x70\x6f\x72\x74\0\x6c\ -\x6f\x63\x61\x6c\x5f\x70\x6f\x72\x74\0\x64\x61\x74\x61\x5f\x6d\x65\x74\x61\0\ 
-\x74\x73\x74\x61\x6d\x70\0\x77\x69\x72\x65\x5f\x6c\x65\x6e\0\x67\x73\x6f\x5f\ -\x73\x65\x67\x73\0\x67\x73\x6f\x5f\x73\x69\x7a\x65\0\x74\x73\x74\x61\x6d\x70\ -\x5f\x74\x79\x70\x65\0\x68\x77\x74\x73\x74\x61\x6d\x70\0\x5f\x5f\x75\x33\x32\0\ -\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x69\x6e\x74\0\x66\x6c\x6f\x77\x5f\x6b\x65\ -\x79\x73\0\x5f\x5f\x75\x36\x34\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x6c\x6f\ -\x6e\x67\x20\x6c\x6f\x6e\x67\0\x73\x6b\0\x5f\x5f\x75\x38\0\x75\x6e\x73\x69\x67\ -\x6e\x65\x64\x20\x63\x68\x61\x72\0\x73\x6b\x62\0\x74\x75\x6e\x5f\x72\x73\x73\ -\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\x5f\x70\x72\x6f\x67\0\x74\x75\x6e\x5f\x72\ -\x73\x73\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\0\x2f\x68\x6f\x6d\x65\x2f\x73\x68\ -\x72\x65\x65\x73\x68\x2f\x63\x2f\x71\x65\x6d\x75\x2f\x74\x6f\x6f\x6c\x73\x2f\ -\x65\x62\x70\x66\x2f\x72\x73\x73\x2e\x62\x70\x66\x2e\x63\0\x69\x6e\x74\x20\x74\ -\x75\x6e\x5f\x72\x73\x73\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\x5f\x70\x72\x6f\ -\x67\x28\x73\x74\x72\x75\x63\x74\x20\x5f\x5f\x73\x6b\x5f\x62\x75\x66\x66\x20\ -\x2a\x73\x6b\x62\x29\0\x20\x20\x20\x20\x5f\x5f\x75\x33\x32\x20\x6b\x65\x79\x20\ -\x3d\x20\x30\x3b\0\x20\x20\x20\x20\x63\x6f\x6e\x66\x69\x67\x20\x3d\x20\x62\x70\ -\x66\x5f\x6d\x61\x70\x5f\x6c\x6f\x6f\x6b\x75\x70\x5f\x65\x6c\x65\x6d\x28\x26\ -\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x63\x6f\x6e\x66\x69\x67\x75\ -\x72\x61\x74\x69\x6f\x6e\x73\x2c\x20\x26\x6b\x65\x79\x29\x3b\0\x20\x20\x20\x20\ -\x74\x6f\x65\x20\x3d\x20\x62\x70\x66\x5f\x6d\x61\x70\x5f\x6c\x6f\x6f\x6b\x75\ -\x70\x5f\x65\x6c\x65\x6d\x28\x26\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\ -\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\x5f\x6b\x65\x79\x2c\x20\x26\x6b\x65\x79\ -\x29\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x63\x6f\x6e\x66\x69\x67\x20\x26\x26\ -\x20\x74\x6f\x65\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\ -\x21\x63\x6f\x6e\x66\x69\x67\x2d\x3e\x72\x65\x64\x69\x72\x65\x63\x74\x29\x20\ -\x7b\0\x20\x20\x20\x20\x5f\x5f\x75\x38\x20\x72\x73\x73\x5f\x69\x6e\x70\x75\x74\ -\x5b\x48\x41\x53\x48\x5f\x43\x41\x4c\x43\x55\x4c\x41\x54\x49\x4f\x4e\x5f\x42\ -\x55\x46\x46\x45\x52\x5f\x53\x49\x5a\x45\x5d\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\ -\x20\x20\x73\x74\x72\x75\x63\x74\x20\x70\x61\x63\x6b\x65\x74\x5f\x68\x61\x73\ -\x68\x5f\x69\x6e\x66\x6f\x5f\x74\x20\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\ -\x6f\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x21\x69\x6e\x66\ -\x6f\x20\x7c\x7c\x20\x21\x73\x6b\x62\x29\x20\x7b\0\x20\x20\x20\x20\x5f\x5f\x62\ -\x65\x31\x36\x20\x72\x65\x74\x20\x3d\x20\x30\x3b\0\x20\x20\x20\x20\x65\x72\x72\ -\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\ -\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x6f\x66\ -\x66\x73\x65\x74\x2c\x20\x26\x72\x65\x74\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\ -\x72\x65\x74\x29\x2c\0\x20\x20\x20\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\x7b\ -\0\x20\x20\x20\x20\x73\x77\x69\x74\x63\x68\x20\x28\x62\x70\x66\x5f\x6e\x74\x6f\ -\x68\x73\x28\x72\x65\x74\x29\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x65\ -\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\ -\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\ -\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x72\x65\x74\x2c\x20\x73\x69\x7a\x65\x6f\ -\x66\x28\x72\x65\x74\x29\x2c\0\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\ -\x65\x74\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x6c\x33\x5f\x70\x72\x6f\x74\x6f\ -\x63\x6f\x6c\x20\x3d\x3d\x20\x30\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\ 
-\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x34\x20\x3d\x20\x31\x3b\0\x20\ -\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x69\x70\x68\x64\x72\ -\x20\x69\x70\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\ -\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\ -\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x30\ -\x2c\x20\x26\x69\x70\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\x69\x70\x29\x2c\0\x20\ -\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\ -\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x66\x72\x61\x67\ -\x6d\x65\x6e\x74\x65\x64\x20\x3d\x20\x21\x21\x69\x70\x2e\x66\x72\x61\x67\x5f\ -\x6f\x66\x66\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\ -\x6e\x5f\x73\x72\x63\x20\x3d\x20\x69\x70\x2e\x73\x61\x64\x64\x72\x3b\0\x20\x20\ -\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x6e\x5f\x64\x73\x74\x20\ -\x3d\x20\x69\x70\x2e\x64\x61\x64\x64\x72\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\ -\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x20\x3d\x20\x69\x70\x2e\x70\x72\ -\x6f\x74\x6f\x63\x6f\x6c\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x6c\x34\x5f\x6f\ -\x66\x66\x73\x65\x74\x20\x3d\x20\x69\x70\x2e\x69\x68\x6c\x20\x2a\x20\x34\x3b\0\ +\0\0\0\0\0\0\0\0\0\x47\x50\x4c\x20\x76\x32\0\0\x9f\xeb\x01\0\x18\0\0\0\0\0\0\0\ +\x58\x05\0\0\x58\x05\0\0\x85\x11\0\0\0\0\0\0\0\0\0\x02\x03\0\0\0\x01\0\0\0\0\0\ +\0\x01\x04\0\0\0\x20\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x02\ +\0\0\0\x05\0\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\x02\x06\0\0\0\0\0\ +\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\x02\x08\0\0\ +\0\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x0a\0\0\0\0\0\0\0\0\0\0\x02\ +\x0a\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x01\0\0\0\0\0\0\0\0\0\ +\0\x02\x0c\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\0\x04\0\0\0\0\0\ +\0\x05\0\0\x04\x28\0\0\0\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\x05\0\0\0\x40\0\ +\0\0\x27\0\0\0\x07\0\0\0\x80\0\0\0\x32\0\0\0\x09\0\0\0\xc0\0\0\0\x3e\0\0\0\x0b\ +\0\0\0\0\x01\0\0\x48\0\0\0\0\0\0\x0e\x0d\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\x02\x10\ +\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x28\0\0\0\0\0\0\0\x05\0\0\ +\x04\x28\0\0\0\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\x05\0\0\0\x40\0\0\0\x27\0\ +\0\0\x0f\0\0\0\x80\0\0\0\x32\0\0\0\x09\0\0\0\xc0\0\0\0\x3e\0\0\0\x0b\0\0\0\0\ +\x01\0\0\x63\0\0\0\0\0\0\x0e\x11\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\x02\x14\0\0\0\0\ +\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x80\0\0\0\0\0\0\0\x05\0\0\x04\x28\ +\0\0\0\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\x05\0\0\0\x40\0\0\0\x27\0\0\0\x01\ +\0\0\0\x80\0\0\0\x32\0\0\0\x13\0\0\0\xc0\0\0\0\x3e\0\0\0\x0b\0\0\0\0\x01\0\0\ +\x7c\0\0\0\0\0\0\x0e\x15\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\x02\x18\0\0\0\x9a\0\0\0\ +\x22\0\0\x04\xc0\0\0\0\xa4\0\0\0\x19\0\0\0\0\0\0\0\xa8\0\0\0\x19\0\0\0\x20\0\0\ +\0\xb1\0\0\0\x19\0\0\0\x40\0\0\0\xb6\0\0\0\x19\0\0\0\x60\0\0\0\xc4\0\0\0\x19\0\ +\0\0\x80\0\0\0\xcd\0\0\0\x19\0\0\0\xa0\0\0\0\xda\0\0\0\x19\0\0\0\xc0\0\0\0\xe3\ +\0\0\0\x19\0\0\0\xe0\0\0\0\xee\0\0\0\x19\0\0\0\0\x01\0\0\xf7\0\0\0\x19\0\0\0\ +\x20\x01\0\0\x07\x01\0\0\x19\0\0\0\x40\x01\0\0\x0f\x01\0\0\x19\0\0\0\x60\x01\0\ +\0\x18\x01\0\0\x1b\0\0\0\x80\x01\0\0\x1b\x01\0\0\x19\0\0\0\x20\x02\0\0\x20\x01\ +\0\0\x19\0\0\0\x40\x02\0\0\x2b\x01\0\0\x19\0\0\0\x60\x02\0\0\x30\x01\0\0\x19\0\ +\0\0\x80\x02\0\0\x39\x01\0\0\x19\0\0\0\xa0\x02\0\0\x41\x01\0\0\x19\0\0\0\xc0\ +\x02\0\0\x48\x01\0\0\x19\0\0\0\xe0\x02\0\0\x53\x01\0\0\x19\0\0\0\0\x03\0\0\x5d\ 
+\x01\0\0\x1c\0\0\0\x20\x03\0\0\x68\x01\0\0\x1c\0\0\0\xa0\x03\0\0\x72\x01\0\0\ +\x19\0\0\0\x20\x04\0\0\x7e\x01\0\0\x19\0\0\0\x40\x04\0\0\x89\x01\0\0\x19\0\0\0\ +\x60\x04\0\0\0\0\0\0\x1d\0\0\0\x80\x04\0\0\x93\x01\0\0\x1f\0\0\0\xc0\x04\0\0\ +\x9a\x01\0\0\x19\0\0\0\0\x05\0\0\xa3\x01\0\0\x19\0\0\0\x20\x05\0\0\0\0\0\0\x21\ +\0\0\0\x40\x05\0\0\xac\x01\0\0\x19\0\0\0\x80\x05\0\0\xb5\x01\0\0\x23\0\0\0\xa0\ +\x05\0\0\xc1\x01\0\0\x1f\0\0\0\xc0\x05\0\0\xca\x01\0\0\0\0\0\x08\x1a\0\0\0\xd0\ +\x01\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x19\0\0\0\x04\ +\0\0\0\x05\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x19\0\0\0\x04\0\0\0\x04\0\0\0\0\0\0\ +\0\x01\0\0\x05\x08\0\0\0\xdd\x01\0\0\x1e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\x2c\0\ +\0\0\xe7\x01\0\0\0\0\0\x08\x20\0\0\0\xed\x01\0\0\0\0\0\x01\x08\0\0\0\x40\0\0\0\ +\0\0\0\0\x01\0\0\x05\x08\0\0\0\0\x02\0\0\x22\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\ +\x2d\0\0\0\x03\x02\0\0\0\0\0\x08\x24\0\0\0\x08\x02\0\0\0\0\0\x01\x01\0\0\0\x08\ +\0\0\0\0\0\0\0\x01\0\0\x0d\x02\0\0\0\x16\x02\0\0\x17\0\0\0\x1a\x02\0\0\x01\0\0\ +\x0c\x25\0\0\0\x52\x11\0\0\0\0\0\x01\x01\0\0\0\x08\0\0\x01\0\0\0\0\0\0\0\x03\0\ +\0\0\0\x27\0\0\0\x04\0\0\0\x07\0\0\0\x57\x11\0\0\0\0\0\x0e\x28\0\0\0\x01\0\0\0\ +\x60\x11\0\0\x03\0\0\x0f\0\0\0\0\x0e\0\0\0\0\0\0\0\x28\0\0\0\x12\0\0\0\0\0\0\0\ +\x28\0\0\0\x16\0\0\0\0\0\0\0\x28\0\0\0\x66\x11\0\0\x01\0\0\x0f\0\0\0\0\x29\0\0\ +\0\0\0\0\0\x07\0\0\0\x6e\x11\0\0\0\0\0\x07\0\0\0\0\x7c\x11\0\0\0\0\0\x07\0\0\0\ +\0\0\x69\x6e\x74\0\x5f\x5f\x41\x52\x52\x41\x59\x5f\x53\x49\x5a\x45\x5f\x54\x59\ +\x50\x45\x5f\x5f\0\x74\x79\x70\x65\0\x6b\x65\x79\x5f\x73\x69\x7a\x65\0\x76\x61\ +\x6c\x75\x65\x5f\x73\x69\x7a\x65\0\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\ +\0\x6d\x61\x70\x5f\x66\x6c\x61\x67\x73\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\ +\x61\x70\x5f\x63\x6f\x6e\x66\x69\x67\x75\x72\x61\x74\x69\x6f\x6e\x73\0\x74\x61\ +\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\x5f\ +\x6b\x65\x79\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x69\x6e\x64\x69\ +\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\x61\x62\x6c\x65\0\x5f\x5f\x73\x6b\x5f\x62\ +\x75\x66\x66\0\x6c\x65\x6e\0\x70\x6b\x74\x5f\x74\x79\x70\x65\0\x6d\x61\x72\x6b\ +\0\x71\x75\x65\x75\x65\x5f\x6d\x61\x70\x70\x69\x6e\x67\0\x70\x72\x6f\x74\x6f\ +\x63\x6f\x6c\0\x76\x6c\x61\x6e\x5f\x70\x72\x65\x73\x65\x6e\x74\0\x76\x6c\x61\ +\x6e\x5f\x74\x63\x69\0\x76\x6c\x61\x6e\x5f\x70\x72\x6f\x74\x6f\0\x70\x72\x69\ +\x6f\x72\x69\x74\x79\0\x69\x6e\x67\x72\x65\x73\x73\x5f\x69\x66\x69\x6e\x64\x65\ +\x78\0\x69\x66\x69\x6e\x64\x65\x78\0\x74\x63\x5f\x69\x6e\x64\x65\x78\0\x63\x62\ +\0\x68\x61\x73\x68\0\x74\x63\x5f\x63\x6c\x61\x73\x73\x69\x64\0\x64\x61\x74\x61\ +\0\x64\x61\x74\x61\x5f\x65\x6e\x64\0\x6e\x61\x70\x69\x5f\x69\x64\0\x66\x61\x6d\ +\x69\x6c\x79\0\x72\x65\x6d\x6f\x74\x65\x5f\x69\x70\x34\0\x6c\x6f\x63\x61\x6c\ +\x5f\x69\x70\x34\0\x72\x65\x6d\x6f\x74\x65\x5f\x69\x70\x36\0\x6c\x6f\x63\x61\ +\x6c\x5f\x69\x70\x36\0\x72\x65\x6d\x6f\x74\x65\x5f\x70\x6f\x72\x74\0\x6c\x6f\ +\x63\x61\x6c\x5f\x70\x6f\x72\x74\0\x64\x61\x74\x61\x5f\x6d\x65\x74\x61\0\x74\ +\x73\x74\x61\x6d\x70\0\x77\x69\x72\x65\x5f\x6c\x65\x6e\0\x67\x73\x6f\x5f\x73\ +\x65\x67\x73\0\x67\x73\x6f\x5f\x73\x69\x7a\x65\0\x74\x73\x74\x61\x6d\x70\x5f\ +\x74\x79\x70\x65\0\x68\x77\x74\x73\x74\x61\x6d\x70\0\x5f\x5f\x75\x33\x32\0\x75\ +\x6e\x73\x69\x67\x6e\x65\x64\x20\x69\x6e\x74\0\x66\x6c\x6f\x77\x5f\x6b\x65\x79\ +\x73\0\x5f\x5f\x75\x36\x34\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x6c\x6f\x6e\ +\x67\x20\x6c\x6f\x6e\x67\0\x73\x6b\0\x5f\x5f\x75\x38\0\x75\x6e\x73\x69\x67\x6e\ 
+\x65\x64\x20\x63\x68\x61\x72\0\x73\x6b\x62\0\x74\x75\x6e\x5f\x72\x73\x73\x5f\ +\x73\x74\x65\x65\x72\x69\x6e\x67\x5f\x70\x72\x6f\x67\0\x73\x6f\x63\x6b\x65\x74\ +\0\x2f\x68\x6f\x6d\x65\x2f\x61\x6e\x64\x2f\x53\x52\x43\x53\x2f\x71\x65\x6d\x75\ +\x2f\x74\x6f\x6f\x6c\x73\x2f\x65\x62\x70\x66\x2f\x72\x73\x73\x2e\x62\x70\x66\ +\x2e\x63\0\x69\x6e\x74\x20\x74\x75\x6e\x5f\x72\x73\x73\x5f\x73\x74\x65\x65\x72\ +\x69\x6e\x67\x5f\x70\x72\x6f\x67\x28\x73\x74\x72\x75\x63\x74\x20\x5f\x5f\x73\ +\x6b\x5f\x62\x75\x66\x66\x20\x2a\x73\x6b\x62\x29\0\x20\x20\x20\x20\x5f\x5f\x75\ +\x33\x32\x20\x6b\x65\x79\x20\x3d\x20\x30\x3b\0\x20\x20\x20\x20\x63\x6f\x6e\x66\ +\x69\x67\x20\x3d\x20\x62\x70\x66\x5f\x6d\x61\x70\x5f\x6c\x6f\x6f\x6b\x75\x70\ +\x5f\x65\x6c\x65\x6d\x28\x26\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\ +\x63\x6f\x6e\x66\x69\x67\x75\x72\x61\x74\x69\x6f\x6e\x73\x2c\x20\x26\x6b\x65\ +\x79\x29\x3b\0\x20\x20\x20\x20\x74\x6f\x65\x20\x3d\x20\x62\x70\x66\x5f\x6d\x61\ +\x70\x5f\x6c\x6f\x6f\x6b\x75\x70\x5f\x65\x6c\x65\x6d\x28\x26\x74\x61\x70\x5f\ +\x72\x73\x73\x5f\x6d\x61\x70\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\x5f\x6b\x65\ +\x79\x2c\x20\x26\x6b\x65\x79\x29\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x63\x6f\ +\x6e\x66\x69\x67\x20\x26\x26\x20\x74\x6f\x65\x29\x20\x7b\0\x20\x20\x20\x20\x20\ +\x20\x20\x20\x69\x66\x20\x28\x21\x63\x6f\x6e\x66\x69\x67\x2d\x3e\x72\x65\x64\ +\x69\x72\x65\x63\x74\x29\x20\x7b\0\x20\x20\x20\x20\x5f\x5f\x75\x38\x20\x72\x73\ +\x73\x5f\x69\x6e\x70\x75\x74\x5b\x48\x41\x53\x48\x5f\x43\x41\x4c\x43\x55\x4c\ +\x41\x54\x49\x4f\x4e\x5f\x42\x55\x46\x46\x45\x52\x5f\x53\x49\x5a\x45\x5d\x20\ +\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x70\x61\x63\ +\x6b\x65\x74\x5f\x68\x61\x73\x68\x5f\x69\x6e\x66\x6f\x5f\x74\x20\x70\x61\x63\ +\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x69\ +\x66\x20\x28\x21\x69\x6e\x66\x6f\x20\x7c\x7c\x20\x21\x73\x6b\x62\x29\x20\x7b\0\ +\x20\x20\x20\x20\x5f\x5f\x62\x65\x31\x36\x20\x72\x65\x74\x20\x3d\x20\x30\x3b\0\ +\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\ +\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\ +\x73\x6b\x62\x2c\x20\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x72\x65\x74\x2c\x20\ +\x73\x69\x7a\x65\x6f\x66\x28\x72\x65\x74\x29\x2c\0\x20\x20\x20\x20\x69\x66\x20\ +\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\x20\x20\x73\x77\x69\x74\x63\x68\x20\x28\ +\x62\x70\x66\x5f\x6e\x74\x6f\x68\x73\x28\x72\x65\x74\x29\x29\x20\x7b\0\x20\x20\ +\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\ +\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\ +\x65\x28\x73\x6b\x62\x2c\x20\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x72\x65\x74\ +\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\x72\x65\x74\x29\x2c\0\x20\x20\x20\x20\x72\ +\x65\x74\x75\x72\x6e\x20\x72\x65\x74\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x6c\ +\x33\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x20\x3d\x3d\x20\x30\x29\x20\x7b\0\x20\ +\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\ +\x34\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\ +\x74\x20\x69\x70\x68\x64\x72\x20\x69\x70\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\ +\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\ +\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\ +\x28\x73\x6b\x62\x2c\x20\x30\x2c\x20\x26\x69\x70\x2c\x20\x73\x69\x7a\x65\x6f\ +\x66\x28\x69\x70\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x65\ 
+\x72\x72\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\ +\x69\x73\x5f\x66\x72\x61\x67\x6d\x65\x6e\x74\x65\x64\x20\x3d\x20\x21\x21\x28\ +\x62\x70\x66\x5f\x6e\x74\x6f\x68\x73\x28\x69\x70\x2e\x66\x72\x61\x67\x5f\x6f\ +\x66\x66\x29\x20\x26\x20\x28\x30\x78\x32\x30\x30\x30\x20\x7c\x20\x30\x78\x31\ +\x66\x66\x66\x29\x29\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\ +\x3e\x69\x6e\x5f\x73\x72\x63\x20\x3d\x20\x69\x70\x2e\x73\x61\x64\x64\x72\x3b\0\ +\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x6e\x5f\x64\x73\ +\x74\x20\x3d\x20\x69\x70\x2e\x64\x61\x64\x64\x72\x3b\0\x20\x20\x20\x20\x20\x20\ +\x20\x20\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x20\x3d\x20\x69\x70\x2e\ +\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x6c\x34\ +\x5f\x6f\x66\x66\x73\x65\x74\x20\x3d\x20\x69\x70\x2e\x69\x68\x6c\x20\x2a\x20\ +\x34\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\ +\x6f\x6c\x20\x21\x3d\x20\x30\x20\x26\x26\x20\x21\x69\x6e\x66\x6f\x2d\x3e\x69\ +\x73\x5f\x66\x72\x61\x67\x6d\x65\x6e\x74\x65\x64\x29\x20\x7b\0\x20\x20\x20\x20\ \x20\x20\x20\x20\x69\x66\x20\x28\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\ -\x20\x21\x3d\x20\x30\x20\x26\x26\x20\x21\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\ -\x66\x72\x61\x67\x6d\x65\x6e\x74\x65\x64\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\ -\x20\x20\x69\x66\x20\x28\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x20\x3d\ -\x3d\x20\x49\x50\x50\x52\x4f\x54\x4f\x5f\x54\x43\x50\x29\x20\x7b\0\x20\x20\x20\ -\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x74\ -\x63\x70\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x73\x74\x72\x75\x63\x74\x20\x74\x63\x70\x68\x64\x72\x20\x74\x63\x70\x20\x3d\ -\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\ -\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\ -\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x6c\x34\ -\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x74\x63\x70\x2c\x20\x73\x69\x7a\x65\ -\x6f\x66\x28\x74\x63\x70\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\x20\x20\x69\x66\x20\x28\ -\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\x70\x76\x34\ -\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x70\x61\x63\x6b\ -\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x74\x63\x70\x20\x26\x26\0\x20\x20\ -\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x36\ -\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\ -\x20\x69\x70\x76\x36\x68\x64\x72\x20\x69\x70\x36\x20\x3d\x20\x7b\x7d\x3b\0\x20\ -\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\ -\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\ -\x76\x65\x28\x73\x6b\x62\x2c\x20\x30\x2c\x20\x26\x69\x70\x36\x2c\x20\x73\x69\ -\x7a\x65\x6f\x66\x28\x69\x70\x36\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\ -\x6e\x66\x6f\x2d\x3e\x69\x6e\x36\x5f\x73\x72\x63\x20\x3d\x20\x69\x70\x36\x2e\ -\x73\x61\x64\x64\x72\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\ -\x3e\x69\x6e\x36\x5f\x64\x73\x74\x20\x3d\x20\x69\x70\x36\x2e\x64\x61\x64\x64\ -\x72\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\ -\x6f\x6c\x20\x3d\x20\x69\x70\x36\x2e\x6e\x65\x78\x74\x68\x64\x72\x3b\0\x20\x20\ -\x20\x20\x73\x77\x69\x74\x63\x68\x20\x28\x68\x64\x72\x5f\x74\x79\x70\x65\x29\ 
-\x20\x7b\0\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x69\x70\x76\x36\x5f\x6f\ -\x70\x74\x5f\x68\x64\x72\x20\x65\x78\x74\x5f\x68\x64\x72\x20\x3d\x20\x7b\x7d\ -\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\ -\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\ -\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\ -\x74\x2c\x20\x26\x65\x78\x74\x5f\x68\x64\x72\x2c\0\x20\x20\x20\x20\x20\x20\x20\ -\x20\x69\x66\x20\x28\x2a\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x20\x3d\ -\x3d\x20\x49\x50\x50\x52\x4f\x54\x4f\x5f\x52\x4f\x55\x54\x49\x4e\x47\x29\x20\ -\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\ -\x20\x69\x70\x76\x36\x5f\x72\x74\x5f\x68\x64\x72\x20\x65\x78\x74\x5f\x72\x74\ +\x20\x3d\x3d\x20\x49\x50\x50\x52\x4f\x54\x4f\x5f\x54\x43\x50\x29\x20\x7b\0\x20\ +\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\ +\x5f\x74\x63\x70\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x73\x74\x72\x75\x63\x74\x20\x74\x63\x70\x68\x64\x72\x20\x74\x63\x70\ \x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\ \x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\ \x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\ -\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x65\x78\x74\x5f\x72\x74\ -\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x28\x65\ -\x78\x74\x5f\x72\x74\x2e\x74\x79\x70\x65\x20\x3d\x3d\x20\x49\x50\x56\x36\x5f\ -\x53\x52\x43\x52\x54\x5f\x54\x59\x50\x45\x5f\x32\x29\x20\x26\x26\0\x20\x20\x20\ -\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x2a\x6c\ -\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x6f\x66\x66\x73\x65\x74\x6f\x66\ -\x28\x73\x74\x72\x75\x63\x74\x20\x72\x74\x32\x5f\x68\x64\x72\x2c\x20\x61\x64\ -\x64\x72\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x74\x63\x70\x2c\x20\x73\x69\ +\x7a\x65\x6f\x66\x28\x74\x63\x70\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\x20\x20\x69\x66\ +\x20\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\x70\ +\x76\x34\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x70\x61\ +\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x74\x63\x70\x20\x26\x26\0\ +\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\ +\x76\x36\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\ +\x63\x74\x20\x69\x70\x76\x36\x68\x64\x72\x20\x69\x70\x36\x20\x3d\x20\x7b\x7d\ +\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\ +\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\ +\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x30\x2c\x20\x26\x69\x70\x36\x2c\x20\ +\x73\x69\x7a\x65\x6f\x66\x28\x69\x70\x36\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\ +\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x6e\x36\x5f\x73\x72\x63\x20\x3d\x20\x69\x70\ +\x36\x2e\x73\x61\x64\x64\x72\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\ +\x6f\x2d\x3e\x69\x6e\x36\x5f\x64\x73\x74\x20\x3d\x20\x69\x70\x36\x2e\x64\x61\ +\x64\x64\x72\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x6c\x34\x5f\x70\x72\x6f\x74\ +\x6f\x63\x6f\x6c\x20\x3d\x20\x69\x70\x36\x2e\x6e\x65\x78\x74\x68\x64\x72\x3b\0\ +\x20\x20\x20\x20\x73\x77\x69\x74\x63\x68\x20\x28\x68\x64\x72\x5f\x74\x79\x70\ 
+\x65\x29\x20\x7b\0\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x69\x70\x76\x36\ +\x5f\x6f\x70\x74\x5f\x68\x64\x72\x20\x65\x78\x74\x5f\x68\x64\x72\x20\x3d\x20\ +\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\ +\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\ +\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x2a\x6c\x34\x5f\x6f\x66\x66\ +\x73\x65\x74\x2c\x20\x26\x65\x78\x74\x5f\x68\x64\x72\x2c\0\x20\x20\x20\x20\x20\ +\x20\x20\x20\x69\x66\x20\x28\x2a\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\ +\x20\x3d\x3d\x20\x49\x50\x50\x52\x4f\x54\x4f\x5f\x52\x4f\x55\x54\x49\x4e\x47\ +\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\ +\x63\x74\x20\x69\x70\x76\x36\x5f\x72\x74\x5f\x68\x64\x72\x20\x65\x78\x74\x5f\ +\x72\x74\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\ \x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\ -\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\ -\x20\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x36\x5f\ -\x65\x78\x74\x5f\x64\x73\x74\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x20\x20\x20\x7d\x20\x5f\x5f\x61\x74\x74\x72\x69\x62\x75\x74\x65\x5f\ -\x5f\x28\x28\x70\x61\x63\x6b\x65\x64\x29\x29\x20\x6f\x70\x74\x20\x3d\x20\x7b\ -\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6f\ -\x70\x74\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x3d\x20\x28\x6f\x70\x74\x2e\x74\ -\x79\x70\x65\x20\x3d\x3d\x20\x49\x50\x56\x36\x5f\x54\x4c\x56\x5f\x50\x41\x44\ -\x31\x29\x20\x3f\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x69\x66\x20\x28\x6f\x70\x74\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x31\ -\x20\x3e\x3d\x20\x65\x78\x74\x5f\x68\x64\x72\x2e\x68\x64\x72\x6c\x65\x6e\x20\ -\x2a\x20\x38\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x2c\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x65\x78\x74\x5f\ +\x72\x74\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\ +\x28\x65\x78\x74\x5f\x72\x74\x2e\x74\x79\x70\x65\x20\x3d\x3d\x20\x49\x50\x56\ +\x36\x5f\x53\x52\x43\x52\x54\x5f\x54\x59\x50\x45\x5f\x32\x29\x20\x26\x26\0\x20\ +\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x6f\x66\x66\x73\x65\x74\ +\x6f\x66\x28\x73\x74\x72\x75\x63\x74\x20\x72\x74\x32\x5f\x68\x64\x72\x2c\x20\ +\x61\x64\x64\x72\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\ \x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\ -\x6b\x62\x2c\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x6f\x70\ -\x74\x5f\x6f\x66\x66\x73\x65\x74\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x6f\x70\x74\x2e\x74\x79\x70\x65\x20\ -\x3d\x3d\x20\x49\x50\x56\x36\x5f\x54\x4c\x56\x5f\x48\x41\x4f\x29\x20\x7b\0\x20\ -\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x20\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x6f\x70\ -\x74\x5f\x6f\x66\x66\x73\x65\x74\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\ 
-\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\ -\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\ +\x6b\x62\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x69\x66\x20\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\ +\x36\x5f\x65\x78\x74\x5f\x64\x73\x74\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x20\x7d\x20\x5f\x5f\x61\x74\x74\x72\x69\x62\x75\x74\ +\x65\x5f\x5f\x28\x28\x70\x61\x63\x6b\x65\x64\x29\x29\x20\x6f\x70\x74\x20\x3d\ +\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x6f\x70\x74\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x3d\x20\x28\x6f\x70\x74\ +\x2e\x74\x79\x70\x65\x20\x3d\x3d\x20\x49\x50\x56\x36\x5f\x54\x4c\x56\x5f\x50\ +\x41\x44\x31\x29\x20\x3f\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x69\x66\x20\x28\x6f\x70\x74\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\ +\x20\x31\x20\x3e\x3d\x20\x65\x78\x74\x5f\x68\x64\x72\x2e\x68\x64\x72\x6c\x65\ +\x6e\x20\x2a\x20\x38\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\ +\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\ +\x28\x73\x6b\x62\x2c\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\ +\x6f\x70\x74\x5f\x6f\x66\x66\x73\x65\x74\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x6f\x70\x74\x2e\x74\x79\x70\ +\x65\x20\x3d\x3d\x20\x49\x50\x56\x36\x5f\x54\x4c\x56\x5f\x48\x41\x4f\x29\x20\ \x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x36\x5f\x65\x78\x74\ -\x5f\x73\x72\x63\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x66\x72\x61\x67\x6d\x65\x6e\x74\ -\x65\x64\x20\x3d\x20\x74\x72\x75\x65\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x2a\ -\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x3d\x20\x28\x65\x78\x74\x5f\x68\ -\x64\x72\x2e\x68\x64\x72\x6c\x65\x6e\x20\x2b\x20\x31\x29\x20\x2a\x20\x38\x3b\0\ -\x20\x20\x20\x20\x20\x20\x20\x20\x2a\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\ -\x6c\x20\x3d\x20\x65\x78\x74\x5f\x68\x64\x72\x2e\x6e\x65\x78\x74\x68\x64\x72\ -\x3b\0\x20\x20\x20\x20\x66\x6f\x72\x20\x28\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\ -\x69\x6e\x74\x20\x69\x20\x3d\x20\x30\x3b\x20\x69\x20\x3c\x20\x49\x50\x36\x5f\ -\x45\x58\x54\x45\x4e\x53\x49\x4f\x4e\x53\x5f\x43\x4f\x55\x4e\x54\x3b\x20\x2b\ -\x2b\x69\x29\x20\x7b\0\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\x66\x20\ -\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\x70\x76\ -\x36\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\ -\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\x70\x76\ -\x36\x5f\x65\x78\x74\x5f\x64\x73\x74\x20\x26\x26\0\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x20\x20\x20\x69\x66\x20\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\ -\x6f\x2e\x69\x73\x5f\x69\x70\x76\x36\x5f\x65\x78\x74\x5f\x73\x72\x63\x20\x26\ -\x26\0\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\x66\x20\ -\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x75\x64\x70\ +\x20\x20\x20\x20\x20\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\ 
+\x6f\x70\x74\x5f\x6f\x66\x66\x73\x65\x74\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\ +\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\ +\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x65\x72\x72\ +\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x36\x5f\x65\ +\x78\x74\x5f\x73\x72\x63\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x66\x72\x61\x67\x6d\x65\ +\x6e\x74\x65\x64\x20\x3d\x20\x74\x72\x75\x65\x3b\0\x20\x20\x20\x20\x20\x20\x20\ +\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x3d\x20\x28\x65\x78\x74\ +\x5f\x68\x64\x72\x2e\x68\x64\x72\x6c\x65\x6e\x20\x2b\x20\x31\x29\x20\x2a\x20\ +\x38\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x2a\x6c\x34\x5f\x70\x72\x6f\x74\x6f\ +\x63\x6f\x6c\x20\x3d\x20\x65\x78\x74\x5f\x68\x64\x72\x2e\x6e\x65\x78\x74\x68\ +\x64\x72\x3b\0\x20\x20\x20\x20\x66\x6f\x72\x20\x28\x75\x6e\x73\x69\x67\x6e\x65\ +\x64\x20\x69\x6e\x74\x20\x69\x20\x3d\x20\x30\x3b\x20\x69\x20\x3c\x20\x49\x50\ +\x36\x5f\x45\x58\x54\x45\x4e\x53\x49\x4f\x4e\x53\x5f\x43\x4f\x55\x4e\x54\x3b\ +\x20\x2b\x2b\x69\x29\x20\x7b\0\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\ +\x66\x20\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\ +\x70\x76\x36\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\ +\x66\x20\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\ +\x70\x76\x36\x5f\x65\x78\x74\x5f\x64\x73\x74\x20\x26\x26\0\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\ +\x6e\x66\x6f\x2e\x69\x73\x5f\x69\x70\x76\x36\x5f\x65\x78\x74\x5f\x73\x72\x63\ \x20\x26\x26\0\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\ -\x66\x20\x28\x63\x6f\x6e\x66\x69\x67\x2d\x3e\x68\x61\x73\x68\x5f\x74\x79\x70\ -\x65\x73\x20\x26\x20\x56\x49\x52\x54\x49\x4f\x5f\x4e\x45\x54\x5f\x52\x53\x53\ -\x5f\x48\x41\x53\x48\x5f\x54\x59\x50\x45\x5f\x49\x50\x76\x34\x29\x20\x7b\0\x20\ -\x20\x20\x20\x5f\x5f\x62\x75\x69\x6c\x74\x69\x6e\x5f\x6d\x65\x6d\x63\x70\x79\ -\x28\x26\x72\x73\x73\x5f\x69\x6e\x70\x75\x74\x5b\x2a\x62\x79\x74\x65\x73\x5f\ -\x77\x72\x69\x74\x74\x65\x6e\x5d\x2c\x20\x70\x74\x72\x2c\x20\x73\x69\x7a\x65\ -\x29\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\ -\x3e\x69\x73\x5f\x75\x64\x70\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x75\x64\x70\x68\x64\x72\x20\ -\x75\x64\x70\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\ -\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\ -\x62\x2c\x20\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x75\x64\x70\x2c\ -\x20\x73\x69\x7a\x65\x6f\x66\x28\x75\x64\x70\x29\x2c\0\x20\x20\x20\x20\x20\x20\ -\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\x66\x20\x28\x63\x6f\x6e\x66\x69\x67\ -\x2d\x3e\x68\x61\x73\x68\x5f\x74\x79\x70\x65\x73\x20\x26\x20\x56\x49\x52\x54\ -\x49\x4f\x5f\x4e\x45\x54\x5f\x52\x53\x53\x5f\x48\x41\x53\x48\x5f\x54\x59\x50\ -\x45\x5f\x49\x50\x76\x36\x29\x20\x7b\0\x20\x20\x20\x20\x66\x6f\x72\x20\x28\x62\ -\x79\x74\x65\x20\x3d\x20\x30\x3b\x20\x62\x79\x74\x65\x20\x3c\x20\x48\x41\x53\ 
-\x48\x5f\x43\x41\x4c\x43\x55\x4c\x41\x54\x49\x4f\x4e\x5f\x42\x55\x46\x46\x45\ -\x52\x5f\x53\x49\x5a\x45\x3b\x20\x62\x79\x74\x65\x2b\x2b\x29\x20\x7b\0\x20\x20\ -\x20\x20\x5f\x5f\x75\x33\x32\x20\x6c\x65\x66\x74\x6d\x6f\x73\x74\x5f\x33\x32\ -\x5f\x62\x69\x74\x73\x20\x3d\x20\x6b\x65\x79\x2d\x3e\x6c\x65\x66\x74\x6d\x6f\ -\x73\x74\x5f\x33\x32\x5f\x62\x69\x74\x73\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\ -\x5f\x5f\x75\x38\x20\x69\x6e\x70\x75\x74\x5f\x62\x79\x74\x65\x20\x3d\x20\x69\ -\x6e\x70\x75\x74\x5b\x62\x79\x74\x65\x5d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x20\x20\x69\x66\x20\x28\x69\x6e\x70\x75\x74\x5f\x62\x79\x74\x65\x20\ -\x26\x20\x28\x31\x20\x3c\x3c\x20\x37\x29\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\ -\x20\x20\x5f\x5f\x75\x38\x20\x6b\x65\x79\x5f\x62\x79\x74\x65\x20\x3d\x20\x6b\ -\x65\x79\x2d\x3e\x6e\x65\x78\x74\x5f\x62\x79\x74\x65\x5b\x62\x79\x74\x65\x5d\ -\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x28\x6c\x65\x66\x74\x6d\x6f\x73\x74\x5f\x33\x32\x5f\x62\x69\x74\x73\ -\x20\x3c\x3c\x20\x31\x29\x20\x7c\x20\x28\x28\x6b\x65\x79\x5f\x62\x79\x74\x65\ -\x20\x26\x20\x28\x31\x20\x3c\x3c\x20\x37\x29\x29\x20\x3e\x3e\x20\x37\x29\x3b\0\ -\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x68\x61\x73\x68\x29\x20\x7b\0\ -\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x5f\x5f\x75\x33\x32\x20\x74\ -\x61\x62\x6c\x65\x5f\x69\x64\x78\x20\x3d\x20\x68\x61\x73\x68\x20\x25\x20\x63\ -\x6f\x6e\x66\x69\x67\x2d\x3e\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\x6e\x73\ -\x5f\x6c\x65\x6e\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x71\x75\ -\x65\x75\x65\x20\x3d\x20\x62\x70\x66\x5f\x6d\x61\x70\x5f\x6c\x6f\x6f\x6b\x75\ -\x70\x5f\x65\x6c\x65\x6d\x28\x26\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\ -\x5f\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\x61\x62\x6c\x65\x2c\0\ -\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x71\x75\x65\ -\x75\x65\x29\x20\x7b\0\x7d\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ -\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x2a\x71\x75\x65\x75\x65\x3b\0\x63\ -\x68\x61\x72\0\x5f\x6c\x69\x63\x65\x6e\x73\x65\0\x2e\x6d\x61\x70\x73\0\x6c\x69\ -\x63\x65\x6e\x73\x65\0\x62\x70\x66\x5f\x66\x6c\x6f\x77\x5f\x6b\x65\x79\x73\0\ -\x62\x70\x66\x5f\x73\x6f\x63\x6b\0\0\0\0\x9f\xeb\x01\0\x20\0\0\0\0\0\0\0\x14\0\ -\0\0\x14\0\0\0\x6c\x0c\0\0\x80\x0c\0\0\0\0\0\0\x08\0\0\0\x26\x02\0\0\x01\0\0\0\ -\0\0\0\0\x24\0\0\0\x10\0\0\0\x26\x02\0\0\xc6\0\0\0\0\0\0\0\x37\x02\0\0\x61\x02\ -\0\0\0\x50\x08\0\x10\0\0\0\x37\x02\0\0\x92\x02\0\0\x0b\x68\x08\0\x20\0\0\0\x37\ -\x02\0\0\0\0\0\0\0\0\0\0\x28\0\0\0\x37\x02\0\0\xa5\x02\0\0\x0e\x74\x08\0\x50\0\ -\0\0\x37\x02\0\0\xea\x02\0\0\x0b\x78\x08\0\x88\0\0\0\x37\x02\0\0\x2a\x03\0\0\ -\x10\x80\x08\0\x90\0\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x98\0\0\0\x37\x02\0\0\x2a\ -\x03\0\0\x10\x80\x08\0\xa0\0\0\0\x37\x02\0\0\x43\x03\0\0\x16\x84\x08\0\xa8\0\0\ -\0\x37\x02\0\0\x43\x03\0\0\x0d\x84\x08\0\xc0\0\0\0\x37\x02\0\0\x64\x03\0\0\x0a\ -\xfc\x05\0\xe8\0\0\0\x37\x02\0\0\x9b\x03\0\0\x1f\x0c\x06\0\x38\x01\0\0\x37\x02\ -\0\0\xcb\x03\0\0\x0f\xa0\x04\0\x40\x01\0\0\x37\x02\0\0\xe4\x03\0\0\x0c\x20\x04\ -\0\x50\x01\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x58\x01\0\0\x37\x02\0\0\xf8\x03\0\0\ -\x0b\x2c\x04\0\x90\x01\0\0\x37\x02\0\0\x3e\x04\0\0\x09\x34\x04\0\xa0\x01\0\0\ -\x37\x02\0\0\x4d\x04\0\0\x0d\x44\x04\0\xb8\x01\0\0\x37\x02\0\0\x4d\x04\0\0\x05\ -\x44\x04\0\xd8\x01\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\xe0\x01\0\0\x37\x02\0\0\x6b\ -\x04\0\0\x0f\x58\x04\0\x10\x02\0\0\x37\x02\0\0\x3e\x04\0\0\x09\x70\x04\0\x18\ 
-\x02\0\0\x37\x02\0\0\xb5\x04\0\0\x0c\x80\x04\0\x20\x02\0\0\x37\x02\0\0\xc5\x04\ -\0\0\x09\xbc\x04\0\x50\x02\0\0\x37\x02\0\0\xe1\x04\0\0\x17\xd4\x04\0\x60\x02\0\ -\0\x37\x02\0\0\xfc\x04\0\0\x16\xdc\x04\0\x80\x02\0\0\x37\x02\0\0\xe1\x04\0\0\ -\x17\xd4\x04\0\x88\x02\0\0\x37\x02\0\0\x1a\x05\0\0\x0f\xe0\x04\0\xc0\x02\0\0\ -\x37\x02\0\0\x5d\x05\0\0\x0d\xe8\x04\0\xc8\x02\0\0\x37\x02\0\0\x70\x05\0\0\x24\ -\0\x05\0\xd0\x02\0\0\x37\x02\0\0\x70\x05\0\0\x20\0\x05\0\xe0\x02\0\0\x37\x02\0\ -\0\x9d\x05\0\0\x1b\xf8\x04\0\xe8\x02\0\0\x37\x02\0\0\x9d\x05\0\0\x16\xf8\x04\0\ -\xf0\x02\0\0\x37\x02\0\0\xbe\x05\0\0\x1b\xfc\x04\0\xf8\x02\0\0\x37\x02\0\0\xbe\ -\x05\0\0\x16\xfc\x04\0\0\x03\0\0\x37\x02\0\0\xdf\x05\0\0\x1a\x08\x05\0\x08\x03\ -\0\0\x37\x02\0\0\x70\x05\0\0\x1d\0\x05\0\x10\x03\0\0\x37\x02\0\0\x02\x06\0\0\ -\x18\x0c\x05\0\x18\x03\0\0\x37\x02\0\0\x02\x06\0\0\x1c\x0c\x05\0\x30\x03\0\0\ -\x37\x02\0\0\x22\x06\0\0\x15\x68\x05\0\x40\x03\0\0\x37\x02\0\0\x22\x06\0\0\x1a\ -\x68\x05\0\x58\x03\0\0\x37\x02\0\0\x56\x06\0\0\x0d\x6c\x05\0\x78\x03\0\0\x37\ -\x02\0\0\x80\x06\0\0\x1a\x70\x05\0\x88\x03\0\0\x37\x02\0\0\x9e\x06\0\0\x1b\x78\ -\x05\0\xa8\x03\0\0\x37\x02\0\0\x80\x06\0\0\x1a\x70\x05\0\xb0\x03\0\0\x37\x02\0\ -\0\xc2\x06\0\0\x13\x7c\x05\0\xe8\x03\0\0\x37\x02\0\0\x13\x07\0\0\x11\x84\x05\0\ -\xf0\x03\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x10\x04\0\0\x37\x02\0\0\x2a\x07\0\0\ -\x15\x28\x06\0\x18\x04\0\0\x37\x02\0\0\x2a\x07\0\0\x09\x28\x06\0\x20\x04\0\0\ -\x37\x02\0\0\0\0\0\0\0\0\0\0\x70\x04\0\0\x37\x02\0\0\x49\x07\0\0\x19\x2c\x06\0\ -\x80\x04\0\0\x37\x02\0\0\x49\x07\0\0\x20\x2c\x06\0\xa0\x04\0\0\x37\x02\0\0\0\0\ -\0\0\0\0\0\0\xf0\x04\0\0\x37\x02\0\0\x6b\x07\0\0\x17\x14\x05\0\0\x05\0\0\x37\ -\x02\0\0\x86\x07\0\0\x18\x1c\x05\0\x30\x05\0\0\x37\x02\0\0\x6b\x07\0\0\x17\x14\ -\x05\0\x48\x05\0\0\x37\x02\0\0\xa7\x07\0\0\x0f\x20\x05\0\x80\x05\0\0\x37\x02\0\ -\0\x5d\x05\0\0\x0d\x28\x05\0\x88\x05\0\0\x37\x02\0\0\xec\x07\0\0\x1d\x38\x05\0\ -\xc8\x05\0\0\x37\x02\0\0\x0f\x08\0\0\x1d\x3c\x05\0\x08\x06\0\0\x37\x02\0\0\x32\ -\x08\0\0\x1b\x44\x05\0\x10\x06\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\x58\ -\x06\0\0\x37\x02\0\0\x6d\x08\0\0\x19\xb8\x02\0\xd0\x06\0\0\x37\x02\0\0\0\0\0\0\ -\0\0\0\0\xd8\x06\0\0\x37\x02\0\0\x93\x08\0\0\x0f\xc8\x02\0\x10\x07\0\0\x37\x02\ -\0\0\x5d\x05\0\0\x0d\xd0\x02\0\x20\x07\0\0\x37\x02\0\0\xd8\x08\0\0\x0d\xe0\x02\ -\0\x40\x07\0\0\x37\x02\0\0\x07\x09\0\0\x20\xe4\x02\0\x68\x07\0\0\x37\x02\0\0\ -\x33\x09\0\0\x13\xec\x02\0\xa8\x07\0\0\x37\x02\0\0\x13\x07\0\0\x11\xf4\x02\0\ -\xb0\x07\0\0\x37\x02\0\0\x7b\x09\0\0\x19\x04\x03\0\xb8\x07\0\0\x37\x02\0\0\x7b\ -\x09\0\0\x34\x04\x03\0\xe0\x07\0\0\x37\x02\0\0\xb1\x09\0\0\x15\x18\x03\0\xf0\ -\x07\0\0\x37\x02\0\0\xf2\x09\0\0\x17\x14\x03\0\x30\x08\0\0\x37\x02\0\0\x29\x0a\ -\0\0\x15\x24\x03\0\x38\x08\0\0\x37\x02\0\0\x44\x0a\0\0\x27\x34\x03\0\x70\x08\0\ -\0\x37\x02\0\0\x6f\x0a\0\0\x27\x50\x03\0\x80\x08\0\0\x37\x02\0\0\x9f\x0a\0\0\ -\x1c\xb4\x03\0\x88\x08\0\0\x37\x02\0\0\xdb\x0a\0\0\x20\xc0\x03\0\x98\x08\0\0\ -\x37\x02\0\0\xdb\x0a\0\0\x2f\xc0\x03\0\xa0\x08\0\0\x37\x02\0\0\xdb\x0a\0\0\x36\ -\xc0\x03\0\xa8\x08\0\0\x37\x02\0\0\xdb\x0a\0\0\x15\xc0\x03\0\x18\x09\0\0\x37\ -\x02\0\0\x17\x0b\0\0\x43\x64\x03\0\x38\x09\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x40\ -\x09\0\0\x37\x02\0\0\x17\x0b\0\0\x17\x64\x03\0\x80\x09\0\0\x37\x02\0\0\x29\x0a\ -\0\0\x15\x6c\x03\0\x88\x09\0\0\x37\x02\0\0\x67\x0b\0\0\x19\x7c\x03\0\x90\x09\0\ -\0\x37\x02\0\0\x67\x0b\0\0\x15\x7c\x03\0\x98\x09\0\0\x37\x02\0\0\x97\x0b\0\0\ -\x19\x84\x03\0\xa0\x09\0\0\x37\x02\0\0\xc7\x0b\0\0\x1b\x80\x03\0\xe8\x09\0\0\ 
-\x37\x02\0\0\x02\x0c\0\0\x19\x94\x03\0\xf0\x09\0\0\x37\x02\0\0\x21\x0c\0\0\x2b\ -\xa4\x03\0\x10\x0a\0\0\x37\x02\0\0\x9f\x0a\0\0\x1f\xb4\x03\0\x30\x0a\0\0\x37\ -\x02\0\0\x50\x0c\0\0\x21\xd4\x03\0\x40\x0a\0\0\x37\x02\0\0\x78\x0c\0\0\x20\xe4\ -\x03\0\x48\x0a\0\0\x37\x02\0\0\x78\x0c\0\0\x2c\xe4\x03\0\x60\x0a\0\0\x37\x02\0\ -\0\x78\x0c\0\0\x14\xe4\x03\0\x70\x0a\0\0\x37\x02\0\0\xa8\x0c\0\0\x20\xe0\x03\0\ -\x80\x0a\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\xb0\x0a\0\0\x37\x02\0\0\xd0\ -\x0c\0\0\x38\xc0\x02\0\xd0\x0a\0\0\x37\x02\0\0\xd0\x0c\0\0\x05\xc0\x02\0\xe8\ -\x0a\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\xf8\x0a\0\0\x37\x02\0\0\x0e\x0d\ -\0\0\x1c\xc4\x06\0\x08\x0b\0\0\x37\x02\0\0\x0e\x0d\0\0\x10\xc4\x06\0\x10\x0b\0\ -\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x60\x0b\0\0\x37\x02\0\0\x49\x07\0\0\x19\xc8\x06\ -\0\x68\x0b\0\0\x37\x02\0\0\x49\x07\0\0\x20\xc8\x06\0\xa0\x0b\0\0\x37\x02\0\0\ -\x34\x0d\0\0\x2d\0\x07\0\xb0\x0b\0\0\x37\x02\0\0\x34\x0d\0\0\x1d\0\x07\0\xb8\ -\x0b\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\0\x07\0\xc8\x0b\0\0\x37\x02\0\0\x63\x0d\0\ -\0\x2d\xd4\x06\0\xf8\x0b\0\0\x37\x02\0\0\x63\x0d\0\0\x1d\xd4\x06\0\x08\x0c\0\0\ -\x37\x02\0\0\x63\x0d\0\0\x2d\xd4\x06\0\x18\x0c\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\ -\xe8\x0c\0\0\x37\x02\0\0\x92\x0d\0\0\x20\x68\x06\0\xf0\x0c\0\0\x37\x02\0\0\x92\ -\x0d\0\0\x27\x68\x06\0\x18\x0d\0\0\x37\x02\0\0\xbb\x0d\0\0\x27\xa4\x06\0\x20\ -\x0d\0\0\x37\x02\0\0\xbb\x0d\0\0\x14\xa4\x06\0\x28\x0d\0\0\x37\x02\0\0\x04\x0e\ -\0\0\x05\x98\x01\0\x38\x0d\0\0\x37\x02\0\0\x04\x0e\0\0\x05\x98\x01\0\x60\x0d\0\ -\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x70\x0d\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x80\x0d\ -\0\0\x37\x02\0\0\x92\x0d\0\0\x20\x44\x07\0\x88\x0d\0\0\x37\x02\0\0\x92\x0d\0\0\ -\x27\x44\x07\0\xc0\x0d\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\x7c\x07\0\xd0\x0d\0\0\ -\x37\x02\0\0\x34\x0d\0\0\x1d\x7c\x07\0\xd8\x0d\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\ -\x7c\x07\0\xe8\x0d\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\x50\x07\0\x18\x0e\0\0\x37\ -\x02\0\0\x63\x0d\0\0\x1d\x50\x07\0\x28\x0e\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\x50\ -\x07\0\x40\x0e\0\0\x37\x02\0\0\x41\x0e\0\0\x1a\xa0\x05\0\x50\x0e\0\0\x37\x02\0\ -\0\x5f\x0e\0\0\x1b\xa8\x05\0\x60\x0e\0\0\x37\x02\0\0\x41\x0e\0\0\x1a\xa0\x05\0\ -\x68\x0e\0\0\x37\x02\0\0\x83\x0e\0\0\x13\xac\x05\0\xa0\x0e\0\0\x37\x02\0\0\x13\ -\x07\0\0\x11\xb4\x05\0\xb0\x0e\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\xc0\ -\x0e\0\0\x37\x02\0\0\xd4\x0e\0\0\x27\xc8\x07\0\xd0\x0e\0\0\x37\x02\0\0\xd4\x0e\ -\0\0\x14\xc8\x07\0\xf0\x0e\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\xcc\x07\0\0\x0f\0\0\ -\x37\x02\0\0\x63\x0d\0\0\x1d\xcc\x07\0\x08\x0f\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\ -\xcc\x07\0\x30\x0f\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x80\x0f\0\0\x37\x02\0\0\x34\ -\x0d\0\0\x1d\xf8\x07\0\x88\x0f\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\xf8\x07\0\x98\ -\x0f\0\0\x37\x02\0\0\x04\x0e\0\0\x05\x98\x01\0\xf0\x0f\0\0\x37\x02\0\0\x04\x0e\ -\0\0\x05\x98\x01\0\x30\x10\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x48\x10\0\0\x37\x02\ -\0\0\x1d\x0f\0\0\x05\xd0\x01\0\x50\x10\0\0\x37\x02\0\0\x5f\x0f\0\0\x23\xc4\x01\ -\0\x68\x10\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x70\x10\0\0\x37\x02\0\0\x93\x0f\0\0\ -\x1b\xd4\x01\0\x90\x10\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\xa8\x10\0\0\ -\x37\x02\0\0\xe3\x0f\0\0\x19\xd8\x01\0\xc0\x10\0\0\x37\x02\0\0\x11\x10\0\0\x27\ -\xfc\x01\0\xc8\x10\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\xd8\x10\0\0\x37\ -\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\xe0\x10\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\ -\x01\0\x08\x11\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\x20\x11\0\0\x37\x02\0\ -\0\x11\x10\0\0\x27\xfc\x01\0\x28\x11\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\ 
-\x30\x11\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\x58\x11\0\0\x37\x02\0\0\x11\ -\x10\0\0\x27\xfc\x01\0\x60\x11\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\x78\ -\x11\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\x80\x11\0\0\x37\x02\0\0\xba\x0f\ -\0\0\x11\xe8\x01\0\xa8\x11\0\0\x37\x02\0\0\x11\x10\0\0\x27\xfc\x01\0\xb0\x11\0\ -\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\xc8\x11\0\0\x37\x02\0\0\x11\x10\0\0\ -\x2d\xfc\x01\0\xd0\x11\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\xf8\x11\0\0\ -\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\x10\x12\0\0\x37\x02\0\0\x11\x10\0\0\x27\ -\xfc\x01\0\x18\x12\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\x20\x12\0\0\x37\ -\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\x48\x12\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\ -\x01\0\x60\x12\0\0\x37\x02\0\0\x11\x10\0\0\x27\xfc\x01\0\x68\x12\0\0\x37\x02\0\ -\0\x11\x10\0\0\x2d\xfc\x01\0\x70\x12\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\ -\x98\x12\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\xb0\x12\0\0\x37\x02\0\0\x11\ -\x10\0\0\x27\xfc\x01\0\xb8\x12\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\xc0\ -\x12\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\xe0\x12\0\0\x37\x02\0\0\x11\x10\ -\0\0\x46\xfc\x01\0\xe8\x12\0\0\x37\x02\0\0\x11\x10\0\0\x27\xfc\x01\0\xf0\x12\0\ -\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\xf8\x12\0\0\x37\x02\0\0\x1d\x0f\0\0\ -\x3d\xd0\x01\0\x08\x13\0\0\x37\x02\0\0\x1d\x0f\0\0\x05\xd0\x01\0\x18\x13\0\0\ -\x37\x02\0\0\x5d\x10\0\0\x0d\x98\x08\0\x30\x13\0\0\x37\x02\0\0\x5d\x10\0\0\x0d\ -\x98\x08\0\x38\x13\0\0\x37\x02\0\0\x71\x10\0\0\x2e\x9c\x08\0\x58\x13\0\0\x37\ -\x02\0\0\x71\x10\0\0\x24\x9c\x08\0\x70\x13\0\0\x37\x02\0\0\x71\x10\0\0\x13\x9c\ -\x08\0\x80\x13\0\0\x37\x02\0\0\x71\x10\0\0\x2e\x9c\x08\0\x88\x13\0\0\x37\x02\0\ -\0\xb0\x10\0\0\x15\xa8\x08\0\xa0\x13\0\0\x37\x02\0\0\xf8\x10\0\0\x11\xb4\x08\0\ -\xa8\x13\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\xc8\x13\0\0\x37\x02\0\0\x11\x11\0\0\ -\x01\xd8\x08\0\xd0\x13\0\0\x37\x02\0\0\x13\x11\0\0\x18\xb8\x08\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\xde\0\0\0\0\0\x03\0\xc8\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x7a\x01\0\0\0\ -\0\x03\0\xb8\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xff\0\0\0\0\0\x03\0\xa8\x13\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\xc7\0\0\0\0\0\x03\0\xd0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\x2c\x02\0\0\0\0\x03\0\x20\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xf7\0\0\0\0\0\x03\0\ -\xe8\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1c\x02\0\0\0\0\x03\0\x10\x04\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\x28\x01\0\0\0\0\x03\0\xe0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xf3\ -\x01\0\0\0\0\x03\0\x30\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xeb\x01\0\0\0\0\x03\0\ -\x38\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x44\x02\0\0\0\0\x03\0\xf0\x03\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\xe3\x01\0\0\0\0\x03\0\xf8\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x18\ -\x01\0\0\0\0\x03\0\xe8\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x30\x01\0\0\0\0\x03\0\ -\xa0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa9\x01\0\0\0\0\x03\0\x40\x10\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\x51\x01\0\0\0\0\x03\0\x78\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x5c\ -\x02\0\0\0\0\x03\0\xb0\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\x02\0\0\0\0\x03\0\ -\x50\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc2\x01\0\0\0\0\x03\0\xc0\x06\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\x69\x01\0\0\0\0\x03\0\x20\x07\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\ -\x01\0\0\0\0\x03\0\x60\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x38\x01\0\0\0\0\x03\0\ -\x30\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x20\x01\0\0\0\0\x03\0\x40\x0a\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\xba\x01\0\0\0\0\x03\0\xe0\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa1\ -\x01\0\0\0\0\x03\0\x48\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\x01\0\0\0\0\x03\0\ 
-\x18\x09\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xfb\x01\0\0\0\0\x03\0\x80\x08\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\x99\x01\0\0\0\0\x03\0\xf8\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x59\ -\x01\0\0\0\0\x03\0\x50\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x54\x02\0\0\0\0\x03\0\ -\x08\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xef\0\0\0\0\0\x03\0\xe8\x0a\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\x4c\x02\0\0\0\0\x03\0\xb0\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x24\ -\x02\0\0\0\0\x03\0\xd8\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x89\x01\0\0\0\0\x03\0\ -\x80\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x10\x01\0\0\0\0\x03\0\xb0\x0b\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\xd6\0\0\0\0\0\x03\0\xc8\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x14\ -\x02\0\0\0\0\x03\0\xf8\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb2\x01\0\0\0\0\x03\0\ -\x18\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xdb\x01\0\0\0\0\x03\0\x10\x0c\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\x3c\x02\0\0\0\0\x03\0\x18\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x91\ -\x01\0\0\0\0\x03\0\x60\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81\x01\0\0\0\0\x03\0\ -\xc0\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xe7\0\0\0\0\0\x03\0\xd0\x0d\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\x34\x02\0\0\0\0\x03\0\xe8\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd3\ -\x01\0\0\0\0\x03\0\x18\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\x01\0\0\0\0\x03\0\0\ -\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xce\0\0\0\0\0\x03\0\x18\x0f\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\x0b\x02\0\0\0\0\x03\0\xf0\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xca\x01\0\ -\0\0\0\x03\0\x30\x10\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x71\x01\0\0\0\0\x03\0\x60\x10\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x48\x01\0\0\0\0\x03\0\x18\x13\0\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\x64\x02\0\0\0\0\x03\0\xd0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x4e\0\0\0\ -\x12\0\x03\0\0\0\0\0\0\0\0\0\xe0\x13\0\0\0\0\0\0\x33\0\0\0\x11\0\x05\0\0\0\0\0\ -\0\0\0\0\x20\0\0\0\0\0\0\0\x01\0\0\0\x11\0\x05\0\x20\0\0\0\0\0\0\0\x20\0\0\0\0\ -\0\0\0\x90\0\0\0\x11\0\x05\0\x40\0\0\0\0\0\0\0\x20\0\0\0\0\0\0\0\x87\0\0\0\x11\ -\0\x06\0\0\0\0\0\0\0\0\0\x07\0\0\0\0\0\0\0\x28\0\0\0\0\0\0\0\x01\0\0\0\x37\0\0\ -\0\x50\0\0\0\0\0\0\0\x01\0\0\0\x38\0\0\0\x88\x13\0\0\0\0\0\0\x01\0\0\0\x39\0\0\ -\0\xd8\x04\0\0\0\0\0\0\x04\0\0\0\x37\0\0\0\xe4\x04\0\0\0\0\0\0\x04\0\0\0\x38\0\ -\0\0\xf0\x04\0\0\0\0\0\0\x04\0\0\0\x39\0\0\0\x08\x05\0\0\0\0\0\0\x04\0\0\0\x3a\ -\0\0\0\x2c\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\0\0\0\0\0\0\0\x04\0\0\0\x01\0\ -\0\0\x50\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\ -\0\x70\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\x66\x20\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x75\ +\x64\x70\x20\x26\x26\0\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\ +\x20\x69\x66\x20\x28\x63\x6f\x6e\x66\x69\x67\x2d\x3e\x68\x61\x73\x68\x5f\x74\ +\x79\x70\x65\x73\x20\x26\x20\x56\x49\x52\x54\x49\x4f\x5f\x4e\x45\x54\x5f\x52\ +\x53\x53\x5f\x48\x41\x53\x48\x5f\x54\x59\x50\x45\x5f\x49\x50\x76\x34\x29\x20\ +\x7b\0\x20\x20\x20\x20\x5f\x5f\x62\x75\x69\x6c\x74\x69\x6e\x5f\x6d\x65\x6d\x63\ +\x70\x79\x28\x26\x72\x73\x73\x5f\x69\x6e\x70\x75\x74\x5b\x2a\x62\x79\x74\x65\ +\x73\x5f\x77\x72\x69\x74\x74\x65\x6e\x5d\x2c\x20\x70\x74\x72\x2c\x20\x73\x69\ +\x7a\x65\x29\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\ +\x6f\x2d\x3e\x69\x73\x5f\x75\x64\x70\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x75\x64\x70\x68\x64\ +\x72\x20\x75\x64\x70\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\ +\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\ 
+\x73\x6b\x62\x2c\x20\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x75\x64\ +\x70\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\x75\x64\x70\x29\x2c\0\x20\x20\x20\x20\ +\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\x66\x20\x28\x63\x6f\x6e\x66\ +\x69\x67\x2d\x3e\x68\x61\x73\x68\x5f\x74\x79\x70\x65\x73\x20\x26\x20\x56\x49\ +\x52\x54\x49\x4f\x5f\x4e\x45\x54\x5f\x52\x53\x53\x5f\x48\x41\x53\x48\x5f\x54\ +\x59\x50\x45\x5f\x49\x50\x76\x36\x29\x20\x7b\0\x20\x20\x20\x20\x66\x6f\x72\x20\ +\x28\x62\x79\x74\x65\x20\x3d\x20\x30\x3b\x20\x62\x79\x74\x65\x20\x3c\x20\x48\ +\x41\x53\x48\x5f\x43\x41\x4c\x43\x55\x4c\x41\x54\x49\x4f\x4e\x5f\x42\x55\x46\ +\x46\x45\x52\x5f\x53\x49\x5a\x45\x3b\x20\x62\x79\x74\x65\x2b\x2b\x29\x20\x7b\0\ +\x20\x20\x20\x20\x5f\x5f\x75\x33\x32\x20\x6c\x65\x66\x74\x6d\x6f\x73\x74\x5f\ +\x33\x32\x5f\x62\x69\x74\x73\x20\x3d\x20\x6b\x65\x79\x2d\x3e\x6c\x65\x66\x74\ +\x6d\x6f\x73\x74\x5f\x33\x32\x5f\x62\x69\x74\x73\x3b\0\x20\x20\x20\x20\x20\x20\ +\x20\x20\x5f\x5f\x75\x38\x20\x69\x6e\x70\x75\x74\x5f\x62\x79\x74\x65\x20\x3d\ +\x20\x69\x6e\x70\x75\x74\x5b\x62\x79\x74\x65\x5d\x3b\0\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x69\x6e\x70\x75\x74\x5f\x62\x79\x74\ +\x65\x20\x26\x20\x28\x31\x20\x3c\x3c\x20\x37\x29\x29\x20\x7b\0\x20\x20\x20\x20\ +\x20\x20\x20\x20\x5f\x5f\x75\x38\x20\x6b\x65\x79\x5f\x62\x79\x74\x65\x20\x3d\ +\x20\x6b\x65\x79\x2d\x3e\x6e\x65\x78\x74\x5f\x62\x79\x74\x65\x5b\x62\x79\x74\ +\x65\x5d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x28\x6c\x65\x66\x74\x6d\x6f\x73\x74\x5f\x33\x32\x5f\x62\x69\ +\x74\x73\x20\x3c\x3c\x20\x31\x29\x20\x7c\x20\x28\x28\x6b\x65\x79\x5f\x62\x79\ +\x74\x65\x20\x26\x20\x28\x31\x20\x3c\x3c\x20\x37\x29\x29\x20\x3e\x3e\x20\x37\ +\x29\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x68\x61\x73\x68\x29\ +\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x5f\x5f\x75\x33\x32\ +\x20\x74\x61\x62\x6c\x65\x5f\x69\x64\x78\x20\x3d\x20\x68\x61\x73\x68\x20\x25\ +\x20\x63\x6f\x6e\x66\x69\x67\x2d\x3e\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\ +\x6e\x73\x5f\x6c\x65\x6e\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x71\x75\x65\x75\x65\x20\x3d\x20\x62\x70\x66\x5f\x6d\x61\x70\x5f\x6c\x6f\x6f\ +\x6b\x75\x70\x5f\x65\x6c\x65\x6d\x28\x26\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\ +\x61\x70\x5f\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\x61\x62\x6c\ +\x65\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x71\ +\x75\x65\x75\x65\x29\x20\x7b\0\x7d\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x2a\x71\x75\x65\x75\x65\ +\x3b\0\x63\x68\x61\x72\0\x5f\x6c\x69\x63\x65\x6e\x73\x65\0\x2e\x6d\x61\x70\x73\ +\0\x6c\x69\x63\x65\x6e\x73\x65\0\x62\x70\x66\x5f\x66\x6c\x6f\x77\x5f\x6b\x65\ +\x79\x73\0\x62\x70\x66\x5f\x73\x6f\x63\x6b\0\0\0\0\x9f\xeb\x01\0\x20\0\0\0\0\0\ +\0\0\x14\0\0\0\x14\0\0\0\xbc\x0c\0\0\xd0\x0c\0\0\0\0\0\0\x08\0\0\0\x30\x02\0\0\ +\x01\0\0\0\0\0\0\0\x26\0\0\0\x10\0\0\0\x30\x02\0\0\xcb\0\0\0\0\0\0\0\x37\x02\0\ +\0\x60\x02\0\0\0\x5c\x08\0\x10\0\0\0\x37\x02\0\0\x91\x02\0\0\x0b\x74\x08\0\x20\ +\0\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x28\0\0\0\x37\x02\0\0\xa4\x02\0\0\x0e\x80\ +\x08\0\x50\0\0\0\x37\x02\0\0\xe9\x02\0\0\x0b\x84\x08\0\x88\0\0\0\x37\x02\0\0\ +\x29\x03\0\0\x10\x8c\x08\0\x90\0\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x98\0\0\0\x37\ +\x02\0\0\x29\x03\0\0\x10\x8c\x08\0\xa0\0\0\0\x37\x02\0\0\x42\x03\0\0\x16\x90\ +\x08\0\xa8\0\0\0\x37\x02\0\0\x42\x03\0\0\x0d\x90\x08\0\xc0\0\0\0\x37\x02\0\0\ 
+\x63\x03\0\0\x0a\x08\x06\0\xe8\0\0\0\x37\x02\0\0\x9a\x03\0\0\x1f\x18\x06\0\x38\ +\x01\0\0\x37\x02\0\0\xca\x03\0\0\x0f\xac\x04\0\x40\x01\0\0\x37\x02\0\0\xe3\x03\ +\0\0\x0c\x2c\x04\0\x50\x01\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x58\x01\0\0\x37\x02\ +\0\0\xf7\x03\0\0\x0b\x38\x04\0\x80\x01\0\0\x37\x02\0\0\x3d\x04\0\0\x09\x40\x04\ +\0\x90\x01\0\0\x37\x02\0\0\x3d\x04\0\0\x09\x40\x04\0\xa0\x01\0\0\x37\x02\0\0\ +\x4c\x04\0\0\x0d\x50\x04\0\xb8\x01\0\0\x37\x02\0\0\x4c\x04\0\0\x05\x50\x04\0\ +\xd8\x01\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\xe0\x01\0\0\x37\x02\0\0\x6a\x04\0\0\ +\x0f\x64\x04\0\0\x02\0\0\x37\x02\0\0\x3d\x04\0\0\x09\x7c\x04\0\x10\x02\0\0\x37\ +\x02\0\0\x3d\x04\0\0\x09\x7c\x04\0\x18\x02\0\0\x37\x02\0\0\xb4\x04\0\0\x0c\x8c\ +\x04\0\x20\x02\0\0\x37\x02\0\0\xc4\x04\0\0\x09\xc8\x04\0\x48\x02\0\0\x37\x02\0\ +\0\xe0\x04\0\0\x17\xe0\x04\0\x58\x02\0\0\x37\x02\0\0\xfb\x04\0\0\x16\xe8\x04\0\ +\x78\x02\0\0\x37\x02\0\0\xe0\x04\0\0\x17\xe0\x04\0\x80\x02\0\0\x37\x02\0\0\x19\ +\x05\0\0\x0f\xec\x04\0\xa8\x02\0\0\x37\x02\0\0\x5c\x05\0\0\x0d\xf4\x04\0\xb8\ +\x02\0\0\x37\x02\0\0\x5c\x05\0\0\x0d\xf4\x04\0\xc0\x02\0\0\x37\x02\0\0\x6f\x05\ +\0\0\x22\x0c\x05\0\xc8\x02\0\0\x37\x02\0\0\x6f\x05\0\0\x39\x0c\x05\0\xd8\x02\0\ +\0\x37\x02\0\0\x6f\x05\0\0\x20\x0c\x05\0\xe8\x02\0\0\x37\x02\0\0\xbd\x05\0\0\ +\x1b\x04\x05\0\xf0\x02\0\0\x37\x02\0\0\xbd\x05\0\0\x16\x04\x05\0\xf8\x02\0\0\ +\x37\x02\0\0\xde\x05\0\0\x1b\x08\x05\0\0\x03\0\0\x37\x02\0\0\xde\x05\0\0\x16\ +\x08\x05\0\x08\x03\0\0\x37\x02\0\0\xff\x05\0\0\x1a\x14\x05\0\x10\x03\0\0\x37\ +\x02\0\0\x22\x06\0\0\x18\x18\x05\0\x18\x03\0\0\x37\x02\0\0\x22\x06\0\0\x1c\x18\ +\x05\0\x30\x03\0\0\x37\x02\0\0\x6f\x05\0\0\x1d\x0c\x05\0\x38\x03\0\0\x37\x02\0\ +\0\x42\x06\0\0\x15\x74\x05\0\x48\x03\0\0\x37\x02\0\0\x42\x06\0\0\x1a\x74\x05\0\ +\x60\x03\0\0\x37\x02\0\0\x76\x06\0\0\x0d\x78\x05\0\x80\x03\0\0\x37\x02\0\0\xa0\ +\x06\0\0\x1a\x7c\x05\0\x90\x03\0\0\x37\x02\0\0\xbe\x06\0\0\x1b\x84\x05\0\xb0\ +\x03\0\0\x37\x02\0\0\xa0\x06\0\0\x1a\x7c\x05\0\xb8\x03\0\0\x37\x02\0\0\xe2\x06\ +\0\0\x13\x88\x05\0\xe0\x03\0\0\x37\x02\0\0\x33\x07\0\0\x11\x90\x05\0\xf0\x03\0\ +\0\x37\x02\0\0\x33\x07\0\0\x11\x90\x05\0\xf8\x03\0\0\x37\x02\0\0\0\0\0\0\0\0\0\ +\0\x18\x04\0\0\x37\x02\0\0\x4a\x07\0\0\x15\x34\x06\0\x20\x04\0\0\x37\x02\0\0\ +\x4a\x07\0\0\x09\x34\x06\0\x28\x04\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x78\x04\0\0\ +\x37\x02\0\0\x69\x07\0\0\x19\x38\x06\0\x80\x04\0\0\x37\x02\0\0\x69\x07\0\0\x20\ +\x38\x06\0\xa0\x04\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\xf0\x04\0\0\x37\x02\0\0\x8b\ +\x07\0\0\x17\x20\x05\0\0\x05\0\0\x37\x02\0\0\xa6\x07\0\0\x18\x28\x05\0\x30\x05\ +\0\0\x37\x02\0\0\x8b\x07\0\0\x17\x20\x05\0\x48\x05\0\0\x37\x02\0\0\xc7\x07\0\0\ +\x0f\x2c\x05\0\x70\x05\0\0\x37\x02\0\0\x5c\x05\0\0\x0d\x34\x05\0\x80\x05\0\0\ +\x37\x02\0\0\x5c\x05\0\0\x0d\x34\x05\0\x88\x05\0\0\x37\x02\0\0\x0c\x08\0\0\x1d\ +\x44\x05\0\xc8\x05\0\0\x37\x02\0\0\x2f\x08\0\0\x1d\x48\x05\0\x08\x06\0\0\x37\ +\x02\0\0\x52\x08\0\0\x1b\x50\x05\0\x10\x06\0\0\x37\x02\0\0\x75\x08\0\0\x05\x3c\ +\x02\0\x58\x06\0\0\x37\x02\0\0\x8d\x08\0\0\x19\xc4\x02\0\xc8\x06\0\0\x37\x02\0\ +\0\0\0\0\0\0\0\0\0\xd0\x06\0\0\x37\x02\0\0\xb3\x08\0\0\x0f\xd4\x02\0\xf8\x06\0\ +\0\x37\x02\0\0\x5c\x05\0\0\x0d\xdc\x02\0\x10\x07\0\0\x37\x02\0\0\x5c\x05\0\0\ +\x0d\xdc\x02\0\x18\x07\0\0\x37\x02\0\0\xf8\x08\0\0\x0d\xec\x02\0\x38\x07\0\0\ +\x37\x02\0\0\x27\x09\0\0\x20\xf0\x02\0\x60\x07\0\0\x37\x02\0\0\x53\x09\0\0\x13\ +\xf8\x02\0\x88\x07\0\0\x37\x02\0\0\x33\x07\0\0\x11\0\x03\0\xa0\x07\0\0\x37\x02\ +\0\0\x33\x07\0\0\x11\0\x03\0\xa8\x07\0\0\x37\x02\0\0\x9b\x09\0\0\x19\x10\x03\0\ 
+\xb0\x07\0\0\x37\x02\0\0\x9b\x09\0\0\x34\x10\x03\0\xd8\x07\0\0\x37\x02\0\0\xd1\ +\x09\0\0\x15\x24\x03\0\xe8\x07\0\0\x37\x02\0\0\x12\x0a\0\0\x17\x20\x03\0\x10\ +\x08\0\0\x37\x02\0\0\x49\x0a\0\0\x15\x30\x03\0\x28\x08\0\0\x37\x02\0\0\x49\x0a\ +\0\0\x15\x30\x03\0\x30\x08\0\0\x37\x02\0\0\x64\x0a\0\0\x27\x40\x03\0\x58\x08\0\ +\0\x37\x02\0\0\x8f\x0a\0\0\x27\x5c\x03\0\x68\x08\0\0\x37\x02\0\0\xbf\x0a\0\0\ +\x1c\xc0\x03\0\x70\x08\0\0\x37\x02\0\0\xfb\x0a\0\0\x20\xcc\x03\0\x80\x08\0\0\ +\x37\x02\0\0\xfb\x0a\0\0\x2f\xcc\x03\0\x88\x08\0\0\x37\x02\0\0\xfb\x0a\0\0\x36\ +\xcc\x03\0\x90\x08\0\0\x37\x02\0\0\xfb\x0a\0\0\x15\xcc\x03\0\xf8\x08\0\0\x37\ +\x02\0\0\x37\x0b\0\0\x43\x70\x03\0\x18\x09\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x20\ +\x09\0\0\x37\x02\0\0\x37\x0b\0\0\x17\x70\x03\0\x48\x09\0\0\x37\x02\0\0\x49\x0a\ +\0\0\x15\x78\x03\0\x60\x09\0\0\x37\x02\0\0\x49\x0a\0\0\x15\x78\x03\0\x68\x09\0\ +\0\x37\x02\0\0\x87\x0b\0\0\x19\x88\x03\0\x70\x09\0\0\x37\x02\0\0\x87\x0b\0\0\ +\x15\x88\x03\0\x78\x09\0\0\x37\x02\0\0\xb7\x0b\0\0\x19\x90\x03\0\x80\x09\0\0\ +\x37\x02\0\0\xe7\x0b\0\0\x1b\x8c\x03\0\xb0\x09\0\0\x37\x02\0\0\x22\x0c\0\0\x19\ +\xa0\x03\0\xc8\x09\0\0\x37\x02\0\0\x22\x0c\0\0\x19\xa0\x03\0\xd0\x09\0\0\x37\ +\x02\0\0\x41\x0c\0\0\x2b\xb0\x03\0\xf0\x09\0\0\x37\x02\0\0\xbf\x0a\0\0\x1f\xc0\ +\x03\0\x10\x0a\0\0\x37\x02\0\0\x70\x0c\0\0\x21\xe0\x03\0\x20\x0a\0\0\x37\x02\0\ +\0\x98\x0c\0\0\x20\xf0\x03\0\x28\x0a\0\0\x37\x02\0\0\x98\x0c\0\0\x2c\xf0\x03\0\ +\x40\x0a\0\0\x37\x02\0\0\x98\x0c\0\0\x14\xf0\x03\0\x50\x0a\0\0\x37\x02\0\0\xc8\ +\x0c\0\0\x20\xec\x03\0\x58\x0a\0\0\x37\x02\0\0\x75\x08\0\0\x05\x3c\x02\0\xa0\ +\x0a\0\0\x37\x02\0\0\xf0\x0c\0\0\x38\xcc\x02\0\xc0\x0a\0\0\x37\x02\0\0\xf0\x0c\ +\0\0\x05\xcc\x02\0\xd8\x0a\0\0\x37\x02\0\0\x75\x08\0\0\x05\x3c\x02\0\xe8\x0a\0\ +\0\x37\x02\0\0\x2e\x0d\0\0\x1c\xd0\x06\0\xf0\x0a\0\0\x37\x02\0\0\x2e\x0d\0\0\ +\x10\xd0\x06\0\xf8\x0a\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x48\x0b\0\0\x37\x02\0\0\ +\x69\x07\0\0\x19\xd4\x06\0\x50\x0b\0\0\x37\x02\0\0\x69\x07\0\0\x20\xd4\x06\0\ +\x88\x0b\0\0\x37\x02\0\0\x54\x0d\0\0\x2d\x0c\x07\0\x98\x0b\0\0\x37\x02\0\0\x54\ +\x0d\0\0\x1d\x0c\x07\0\xa0\x0b\0\0\x37\x02\0\0\x54\x0d\0\0\x2d\x0c\x07\0\xb0\ +\x0b\0\0\x37\x02\0\0\x83\x0d\0\0\x2d\xe0\x06\0\xe0\x0b\0\0\x37\x02\0\0\x83\x0d\ +\0\0\x1d\xe0\x06\0\xf0\x0b\0\0\x37\x02\0\0\x83\x0d\0\0\x2d\xe0\x06\0\0\x0c\0\0\ +\x37\x02\0\0\0\0\0\0\0\0\0\0\xd0\x0c\0\0\x37\x02\0\0\xb2\x0d\0\0\x20\x74\x06\0\ +\xd8\x0c\0\0\x37\x02\0\0\xb2\x0d\0\0\x27\x74\x06\0\0\x0d\0\0\x37\x02\0\0\xdb\ +\x0d\0\0\x27\xb0\x06\0\x08\x0d\0\0\x37\x02\0\0\xdb\x0d\0\0\x14\xb0\x06\0\x10\ +\x0d\0\0\x37\x02\0\0\x24\x0e\0\0\x05\xa4\x01\0\x20\x0d\0\0\x37\x02\0\0\x24\x0e\ +\0\0\x05\xa4\x01\0\x50\x0d\0\0\x37\x02\0\0\x5c\x05\0\0\x0d\x60\x05\0\x60\x0d\0\ +\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x70\x0d\0\0\x37\x02\0\0\xb2\x0d\0\0\x20\x50\x07\ +\0\x78\x0d\0\0\x37\x02\0\0\xb2\x0d\0\0\x27\x50\x07\0\xb0\x0d\0\0\x37\x02\0\0\ +\x54\x0d\0\0\x2d\x88\x07\0\xc0\x0d\0\0\x37\x02\0\0\x54\x0d\0\0\x1d\x88\x07\0\ +\xc8\x0d\0\0\x37\x02\0\0\x54\x0d\0\0\x2d\x88\x07\0\xd8\x0d\0\0\x37\x02\0\0\x83\ +\x0d\0\0\x2d\x5c\x07\0\x08\x0e\0\0\x37\x02\0\0\x83\x0d\0\0\x1d\x5c\x07\0\x18\ +\x0e\0\0\x37\x02\0\0\x83\x0d\0\0\x2d\x5c\x07\0\x30\x0e\0\0\x37\x02\0\0\x61\x0e\ +\0\0\x1a\xac\x05\0\x40\x0e\0\0\x37\x02\0\0\x7f\x0e\0\0\x1b\xb4\x05\0\x50\x0e\0\ +\0\x37\x02\0\0\x61\x0e\0\0\x1a\xac\x05\0\x58\x0e\0\0\x37\x02\0\0\xa3\x0e\0\0\ +\x13\xb8\x05\0\x80\x0e\0\0\x37\x02\0\0\x33\x07\0\0\x11\xc0\x05\0\x90\x0e\0\0\ +\x37\x02\0\0\x33\x07\0\0\x11\xc0\x05\0\xa0\x0e\0\0\x37\x02\0\0\x75\x08\0\0\x05\ 
+\x3c\x02\0\xb0\x0e\0\0\x37\x02\0\0\xf4\x0e\0\0\x27\xd4\x07\0\xc0\x0e\0\0\x37\ +\x02\0\0\xf4\x0e\0\0\x14\xd4\x07\0\xe0\x0e\0\0\x37\x02\0\0\x83\x0d\0\0\x2d\xd8\ +\x07\0\xf0\x0e\0\0\x37\x02\0\0\x83\x0d\0\0\x1d\xd8\x07\0\xf8\x0e\0\0\x37\x02\0\ +\0\x83\x0d\0\0\x2d\xd8\x07\0\x20\x0f\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x70\x0f\0\ +\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x80\x0f\0\0\x37\x02\0\0\x54\x0d\0\0\x1d\x04\x08\ +\0\x88\x0f\0\0\x37\x02\0\0\x54\x0d\0\0\x2d\x04\x08\0\x98\x0f\0\0\x37\x02\0\0\ +\x24\x0e\0\0\x05\xa4\x01\0\xe8\x0f\0\0\x37\x02\0\0\x24\x0e\0\0\x05\xa4\x01\0\ +\x20\x10\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x30\x10\0\0\x37\x02\0\0\x3d\x0f\0\0\ +\x05\xdc\x01\0\x38\x10\0\0\x37\x02\0\0\x7f\x0f\0\0\x23\xd0\x01\0\x50\x10\0\0\ +\x37\x02\0\0\0\0\0\0\0\0\0\0\x58\x10\0\0\x37\x02\0\0\xb3\x0f\0\0\x1b\xe0\x01\0\ +\x78\x10\0\0\x37\x02\0\0\xda\x0f\0\0\x11\xf4\x01\0\x90\x10\0\0\x37\x02\0\0\x03\ +\x10\0\0\x19\xe4\x01\0\xa8\x10\0\0\x37\x02\0\0\x31\x10\0\0\x27\x08\x02\0\xc0\ +\x10\0\0\x37\x02\0\0\x31\x10\0\0\x2d\x08\x02\0\xc8\x10\0\0\x37\x02\0\0\xda\x0f\ +\0\0\x11\xf4\x01\0\x08\x11\0\0\x37\x02\0\0\x31\x10\0\0\x27\x08\x02\0\x10\x11\0\ +\0\x37\x02\0\0\x31\x10\0\0\x2d\x08\x02\0\x18\x11\0\0\x37\x02\0\0\xda\x0f\0\0\ +\x11\xf4\x01\0\x40\x11\0\0\x37\x02\0\0\x31\x10\0\0\x27\x08\x02\0\x60\x11\0\0\ +\x37\x02\0\0\x31\x10\0\0\x2d\x08\x02\0\x68\x11\0\0\x37\x02\0\0\xda\x0f\0\0\x11\ +\xf4\x01\0\x90\x11\0\0\x37\x02\0\0\x31\x10\0\0\x27\x08\x02\0\xb0\x11\0\0\x37\ +\x02\0\0\x31\x10\0\0\x2d\x08\x02\0\xb8\x11\0\0\x37\x02\0\0\xda\x0f\0\0\x11\xf4\ +\x01\0\xf8\x11\0\0\x37\x02\0\0\x31\x10\0\0\x27\x08\x02\0\0\x12\0\0\x37\x02\0\0\ +\x31\x10\0\0\x2d\x08\x02\0\x08\x12\0\0\x37\x02\0\0\xda\x0f\0\0\x11\xf4\x01\0\ +\x48\x12\0\0\x37\x02\0\0\x31\x10\0\0\x27\x08\x02\0\x50\x12\0\0\x37\x02\0\0\x31\ +\x10\0\0\x2d\x08\x02\0\x58\x12\0\0\x37\x02\0\0\xda\x0f\0\0\x11\xf4\x01\0\x98\ +\x12\0\0\x37\x02\0\0\x31\x10\0\0\x27\x08\x02\0\xa0\x12\0\0\x37\x02\0\0\x31\x10\ +\0\0\x2d\x08\x02\0\xa8\x12\0\0\x37\x02\0\0\xda\x0f\0\0\x11\xf4\x01\0\xd0\x12\0\ +\0\x37\x02\0\0\x31\x10\0\0\x27\x08\x02\0\xd8\x12\0\0\x37\x02\0\0\x31\x10\0\0\ +\x2d\x08\x02\0\xe0\x12\0\0\x37\x02\0\0\x3d\x0f\0\0\x3d\xdc\x01\0\xf0\x12\0\0\ +\x37\x02\0\0\x3d\x0f\0\0\x05\xdc\x01\0\0\x13\0\0\x37\x02\0\0\x7d\x10\0\0\x0d\ +\xa4\x08\0\x10\x13\0\0\x37\x02\0\0\x7d\x10\0\0\x0d\xa4\x08\0\x18\x13\0\0\x37\ +\x02\0\0\x91\x10\0\0\x2e\xa8\x08\0\x38\x13\0\0\x37\x02\0\0\x91\x10\0\0\x24\xa8\ +\x08\0\x40\x13\0\0\x37\x02\0\0\x91\x10\0\0\x13\xa8\x08\0\x50\x13\0\0\x37\x02\0\ +\0\x91\x10\0\0\x2e\xa8\x08\0\x58\x13\0\0\x37\x02\0\0\xd0\x10\0\0\x15\xb4\x08\0\ +\x70\x13\0\0\x37\x02\0\0\x18\x11\0\0\x11\xc0\x08\0\x78\x13\0\0\x37\x02\0\0\0\0\ +\0\0\0\0\0\0\x98\x13\0\0\x37\x02\0\0\x31\x11\0\0\x01\xe4\x08\0\xa0\x13\0\0\x37\ +\x02\0\0\x33\x11\0\0\x18\xc4\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\x03\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x2d\x01\0\0\0\0\x03\0\ +\x98\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x6f\x01\0\0\0\0\x03\0\xb8\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\x46\x01\0\0\0\0\x03\0\x78\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xbd\0\ +\0\0\0\0\x03\0\xd0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\x02\0\0\0\0\x03\0\x20\ +\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\x01\0\0\0\0\x03\0\xe8\x04\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\xcc\0\0\0\0\0\x03\0\x18\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x25\x01\0\ +\0\0\0\x03\0\xe8\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x32\x02\0\0\0\0\x03\0\x38\x03\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x2a\x02\0\0\0\0\x03\0\x28\x0e\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\xec\0\0\0\0\0\x03\0\xf8\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x22\x02\0\0\0\ 
+\0\x03\0\xe8\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x5f\x01\0\0\0\0\x03\0\xd0\x0c\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\x76\x01\0\0\0\0\x03\0\xa0\x04\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\xe8\x01\0\0\0\0\x03\0\x28\x10\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x97\x01\0\0\0\0\ +\x03\0\x68\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x42\x02\0\0\0\0\x03\0\xa0\x0e\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\xe0\x01\0\0\0\0\x03\0\x50\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\xaf\x01\0\0\0\0\x03\0\xc0\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd0\x01\0\0\0\0\ +\x03\0\x50\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa7\x01\0\0\0\0\x03\0\x48\x08\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\x7e\x01\0\0\0\0\x03\0\x10\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\x67\x01\0\0\0\0\x03\0\x20\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xf9\x01\0\0\0\0\ +\x03\0\xd8\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x86\x01\0\0\0\0\x03\0\xf8\x08\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\x3a\x02\0\0\0\0\x03\0\x68\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\xd8\x01\0\0\0\0\x03\0\xe0\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x9f\x01\0\0\0\0\ +\x03\0\x38\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xfc\0\0\0\0\0\x03\0\xe8\x09\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\x3e\x01\0\0\0\0\x03\0\xd8\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\xf4\0\0\0\0\0\x03\0\x98\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd4\0\0\0\0\0\x03\0\ +\xc8\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc8\x01\0\0\0\0\x03\0\x70\x0d\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\x57\x01\0\0\0\0\x03\0\x98\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1d\ +\x01\0\0\0\0\x03\0\xb0\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc4\0\0\0\0\0\x03\0\xe0\ +\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xf1\x01\0\0\0\0\x03\0\0\x0c\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\x1a\x02\0\0\0\0\x03\0\xf8\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xe4\0\0\0\ +\0\0\x03\0\0\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc0\x01\0\0\0\0\x03\0\xb0\x0e\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\x36\x01\0\0\0\0\x03\0\xc0\x0d\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\xdc\0\0\0\0\0\x03\0\xd8\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x12\x02\0\0\0\0\ +\x03\0\x08\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x4f\x01\0\0\0\0\x03\0\xf0\x0e\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\x15\x01\0\0\0\0\x03\0\x08\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\x4a\x02\0\0\0\0\x03\0\xe8\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x09\x02\0\0\0\0\ +\x03\0\x20\x10\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb7\x01\0\0\0\0\x03\0\x48\x10\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\x8e\x01\0\0\0\0\x03\0\0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\x0c\x01\0\0\0\0\x03\0\xa0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x59\0\0\0\x12\0\x03\ +\0\0\0\0\0\0\0\0\0\xb0\x13\0\0\0\0\0\0\x3e\0\0\0\x11\0\x05\0\0\0\0\0\0\0\0\0\ +\x28\0\0\0\0\0\0\0\x01\0\0\0\x11\0\x05\0\x28\0\0\0\0\0\0\0\x28\0\0\0\0\0\0\0\ +\x86\0\0\0\x11\0\x05\0\x50\0\0\0\0\0\0\0\x28\0\0\0\0\0\0\0\x7d\0\0\0\x11\0\x06\ +\0\0\0\0\0\0\0\0\0\x07\0\0\0\0\0\0\0\x28\0\0\0\0\0\0\0\x01\0\0\0\x35\0\0\0\x50\ +\0\0\0\0\0\0\0\x01\0\0\0\x36\0\0\0\x58\x13\0\0\0\0\0\0\x01\0\0\0\x37\0\0\0\x20\ +\x05\0\0\0\0\0\0\x04\0\0\0\x35\0\0\0\x2c\x05\0\0\0\0\0\0\x04\0\0\0\x36\0\0\0\ +\x38\x05\0\0\0\0\0\0\x04\0\0\0\x37\0\0\0\x50\x05\0\0\0\0\0\0\x04\0\0\0\x38\0\0\ +\0\x2c\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\x50\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\x70\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ \x90\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ \xb0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ \xd0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ @@ -911,61 +916,63 @@ static inline const void *rss_bpf__elf_bytes(size_t *sz) \0\x40\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\ 
\0\0\x60\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x0c\0\0\0\0\0\0\x04\0\0\0\x01\ \0\0\0\x80\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x0c\0\0\0\0\0\0\x04\0\0\0\ -\x01\0\0\0\x40\x41\x42\x43\x44\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\ -\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\x5f\x6b\x65\x79\0\x2e\x74\x65\x78\x74\0\ -\x2e\x72\x65\x6c\x2e\x42\x54\x46\x2e\x65\x78\x74\0\x2e\x6d\x61\x70\x73\0\x74\ -\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x63\x6f\x6e\x66\x69\x67\x75\x72\ -\x61\x74\x69\x6f\x6e\x73\0\x74\x75\x6e\x5f\x72\x73\x73\x5f\x73\x74\x65\x65\x72\ -\x69\x6e\x67\x5f\x70\x72\x6f\x67\0\x2e\x72\x65\x6c\x74\x75\x6e\x5f\x72\x73\x73\ -\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\0\x2e\x6c\x6c\x76\x6d\x5f\x61\x64\x64\x72\ -\x73\x69\x67\0\x5f\x6c\x69\x63\x65\x6e\x73\x65\0\x74\x61\x70\x5f\x72\x73\x73\ -\x5f\x6d\x61\x70\x5f\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\x61\ -\x62\x6c\x65\0\x2e\x73\x74\x72\x74\x61\x62\0\x2e\x73\x79\x6d\x74\x61\x62\0\x2e\ -\x72\x65\x6c\x2e\x42\x54\x46\0\x4c\x42\x42\x30\x5f\x39\0\x4c\x42\x42\x30\x5f\ -\x39\x39\0\x4c\x42\x42\x30\x5f\x37\x39\0\x4c\x42\x42\x30\x5f\x31\x30\x39\0\x4c\ -\x42\x42\x30\x5f\x38\x38\0\x4c\x42\x42\x30\x5f\x34\x38\0\x4c\x42\x42\x30\x5f\ -\x31\x38\0\x4c\x42\x42\x30\x5f\x31\x30\x38\0\x4c\x42\x42\x30\x5f\x39\x37\0\x4c\ -\x42\x42\x30\x5f\x37\x37\0\x4c\x42\x42\x30\x5f\x36\x37\0\x4c\x42\x42\x30\x5f\ -\x34\x37\0\x4c\x42\x42\x30\x5f\x31\x37\0\x4c\x42\x42\x30\x5f\x36\x36\0\x4c\x42\ -\x42\x30\x5f\x34\x36\0\x4c\x42\x42\x30\x5f\x33\x36\0\x4c\x42\x42\x30\x5f\x31\ -\x30\x36\0\x4c\x42\x42\x30\x5f\x35\x35\0\x4c\x42\x42\x30\x5f\x34\x35\0\x4c\x42\ -\x42\x30\x5f\x33\x35\0\x4c\x42\x42\x30\x5f\x32\x35\0\x4c\x42\x42\x30\x5f\x31\ -\x30\x35\0\x4c\x42\x42\x30\x5f\x34\0\x4c\x42\x42\x30\x5f\x39\x34\0\x4c\x42\x42\ -\x30\x5f\x38\x34\0\x4c\x42\x42\x30\x5f\x35\x34\0\x4c\x42\x42\x30\x5f\x34\x34\0\ -\x4c\x42\x42\x30\x5f\x33\x34\0\x4c\x42\x42\x30\x5f\x31\x30\x34\0\x4c\x42\x42\ -\x30\x5f\x38\x33\0\x4c\x42\x42\x30\x5f\x35\x33\0\x4c\x42\x42\x30\x5f\x32\x33\0\ -\x4c\x42\x42\x30\x5f\x31\x30\x33\0\x4c\x42\x42\x30\x5f\x39\x32\0\x4c\x42\x42\ -\x30\x5f\x38\x32\0\x4c\x42\x42\x30\x5f\x37\x32\0\x4c\x42\x42\x30\x5f\x36\x32\0\ -\x4c\x42\x42\x30\x5f\x35\x32\0\x4c\x42\x42\x30\x5f\x34\x32\0\x4c\x42\x42\x30\ -\x5f\x32\x32\0\x4c\x42\x42\x30\x5f\x31\x30\x32\0\x4c\x42\x42\x30\x5f\x38\x31\0\ -\x4c\x42\x42\x30\x5f\x36\x31\0\x4c\x42\x42\x30\x5f\x35\x31\0\x4c\x42\x42\x30\ -\x5f\x31\x31\0\x4c\x42\x42\x30\x5f\x39\x30\0\x4c\x42\x42\x30\x5f\x37\x30\0\x4c\ -\x42\x42\x30\x5f\x36\x30\0\x4c\x42\x42\x30\x5f\x35\x30\0\x4c\x42\x42\x30\x5f\ -\x34\x30\0\x4c\x42\x42\x30\x5f\x32\x30\0\x4c\x42\x42\x30\x5f\x31\x31\x30\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xae\0\0\0\x03\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x25\x4a\0\0\0\0\0\0\x6d\x02\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1a\0\0\0\x01\0\0\0\x06\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\x68\0\0\0\x01\0\0\0\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\x40\0\0\0\0\0\0\0\xe0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\x64\0\0\0\x09\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\x3d\0\0\ -\0\0\0\0\x30\0\0\0\0\0\0\0\x0c\0\0\0\x03\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\0\0\ -\0\0\x2d\0\0\0\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x20\x14\0\0\0\0\0\0\ -\x60\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x88\0\0\0\ 
-\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x80\x14\0\0\0\0\0\0\x07\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc2\0\0\0\x01\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x88\x14\0\0\0\0\0\0\x8d\x16\0\0\0\0\0\0\0\0\0\0\0\ -\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xbe\0\0\0\x09\0\0\0\x40\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\x70\x3d\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\x0c\0\0\0\x07\0\0\0\x08\ -\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x24\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\0\0\x18\x2b\0\0\0\0\0\0\xa0\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\x20\0\0\0\x09\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb0\ -\x3d\0\0\0\0\0\0\x70\x0c\0\0\0\0\0\0\x0c\0\0\0\x09\0\0\0\x08\0\0\0\0\0\0\0\x10\ -\0\0\0\0\0\0\0\x79\0\0\0\x03\x4c\xff\x6f\0\0\0\x80\0\0\0\0\0\0\0\0\0\0\0\0\x20\ -\x4a\0\0\0\0\0\0\x05\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\0\0\xb6\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb8\x37\0\0\0\0\0\0\ -\x88\x05\0\0\0\0\0\0\x01\0\0\0\x36\0\0\0\x08\0\0\0\0\0\0\0\x18\0\0\0\0\0\0\0"; +\x01\0\0\0\xa0\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x0c\0\0\0\0\0\0\x04\0\0\ +\0\x01\0\0\0\xc0\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x0c\0\0\0\0\0\0\x04\0\ +\0\0\x01\0\0\0\xe0\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x3e\x3f\x40\x41\x42\0\ +\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x74\x6f\x65\x70\x6c\x69\x74\ +\x7a\x5f\x6b\x65\x79\0\x2e\x74\x65\x78\x74\0\x2e\x72\x65\x6c\x2e\x42\x54\x46\ +\x2e\x65\x78\x74\0\x2e\x72\x65\x6c\x73\x6f\x63\x6b\x65\x74\0\x2e\x6d\x61\x70\ +\x73\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x63\x6f\x6e\x66\x69\x67\ +\x75\x72\x61\x74\x69\x6f\x6e\x73\0\x74\x75\x6e\x5f\x72\x73\x73\x5f\x73\x74\x65\ +\x65\x72\x69\x6e\x67\x5f\x70\x72\x6f\x67\0\x2e\x6c\x6c\x76\x6d\x5f\x61\x64\x64\ +\x72\x73\x69\x67\0\x5f\x6c\x69\x63\x65\x6e\x73\x65\0\x74\x61\x70\x5f\x72\x73\ +\x73\x5f\x6d\x61\x70\x5f\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\ +\x61\x62\x6c\x65\0\x2e\x73\x74\x72\x74\x61\x62\0\x2e\x73\x79\x6d\x74\x61\x62\0\ +\x2e\x72\x65\x6c\x2e\x42\x54\x46\0\x4c\x42\x42\x30\x5f\x39\0\x4c\x42\x42\x30\ +\x5f\x37\x39\0\x4c\x42\x42\x30\x5f\x35\x39\0\x4c\x42\x42\x30\x5f\x34\x39\0\x4c\ +\x42\x42\x30\x5f\x38\x38\0\x4c\x42\x42\x30\x5f\x36\x38\0\x4c\x42\x42\x30\x5f\ +\x35\x38\0\x4c\x42\x42\x30\x5f\x34\x38\0\x4c\x42\x42\x30\x5f\x33\x38\0\x4c\x42\ +\x42\x30\x5f\x31\x38\0\x4c\x42\x42\x30\x5f\x31\x30\x38\0\x4c\x42\x42\x30\x5f\ +\x39\x37\0\x4c\x42\x42\x30\x5f\x37\x37\0\x4c\x42\x42\x30\x5f\x31\x37\0\x4c\x42\ +\x42\x30\x5f\x31\x30\x37\0\x4c\x42\x42\x30\x5f\x38\x36\0\x4c\x42\x42\x30\x5f\ +\x34\x36\0\x4c\x42\x42\x30\x5f\x31\x30\x36\0\x4c\x42\x42\x30\x5f\x39\x35\0\x4c\ +\x42\x42\x30\x5f\x37\x35\0\x4c\x42\x42\x30\x5f\x36\x35\0\x4c\x42\x42\x30\x5f\ +\x34\x35\0\x4c\x42\x42\x30\x5f\x34\0\x4c\x42\x42\x30\x5f\x36\x34\0\x4c\x42\x42\ +\x30\x5f\x34\x34\0\x4c\x42\x42\x30\x5f\x33\x34\0\x4c\x42\x42\x30\x5f\x31\x30\ +\x34\0\x4c\x42\x42\x30\x5f\x35\x33\0\x4c\x42\x42\x30\x5f\x34\x33\0\x4c\x42\x42\ +\x30\x5f\x33\x33\0\x4c\x42\x42\x30\x5f\x32\x33\0\x4c\x42\x42\x30\x5f\x31\x30\ +\x33\0\x4c\x42\x42\x30\x5f\x39\x32\0\x4c\x42\x42\x30\x5f\x38\x32\0\x4c\x42\x42\ +\x30\x5f\x35\x32\0\x4c\x42\x42\x30\x5f\x34\x32\0\x4c\x42\x42\x30\x5f\x32\x32\0\ +\x4c\x42\x42\x30\x5f\x31\x30\x32\0\x4c\x42\x42\x30\x5f\x38\x31\0\x4c\x42\x42\ +\x30\x5f\x35\x31\0\x4c\x42\x42\x30\x5f\x31\x31\0\x4c\x42\x42\x30\x5f\x31\x30\ +\x31\0\x4c\x42\x42\x30\x5f\x39\x30\0\x4c\x42\x42\x30\x5f\x38\x30\0\x4c\x42\x42\ +\x30\x5f\x37\x30\0\x4c\x42\x42\x30\x5f\x36\x30\0\x4c\x42\x42\x30\x5f\x35\x30\0\ 
+\x4c\x42\x42\x30\x5f\x34\x30\0\x4c\x42\x42\x30\x5f\x32\x30\0\x4c\x42\x42\x30\ +\x5f\x31\x30\x30\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa4\0\0\0\ +\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xe5\x4a\0\0\0\0\0\0\x53\x02\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1a\0\0\0\x01\0\0\0\x06\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x31\0\0\0\x01\0\0\0\x06\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\xb0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\x2d\0\0\0\x09\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\xb0\x3d\0\0\0\0\0\0\x30\0\0\0\0\0\0\0\x0c\0\0\0\x03\0\0\0\x08\0\0\0\0\0\0\0\ +\x10\0\0\0\0\0\0\0\x38\0\0\0\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xf0\ +\x13\0\0\0\0\0\0\x78\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\x7e\0\0\0\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x68\x14\0\0\0\0\0\ +\0\x07\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb8\0\0\ +\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x70\x14\0\0\0\0\0\0\xf5\x16\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb4\0\0\0\x09\0\0\0\ +\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xe0\x3d\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\x0c\0\ +\0\0\x07\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x24\0\0\0\x01\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\x68\x2b\0\0\0\0\0\0\xf0\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x20\0\0\0\x09\0\0\0\x40\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\x20\x3e\0\0\0\0\0\0\xc0\x0c\0\0\0\0\0\0\x0c\0\0\0\x09\0\0\0\x08\0\ +\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x6f\0\0\0\x03\x4c\xff\x6f\0\0\0\x80\0\0\0\0\0\0\ +\0\0\0\0\0\0\xe0\x4a\0\0\0\0\0\0\x05\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\xac\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x58\ +\x38\0\0\0\0\0\0\x58\x05\0\0\0\0\0\0\x01\0\0\0\x34\0\0\0\x08\0\0\0\0\0\0\0\x18\ +\0\0\0\0\0\0\0"; } #ifdef __cplusplus diff --git a/ebpf/trace.h b/ebpf/trace.h deleted file mode 100644 index abefc46ab10..00000000000 --- a/ebpf/trace.h +++ /dev/null @@ -1 +0,0 @@ -#include "trace/trace-ebpf.h" diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc index 1610472cfc4..1c85c48a73c 100644 --- a/fpu/softfloat-specialize.c.inc +++ b/fpu/softfloat-specialize.c.inc @@ -152,7 +152,7 @@ static void parts64_default_nan(FloatParts64 *p, float_status *status) /* * This case is true for Alpha, ARM, MIPS, OpenRISC, PPC, RISC-V, * S390, SH4, TriCore, and Xtensa. Our other supported targets, - * CRIS, Nios2, and Tile, do not have floating-point. + * CRIS and Nios2, do not have floating-point. 
*/ if (snan_bit_is_one(status)) { /* set all bits other than msb */ diff --git a/fsdev/meson.build b/fsdev/meson.build index 1bec0659245..e20d7255e1e 100644 --- a/fsdev/meson.build +++ b/fsdev/meson.build @@ -1,13 +1,13 @@ fsdev_ss = ss.source_set() fsdev_ss.add(files('qemu-fsdev-opts.c', 'qemu-fsdev-throttle.c')) -fsdev_ss.add(when: 'CONFIG_ALL', if_true: files('qemu-fsdev-dummy.c')) fsdev_ss.add(when: ['CONFIG_FSDEV_9P'], if_true: files( '9p-iov-marshal.c', '9p-marshal.c', 'qemu-fsdev.c', ), if_false: files('qemu-fsdev-dummy.c')) -system_ss.add_all(when: 'CONFIG_LINUX', if_true: fsdev_ss) -system_ss.add_all(when: 'CONFIG_DARWIN', if_true: fsdev_ss) +if host_os in ['linux', 'darwin'] + system_ss.add_all(fsdev_ss) +endif if have_virtfs_proxy_helper executable('virtfs-proxy-helper', diff --git a/gdbstub/gdbstub.c b/gdbstub/gdbstub.c index 46d752bbc2c..9c23d44baf6 100644 --- a/gdbstub/gdbstub.c +++ b/gdbstub/gdbstub.c @@ -47,10 +47,9 @@ typedef struct GDBRegisterState { int base_reg; - int num_regs; gdb_get_reg_cb get_reg; gdb_set_reg_cb set_reg; - const char *xml; + const GDBFeature *feature; } GDBRegisterState; GDBState gdbserver_state; @@ -353,6 +352,7 @@ static const char *get_feature_xml(const char *p, const char **newp, { CPUState *cpu = gdb_get_first_cpu_in_process(process); CPUClass *cc = CPU_GET_CLASS(cpu); + GDBRegisterState *r; size_t len; /* @@ -366,7 +366,6 @@ static const char *get_feature_xml(const char *p, const char **newp, /* Is it the main target xml? */ if (strncmp(p, "target.xml", len) == 0) { if (!process->target_xml) { - GDBRegisterState *r; g_autoptr(GPtrArray) xml = g_ptr_array_new_with_free_func(g_free); g_ptr_array_add( @@ -381,18 +380,12 @@ static const char *get_feature_xml(const char *p, const char **newp, g_markup_printf_escaped("%s", cc->gdb_arch_name(cpu))); } - g_ptr_array_add( - xml, - g_markup_printf_escaped("", - cc->gdb_core_xml_file)); - if (cpu->gdb_regs) { - for (guint i = 0; i < cpu->gdb_regs->len; i++) { - r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i); - g_ptr_array_add( - xml, - g_markup_printf_escaped("", - r->xml)); - } + for (guint i = 0; i < cpu->gdb_regs->len; i++) { + r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i); + g_ptr_array_add( + xml, + g_markup_printf_escaped("", + r->feature->xmlname)); } g_ptr_array_add(xml, g_strdup("")); g_ptr_array_add(xml, NULL); @@ -401,20 +394,11 @@ static const char *get_feature_xml(const char *p, const char **newp, } return process->target_xml; } - /* Is it dynamically generated by the target? */ - if (cc->gdb_get_dynamic_xml) { - g_autofree char *xmlname = g_strndup(p, len); - const char *xml = cc->gdb_get_dynamic_xml(cpu, xmlname); - if (xml) { - return xml; - } - } - /* Is it one of the encoded gdb-xml/ files? */ - for (int i = 0; gdb_static_features[i].xmlname; i++) { - const char *name = gdb_static_features[i].xmlname; - if ((strncmp(name, p, len) == 0) && - strlen(name) == len) { - return gdb_static_features[i].xml; + /* Is it one of the features? 
*/ + for (guint i = 0; i < cpu->gdb_regs->len; i++) { + r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i); + if (strncmp(p, r->feature->xmlname, len) == 0) { + return r->feature->xml; } } @@ -435,9 +419,10 @@ void gdb_feature_builder_init(GDBFeatureBuilder *builder, GDBFeature *feature, builder->feature = feature; builder->xml = g_ptr_array_new(); g_ptr_array_add(builder->xml, header); + builder->regs = g_ptr_array_new(); builder->base_reg = base_reg; feature->xmlname = xmlname; - feature->num_regs = 0; + feature->name = name; } void gdb_feature_builder_append_tag(const GDBFeatureBuilder *builder, @@ -456,10 +441,12 @@ void gdb_feature_builder_append_reg(const GDBFeatureBuilder *builder, const char *type, const char *group) { - if (builder->feature->num_regs < regnum) { - builder->feature->num_regs = regnum; + if (builder->regs->len <= regnum) { + g_ptr_array_set_size(builder->regs, regnum + 1); } + builder->regs->pdata[regnum] = (gpointer *)name; + if (group) { gdb_feature_builder_append_tag( builder, @@ -485,6 +472,9 @@ void gdb_feature_builder_end(const GDBFeatureBuilder *builder) } g_ptr_array_free(builder->xml, TRUE); + + builder->feature->num_regs = builder->regs->len; + builder->feature->regs = (void *)g_ptr_array_free(builder->regs, FALSE); } const GDBFeature *gdb_find_static_feature(const char *xmlname) @@ -500,22 +490,44 @@ const GDBFeature *gdb_find_static_feature(const char *xmlname) g_assert_not_reached(); } -static int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg) +GArray *gdb_get_register_list(CPUState *cpu) +{ + GArray *results = g_array_new(true, true, sizeof(GDBRegDesc)); + + /* registers are only available once the CPU is initialised */ + if (!cpu->gdb_regs) { + return results; + } + + for (int f = 0; f < cpu->gdb_regs->len; f++) { + GDBRegisterState *r = &g_array_index(cpu->gdb_regs, GDBRegisterState, f); + for (int i = 0; i < r->feature->num_regs; i++) { + const char *name = r->feature->regs[i]; + GDBRegDesc desc = { + r->base_reg + i, + name, + r->feature->name + }; + g_array_append_val(results, desc); + } + } + + return results; +} + +int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg) { CPUClass *cc = CPU_GET_CLASS(cpu); - CPUArchState *env = cpu_env(cpu); GDBRegisterState *r; if (reg < cc->gdb_num_core_regs) { return cc->gdb_read_register(cpu, buf, reg); } - if (cpu->gdb_regs) { - for (guint i = 0; i < cpu->gdb_regs->len; i++) { - r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i); - if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) { - return r->get_reg(env, buf, reg - r->base_reg); - } + for (guint i = 0; i < cpu->gdb_regs->len; i++) { + r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i); + if (r->base_reg <= reg && reg < r->base_reg + r->feature->num_regs) { + return r->get_reg(cpu, buf, reg - r->base_reg); } } return 0; @@ -524,58 +536,79 @@ static int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg) static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg) { CPUClass *cc = CPU_GET_CLASS(cpu); - CPUArchState *env = cpu_env(cpu); GDBRegisterState *r; if (reg < cc->gdb_num_core_regs) { return cc->gdb_write_register(cpu, mem_buf, reg); } - if (cpu->gdb_regs) { - for (guint i = 0; i < cpu->gdb_regs->len; i++) { - r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i); - if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) { - return r->set_reg(env, mem_buf, reg - r->base_reg); - } + for (guint i = 0; i < cpu->gdb_regs->len; i++) { + r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i); + 
if (r->base_reg <= reg && reg < r->base_reg + r->feature->num_regs) { + return r->set_reg(cpu, mem_buf, reg - r->base_reg); } } return 0; } +static void gdb_register_feature(CPUState *cpu, int base_reg, + gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg, + const GDBFeature *feature) +{ + GDBRegisterState s = { + .base_reg = base_reg, + .get_reg = get_reg, + .set_reg = set_reg, + .feature = feature + }; + + g_array_append_val(cpu->gdb_regs, s); +} + +void gdb_init_cpu(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + const GDBFeature *feature; + + cpu->gdb_regs = g_array_new(false, false, sizeof(GDBRegisterState)); + + if (cc->gdb_core_xml_file) { + feature = gdb_find_static_feature(cc->gdb_core_xml_file); + gdb_register_feature(cpu, 0, + cc->gdb_read_register, cc->gdb_write_register, + feature); + cpu->gdb_num_regs = cpu->gdb_num_g_regs = feature->num_regs; + } + + if (cc->gdb_num_core_regs) { + cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs; + } +} + void gdb_register_coprocessor(CPUState *cpu, gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg, - int num_regs, const char *xml, int g_pos) + const GDBFeature *feature, int g_pos) { GDBRegisterState *s; guint i; + int base_reg = cpu->gdb_num_regs; - if (cpu->gdb_regs) { - for (i = 0; i < cpu->gdb_regs->len; i++) { - /* Check for duplicates. */ - s = &g_array_index(cpu->gdb_regs, GDBRegisterState, i); - if (strcmp(s->xml, xml) == 0) { - return; - } + for (i = 0; i < cpu->gdb_regs->len; i++) { + /* Check for duplicates. */ + s = &g_array_index(cpu->gdb_regs, GDBRegisterState, i); + if (s->feature == feature) { + return; } - } else { - cpu->gdb_regs = g_array_new(false, false, sizeof(GDBRegisterState)); - i = 0; } - g_array_set_size(cpu->gdb_regs, i + 1); - s = &g_array_index(cpu->gdb_regs, GDBRegisterState, i); - s->base_reg = cpu->gdb_num_regs; - s->num_regs = num_regs; - s->get_reg = get_reg; - s->set_reg = set_reg; - s->xml = xml; + gdb_register_feature(cpu, base_reg, get_reg, set_reg, feature); /* Add to end of list. 
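[Illustration, not part of the patch] With the reworked gdb_register_coprocessor() above, a target hands the stub a GDBFeature instead of a register count plus an XML file name, and the register callbacks appear to take a CPUState now, as the r->get_reg(cpu, ...) calls show. A minimal sketch of a caller, with hypothetical names and a made-up "foo-ext.xml" feature:

    /* Hypothetical target code registering one extra register set. */
    static int foo_gdb_get_ext_reg(CPUState *cs, GByteArray *buf, int reg)
    {
        /* Placeholder: append the value of extension register 'reg'. */
        return gdb_get_reg32(buf, 0);   /* returns bytes appended */
    }

    static int foo_gdb_set_ext_reg(CPUState *cs, uint8_t *mem_buf, int reg)
    {
        /* Placeholder: load extension register 'reg' from mem_buf. */
        return 4;                       /* bytes consumed */
    }

    static void foo_cpu_register_gdb_regs(CPUState *cs)
    {
        /* Look the feature up once; duplicates are skipped by pointer. */
        const GDBFeature *feature = gdb_find_static_feature("foo-ext.xml");

        gdb_register_coprocessor(cs, foo_gdb_get_ext_reg, foo_gdb_set_ext_reg,
                                 feature, 0);
    }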
*/ - cpu->gdb_num_regs += num_regs; + cpu->gdb_num_regs += feature->num_regs; if (g_pos) { - if (g_pos != s->base_reg) { + if (g_pos != base_reg) { error_report("Error: Bad gdb register numbering for '%s', " - "expected %d got %d", xml, g_pos, s->base_reg); + "expected %d got %d", feature->xml, g_pos, base_reg); } else { cpu->gdb_num_g_regs = cpu->gdb_num_regs; } @@ -991,6 +1024,12 @@ static void handle_detach(GArray *params, void *user_ctx) pid = get_param(params, 0)->val_ul; } +#ifdef CONFIG_USER_ONLY + if (gdb_handle_detach_user(pid)) { + return; + } +#endif + process = gdb_get_process(pid); gdb_process_breakpoint_remove_all(process); process->attached = false; @@ -1066,6 +1105,7 @@ static void handle_cont_with_sig(GArray *params, void *user_ctx) static void handle_set_thread(GArray *params, void *user_ctx) { + uint32_t pid, tid; CPUState *cpu; if (params->len != 2) { @@ -1083,8 +1123,14 @@ static void handle_set_thread(GArray *params, void *user_ctx) return; } - cpu = gdb_get_cpu(get_param(params, 1)->thread_id.pid, - get_param(params, 1)->thread_id.tid); + pid = get_param(params, 1)->thread_id.pid; + tid = get_param(params, 1)->thread_id.tid; +#ifdef CONFIG_USER_ONLY + if (gdb_handle_set_thread_user(pid, tid)) { + return; + } +#endif + cpu = gdb_get_cpu(pid, tid); if (!cpu) { gdb_put_packet("E22"); return; @@ -1617,13 +1663,22 @@ static void handle_query_supported(GArray *params, void *user_ctx) if (gdbserver_state.c_cpu->opaque) { g_string_append(gdbserver_state.str_buf, ";qXfer:auxv:read+"); } + g_string_append(gdbserver_state.str_buf, ";QCatchSyscalls+"); + + g_string_append(gdbserver_state.str_buf, ";qXfer:siginfo:read+"); #endif g_string_append(gdbserver_state.str_buf, ";qXfer:exec-file:read+"); #endif - if (params->len && - strstr(get_param(params, 0)->data, "multiprocess+")) { - gdbserver_state.multiprocess = true; + if (params->len) { + const char *gdb_supported = get_param(params, 0)->data; + + if (strstr(gdb_supported, "multiprocess+")) { + gdbserver_state.multiprocess = true; + } +#if defined(CONFIG_USER_ONLY) + gdb_handle_query_supported_user(gdb_supported); +#endif } g_string_append(gdbserver_state.str_buf, ";vContSupported+;multiprocess+"); @@ -1765,6 +1820,12 @@ static const GdbCmdParseEntry gdb_gen_query_table[] = { .cmd_startswith = 1, .schema = "l,l0" }, + { + .handler = gdb_handle_query_xfer_siginfo, + .cmd = "Xfer:siginfo:read::", + .cmd_startswith = 1, + .schema = "l,l0" + }, #endif { .handler = gdb_handle_query_xfer_exec_file, @@ -1810,6 +1871,14 @@ static const GdbCmdParseEntry gdb_gen_set_table[] = { .schema = "l0" }, #endif +#if defined(CONFIG_USER_ONLY) + { + .handler = gdb_handle_set_catch_syscalls, + .cmd = "CatchSyscalls:", + .cmd_startswith = 1, + .schema = "s0", + }, +#endif }; static void handle_gen_query(GArray *params, void *user_ctx) diff --git a/gdbstub/internals.h b/gdbstub/internals.h index 5c0c725e54c..32f9f63297e 100644 --- a/gdbstub/internals.h +++ b/gdbstub/internals.h @@ -115,7 +115,7 @@ void gdb_read_byte(uint8_t ch); /* * Packet acknowledgement - we handle this slightly differently - * between user and softmmu mode, mainly to deal with the differences + * between user and system mode, mainly to deal with the differences * between the flexible chardev and the direct fd approaches. 
* * We currently don't support a negotiated QStartNoAckMode @@ -125,7 +125,7 @@ void gdb_read_byte(uint8_t ch); * gdb_got_immediate_ack() - check ok to continue * * Returns true to continue, false to re-transmit for user only, the - * softmmu stub always returns true. + * system stub always returns true. */ bool gdb_got_immediate_ack(void); /* utility helpers */ @@ -135,11 +135,12 @@ CPUState *gdb_first_attached_cpu(void); void gdb_append_thread_id(CPUState *cpu, GString *buf); int gdb_get_cpu_index(CPUState *cpu); unsigned int gdb_get_max_cpus(void); /* both */ -bool gdb_can_reverse(void); /* softmmu, stub for user */ +bool gdb_can_reverse(void); /* system emulation, stub for user */ +int gdb_target_sigtrap(void); /* user */ void gdb_create_default_process(GDBState *s); -/* signal mapping, common for softmmu, specialised for user-mode */ +/* signal mapping, common for system, specialised for user-mode */ int gdb_signal_to_target(int sig); int gdb_target_signal_to_gdb(int sig); @@ -156,12 +157,12 @@ void gdb_continue(void); int gdb_continue_partial(char *newstates); /* - * Helpers with separate softmmu and user implementations + * Helpers with separate system and user implementations */ void gdb_put_buffer(const uint8_t *buf, int len); /* - * Command handlers - either specialised or softmmu or user only + * Command handlers - either specialised or system or user only */ void gdb_init_gdbserver_state(void); @@ -186,20 +187,25 @@ typedef union GdbCmdVariant { #define get_param(p, i) (&g_array_index(p, GdbCmdVariant, i)) -void gdb_handle_query_rcmd(GArray *params, void *user_ctx); /* softmmu */ +void gdb_handle_query_rcmd(GArray *params, void *ctx); /* system */ void gdb_handle_query_offsets(GArray *params, void *user_ctx); /* user */ void gdb_handle_query_xfer_auxv(GArray *params, void *user_ctx); /*user */ +void gdb_handle_query_xfer_siginfo(GArray *params, void *user_ctx); /*user */ void gdb_handle_v_file_open(GArray *params, void *user_ctx); /* user */ void gdb_handle_v_file_close(GArray *params, void *user_ctx); /* user */ void gdb_handle_v_file_pread(GArray *params, void *user_ctx); /* user */ void gdb_handle_v_file_readlink(GArray *params, void *user_ctx); /* user */ void gdb_handle_query_xfer_exec_file(GArray *params, void *user_ctx); /* user */ +void gdb_handle_set_catch_syscalls(GArray *params, void *user_ctx); /* user */ +void gdb_handle_query_supported_user(const char *gdb_supported); /* user */ +bool gdb_handle_set_thread_user(uint32_t pid, uint32_t tid); /* user */ +bool gdb_handle_detach_user(uint32_t pid); /* user */ -void gdb_handle_query_attached(GArray *params, void *user_ctx); /* both */ +void gdb_handle_query_attached(GArray *params, void *ctx); /* both */ -/* softmmu only */ -void gdb_handle_query_qemu_phy_mem_mode(GArray *params, void *user_ctx); -void gdb_handle_set_qemu_phy_mem_mode(GArray *params, void *user_ctx); +/* system only */ +void gdb_handle_query_qemu_phy_mem_mode(GArray *params, void *ctx); +void gdb_handle_set_qemu_phy_mem_mode(GArray *params, void *ctx); /* sycall handling */ void gdb_handle_file_io(GArray *params, void *user_ctx); @@ -207,11 +213,11 @@ bool gdb_handled_syscall(void); void gdb_disable_syscalls(void); void gdb_syscall_reset(void); -/* user/softmmu specific syscall handling */ +/* user/system specific syscall handling */ void gdb_syscall_handling(const char *syscall_packet); /* - * Break/Watch point support - there is an implementation for softmmu + * Break/Watch point support - there is an implementation for system * and user mode. 
*/ bool gdb_supports_guest_debug(void); diff --git a/gdbstub/meson.build b/gdbstub/meson.build index e5bccba34e5..da5721d8452 100644 --- a/gdbstub/meson.build +++ b/gdbstub/meson.build @@ -14,8 +14,8 @@ gdb_system_ss = ss.source_set() gdb_user_ss.add(files('gdbstub.c', 'user.c')) gdb_system_ss.add(files('gdbstub.c', 'system.c')) -gdb_user_ss = gdb_user_ss.apply(config_targetos, strict: false) -gdb_system_ss = gdb_system_ss.apply(config_targetos, strict: false) +gdb_user_ss = gdb_user_ss.apply({}) +gdb_system_ss = gdb_system_ss.apply({}) libgdb_user = static_library('gdb_user', gdb_user_ss.sources() + genh, diff --git a/gdbstub/system.c b/gdbstub/system.c index 83fd452800b..d2354038553 100644 --- a/gdbstub/system.c +++ b/gdbstub/system.c @@ -1,5 +1,5 @@ /* - * gdb server stub - softmmu specific bits + * gdb server stub - system specific bits * * Debug integration depends on support from the individual * accelerators so most of this involves calling the ops helpers. @@ -488,13 +488,13 @@ bool gdb_can_reverse(void) */ void gdb_handle_query_qemu_phy_mem_mode(GArray *params, - void *user_ctx) + void *ctx) { g_string_printf(gdbserver_state.str_buf, "%d", phy_memory_mode); gdb_put_strbuf(); } -void gdb_handle_set_qemu_phy_mem_mode(GArray *params, void *user_ctx) +void gdb_handle_set_qemu_phy_mem_mode(GArray *params, void *ctx) { if (!params->len) { gdb_put_packet("E22"); @@ -509,7 +509,7 @@ void gdb_handle_set_qemu_phy_mem_mode(GArray *params, void *user_ctx) gdb_put_packet("OK"); } -void gdb_handle_query_rcmd(GArray *params, void *user_ctx) +void gdb_handle_query_rcmd(GArray *params, void *ctx) { const guint8 zero = 0; int len; @@ -539,7 +539,7 @@ void gdb_handle_query_rcmd(GArray *params, void *user_ctx) * Execution state helpers */ -void gdb_handle_query_attached(GArray *params, void *user_ctx) +void gdb_handle_query_attached(GArray *params, void *ctx) { gdb_put_packet("1"); } diff --git a/gdbstub/user-target.c b/gdbstub/user-target.c index c4bba4c72c7..6646684a4c6 100644 --- a/gdbstub/user-target.c +++ b/gdbstub/user-target.c @@ -204,7 +204,7 @@ int gdb_target_signal_to_gdb(int sig) int gdb_get_cpu_index(CPUState *cpu) { - TaskState *ts = (TaskState *) cpu->opaque; + TaskState *ts = get_task_state(cpu); return ts ? ts->ts_tid : -1; } @@ -399,7 +399,7 @@ void gdb_handle_query_xfer_exec_file(GArray *params, void *user_ctx) return; } - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); if (!ts || !ts->bprm || !ts->bprm->filename) { gdb_put_packet("E00"); return; @@ -418,3 +418,8 @@ void gdb_handle_query_xfer_exec_file(GArray *params, void *user_ctx) ts->bprm->filename + offset); gdb_put_strbuf(); } + +int gdb_target_sigtrap(void) +{ + return TARGET_SIGTRAP; +} diff --git a/gdbstub/user.c b/gdbstub/user.c index dbe1d9b8875..edeb72efebc 100644 --- a/gdbstub/user.c +++ b/gdbstub/user.c @@ -10,6 +10,7 @@ */ #include "qemu/osdep.h" +#include "qemu/bitops.h" #include "qemu/cutils.h" #include "qemu/sockets.h" #include "exec/hwaddr.h" @@ -21,11 +22,81 @@ #include "trace.h" #include "internals.h" +#define GDB_NR_SYSCALLS 1024 +typedef unsigned long GDBSyscallsMask[BITS_TO_LONGS(GDB_NR_SYSCALLS)]; + +/* + * Forked child talks to its parent in order to let GDB enforce the + * follow-fork-mode. This happens inside a start_exclusive() section, so that + * the other threads, which may be forking too, do not interfere. 
The + * implementation relies on GDB not sending $vCont until it has detached + * either from the parent (follow-fork-mode child) or from the child + * (follow-fork-mode parent). + * + * The parent and the child share the GDB socket; at any given time only one + * of them is allowed to use it, as is reflected in the respective fork_state. + * This is negotiated via the fork_sockets pair as a reaction to $Hg. + * + * Below is a short summary of the possible state transitions: + * + * ENABLED : Terminal state. + * DISABLED : Terminal state. + * ACTIVE : Parent initial state. + * INACTIVE : Child initial state. + * ACTIVE -> DEACTIVATING: On $Hg. + * ACTIVE -> ENABLING : On $D. + * ACTIVE -> DISABLING : On $D. + * ACTIVE -> DISABLED : On communication error. + * DEACTIVATING -> INACTIVE : On gdb_read_byte() return. + * DEACTIVATING -> DISABLED : On communication error. + * INACTIVE -> ACTIVE : On $Hg in the peer. + * INACTIVE -> ENABLE : On $D in the peer. + * INACTIVE -> DISABLE : On $D in the peer. + * INACTIVE -> DISABLED : On communication error. + * ENABLING -> ENABLED : On gdb_read_byte() return. + * ENABLING -> DISABLED : On communication error. + * DISABLING -> DISABLED : On gdb_read_byte() return. + */ +enum GDBForkState { + /* Fully owning the GDB socket. */ + GDB_FORK_ENABLED, + /* Working with the GDB socket; the peer is inactive. */ + GDB_FORK_ACTIVE, + /* Handing off the GDB socket to the peer. */ + GDB_FORK_DEACTIVATING, + /* The peer is working with the GDB socket. */ + GDB_FORK_INACTIVE, + /* Asking the peer to close its GDB socket fd. */ + GDB_FORK_ENABLING, + /* Asking the peer to take over, closing our GDB socket fd. */ + GDB_FORK_DISABLING, + /* The peer has taken over, our GDB socket fd is closed. */ + GDB_FORK_DISABLED, +}; + +enum GDBForkMessage { + GDB_FORK_ACTIVATE = 'a', + GDB_FORK_ENABLE = 'e', + GDB_FORK_DISABLE = 'd', +}; + /* User-mode specific state */ typedef struct { int fd; char *socket_path; int running_state; + /* + * Store syscalls mask without memory allocation in order to avoid + * implementing synchronization. + */ + bool catch_all_syscalls; + GDBSyscallsMask catch_syscalls_mask; + bool fork_events; + enum GDBForkState fork_state; + int fork_sockets[2]; + pid_t fork_peer_pid, fork_peer_tid; + uint8_t siginfo[MAX_SIGINFO_LENGTH]; + unsigned long siginfo_len; } GDBUserState; static GDBUserState gdbserver_user_state; @@ -121,7 +192,8 @@ void gdb_qemu_exit(int code) exit(code); } -int gdb_handlesig(CPUState *cpu, int sig) +int gdb_handlesig(CPUState *cpu, int sig, const char *reason, void *siginfo, + int siginfo_len) { char buf[256]; int n; @@ -130,6 +202,18 @@ int gdb_handlesig(CPUState *cpu, int sig) return sig; } + if (siginfo) { + /* + * Save target-specific siginfo. + * + * siginfo size, i.e. siginfo_len, is asserted at compile-time to fit in + * gdbserver_user_state.siginfo, usually in the source file calling + * gdb_handlesig. See, for instance, {linux,bsd}-user/signal.c. 
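[Illustration, not part of the patch] The two extra gdb_handlesig() parameters let the user-mode front ends pass a stop-reason string and the raw target siginfo when a signal is delivered, so a later qXfer:siginfo:read can return it. A hedged sketch of the calling side; target_siginfo_t and the compile-time size check are assumptions based on the comment above:

    /* Assumed caller-side pattern in {linux,bsd}-user signal delivery. */
    QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);

    static int notify_gdb_of_signal(CPUState *cpu, int sig,
                                    target_siginfo_t *info)
    {
        /*
         * gdb_handlesig() stashes *info for qXfer:siginfo:read and gives
         * GDB the chance to swallow or remap the signal; the (possibly
         * changed) signal number comes back as the return value.
         */
        return gdb_handlesig(cpu, sig, NULL, info, sizeof(*info));
    }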
+ */ + memcpy(gdbserver_user_state.siginfo, siginfo, siginfo_len); + gdbserver_user_state.siginfo_len = siginfo_len; + } + /* disable single step if it was enabled */ cpu_single_step(cpu, 0); tb_flush(cpu); @@ -141,6 +225,9 @@ int gdb_handlesig(CPUState *cpu, int sig) "T%02xthread:", gdb_target_signal_to_gdb(sig)); gdb_append_thread_id(cpu, gdbserver_state.str_buf); g_string_append_c(gdbserver_state.str_buf, ';'); + if (reason) { + g_string_append(gdbserver_state.str_buf, reason); + } gdb_put_strbuf(); gdbserver_state.allow_stop_reply = false; } @@ -343,16 +430,192 @@ int gdbserver_start(const char *port_or_path) return -1; } -/* Disable gdb stub for child processes. */ -void gdbserver_fork(CPUState *cpu) +void gdbserver_fork_start(void) { if (!gdbserver_state.init || gdbserver_user_state.fd < 0) { return; } + if (!gdbserver_user_state.fork_events || + qemu_socketpair(AF_UNIX, SOCK_STREAM, 0, + gdbserver_user_state.fork_sockets) < 0) { + gdbserver_user_state.fork_state = GDB_FORK_DISABLED; + return; + } + gdbserver_user_state.fork_state = GDB_FORK_INACTIVE; + gdbserver_user_state.fork_peer_pid = getpid(); + gdbserver_user_state.fork_peer_tid = qemu_get_thread_id(); +} + +static void disable_gdbstub(CPUState *thread_cpu) +{ + CPUState *cpu; + close(gdbserver_user_state.fd); gdbserver_user_state.fd = -1; - cpu_breakpoint_remove_all(cpu, BP_GDB); - /* no cpu_watchpoint_remove_all for user-mode */ + CPU_FOREACH(cpu) { + cpu_breakpoint_remove_all(cpu, BP_GDB); + /* no cpu_watchpoint_remove_all for user-mode */ + cpu_single_step(cpu, 0); + } + tb_flush(thread_cpu); +} + +void gdbserver_fork_end(CPUState *cpu, pid_t pid) +{ + char b; + int fd; + + if (!gdbserver_state.init || gdbserver_user_state.fd < 0) { + return; + } + + if (pid == -1) { + if (gdbserver_user_state.fork_state != GDB_FORK_DISABLED) { + g_assert(gdbserver_user_state.fork_state == GDB_FORK_INACTIVE); + close(gdbserver_user_state.fork_sockets[0]); + close(gdbserver_user_state.fork_sockets[1]); + } + return; + } + + if (gdbserver_user_state.fork_state == GDB_FORK_DISABLED) { + if (pid == 0) { + disable_gdbstub(cpu); + } + return; + } + + if (pid == 0) { + close(gdbserver_user_state.fork_sockets[0]); + fd = gdbserver_user_state.fork_sockets[1]; + g_assert(gdbserver_state.process_num == 1); + g_assert(gdbserver_state.processes[0].pid == + gdbserver_user_state.fork_peer_pid); + g_assert(gdbserver_state.processes[0].attached); + gdbserver_state.processes[0].pid = getpid(); + } else { + close(gdbserver_user_state.fork_sockets[1]); + fd = gdbserver_user_state.fork_sockets[0]; + gdbserver_user_state.fork_state = GDB_FORK_ACTIVE; + gdbserver_user_state.fork_peer_pid = pid; + gdbserver_user_state.fork_peer_tid = pid; + + if (!gdbserver_state.allow_stop_reply) { + goto fail; + } + g_string_printf(gdbserver_state.str_buf, + "T%02xfork:p%02x.%02x;thread:p%02x.%02x;", + gdb_target_signal_to_gdb(gdb_target_sigtrap()), + pid, pid, (int)getpid(), qemu_get_thread_id()); + gdb_put_strbuf(); + } + + gdbserver_state.state = RS_IDLE; + gdbserver_state.allow_stop_reply = false; + gdbserver_user_state.running_state = 0; + for (;;) { + switch (gdbserver_user_state.fork_state) { + case GDB_FORK_ENABLED: + if (gdbserver_user_state.running_state) { + close(fd); + return; + } + QEMU_FALLTHROUGH; + case GDB_FORK_ACTIVE: + if (read(gdbserver_user_state.fd, &b, 1) != 1) { + goto fail; + } + gdb_read_byte(b); + break; + case GDB_FORK_DEACTIVATING: + b = GDB_FORK_ACTIVATE; + if (write(fd, &b, 1) != 1) { + goto fail; + } + gdbserver_user_state.fork_state = 
GDB_FORK_INACTIVE; + break; + case GDB_FORK_INACTIVE: + if (read(fd, &b, 1) != 1) { + goto fail; + } + switch (b) { + case GDB_FORK_ACTIVATE: + gdbserver_user_state.fork_state = GDB_FORK_ACTIVE; + break; + case GDB_FORK_ENABLE: + gdbserver_user_state.fork_state = GDB_FORK_ENABLED; + break; + case GDB_FORK_DISABLE: + gdbserver_user_state.fork_state = GDB_FORK_DISABLED; + break; + default: + g_assert_not_reached(); + } + break; + case GDB_FORK_ENABLING: + b = GDB_FORK_DISABLE; + if (write(fd, &b, 1) != 1) { + goto fail; + } + gdbserver_user_state.fork_state = GDB_FORK_ENABLED; + break; + case GDB_FORK_DISABLING: + b = GDB_FORK_ENABLE; + if (write(fd, &b, 1) != 1) { + goto fail; + } + gdbserver_user_state.fork_state = GDB_FORK_DISABLED; + break; + case GDB_FORK_DISABLED: + close(fd); + disable_gdbstub(cpu); + return; + default: + g_assert_not_reached(); + } + } + +fail: + close(fd); + if (pid == 0) { + disable_gdbstub(cpu); + } +} + +void gdb_handle_query_supported_user(const char *gdb_supported) +{ + if (strstr(gdb_supported, "fork-events+")) { + gdbserver_user_state.fork_events = true; + } + g_string_append(gdbserver_state.str_buf, ";fork-events+"); +} + +bool gdb_handle_set_thread_user(uint32_t pid, uint32_t tid) +{ + if (gdbserver_user_state.fork_state == GDB_FORK_ACTIVE && + pid == gdbserver_user_state.fork_peer_pid && + tid == gdbserver_user_state.fork_peer_tid) { + gdbserver_user_state.fork_state = GDB_FORK_DEACTIVATING; + gdb_put_packet("OK"); + return true; + } + return false; +} + +bool gdb_handle_detach_user(uint32_t pid) +{ + bool enable; + + if (gdbserver_user_state.fork_state == GDB_FORK_ACTIVE) { + enable = pid == gdbserver_user_state.fork_peer_pid; + if (enable || pid == getpid()) { + gdbserver_user_state.fork_state = enable ? GDB_FORK_ENABLING : + GDB_FORK_DISABLING; + gdb_put_packet("OK"); + return true; + } + } + return false; } /* @@ -497,5 +760,117 @@ void gdb_breakpoint_remove_all(CPUState *cs) void gdb_syscall_handling(const char *syscall_packet) { gdb_put_packet(syscall_packet); - gdb_handlesig(gdbserver_state.c_cpu, 0); + gdb_handlesig(gdbserver_state.c_cpu, 0, NULL, NULL, 0); +} + +static bool should_catch_syscall(int num) +{ + if (gdbserver_user_state.catch_all_syscalls) { + return true; + } + if (num < 0 || num >= GDB_NR_SYSCALLS) { + return false; + } + return test_bit(num, gdbserver_user_state.catch_syscalls_mask); +} + +void gdb_syscall_entry(CPUState *cs, int num) +{ + if (should_catch_syscall(num)) { + g_autofree char *reason = g_strdup_printf("syscall_entry:%x;", num); + gdb_handlesig(cs, gdb_target_sigtrap(), reason, NULL, 0); + } +} + +void gdb_syscall_return(CPUState *cs, int num) +{ + if (should_catch_syscall(num)) { + g_autofree char *reason = g_strdup_printf("syscall_return:%x;", num); + gdb_handlesig(cs, gdb_target_sigtrap(), reason, NULL, 0); + } +} + +void gdb_handle_set_catch_syscalls(GArray *params, void *user_ctx) +{ + const char *param = get_param(params, 0)->data; + GDBSyscallsMask catch_syscalls_mask; + bool catch_all_syscalls; + unsigned int num; + const char *p; + + /* "0" means not catching any syscalls. */ + if (strcmp(param, "0") == 0) { + gdbserver_user_state.catch_all_syscalls = false; + memset(gdbserver_user_state.catch_syscalls_mask, 0, + sizeof(gdbserver_user_state.catch_syscalls_mask)); + gdb_put_packet("OK"); + return; + } + + /* "1" means catching all syscalls. */ + if (strcmp(param, "1") == 0) { + gdbserver_user_state.catch_all_syscalls = true; + gdb_put_packet("OK"); + return; + } + + /* + * "1;..." 
means catching only the specified syscalls. + * The syscall list must not be empty. + */ + if (param[0] == '1' && param[1] == ';') { + catch_all_syscalls = false; + memset(catch_syscalls_mask, 0, sizeof(catch_syscalls_mask)); + for (p = &param[2];; p++) { + if (qemu_strtoui(p, &p, 16, &num) || (*p && *p != ';')) { + goto err; + } + if (num >= GDB_NR_SYSCALLS) { + /* + * Fall back to reporting all syscalls. Reporting extra + * syscalls is inefficient, but the spec explicitly allows it. + * Keep parsing in case there is a syntax error ahead. + */ + catch_all_syscalls = true; + } else { + set_bit(num, catch_syscalls_mask); + } + if (!*p) { + break; + } + } + gdbserver_user_state.catch_all_syscalls = catch_all_syscalls; + if (!catch_all_syscalls) { + memcpy(gdbserver_user_state.catch_syscalls_mask, + catch_syscalls_mask, sizeof(catch_syscalls_mask)); + } + gdb_put_packet("OK"); + return; + } + +err: + gdb_put_packet("E00"); +} + +void gdb_handle_query_xfer_siginfo(GArray *params, void *user_ctx) +{ + unsigned long offset, len; + uint8_t *siginfo_offset; + + offset = get_param(params, 0)->val_ul; + len = get_param(params, 1)->val_ul; + + if (offset + len > gdbserver_user_state.siginfo_len) { + /* Invalid offset and/or requested length. */ + gdb_put_packet("E01"); + return; + } + + siginfo_offset = (uint8_t *)gdbserver_user_state.siginfo + offset; + + /* Reply */ + g_string_assign(gdbserver_state.str_buf, "l"); + gdb_memtox(gdbserver_state.str_buf, (const char *)siginfo_offset, len); + gdb_put_packet_binary(gdbserver_state.str_buf->str, + gdbserver_state.str_buf->len, true); } diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx index f5b37eb74ab..ad1b1306e34 100644 --- a/hmp-commands-info.hx +++ b/hmp-commands-info.hx @@ -1,8 +1,8 @@ -HXCOMM Use DEFHEADING() to define headings in both help text and rST. -HXCOMM Text between SRST and ERST is copied to the rST version and -HXCOMM discarded from C version. -HXCOMM DEF(command, args, callback, arg_string, help) is used to construct -HXCOMM monitor info commands +HXCOMM See docs/devel/docs.rst for the format of this file. +HXCOMM +HXCOMM This file defines the contents of an array of HMPCommand structs +HXCOMM which specify the name, behaviour and help text for HMP commands. +HXCOMM Text between SRST and ERST is rST format documentation. HXCOMM HXCOMM can be used for comments, discarded from both rST and C. HXCOMM HXCOMM In this file, generally SRST fragments should have two extra @@ -540,9 +540,9 @@ ERST { .name = "qtree", - .args_type = "", - .params = "", - .help = "show device tree", + .args_type = "brief:-b", + .params = "[-b]", + .help = "show device tree (-b: brief, omit properties)", .cmd = hmp_info_qtree, }, diff --git a/hmp-commands.hx b/hmp-commands.hx index 765349ed149..2e2a3bcf989 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -1,8 +1,8 @@ -HXCOMM Use DEFHEADING() to define headings in both help text and rST. -HXCOMM Text between SRST and ERST is copied to the rST version and -HXCOMM discarded from C version. -HXCOMM DEF(command, args, callback, arg_string, help) is used to construct -HXCOMM monitor commands +HXCOMM See docs/devel/docs.rst for the format of this file. +HXCOMM +HXCOMM This file defines the contents of an array of HMPCommand structs +HXCOMM which specify the name, behaviour and help text for HMP commands. +HXCOMM Text between SRST and ERST is rST format documentation. HXCOMM HXCOMM can be used for comments, discarded from both rST and C.
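[Illustration, not part of the patch] To make the QCatchSyscalls handling added above concrete: "QCatchSyscalls:0" disables catching, "QCatchSyscalls:1" catches every syscall, and "QCatchSyscalls:1;5a;e7" sets only those bits in the mask, so only those two syscalls raise a stop with a syscall_entry:/syscall_return: reason. A tiny sketch of the resulting state, using the patch's own types (the syscall numbers are made up):

    static void example_catch_mask(void)
    {
        /* State equivalent to receiving "QCatchSyscalls:1;5a;e7". */
        GDBSyscallsMask mask = {};

        set_bit(0x5a, mask);
        set_bit(0xe7, mask);

        g_assert(test_bit(0x5a, mask));     /* 0x5a and 0xe7 now stop */
        g_assert(!test_bit(0x01, mask));    /* everything else runs on */
    }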
@@ -398,19 +398,6 @@ SRST If called with option off, the emulation returns to normal mode. ERST - { - .name = "singlestep", - .args_type = "option:s?", - .params = "[on|off]", - .help = "deprecated synonym for one-insn-per-tb", - .cmd = hmp_one_insn_per_tb, - }, - -SRST -``singlestep [off]`` - This is a deprecated synonym for the one-insn-per-tb command. -ERST - { .name = "stop|s", .args_type = "", @@ -1425,7 +1412,7 @@ ERST { .name = "watchdog_action", .args_type = "action:s", - .params = "[reset|shutdown|poweroff|pause|debug|none]", + .params = "[reset|shutdown|poweroff|pause|debug|none|inject-nmi]", .help = "change watchdog action", .cmd = hmp_watchdog_action, .command_completion = watchdog_action_completion, diff --git a/hw/9pfs/meson.build b/hw/9pfs/meson.build index 2944ea63c38..f1b62fa8c80 100644 --- a/hw/9pfs/meson.build +++ b/hw/9pfs/meson.build @@ -13,8 +13,11 @@ fs_ss.add(files( 'coth.c', 'coxattr.c', )) -fs_ss.add(when: 'CONFIG_LINUX', if_true: files('9p-util-linux.c')) -fs_ss.add(when: 'CONFIG_DARWIN', if_true: files('9p-util-darwin.c')) +if host_os == 'darwin' + fs_ss.add(files('9p-util-darwin.c')) +elif host_os == 'linux' + fs_ss.add(files('9p-util-linux.c')) +endif fs_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-9p-backend.c')) system_ss.add_all(when: 'CONFIG_FSDEV_9P', if_true: fs_ss) diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c index 5f522e68e9f..efa41cfd73f 100644 --- a/hw/9pfs/virtio-9p-device.c +++ b/hw/9pfs/virtio-9p-device.c @@ -237,7 +237,7 @@ static const VMStateDescription vmstate_virtio_9p = { .name = "virtio-9p", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/Kconfig b/hw/Kconfig index 9ca7b38c31f..2c00936c28e 100644 --- a/hw/Kconfig +++ b/hw/Kconfig @@ -9,6 +9,7 @@ source core/Kconfig source cxl/Kconfig source display/Kconfig source dma/Kconfig +source fsi/Kconfig source gpio/Kconfig source hyperv/Kconfig source i2c/Kconfig diff --git a/hw/acpi/acpi_generic_initiator.c b/hw/acpi/acpi_generic_initiator.c new file mode 100644 index 00000000000..17b9a052f59 --- /dev/null +++ b/hw/acpi/acpi_generic_initiator.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. 
All rights reserved + */ + +#include "qemu/osdep.h" +#include "hw/acpi/acpi_generic_initiator.h" +#include "hw/acpi/aml-build.h" +#include "hw/boards.h" +#include "hw/pci/pci_device.h" +#include "qemu/error-report.h" + +typedef struct AcpiGenericInitiatorClass { + ObjectClass parent_class; +} AcpiGenericInitiatorClass; + +OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiGenericInitiator, acpi_generic_initiator, + ACPI_GENERIC_INITIATOR, OBJECT, + { TYPE_USER_CREATABLE }, + { NULL }) + +OBJECT_DECLARE_SIMPLE_TYPE(AcpiGenericInitiator, ACPI_GENERIC_INITIATOR) + +static void acpi_generic_initiator_init(Object *obj) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + gi->node = MAX_NODES; + gi->pci_dev = NULL; +} + +static void acpi_generic_initiator_finalize(Object *obj) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + g_free(gi->pci_dev); +} + +static void acpi_generic_initiator_set_pci_device(Object *obj, const char *val, + Error **errp) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + gi->pci_dev = g_strdup(val); +} + +static void acpi_generic_initiator_set_node(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + MachineState *ms = MACHINE(qdev_get_machine()); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + if (value >= MAX_NODES) { + error_printf("%s: Invalid NUMA node specified\n", + TYPE_ACPI_GENERIC_INITIATOR); + exit(1); + } + + gi->node = value; + ms->numa_state->nodes[gi->node].has_gi = true; +} + +static void acpi_generic_initiator_class_init(ObjectClass *oc, void *data) +{ + object_class_property_add_str(oc, "pci-dev", NULL, + acpi_generic_initiator_set_pci_device); + object_class_property_add(oc, "node", "int", NULL, + acpi_generic_initiator_set_node, NULL, NULL); +} + +/* + * ACPI 6.3: + * Table 5-78 Generic Initiator Affinity Structure + */ +static void +build_srat_generic_pci_initiator_affinity(GArray *table_data, int node, + PCIDeviceHandle *handle) +{ + uint8_t index; + + build_append_int_noprefix(table_data, 5, 1); /* Type */ + build_append_int_noprefix(table_data, 32, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 1); /* Reserved */ + build_append_int_noprefix(table_data, 1, 1); /* Device Handle Type: PCI */ + build_append_int_noprefix(table_data, node, 4); /* Proximity Domain */ + + /* Device Handle - PCI */ + build_append_int_noprefix(table_data, handle->segment, 2); + build_append_int_noprefix(table_data, handle->bdf, 2); + for (index = 0; index < 12; index++) { + build_append_int_noprefix(table_data, 0, 1); + } + + build_append_int_noprefix(table_data, GEN_AFFINITY_ENABLED, 4); /* Flags */ + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ +} + +static int build_all_acpi_generic_initiators(Object *obj, void *opaque) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + AcpiGenericInitiator *gi; + GArray *table_data = opaque; + PCIDeviceHandle dev_handle; + PCIDevice *pci_dev; + Object *o; + + if (!object_dynamic_cast(obj, TYPE_ACPI_GENERIC_INITIATOR)) { + return 0; + } + + gi = ACPI_GENERIC_INITIATOR(obj); + if (gi->node >= ms->numa_state->num_nodes) { + error_printf("%s: Specified node %d is invalid.\n", + TYPE_ACPI_GENERIC_INITIATOR, gi->node); + exit(1); + } + + o = object_resolve_path_type(gi->pci_dev, TYPE_PCI_DEVICE, NULL); + if (!o) { + error_printf("%s: Specified device must be a PCI device.\n", + TYPE_ACPI_GENERIC_INITIATOR); + exit(1); + } + + pci_dev = PCI_DEVICE(o); + + 
dev_handle.segment = 0; + dev_handle.bdf = PCI_BUILD_BDF(pci_bus_num(pci_get_bus(pci_dev)), + pci_dev->devfn); + + build_srat_generic_pci_initiator_affinity(table_data, + gi->node, &dev_handle); + + return 0; +} + +void build_srat_generic_pci_initiator(GArray *table_data) +{ + object_child_foreach_recursive(object_get_root(), + build_all_acpi_generic_initiators, + table_data); +} diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c index af66bde0f55..6d4517cfbe3 100644 --- a/hw/acpi/aml-build.c +++ b/hw/acpi/aml-build.c @@ -1994,6 +1994,59 @@ static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags, } } +void build_spcr(GArray *table_data, BIOSLinker *linker, + const AcpiSpcrData *f, const uint8_t rev, + const char *oem_id, const char *oem_table_id) +{ + AcpiTable table = { .sig = "SPCR", .rev = rev, .oem_id = oem_id, + .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); + /* Interface type */ + build_append_int_noprefix(table_data, f->interface_type, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 3); + /* Base Address */ + build_append_gas(table_data, f->base_addr.id, f->base_addr.width, + f->base_addr.offset, f->base_addr.size, + f->base_addr.addr); + /* Interrupt type */ + build_append_int_noprefix(table_data, f->interrupt_type, 1); + /* IRQ */ + build_append_int_noprefix(table_data, f->pc_interrupt, 1); + /* Global System Interrupt */ + build_append_int_noprefix(table_data, f->interrupt, 4); + /* Baud Rate */ + build_append_int_noprefix(table_data, f->baud_rate, 1); + /* Parity */ + build_append_int_noprefix(table_data, f->parity, 1); + /* Stop Bits */ + build_append_int_noprefix(table_data, f->stop_bits, 1); + /* Flow Control */ + build_append_int_noprefix(table_data, f->flow_control, 1); + /* Language */ + build_append_int_noprefix(table_data, f->language, 1); + /* Terminal Type */ + build_append_int_noprefix(table_data, f->terminal_type, 1); + /* PCI Device ID */ + build_append_int_noprefix(table_data, f->pci_device_id, 2); + /* PCI Vendor ID */ + build_append_int_noprefix(table_data, f->pci_vendor_id, 2); + /* PCI Bus Number */ + build_append_int_noprefix(table_data, f->pci_bus, 1); + /* PCI Device Number */ + build_append_int_noprefix(table_data, f->pci_device, 1); + /* PCI Function Number */ + build_append_int_noprefix(table_data, f->pci_function, 1); + /* PCI Flags */ + build_append_int_noprefix(table_data, f->pci_flags, 4); + /* PCI Segment */ + build_append_int_noprefix(table_data, f->pci_segment, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); + + acpi_table_end(linker, &table); +} /* * ACPI spec, Revision 6.3 * 5.2.29 Processor Properties Topology Table (PPTT) diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c index 011d2c6c2dd..2d81c1e7908 100644 --- a/hw/acpi/cpu.c +++ b/hw/acpi/cpu.c @@ -297,7 +297,7 @@ static const VMStateDescription vmstate_cpuhp_sts = { .name = "CPU hotplug device state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(is_inserting, AcpiCpuStatus), VMSTATE_BOOL(is_removing, AcpiCpuStatus), VMSTATE_UINT32(ost_event, AcpiCpuStatus), @@ -310,7 +310,7 @@ const VMStateDescription vmstate_cpu_hotplug = { .name = "CPU hotplug state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(selector, CPUHotplugState), VMSTATE_UINT8(command, CPUHotplugState), VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, CPUHotplugState, dev_count, diff --git 
a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c index 634bbecb319..83b8bc5deb8 100644 --- a/hw/acpi/cpu_hotplug.c +++ b/hw/acpi/cpu_hotplug.c @@ -13,8 +13,8 @@ #include "hw/acpi/cpu_hotplug.h" #include "qapi/error.h" #include "hw/core/cpu.h" -#include "hw/i386/pc.h" -#include "hw/pci/pci.h" +#include "hw/i386/x86.h" +#include "hw/pci/pci_device.h" #include "qemu/error-report.h" #define CPU_EJECT_METHOD "CPEJ" @@ -59,7 +59,8 @@ static const MemoryRegionOps AcpiCpuHotplug_ops = { }, }; -static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu) +static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu, + bool *swtchd_to_modern) { CPUClass *k = CPU_GET_CLASS(cpu); int64_t cpu_id; @@ -68,23 +69,34 @@ static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu) if ((cpu_id / 8) >= ACPI_GPE_PROC_LEN) { object_property_set_bool(g->device, "cpu-hotplug-legacy", false, &error_abort); + *swtchd_to_modern = true; return; } + *swtchd_to_modern = false; g->sts[cpu_id / 8] |= (1 << (cpu_id % 8)); } void legacy_acpi_cpu_plug_cb(HotplugHandler *hotplug_dev, AcpiCpuHotplug *g, DeviceState *dev, Error **errp) { - acpi_set_cpu_present_bit(g, CPU(dev)); - acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS); + bool swtchd_to_modern; + Error *local_err = NULL; + + acpi_set_cpu_present_bit(g, CPU(dev), &swtchd_to_modern); + if (swtchd_to_modern) { + /* propagate the hotplug to the modern interface */ + hotplug_handler_plug(hotplug_dev, dev, &local_err); + } else { + acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS); + } } void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner, AcpiCpuHotplug *gpe_cpu, uint16_t base) { CPUState *cpu; + bool swtchd_to_modern; memory_region_init_io(&gpe_cpu->io, owner, &AcpiCpuHotplug_ops, gpe_cpu, "acpi-cpu-hotplug", ACPI_GPE_PROC_LEN); @@ -92,7 +104,7 @@ void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner, gpe_cpu->device = owner; CPU_FOREACH(cpu) { - acpi_set_cpu_present_bit(gpe_cpu, cpu); + acpi_set_cpu_present_bit(gpe_cpu, cpu, &swtchd_to_modern); } } diff --git a/hw/acpi/erst.c b/hw/acpi/erst.c index ba751dc60e2..b2f1b136301 100644 --- a/hw/acpi/erst.c +++ b/hw/acpi/erst.c @@ -932,7 +932,7 @@ static const VMStateDescription erst_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = erst_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(operation, ERSTDeviceState), VMSTATE_UINT8(busy_status, ERSTDeviceState), VMSTATE_UINT8(command_status, ERSTDeviceState), diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c index a3d31631fe0..2d6e91b124e 100644 --- a/hw/acpi/generic_event_device.c +++ b/hw/acpi/generic_event_device.c @@ -312,7 +312,7 @@ static const VMStateDescription vmstate_memhp_state = { .name = "acpi-ged/memhp", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_MEMORY_HOTPLUG(memhp_state, AcpiGedState), VMSTATE_END_OF_LIST() } @@ -322,7 +322,7 @@ static const VMStateDescription vmstate_ged_state = { .name = "acpi-ged-state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(sel, GEDState), VMSTATE_END_OF_LIST() } @@ -332,7 +332,7 @@ static const VMStateDescription vmstate_ghes = { .name = "acpi-ghes", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ghes_addr_le, AcpiGhesState), 
VMSTATE_END_OF_LIST() }, @@ -349,7 +349,7 @@ static const VMStateDescription vmstate_ghes_state = { .version_id = 1, .minimum_version_id = 1, .needed = ghes_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(ghes_state, AcpiGedState, 1, vmstate_ghes, AcpiGhesState), VMSTATE_END_OF_LIST() @@ -360,11 +360,11 @@ static const VMStateDescription vmstate_acpi_ged = { .name = "acpi-ged", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(ged_state, AcpiGedState, 1, vmstate_ged_state, GEDState), VMSTATE_END_OF_LIST(), }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_memhp_state, &vmstate_ghes_state, NULL diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c index 3042d223c82..9b1662b6b8a 100644 --- a/hw/acpi/hmat.c +++ b/hw/acpi/hmat.c @@ -78,6 +78,7 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb, uint32_t *initiator_list) { int i, index; + uint32_t initiator_to_index[MAX_NODES] = {}; HMAT_LB_Data *lb_data; uint16_t *entry_list; uint32_t base; @@ -121,6 +122,8 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb, /* Initiator Proximity Domain List */ for (i = 0; i < num_initiator; i++) { build_append_int_noprefix(table_data, initiator_list[i], 4); + /* Reverse mapping for array possitions */ + initiator_to_index[initiator_list[i]] = i; } /* Target Proximity Domain List */ @@ -132,7 +135,8 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb, entry_list = g_new0(uint16_t, num_initiator * num_target); for (i = 0; i < hmat_lb->list->len; i++) { lb_data = &g_array_index(hmat_lb->list, HMAT_LB_Data, i); - index = lb_data->initiator * num_target + lb_data->target; + index = initiator_to_index[lb_data->initiator] * num_target + + lb_data->target; entry_list[index] = (uint16_t)(lb_data->data / hmat_lb->base); } @@ -204,6 +208,13 @@ static void hmat_build_table_structs(GArray *table_data, NumaState *numa_state) build_append_int_noprefix(table_data, 0, 4); /* Reserved */ for (i = 0; i < numa_state->num_nodes; i++) { + /* + * Linux rejects whole HMAT table if a node with no memory + * has one of these structures listing it as a target. 
+ */ + if (!numa_state->nodes[i].node_mem) { + continue; + } flags = 0; if (numa_state->nodes[i].initiator < MAX_NODES) { @@ -214,7 +225,7 @@ static void hmat_build_table_structs(GArray *table_data, NumaState *numa_state) } for (i = 0; i < numa_state->num_nodes; i++) { - if (numa_state->nodes[i].has_cpu) { + if (numa_state->nodes[i].has_cpu || numa_state->nodes[i].has_gi) { initiator_list[num_initiator++] = i; } } diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c index 25e2c7243e0..573d032e8e5 100644 --- a/hw/acpi/ich9.c +++ b/hw/acpi/ich9.c @@ -164,7 +164,7 @@ static const VMStateDescription vmstate_memhp_state = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_test_use_memhp, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, ICH9LPCPMRegs), VMSTATE_END_OF_LIST() } @@ -181,7 +181,7 @@ static const VMStateDescription vmstate_tco_io_state = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_test_use_tco, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(tco_regs, ICH9LPCPMRegs, 1, vmstate_tco_io_sts, TCOIORegs), VMSTATE_END_OF_LIST() @@ -208,7 +208,7 @@ static const VMStateDescription vmstate_cpuhp_state = { .minimum_version_id = 1, .needed = vmstate_test_use_cpuhp, .pre_load = vmstate_cpuhp_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CPU_HOTPLUG(cpuhp_state, ICH9LPCPMRegs), VMSTATE_END_OF_LIST() } @@ -226,7 +226,7 @@ static const VMStateDescription vmstate_pcihp_state = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_test_use_pcihp, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_HOTPLUG(acpi_pci_hotplug, ICH9LPCPMRegs, NULL, NULL), @@ -239,7 +239,7 @@ const VMStateDescription vmstate_ich9_pm = { .version_id = 1, .minimum_version_id = 1, .post_load = ich9_pm_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(acpi_regs.pm1.evt.sts, ICH9LPCPMRegs), VMSTATE_UINT16(acpi_regs.pm1.evt.en, ICH9LPCPMRegs), VMSTATE_UINT16(acpi_regs.pm1.cnt.cnt, ICH9LPCPMRegs), @@ -251,7 +251,7 @@ const VMStateDescription vmstate_ich9_pm = { VMSTATE_UINT32(smi_sts, ICH9LPCPMRegs), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_memhp_state, &vmstate_tco_io_state, &vmstate_cpuhp_state, diff --git a/hw/acpi/ich9_tco.c b/hw/acpi/ich9_tco.c index 1540f4fd461..81606219f73 100644 --- a/hw/acpi/ich9_tco.c +++ b/hw/acpi/ich9_tco.c @@ -254,7 +254,7 @@ const VMStateDescription vmstate_tco_io_sts = { .name = "tco io device status", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(tco.rld, TCOIORegs), VMSTATE_UINT8(tco.din, TCOIORegs), VMSTATE_UINT8(tco.dout, TCOIORegs), diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c index 0b883df8133..de6f974ebba 100644 --- a/hw/acpi/memory_hotplug.c +++ b/hw/acpi/memory_hotplug.c @@ -317,7 +317,7 @@ static const VMStateDescription vmstate_memhp_sts = { .name = "memory hotplug device state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(is_enabled, MemStatus), VMSTATE_BOOL(is_inserting, MemStatus), VMSTATE_UINT32(ost_event, MemStatus), @@ -330,7 +330,7 @@ const VMStateDescription vmstate_memory_hotplug = { .name = "memory hotplug state", .version_id = 1, .minimum_version_id = 1, - .fields = 
(VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(selector, MemHotplugState), VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, MemHotplugState, dev_count, vmstate_memhp_sts, MemStatus), diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build index fc1b952379a..fa5c07db906 100644 --- a/hw/acpi/meson.build +++ b/hw/acpi/meson.build @@ -1,5 +1,6 @@ acpi_ss = ss.source_set() acpi_ss.add(files( + 'acpi_generic_initiator.c', 'acpi_interface.c', 'aml-build.c', 'bios-linker-loader.c', @@ -33,9 +34,4 @@ endif system_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c', 'ghes-stub.c', 'acpi_interface.c')) system_ss.add(when: 'CONFIG_ACPI_PCI_BRIDGE', if_false: files('pci-bridge-stub.c')) system_ss.add_all(when: 'CONFIG_ACPI', if_true: acpi_ss) -system_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-stub.c', 'aml-build-stub.c', - 'acpi-x86-stub.c', 'ipmi-stub.c', 'ghes-stub.c', - 'acpi-mem-hotplug-stub.c', 'acpi-cpu-hotplug-stub.c', - 'acpi-pci-hotplug-stub.c', 'acpi-nvdimm-stub.c', - 'cxl-stub.c', 'pci-bridge-stub.c')) system_ss.add(files('acpi-qmp-cmds.c')) diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c index 4f75c873e26..5f79c9016b4 100644 --- a/hw/acpi/pcihp.c +++ b/hw/acpi/pcihp.c @@ -517,7 +517,7 @@ const VMStateDescription vmstate_acpi_pcihp_pci_status = { .name = "acpi_pcihp_pci_status", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(up, AcpiPciHpPciStatus), VMSTATE_UINT32(down, AcpiPciHpPciStatus), VMSTATE_END_OF_LIST() diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c index dd523d2e4ce..debe1adb846 100644 --- a/hw/acpi/piix4.c +++ b/hw/acpi/piix4.c @@ -147,7 +147,7 @@ static const VMStateDescription vmstate_gpe = { .name = "gpe", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_GPE_ARRAY(sts, ACPIGPE), VMSTATE_GPE_ARRAY(en, ACPIGPE), VMSTATE_END_OF_LIST() @@ -158,7 +158,7 @@ static const VMStateDescription vmstate_pci_status = { .name = "pci_status", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(up, struct AcpiPciHpPciStatus), VMSTATE_UINT32(down, struct AcpiPciHpPciStatus), VMSTATE_END_OF_LIST() @@ -189,7 +189,7 @@ static const VMStateDescription vmstate_memhp_state = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_test_use_memhp, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, PIIX4PMState), VMSTATE_END_OF_LIST() } @@ -214,7 +214,7 @@ static const VMStateDescription vmstate_cpuhp_state = { .minimum_version_id = 1, .needed = vmstate_test_use_cpuhp, .pre_load = vmstate_cpuhp_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CPU_HOTPLUG(cpuhp_state, PIIX4PMState), VMSTATE_END_OF_LIST() } @@ -247,7 +247,7 @@ static const VMStateDescription vmstate_acpi = { .version_id = 3, .minimum_version_id = 3, .post_load = vmstate_acpi_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PIIX4PMState), VMSTATE_UINT16(ar.pm1.evt.sts, PIIX4PMState), VMSTATE_UINT16(ar.pm1.evt.en, PIIX4PMState), @@ -269,7 +269,7 @@ static const VMStateDescription vmstate_acpi = { vmstate_test_migrate_acpi_index), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_memhp_state, &vmstate_cpuhp_state, NULL diff --git 
a/hw/acpi/vmgenid.c b/hw/acpi/vmgenid.c index a39315c1b35..e63c8af4c3f 100644 --- a/hw/acpi/vmgenid.c +++ b/hw/acpi/vmgenid.c @@ -178,7 +178,7 @@ static const VMStateDescription vmstate_vmgenid = { .version_id = 1, .minimum_version_id = 1, .post_load = vmgenid_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(vmgenid_addr_le, VmGenIdState, sizeof(uint64_t)), VMSTATE_END_OF_LIST() }, diff --git a/hw/adc/aspeed_adc.c b/hw/adc/aspeed_adc.c index 0d296631295..68bdbc73b0e 100644 --- a/hw/adc/aspeed_adc.c +++ b/hw/adc/aspeed_adc.c @@ -280,7 +280,7 @@ static const VMStateDescription vmstate_aspeed_adc_engine = { .name = TYPE_ASPEED_ADC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedADCEngineState, ASPEED_ADC_NR_REGS), VMSTATE_END_OF_LIST(), } diff --git a/hw/adc/max111x.c b/hw/adc/max111x.c index e8bf4cccd44..957d177e1ce 100644 --- a/hw/adc/max111x.c +++ b/hw/adc/max111x.c @@ -96,7 +96,7 @@ static const VMStateDescription vmstate_max111x = { .name = "max111x", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SSI_PERIPHERAL(parent_obj, MAX111xState), VMSTATE_UINT8(tb1, MAX111xState), VMSTATE_UINT8(rb2, MAX111xState), diff --git a/hw/adc/npcm7xx_adc.c b/hw/adc/npcm7xx_adc.c index bc6f3f55e64..c6647eec6d7 100644 --- a/hw/adc/npcm7xx_adc.c +++ b/hw/adc/npcm7xx_adc.c @@ -253,7 +253,7 @@ static const VMStateDescription vmstate_npcm7xx_adc = { .name = "npcm7xx-adc", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER(conv_timer, NPCM7xxADCState), VMSTATE_UINT32(con, NPCM7xxADCState), VMSTATE_UINT32(data, NPCM7xxADCState), diff --git a/hw/adc/stm32f2xx_adc.c b/hw/adc/stm32f2xx_adc.c index 01a0b14e69d..e9df6ea53f3 100644 --- a/hw/adc/stm32f2xx_adc.c +++ b/hw/adc/stm32f2xx_adc.c @@ -254,7 +254,7 @@ static const VMStateDescription vmstate_stm32f2xx_adc = { .name = TYPE_STM32F2XX_ADC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(adc_sr, STM32F2XXADCState), VMSTATE_UINT32(adc_cr1, STM32F2XXADCState), VMSTATE_UINT32(adc_cr2, STM32F2XXADCState), diff --git a/hw/adc/zynq-xadc.c b/hw/adc/zynq-xadc.c index 032e19cbd0a..34268319a40 100644 --- a/hw/adc/zynq-xadc.c +++ b/hw/adc/zynq-xadc.c @@ -269,7 +269,7 @@ static const VMStateDescription vmstate_zynq_xadc = { .name = "zynq-xadc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, ZynqXADCState, ZYNQ_XADC_NUM_IO_REGS), VMSTATE_UINT16_ARRAY(xadc_regs, ZynqXADCState, ZYNQ_XADC_NUM_ADC_REGS), diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c index 03495e1e606..52a1fa310b9 100644 --- a/hw/alpha/dp264.c +++ b/hw/alpha/dp264.c @@ -124,9 +124,7 @@ static void clipper_init(MachineState *machine) pci_vga_init(pci_bus); /* Network setup. e1000 is good enough, failing Tulip support. 
*/ - for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); - } + pci_init_nic_devices(pci_bus, mc->default_nic); /* Super I/O */ isa_create_simple(isa_bus, TYPE_SMC37C669_SUPERIO); diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index 3ada335a243..893a7bff66b 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -8,6 +8,7 @@ config ARM_VIRT imply TPM_TIS_SYSBUS imply TPM_TIS_I2C imply NVDIMM + imply IOMMUFD select ARM_GIC select ACPI select ARM_SMMUV3 @@ -105,6 +106,11 @@ config MAINSTONE select PFLASH_CFI01 select SMC91C111 +config MPS3R + bool + default y + depends on TCG && ARM + config MUSCA bool default y @@ -179,7 +185,7 @@ config PXA2XX select SERIAL select SD select SSI - select USB_OHCI + select USB_OHCI_SYSBUS select PCMCIA config GUMSTIX @@ -250,7 +256,7 @@ config REALVIEW select PL310 # cache controller select ARM_SBCON_I2C select DS1338 # I2C RTC+NVRAM - select USB_OHCI + select USB_OHCI_SYSBUS config SBSA_REF bool @@ -269,6 +275,8 @@ config SBSA_REF select USB_XHCI_SYSBUS select WDT_SBSA select BOCHS_DISPLAY + select IDE_BUS + select IDE_DEV config SABRELITE bool @@ -331,7 +339,7 @@ config VERSATILE select PL080 # DMA controller select PL190 # Vector PIC select REALVIEW - select USB_OHCI + select USB_OHCI_SYSBUS config VEXPRESS bool @@ -387,6 +395,7 @@ config ALLWINNER_A10 select AXP2XX_PMU select SERIAL select UNIMP + select USB_OHCI_SYSBUS config ALLWINNER_H3 bool @@ -400,20 +409,24 @@ config ALLWINNER_H3 select ARM_TIMER select ARM_GIC select UNIMP - select USB_OHCI + select USB_OHCI_SYSBUS select USB_EHCI_SYSBUS select SD config ALLWINNER_R40 bool default y if TCG && ARM + select AHCI select ALLWINNER_SRAMC select ALLWINNER_A10_PIT + select ALLWINNER_WDT select AXP2XX_PMU select SERIAL select ARM_TIMER select ARM_GIC select UNIMP + select USB_OHCI_SYSBUS + select USB_EHCI_SYSBUS select SD config RASPI @@ -424,6 +437,8 @@ config RASPI select PL011 # UART select SDHCI select USB_DWC2 + select BCM2835_SPI + select BCM2835_I2C config STM32F100_SOC bool @@ -448,6 +463,21 @@ config STM32F405_SOC select STM32F4XX_SYSCFG select STM32F4XX_EXTI +config B_L475E_IOT01A + bool + default y + depends on TCG && ARM + select STM32L4X5_SOC + +config STM32L4X5_SOC + bool + select ARM_V7M + select OR_IRQ + select STM32L4X5_EXTI + select STM32L4X5_SYSCFG + select STM32L4X5_RCC + select STM32L4X5_GPIO + config XLNX_ZYNQMP_ARM bool default y if PIXMAN @@ -504,6 +534,7 @@ config NPCM7XX select SSI select UNIMP select PCA954X + select USB_OHCI_SYSBUS config FSL_IMX25 bool @@ -529,6 +560,7 @@ config FSL_IMX31 config FSL_IMX6 bool + imply PCIE_DEVICES imply I2C_DEVICES select A9MPCORE select IMX @@ -536,6 +568,8 @@ config FSL_IMX6 select IMX_I2C select IMX_USBPHY select WDT_IMX2 + select PL310 # cache controller + select PCI_EXPRESS_DESIGNWARE select SDHCI config ASPEED_SOC @@ -559,6 +593,7 @@ config ASPEED_SOC select LED select PMBUS select MAX31785 + select FSI_APB2OPB_ASPEED config MPS2 bool diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c index b0ea3f7f662..57d5d80159c 100644 --- a/hw/arm/allwinner-a10.c +++ b/hw/arm/allwinner-a10.c @@ -26,6 +26,7 @@ #include "hw/boards.h" #include "hw/usb/hcd-ohci.h" #include "hw/loader.h" +#include "target/arm/cpu-qom.h" #define AW_A10_SRAM_A_BASE 0x00000000 #define AW_A10_DRAMC_BASE 0x01c01000 @@ -79,15 +80,10 @@ static void aw_a10_init(Object *obj) object_initialize_child(obj, "i2c0", &s->i2c0, TYPE_AW_I2C); - if (machine_usb(current_machine)) { - int i; - - for (i = 0; i < AW_A10_NUM_USB; i++) { - 
object_initialize_child(obj, "ehci[*]", &s->ehci[i], - TYPE_PLATFORM_EHCI); - object_initialize_child(obj, "ohci[*]", &s->ohci[i], - TYPE_SYSBUS_OHCI); - } + for (size_t i = 0; i < AW_A10_NUM_USB; i++) { + object_initialize_child(obj, "ehci[*]", &s->ehci[i], + TYPE_PLATFORM_EHCI); + object_initialize_child(obj, "ohci[*]", &s->ohci[i], TYPE_SYSBUS_OHCI); } object_initialize_child(obj, "mmc0", &s->mmc0, TYPE_AW_SDHOST_SUN4I); @@ -142,11 +138,7 @@ static void aw_a10_realize(DeviceState *dev, Error **errp) sysbus_realize(SYS_BUS_DEVICE(&s->dramc), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 0, AW_A10_DRAMC_BASE); - /* FIXME use qdev NIC properties instead of nd_table[] */ - if (nd_table[0].used) { - qemu_check_nic_model(&nd_table[0], TYPE_AW_EMAC); - qdev_set_nic_properties(DEVICE(&s->emac), &nd_table[0]); - } + qemu_configure_nic_device(DEVICE(&s->emac), true, NULL); if (!sysbus_realize(SYS_BUS_DEVICE(&s->emac), errp)) { return; } @@ -165,28 +157,24 @@ static void aw_a10_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in(dev, 1), 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN); - if (machine_usb(current_machine)) { - int i; - - for (i = 0; i < AW_A10_NUM_USB; i++) { - g_autofree char *bus = g_strdup_printf("usb-bus.%d", i); - - object_property_set_bool(OBJECT(&s->ehci[i]), "companion-enable", - true, &error_fatal); - sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), &error_fatal); - sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0, - AW_A10_EHCI_BASE + i * 0x8000); - sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, - qdev_get_gpio_in(dev, 39 + i)); - - object_property_set_str(OBJECT(&s->ohci[i]), "masterbus", bus, - &error_fatal); - sysbus_realize(SYS_BUS_DEVICE(&s->ohci[i]), &error_fatal); - sysbus_mmio_map(SYS_BUS_DEVICE(&s->ohci[i]), 0, - AW_A10_OHCI_BASE + i * 0x8000); - sysbus_connect_irq(SYS_BUS_DEVICE(&s->ohci[i]), 0, - qdev_get_gpio_in(dev, 64 + i)); - } + for (size_t i = 0; i < AW_A10_NUM_USB; i++) { + g_autofree char *bus = g_strdup_printf("usb-bus.%zu", i); + + object_property_set_bool(OBJECT(&s->ehci[i]), "companion-enable", + true, &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0, + AW_A10_EHCI_BASE + i * 0x8000); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, + qdev_get_gpio_in(dev, 39 + i)); + + object_property_set_str(OBJECT(&s->ohci[i]), "masterbus", bus, + &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(&s->ohci[i]), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ohci[i]), 0, + AW_A10_OHCI_BASE + i * 0x8000); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ohci[i]), 0, + qdev_get_gpio_in(dev, 64 + i)); } /* SD/MMC */ diff --git a/hw/arm/allwinner-h3.c b/hw/arm/allwinner-h3.c index f05afddf7e0..6870c3fe963 100644 --- a/hw/arm/allwinner-h3.c +++ b/hw/arm/allwinner-h3.c @@ -30,6 +30,8 @@ #include "hw/loader.h" #include "sysemu/sysemu.h" #include "hw/arm/allwinner-h3.h" +#include "target/arm/cpu-qom.h" +#include "target/arm/gtimer.h" /* Memory map */ const hwaddr allwinner_h3_memmap[] = { @@ -369,11 +371,7 @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp) "sd-bus"); /* EMAC */ - /* FIXME use qdev NIC properties instead of nd_table[] */ - if (nd_table[0].used) { - qemu_check_nic_model(&nd_table[0], TYPE_AW_SUN8I_EMAC); - qdev_set_nic_properties(DEVICE(&s->emac), &nd_table[0]); - } + qemu_configure_nic_device(DEVICE(&s->emac), true, NULL); object_property_set_link(OBJECT(&s->emac), "dma-memory", OBJECT(get_system_memory()), &error_fatal); 
sysbus_realize(SYS_BUS_DEVICE(&s->emac), &error_fatal); diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c index a0d367c60d1..b8c72021334 100644 --- a/hw/arm/allwinner-r40.c +++ b/hw/arm/allwinner-r40.c @@ -23,6 +23,7 @@ #include "qemu/bswap.h" #include "qemu/module.h" #include "qemu/units.h" +#include "hw/boards.h" #include "hw/qdev-core.h" #include "hw/sysbus.h" #include "hw/char/serial.h" @@ -32,6 +33,8 @@ #include "sysemu/sysemu.h" #include "hw/arm/allwinner-r40.h" #include "hw/misc/allwinner-r40-dramc.h" +#include "target/arm/cpu-qom.h" +#include "target/arm/gtimer.h" /* Memory map */ const hwaddr allwinner_r40_memmap[] = { @@ -45,8 +48,14 @@ const hwaddr allwinner_r40_memmap[] = { [AW_R40_DEV_MMC1] = 0x01c10000, [AW_R40_DEV_MMC2] = 0x01c11000, [AW_R40_DEV_MMC3] = 0x01c12000, + [AW_R40_DEV_AHCI] = 0x01c18000, + [AW_R40_DEV_EHCI1] = 0x01c19000, + [AW_R40_DEV_OHCI1] = 0x01c19400, + [AW_R40_DEV_EHCI2] = 0x01c1c000, + [AW_R40_DEV_OHCI2] = 0x01c1c400, [AW_R40_DEV_CCU] = 0x01c20000, [AW_R40_DEV_PIT] = 0x01c20c00, + [AW_R40_DEV_WDT] = 0x01c20c90, [AW_R40_DEV_UART0] = 0x01c28000, [AW_R40_DEV_UART1] = 0x01c28400, [AW_R40_DEV_UART2] = 0x01c28800, @@ -88,10 +97,9 @@ static struct AwR40Unimplemented r40_unimplemented[] = { { "usb0-host", 0x01c14000, 4 * KiB }, { "crypto", 0x01c15000, 4 * KiB }, { "spi2", 0x01c17000, 4 * KiB }, - { "sata", 0x01c18000, 4 * KiB }, - { "usb1-host", 0x01c19000, 4 * KiB }, + { "usb1-phy", 0x01c19800, 2 * KiB }, { "sid", 0x01c1b000, 4 * KiB }, - { "usb2-host", 0x01c1c000, 4 * KiB }, + { "usb2-phy", 0x01c1c800, 2 * KiB }, { "cs1", 0x01c1d000, 4 * KiB }, { "spi3", 0x01c1f000, 4 * KiB }, { "rtc", 0x01c20400, 1 * KiB }, @@ -181,6 +189,11 @@ enum { AW_R40_GIC_SPI_MMC2 = 34, AW_R40_GIC_SPI_MMC3 = 35, AW_R40_GIC_SPI_EMAC = 55, + AW_R40_GIC_SPI_AHCI = 56, + AW_R40_GIC_SPI_OHCI1 = 64, + AW_R40_GIC_SPI_OHCI2 = 65, + AW_R40_GIC_SPI_EHCI1 = 76, + AW_R40_GIC_SPI_EHCI2 = 78, AW_R40_GIC_SPI_GMAC = 85, }; @@ -269,6 +282,8 @@ static void allwinner_r40_init(Object *obj) object_property_add_alias(obj, "clk1-freq", OBJECT(&s->timer), "clk1-freq"); + object_initialize_child(obj, "wdt", &s->wdt, TYPE_AW_WDT_SUN4I); + object_initialize_child(obj, "ccu", &s->ccu, TYPE_AW_R40_CCU); for (int i = 0; i < AW_R40_NUM_MMCS; i++) { @@ -276,6 +291,15 @@ static void allwinner_r40_init(Object *obj) TYPE_AW_SDHOST_SUN50I_A64); } + object_initialize_child(obj, "sata", &s->sata, TYPE_ALLWINNER_AHCI); + + for (size_t i = 0; i < AW_R40_NUM_USB; i++) { + object_initialize_child(obj, "ehci[*]", &s->ehci[i], + TYPE_PLATFORM_EHCI); + object_initialize_child(obj, "ohci[*]", &s->ohci[i], + TYPE_SYSBUS_OHCI); + } + object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C_SUN6I); object_initialize_child(obj, "emac", &s->emac, TYPE_AW_EMAC); @@ -294,7 +318,6 @@ static void allwinner_r40_init(Object *obj) static void allwinner_r40_realize(DeviceState *dev, Error **errp) { - const char *r40_nic_models[] = { "gmac", "emac", NULL }; AwR40State *s = AW_R40(dev); /* CPUs */ @@ -407,6 +430,40 @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp) sysbus_realize(SYS_BUS_DEVICE(&s->ccu), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccu), 0, s->memmap[AW_R40_DEV_CCU]); + /* SATA / AHCI */ + sysbus_realize(SYS_BUS_DEVICE(&s->sata), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sata), 0, + allwinner_r40_memmap[AW_R40_DEV_AHCI]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->sata), 0, + qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_AHCI)); + + /* USB */ + for (size_t i = 0; i < AW_R40_NUM_USB; 
i++) { + g_autofree char *bus = g_strdup_printf("usb-bus.%zu", i); + + object_property_set_bool(OBJECT(&s->ehci[i]), "companion-enable", true, + &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0, + allwinner_r40_memmap[i ? AW_R40_DEV_EHCI2 + : AW_R40_DEV_EHCI1]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, + qdev_get_gpio_in(DEVICE(&s->gic), + i ? AW_R40_GIC_SPI_EHCI2 + : AW_R40_GIC_SPI_EHCI1)); + + object_property_set_str(OBJECT(&s->ohci[i]), "masterbus", bus, + &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(&s->ohci[i]), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ohci[i]), 0, + allwinner_r40_memmap[i ? AW_R40_DEV_OHCI2 + : AW_R40_DEV_OHCI1]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ohci[i]), 0, + qdev_get_gpio_in(DEVICE(&s->gic), + i ? AW_R40_GIC_SPI_OHCI2 + : AW_R40_GIC_SPI_OHCI1)); + } + /* SD/MMC */ for (int i = 0; i < AW_R40_NUM_MMCS; i++) { qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->gic), @@ -454,31 +511,8 @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 2, s->memmap[AW_R40_DEV_DRAMPHY]); - /* nic support gmac and emac */ - for (int i = 0; i < ARRAY_SIZE(r40_nic_models) - 1; i++) { - NICInfo *nic = &nd_table[i]; - - if (!nic->used) { - continue; - } - if (qemu_show_nic_models(nic->model, r40_nic_models)) { - exit(0); - } - - switch (qemu_find_nic_model(nic, r40_nic_models, r40_nic_models[0])) { - case 0: /* gmac */ - qdev_set_nic_properties(DEVICE(&s->gmac), nic); - break; - case 1: /* emac */ - qdev_set_nic_properties(DEVICE(&s->emac), nic); - break; - default: - exit(1); - break; - } - } - /* GMAC */ + qemu_configure_nic_device(DEVICE(&s->gmac), true, "gmac"); object_property_set_link(OBJECT(&s->gmac), "dma-memory", OBJECT(get_system_memory()), &error_fatal); sysbus_realize(SYS_BUS_DEVICE(&s->gmac), &error_fatal); @@ -487,11 +521,17 @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_GMAC)); /* EMAC */ + qemu_configure_nic_device(DEVICE(&s->emac), true, "emac"); sysbus_realize(SYS_BUS_DEVICE(&s->emac), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(&s->emac), 0, s->memmap[AW_R40_DEV_EMAC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->emac), 0, qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_EMAC)); + /* WDT */ + sysbus_realize(SYS_BUS_DEVICE(&s->wdt), &error_fatal); + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->wdt), 0, + allwinner_r40_memmap[AW_R40_DEV_WDT], 1); + /* Unimplemented devices */ for (unsigned i = 0; i < ARRAY_SIZE(r40_unimplemented); i++) { create_unimplemented_device(r40_unimplemented[i].device_name, diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c index 31acbf73471..91502d157a9 100644 --- a/hw/arm/armsse.c +++ b/hw/arm/armsse.c @@ -1022,10 +1022,8 @@ static void armsse_realize(DeviceState *dev, Error **errp) * later if necessary. 
*/ if (extract32(info->cpuwait_rst, i, 1)) { - if (!object_property_set_bool(cpuobj, "start-powered-off", true, - errp)) { - return; - } + object_property_set_bool(cpuobj, "start-powered-off", true, + &error_abort); } if (!s->cpu_fpu[i]) { if (!object_property_set_bool(cpuobj, "vfp", false, errp)) { @@ -1677,7 +1675,7 @@ static const VMStateDescription armsse_vmstate = { .name = "iotkit", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(mainclk, ARMSSE), VMSTATE_CLOCK(s32kclk, ARMSSE), VMSTATE_UINT32(nsccfg, ARMSSE), diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c index d10abb36a8e..7c68525a9e6 100644 --- a/hw/arm/armv7m.c +++ b/hw/arm/armv7m.c @@ -21,7 +21,9 @@ #include "qemu/module.h" #include "qemu/log.h" #include "target/arm/idau.h" +#include "target/arm/cpu.h" #include "target/arm/cpu-features.h" +#include "target/arm/cpu-qom.h" #include "migration/vmstate.h" /* Bitbanded IO. Each word corresponds to a single bit. */ @@ -256,6 +258,8 @@ static void armv7m_instance_init(Object *obj) object_initialize_child(obj, "nvic", &s->nvic, TYPE_NVIC); object_property_add_alias(obj, "num-irq", OBJECT(&s->nvic), "num-irq"); + object_property_add_alias(obj, "num-prio-bits", + OBJECT(&s->nvic), "num-prio-bits"); object_initialize_child(obj, "systick-reg-ns", &s->systick[M_REG_NS], TYPE_SYSTICK); @@ -318,12 +322,6 @@ static void armv7m_realize(DeviceState *dev, Error **errp) return; } } - if (object_property_find(OBJECT(s->cpu), "start-powered-off")) { - if (!object_property_set_bool(OBJECT(s->cpu), "start-powered-off", - s->start_powered_off, errp)) { - return; - } - } if (object_property_find(OBJECT(s->cpu), "vfp")) { if (!object_property_set_bool(OBJECT(s->cpu), "vfp", s->vfp, errp)) { return; @@ -334,6 +332,8 @@ static void armv7m_realize(DeviceState *dev, Error **errp) return; } } + object_property_set_bool(OBJECT(s->cpu), "start-powered-off", + s->start_powered_off, &error_abort); /* * Real M-profile hardware can be configured with a different number of @@ -559,7 +559,7 @@ static const VMStateDescription vmstate_armv7m = { .name = "armv7m", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(refclk, ARMv7MState), VMSTATE_CLOCK(cpuclk, ARMv7MState), VMSTATE_END_OF_LIST() diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index cc59176563a..93ca87fda26 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -18,7 +18,7 @@ #include "hw/block/flash.h" #include "hw/i2c/i2c_mux_pca954x.h" #include "hw/i2c/smbus_eeprom.h" -#include "hw/misc/pca9552.h" +#include "hw/gpio/pca9552.h" #include "hw/nvram/eeprom_at24c.h" #include "hw/sensor/tmp105.h" #include "hw/misc/led.h" @@ -289,12 +289,14 @@ static void aspeed_install_boot_rom(AspeedMachineState *bmc, BlockBackend *blk, uint64_t rom_size) { AspeedSoCState *soc = bmc->soc; + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(soc); memory_region_init_rom(&bmc->boot_rom, NULL, "aspeed.boot_rom", rom_size, &error_abort); memory_region_add_subregion_overlap(&soc->spi_boot_container, 0, &bmc->boot_rom, 1); - write_boot_rom(blk, ASPEED_SOC_SPI_BOOT_ADDR, rom_size, &error_abort); + write_boot_rom(blk, sc->memmap[ASPEED_DEV_SPI_BOOT], + rom_size, &error_abort); } void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, @@ -342,7 +344,7 @@ static void connect_serial_hds_to_uarts(AspeedMachineState *bmc) int uart_chosen = bmc->uart_chosen ? 
bmc->uart_chosen : amc->uart_default; aspeed_soc_uart_set_chr(s, uart_chosen, serial_hd(0)); - for (int i = 1, uart = ASPEED_DEV_UART1; i < sc->uarts_num; i++, uart++) { + for (int i = 1, uart = sc->uarts_base; i < sc->uarts_num; i++, uart++) { if (uart == uart_chosen) { continue; } @@ -356,7 +358,6 @@ static void aspeed_machine_init(MachineState *machine) AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(machine); AspeedSoCClass *sc; int i; - NICInfo *nd = &nd_table[0]; bmc->soc = ASPEED_SOC(object_new(amc->soc_name)); object_property_add_child(OBJECT(machine), "soc", OBJECT(bmc->soc)); @@ -371,10 +372,10 @@ static void aspeed_machine_init(MachineState *machine) &error_fatal); for (i = 0; i < sc->macs_num; i++) { - if ((amc->macs_mask & (1 << i)) && nd->used) { - qemu_check_nic_model(nd, TYPE_FTGMAC100); - qdev_set_nic_properties(DEVICE(&bmc->soc->ftgmac100[i]), nd); - nd++; + if ((amc->macs_mask & (1 << i)) && + !qemu_configure_nic_device(DEVICE(&bmc->soc->ftgmac100[i]), + true, NULL)) { + break; /* No configs left; stop asking */ } } @@ -1095,7 +1096,7 @@ static char *aspeed_get_bmc_console(Object *obj, Error **errp) AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(bmc); int uart_chosen = bmc->uart_chosen ? bmc->uart_chosen : amc->uart_default; - return g_strdup_printf("uart%d", uart_chosen - ASPEED_DEV_UART1 + 1); + return g_strdup_printf("uart%d", aspeed_uart_index(uart_chosen)); } static void aspeed_set_bmc_console(Object *obj, const char *value, Error **errp) @@ -1104,6 +1105,8 @@ static void aspeed_set_bmc_console(Object *obj, const char *value, Error **errp) AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(bmc); AspeedSoCClass *sc = ASPEED_SOC_CLASS(object_class_by_name(amc->soc_name)); int val; + int uart_first = aspeed_uart_first(sc); + int uart_last = aspeed_uart_last(sc); if (sscanf(value, "uart%u", &val) != 1) { error_setg(errp, "Bad value for \"uart\" property"); @@ -1111,11 +1114,12 @@ static void aspeed_set_bmc_console(Object *obj, const char *value, Error **errp) } /* The number of UART depends on the SoC */ - if (val < 1 || val > sc->uarts_num) { - error_setg(errp, "\"uart\" should be in range [1 - %d]", sc->uarts_num); + if (val < uart_first || val > uart_last) { + error_setg(errp, "\"uart\" should be in range [%d - %d]", + uart_first, uart_last); return; } - bmc->uart_chosen = ASPEED_DEV_UART1 + val - 1; + bmc->uart_chosen = val + ASPEED_DEV_UART0; } static void aspeed_machine_class_props_init(ObjectClass *oc) @@ -1141,10 +1145,15 @@ static void aspeed_machine_class_props_init(ObjectClass *oc) "Change the SPI Flash model"); } -static int aspeed_soc_num_cpus(const char *soc_name) +static void aspeed_machine_class_init_cpus_defaults(MachineClass *mc) { - AspeedSoCClass *sc = ASPEED_SOC_CLASS(object_class_by_name(soc_name)); - return sc->num_cpus; + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(mc); + AspeedSoCClass *sc = ASPEED_SOC_CLASS(object_class_by_name(amc->soc_name)); + + mc->default_cpus = sc->num_cpus; + mc->min_cpus = sc->num_cpus; + mc->max_cpus = sc->num_cpus; + mc->valid_cpu_types = sc->valid_cpu_types; } static void aspeed_machine_class_init(ObjectClass *oc, void *data) @@ -1176,8 +1185,7 @@ static void aspeed_machine_palmetto_class_init(ObjectClass *oc, void *data) amc->num_cs = 1; amc->i2c_init = palmetto_bmc_i2c_init; mc->default_ram_size = 256 * MiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_quanta_q71l_class_init(ObjectClass 
*oc, void *data) @@ -1193,8 +1201,7 @@ static void aspeed_machine_quanta_q71l_class_init(ObjectClass *oc, void *data) amc->num_cs = 1; amc->i2c_init = quanta_q71l_bmc_i2c_init; mc->default_ram_size = 128 * MiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); } static void aspeed_machine_supermicrox11_bmc_class_init(ObjectClass *oc, @@ -1212,6 +1219,7 @@ static void aspeed_machine_supermicrox11_bmc_class_init(ObjectClass *oc, amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = palmetto_bmc_i2c_init; mc->default_ram_size = 256 * MiB; + aspeed_machine_class_init_cpus_defaults(mc); } static void aspeed_machine_supermicro_x11spi_bmc_class_init(ObjectClass *oc, @@ -1229,8 +1237,7 @@ static void aspeed_machine_supermicro_x11spi_bmc_class_init(ObjectClass *oc, amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = palmetto_bmc_i2c_init; mc->default_ram_size = 512 * MiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); } static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data) @@ -1246,8 +1253,7 @@ static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data) amc->num_cs = 1; amc->i2c_init = ast2500_evb_i2c_init; mc->default_ram_size = 512 * MiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_yosemitev2_class_init(ObjectClass *oc, void *data) @@ -1264,8 +1270,7 @@ static void aspeed_machine_yosemitev2_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->i2c_init = yosemitev2_bmc_i2c_init; mc->default_ram_size = 512 * MiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_romulus_class_init(ObjectClass *oc, void *data) @@ -1281,8 +1286,7 @@ static void aspeed_machine_romulus_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->i2c_init = romulus_bmc_i2c_init; mc->default_ram_size = 512 * MiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_tiogapass_class_init(ObjectClass *oc, void *data) @@ -1299,9 +1303,7 @@ static void aspeed_machine_tiogapass_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->i2c_init = tiogapass_bmc_i2c_init; mc->default_ram_size = 1 * GiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, void *data) @@ -1317,8 +1319,7 @@ static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->i2c_init = sonorapass_bmc_i2c_init; mc->default_ram_size = 512 * MiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, void *data) @@ -1334,8 +1335,7 @@ static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->i2c_init = witherspoon_bmc_i2c_init; mc->default_ram_size = 512 * MiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - 
aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, void *data) @@ -1354,8 +1354,7 @@ static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, void *data) ASPEED_MAC3_ON; amc->i2c_init = ast2600_evb_i2c_init; mc->default_ram_size = 1 * GiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_tacoma_class_init(ObjectClass *oc, void *data) @@ -1373,8 +1372,7 @@ static void aspeed_machine_tacoma_class_init(ObjectClass *oc, void *data) amc->macs_mask = ASPEED_MAC2_ON; amc->i2c_init = witherspoon_bmc_i2c_init; /* Same board layout */ mc->default_ram_size = 1 * GiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_g220a_class_init(ObjectClass *oc, void *data) @@ -1391,8 +1389,7 @@ static void aspeed_machine_g220a_class_init(ObjectClass *oc, void *data) amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = g220a_bmc_i2c_init; mc->default_ram_size = 1024 * MiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc, void *data) @@ -1409,8 +1406,7 @@ static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc, void *data) amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = fp5280g2_bmc_i2c_init; mc->default_ram_size = 512 * MiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data) @@ -1428,8 +1424,7 @@ static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data) amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON; amc->i2c_init = rainier_bmc_i2c_init; mc->default_ram_size = 1 * GiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; #define FUJI_BMC_RAM_SIZE ASPEED_RAM_SIZE(2 * GiB) @@ -1450,8 +1445,7 @@ static void aspeed_machine_fuji_class_init(ObjectClass *oc, void *data) amc->i2c_init = fuji_bmc_i2c_init; amc->uart_default = ASPEED_DEV_UART1; mc->default_ram_size = FUJI_BMC_RAM_SIZE; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; #define BLETCHLEY_BMC_RAM_SIZE ASPEED_RAM_SIZE(2 * GiB) @@ -1471,8 +1465,7 @@ static void aspeed_machine_bletchley_class_init(ObjectClass *oc, void *data) amc->macs_mask = ASPEED_MAC2_ON; amc->i2c_init = bletchley_bmc_i2c_init; mc->default_ram_size = BLETCHLEY_BMC_RAM_SIZE; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); } static void fby35_reset(MachineState *state, ShutdownCause reason) @@ -1514,6 +1507,7 @@ static void aspeed_machine_fby35_class_init(ObjectClass *oc, void *data) amc->i2c_init = fby35_i2c_init; /* FIXME: Replace this macro with something more general */ mc->default_ram_size = FUJI_BMC_RAM_SIZE; + aspeed_machine_class_init_cpus_defaults(mc); } #define AST1030_INTERNAL_FLASH_SIZE (1024 * 1024) @@ -1587,11 +1581,11 @@ static void aspeed_minibmc_machine_ast1030_evb_class_init(ObjectClass *oc, mc->init = 
aspeed_minibmc_machine_init; amc->i2c_init = ast1030_evb_i2c_init; mc->default_ram_size = 0; - mc->default_cpus = mc->min_cpus = mc->max_cpus = 1; amc->fmc_model = "sst25vf032b"; amc->spi_model = "sst25vf032b"; amc->num_cs = 2; amc->macs_mask = 0; + aspeed_machine_class_init_cpus_defaults(mc); } static void aspeed_machine_qcom_dc_scm_v1_class_init(ObjectClass *oc, @@ -1610,8 +1604,7 @@ static void aspeed_machine_qcom_dc_scm_v1_class_init(ObjectClass *oc, amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON; amc->i2c_init = qcom_dc_scm_bmc_i2c_init; mc->default_ram_size = 1 * GiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_qcom_firework_class_init(ObjectClass *oc, @@ -1630,8 +1623,7 @@ static void aspeed_machine_qcom_firework_class_init(ObjectClass *oc, amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON; amc->i2c_init = qcom_dc_scm_firework_i2c_init; mc->default_ram_size = 1 * GiB; - mc->default_cpus = mc->min_cpus = mc->max_cpus = - aspeed_soc_num_cpus(amc->soc_name); + aspeed_machine_class_init_cpus_defaults(mc); }; static const TypeInfo aspeed_machine_types[] = { diff --git a/hw/arm/aspeed_ast10x0.c b/hw/arm/aspeed_ast10x0.c index 8becb146a8d..9f98ad8e87a 100644 --- a/hw/arm/aspeed_ast10x0.c +++ b/hw/arm/aspeed_ast10x0.c @@ -211,7 +211,7 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) /* AST1030 CPU Core */ armv7m = DEVICE(&a->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 256); - qdev_prop_set_string(armv7m, "cpu-type", sc->cpu_type); + qdev_prop_set_string(armv7m, "cpu-type", aspeed_soc_cpu_type(sc)); qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); object_property_set_link(OBJECT(&a->armv7m), "memory", OBJECT(s->memory), &error_abort); @@ -417,13 +417,19 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) static void aspeed_soc_ast1030_class_init(ObjectClass *klass, void *data) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m4"), /* TODO cortex-m4f */ + NULL + }; DeviceClass *dc = DEVICE_CLASS(klass); AspeedSoCClass *sc = ASPEED_SOC_CLASS(dc); + /* Reason: The Aspeed SoC can only be instantiated from a board */ + dc->user_creatable = false; dc->realize = aspeed_soc_ast1030_realize; sc->name = "ast1030-a1"; - sc->cpu_type = ARM_CPU_TYPE_NAME("cortex-m4"); /* TODO cortex-m4f */ + sc->valid_cpu_types = valid_cpu_types; sc->silicon_rev = AST1030_A1_SILICON_REV; sc->sram_size = 0xc0000; sc->secsram_size = 0x40000; /* 256 * KiB */ @@ -432,6 +438,7 @@ static void aspeed_soc_ast1030_class_init(ObjectClass *klass, void *data) sc->wdts_num = 4; sc->macs_num = 1; sc->uarts_num = 13; + sc->uarts_base = ASPEED_DEV_UART1; sc->irqmap = aspeed_soc_ast1030_irqmap; sc->memmap = aspeed_soc_ast1030_memmap; sc->num_cpus = 1; diff --git a/hw/arm/aspeed_ast2400.c b/hw/arm/aspeed_ast2400.c index a4334c81b8f..d1258862075 100644 --- a/hw/arm/aspeed_ast2400.c +++ b/hw/arm/aspeed_ast2400.c @@ -21,11 +21,12 @@ #include "hw/i2c/aspeed_i2c.h" #include "net/net.h" #include "sysemu/sysemu.h" +#include "target/arm/cpu-qom.h" #define ASPEED_SOC_IOMEM_SIZE 0x00200000 static const hwaddr aspeed_soc_ast2400_memmap[] = { - [ASPEED_DEV_SPI_BOOT] = ASPEED_SOC_SPI_BOOT_ADDR, + [ASPEED_DEV_SPI_BOOT] = 0x00000000, [ASPEED_DEV_IOMEM] = 0x1E600000, [ASPEED_DEV_FMC] = 0x1E620000, [ASPEED_DEV_SPI1] = 0x1E630000, @@ -60,7 +61,7 @@ static const hwaddr aspeed_soc_ast2400_memmap[] = { }; static const hwaddr 
aspeed_soc_ast2500_memmap[] = { - [ASPEED_DEV_SPI_BOOT] = ASPEED_SOC_SPI_BOOT_ADDR, + [ASPEED_DEV_SPI_BOOT] = 0x00000000, [ASPEED_DEV_IOMEM] = 0x1E600000, [ASPEED_DEV_FMC] = 0x1E620000, [ASPEED_DEV_SPI1] = 0x1E630000, @@ -155,7 +156,8 @@ static void aspeed_ast2400_soc_init(Object *obj) } for (i = 0; i < sc->num_cpus; i++) { - object_initialize_child(obj, "cpu[*]", &a->cpu[i], sc->cpu_type); + object_initialize_child(obj, "cpu[*]", &a->cpu[i], + aspeed_soc_cpu_type(sc)); } snprintf(typename, sizeof(typename), "aspeed.scu-%s", socname); @@ -247,7 +249,6 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) Aspeed2400SoCState *a = ASPEED2400_SOC(dev); AspeedSoCState *s = ASPEED_SOC(dev); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - Error *err = NULL; g_autofree char *sram_name = NULL; /* Default boot region (SPI memory or ROMs) */ @@ -276,9 +277,8 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) /* SRAM */ sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index); - memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, + errp)) { return; } memory_region_add_subregion(s->memory, @@ -503,6 +503,10 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("arm926"), + NULL + }; AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -511,7 +515,7 @@ static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) dc->user_creatable = false; sc->name = "ast2400-a1"; - sc->cpu_type = ARM_CPU_TYPE_NAME("arm926"); + sc->valid_cpu_types = valid_cpu_types; sc->silicon_rev = AST2400_A1_SILICON_REV; sc->sram_size = 0x8000; sc->spis_num = 1; @@ -519,6 +523,7 @@ static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) sc->wdts_num = 2; sc->macs_num = 2; sc->uarts_num = 5; + sc->uarts_base = ASPEED_DEV_UART1; sc->irqmap = aspeed_soc_ast2400_irqmap; sc->memmap = aspeed_soc_ast2400_memmap; sc->num_cpus = 1; @@ -527,6 +532,10 @@ static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("arm1176"), + NULL + }; AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -535,7 +544,7 @@ static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data) dc->user_creatable = false; sc->name = "ast2500-a1"; - sc->cpu_type = ARM_CPU_TYPE_NAME("arm1176"); + sc->valid_cpu_types = valid_cpu_types; sc->silicon_rev = AST2500_A1_SILICON_REV; sc->sram_size = 0x9000; sc->spis_num = 2; @@ -543,6 +552,7 @@ static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data) sc->wdts_num = 3; sc->macs_num = 2; sc->uarts_num = 5; + sc->uarts_base = ASPEED_DEV_UART1; sc->irqmap = aspeed_soc_ast2500_irqmap; sc->memmap = aspeed_soc_ast2500_memmap; sc->num_cpus = 1; diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c index b965fbab5ee..31713de74a5 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -16,12 +16,13 @@ #include "hw/i2c/aspeed_i2c.h" #include "net/net.h" #include "sysemu/sysemu.h" +#include "target/arm/cpu-qom.h" #define ASPEED_SOC_IOMEM_SIZE 0x00200000 #define ASPEED_SOC_DPMCU_SIZE 0x00040000 static const 
hwaddr aspeed_soc_ast2600_memmap[] = { - [ASPEED_DEV_SPI_BOOT] = ASPEED_SOC_SPI_BOOT_ADDR, + [ASPEED_DEV_SPI_BOOT] = 0x00000000, [ASPEED_DEV_SRAM] = 0x10000000, [ASPEED_DEV_DPMCU] = 0x18000000, /* 0x16000000 0x17FFFFFF : AHB BUS do LPC Bus bridge */ @@ -75,6 +76,8 @@ static const hwaddr aspeed_soc_ast2600_memmap[] = { [ASPEED_DEV_UART12] = 0x1E790600, [ASPEED_DEV_UART13] = 0x1E790700, [ASPEED_DEV_VUART] = 0x1E787000, + [ASPEED_DEV_FSI1] = 0x1E79B000, + [ASPEED_DEV_FSI2] = 0x1E79B100, [ASPEED_DEV_I3C] = 0x1E7A0000, [ASPEED_DEV_SDRAM] = 0x80000000, }; @@ -132,6 +135,8 @@ static const int aspeed_soc_ast2600_irqmap[] = { [ASPEED_DEV_ETH4] = 33, [ASPEED_DEV_KCS] = 138, /* 138 -> 142 */ [ASPEED_DEV_DP] = 62, + [ASPEED_DEV_FSI1] = 100, + [ASPEED_DEV_FSI2] = 101, [ASPEED_DEV_I3C] = 102, /* 102 -> 107 */ }; @@ -157,7 +162,8 @@ static void aspeed_soc_ast2600_init(Object *obj) } for (i = 0; i < sc->num_cpus; i++) { - object_initialize_child(obj, "cpu[*]", &a->cpu[i], sc->cpu_type); + object_initialize_child(obj, "cpu[*]", &a->cpu[i], + aspeed_soc_cpu_type(sc)); } snprintf(typename, sizeof(typename), "aspeed.scu-%s", socname); @@ -264,6 +270,10 @@ static void aspeed_soc_ast2600_init(Object *obj) object_initialize_child(obj, "emmc-boot-controller", &s->emmc_boot_controller, TYPE_UNIMPLEMENTED_DEVICE); + + for (i = 0; i < ASPEED_FSI_NUM; i++) { + object_initialize_child(obj, "fsi[*]", &s->fsi[i], TYPE_ASPEED_APB2OPB); + } } /* @@ -282,7 +292,6 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) Aspeed2600SoCState *a = ASPEED2600_SOC(dev); AspeedSoCState *s = ASPEED_SOC(dev); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - Error *err = NULL; qemu_irq irq; g_autofree char *sram_name = NULL; @@ -355,9 +364,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) /* SRAM */ sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index); - memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, + errp)) { return; } memory_region_add_subregion(s->memory, @@ -625,17 +633,34 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) return; } aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sbc), 0, sc->memmap[ASPEED_DEV_SBC]); + + /* FSI */ + for (i = 0; i < ASPEED_FSI_NUM; i++) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->fsi[i]), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fsi[i]), 0, + sc->memmap[ASPEED_DEV_FSI1 + i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->fsi[i]), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_FSI1 + i)); + } } static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a7"), + NULL + }; DeviceClass *dc = DEVICE_CLASS(oc); AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc); dc->realize = aspeed_soc_ast2600_realize; + /* Reason: The Aspeed SoC can only be instantiated from a board */ + dc->user_creatable = false; sc->name = "ast2600-a3"; - sc->cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); + sc->valid_cpu_types = valid_cpu_types; sc->silicon_rev = AST2600_A3_SILICON_REV; sc->sram_size = 0x16400; sc->spis_num = 2; @@ -643,6 +668,7 @@ static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data) sc->wdts_num = 4; sc->macs_num = 4; sc->uarts_num = 13; + sc->uarts_base = ASPEED_DEV_UART1; sc->irqmap = aspeed_soc_ast2600_irqmap; sc->memmap = aspeed_soc_ast2600_memmap; sc->num_cpus = 2; diff --git 
a/hw/arm/aspeed_eeprom.c b/hw/arm/aspeed_eeprom.c index ace5266cec9..daa3d329d10 100644 --- a/hw/arm/aspeed_eeprom.c +++ b/hw/arm/aspeed_eeprom.c @@ -4,6 +4,7 @@ * SPDX-License-Identifier: GPL-2.0-only */ +#include "qemu/osdep.h" #include "aspeed_eeprom.h" /* Tiogapass BMC FRU */ diff --git a/hw/arm/aspeed_eeprom.h b/hw/arm/aspeed_eeprom.h index bbf9e54365b..f08c16ef506 100644 --- a/hw/arm/aspeed_eeprom.h +++ b/hw/arm/aspeed_eeprom.h @@ -7,7 +7,6 @@ #ifndef ASPEED_EEPROM_H #define ASPEED_EEPROM_H -#include "qemu/osdep.h" extern const uint8_t tiogapass_bmc_fruid[]; extern const size_t tiogapass_bmc_fruid_len; diff --git a/hw/arm/aspeed_soc_common.c b/hw/arm/aspeed_soc_common.c index 828f61093bf..1e8f2558fdc 100644 --- a/hw/arm/aspeed_soc_common.c +++ b/hw/arm/aspeed_soc_common.c @@ -18,6 +18,14 @@ #include "hw/char/serial.h" +const char *aspeed_soc_cpu_type(AspeedSoCClass *sc) +{ + assert(sc->valid_cpu_types); + assert(sc->valid_cpu_types[0]); + assert(!sc->valid_cpu_types[1]); + return sc->valid_cpu_types[0]; +} + qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev) { return ASPEED_SOC_GET_CLASS(s)->get_irq(s, dev); @@ -28,7 +36,7 @@ bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp) AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); SerialMM *smm; - for (int i = 0, uart = ASPEED_DEV_UART1; i < sc->uarts_num; i++, uart++) { + for (int i = 0, uart = sc->uarts_base; i < sc->uarts_num; i++, uart++) { smm = &s->uart[i]; /* Chardev property is set by the machine. */ @@ -50,7 +58,9 @@ bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp) void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr) { AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - int i = dev - ASPEED_DEV_UART1; + int uart_first = aspeed_uart_first(sc); + int uart_index = aspeed_uart_index(dev); + int i = uart_index - uart_first; g_assert(0 <= i && i < ARRAY_SIZE(s->uart) && i < sc->uarts_num); qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", chr); diff --git a/hw/arm/b-l475e-iot01a.c b/hw/arm/b-l475e-iot01a.c new file mode 100644 index 00000000000..d862aa43fc3 --- /dev/null +++ b/hw/arm/b-l475e-iot01a.c @@ -0,0 +1,64 @@ +/* + * B-L475E-IOT01A Discovery Kit machine + * (B-L475E-IOT01A IoT Node) + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This work is heavily inspired by the netduinoplus2 by Alistair Francis. + * Original code is licensed under the MIT License: + * + * Copyright (c) 2014 Alistair Francis + */ + +/* + * The reference used is the STMicroElectronics UM2153 User manual + * Discovery kit for IoT node, multi-channel communication with STM32L4. 
+ * https://www.st.com/en/evaluation-tools/b-l475e-iot01a.html#documentation + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "qemu/error-report.h" +#include "hw/arm/stm32l4x5_soc.h" +#include "hw/arm/boot.h" + +/* B-L475E-IOT01A implementation is derived from netduinoplus2 */ + +static void b_l475e_iot01a_init(MachineState *machine) +{ + const Stm32l4x5SocClass *sc; + DeviceState *dev; + + dev = qdev_new(TYPE_STM32L4X5XG_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sc = STM32L4X5_SOC_GET_CLASS(dev); + armv7m_load_kernel(ARM_CPU(first_cpu), + machine->kernel_filename, + 0, sc->flash_size); +} + +static void b_l475e_iot01a_machine_init(MachineClass *mc) +{ + static const char *machine_valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m4"), + NULL + }; + mc->desc = "B-L475E-IOT01A Discovery Kit (Cortex-M4)"; + mc->init = b_l475e_iot01a_init; + mc->valid_cpu_types = machine_valid_cpu_types; + + /* SRAM pre-allocated as part of the SoC instantiation */ + mc->default_ram_size = 0; +} + +DEFINE_MACHINE("b-l475e-iot01a", b_l475e_iot01a_machine_init) diff --git a/hw/arm/bananapi_m2u.c b/hw/arm/bananapi_m2u.c index 8f24b18d8ca..0a4b6f29b1c 100644 --- a/hw/arm/bananapi_m2u.c +++ b/hw/arm/bananapi_m2u.c @@ -71,12 +71,6 @@ static void bpim2u_init(MachineState *machine) exit(1); } - /* Only allow Cortex-A7 for this board */ - if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a7")) != 0) { - error_report("This board can only be used with cortex-a7 CPU"); - exit(1); - } - r40 = AW_R40(object_new(TYPE_AW_R40)); object_property_add_child(OBJECT(machine), "soc", OBJECT(r40)); object_unref(OBJECT(r40)); @@ -133,12 +127,18 @@ static void bpim2u_init(MachineState *machine) static void bpim2u_machine_init(MachineClass *mc) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a7"), + NULL + }; + mc->desc = "Bananapi M2U (Cortex-A7)"; mc->init = bpim2u_init; mc->min_cpus = AW_R40_NUM_CPUS; mc->max_cpus = AW_R40_NUM_CPUS; mc->default_cpus = AW_R40_NUM_CPUS; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); + mc->valid_cpu_types = valid_cpu_types; mc->default_ram_size = 1 * GiB; mc->default_ram_id = "bpim2u.ram"; } diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c index 0233038b957..1695d8b453a 100644 --- a/hw/arm/bcm2835_peripherals.c +++ b/hw/arm/bcm2835_peripherals.c @@ -30,9 +30,12 @@ #define SEPARATE_DMA_IRQ_MAX 10 #define ORGATED_DMA_IRQ_COUNT 4 -static void create_unimp(BCM2835PeripheralState *ps, - UnimplementedDeviceState *uds, - const char *name, hwaddr ofs, hwaddr size) +/* All three I2C controllers share the same IRQ */ +#define ORGATED_I2C_IRQ_COUNT 3 + +void create_unimp(BCMSocPeripheralBaseState *ps, + UnimplementedDeviceState *uds, + const char *name, hwaddr ofs, hwaddr size) { object_initialize_child(OBJECT(ps), name, uds, TYPE_UNIMPLEMENTED_DEVICE); qdev_prop_set_string(DEVICE(uds), "name", name); @@ -45,9 +48,36 @@ static void create_unimp(BCM2835PeripheralState *ps, static void bcm2835_peripherals_init(Object *obj) { BCM2835PeripheralState *s = BCM2835_PERIPHERALS(obj); + BCMSocPeripheralBaseState *s_base = BCM_SOC_PERIPHERALS_BASE(obj); + + /* Random Number Generator */ + object_initialize_child(obj, "rng", &s->rng, TYPE_BCM2835_RNG); + + /* Thermal */ + object_initialize_child(obj, "thermal", &s->thermal, TYPE_BCM2835_THERMAL); + + /* GPIO */ + 
object_initialize_child(obj, "gpio", &s->gpio, TYPE_BCM2835_GPIO); + + object_property_add_const_link(OBJECT(&s->gpio), "sdbus-sdhci", + OBJECT(&s_base->sdhci.sdbus)); + object_property_add_const_link(OBJECT(&s->gpio), "sdbus-sdhost", + OBJECT(&s_base->sdhost.sdbus)); + + /* Gated DMA interrupts */ + object_initialize_child(obj, "orgated-dma-irq", + &s_base->orgated_dma_irq, TYPE_OR_IRQ); + object_property_set_int(OBJECT(&s_base->orgated_dma_irq), "num-lines", + ORGATED_DMA_IRQ_COUNT, &error_abort); +} + +static void raspi_peripherals_base_init(Object *obj) +{ + BCMSocPeripheralBaseState *s = BCM_SOC_PERIPHERALS_BASE(obj); + BCMSocPeripheralBaseClass *bc = BCM_SOC_PERIPHERALS_BASE_GET_CLASS(obj); /* Memory region for peripheral devices, which we export to our parent */ - memory_region_init(&s->peri_mr, obj,"bcm2835-peripherals", 0x1000000); + memory_region_init(&s->peri_mr, obj, "bcm2835-peripherals", bc->peri_size); sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->peri_mr); /* Internal memory region for peripheral bus addresses (not exported) */ @@ -81,6 +111,7 @@ static void bcm2835_peripherals_init(Object *obj) /* Framebuffer */ object_initialize_child(obj, "fb", &s->fb, TYPE_BCM2835_FB); object_property_add_alias(obj, "vcram-size", OBJECT(&s->fb), "vcram-size"); + object_property_add_alias(obj, "vcram-base", OBJECT(&s->fb), "vcram-base"); object_property_add_const_link(OBJECT(&s->fb), "dma-mr", OBJECT(&s->gpu_bus_mr)); @@ -98,9 +129,6 @@ static void bcm2835_peripherals_init(Object *obj) object_property_add_const_link(OBJECT(&s->property), "dma-mr", OBJECT(&s->gpu_bus_mr)); - /* Random Number Generator */ - object_initialize_child(obj, "rng", &s->rng, TYPE_BCM2835_RNG); - /* Extended Mass Media Controller */ object_initialize_child(obj, "sdhci", &s->sdhci, TYPE_SYSBUS_SDHCI); @@ -110,25 +138,9 @@ static void bcm2835_peripherals_init(Object *obj) /* DMA Channels */ object_initialize_child(obj, "dma", &s->dma, TYPE_BCM2835_DMA); - object_initialize_child(obj, "orgated-dma-irq", - &s->orgated_dma_irq, TYPE_OR_IRQ); - object_property_set_int(OBJECT(&s->orgated_dma_irq), "num-lines", - ORGATED_DMA_IRQ_COUNT, &error_abort); - object_property_add_const_link(OBJECT(&s->dma), "dma-mr", OBJECT(&s->gpu_bus_mr)); - /* Thermal */ - object_initialize_child(obj, "thermal", &s->thermal, TYPE_BCM2835_THERMAL); - - /* GPIO */ - object_initialize_child(obj, "gpio", &s->gpio, TYPE_BCM2835_GPIO); - - object_property_add_const_link(OBJECT(&s->gpio), "sdbus-sdhci", - OBJECT(&s->sdhci.sdbus)); - object_property_add_const_link(OBJECT(&s->gpio), "sdbus-sdhost", - OBJECT(&s->sdhost.sdbus)); - /* Mphi */ object_initialize_child(obj, "mphi", &s->mphi, TYPE_BCM2835_MPHI); @@ -144,15 +156,97 @@ static void bcm2835_peripherals_init(Object *obj) /* Power Management */ object_initialize_child(obj, "powermgt", &s->powermgt, TYPE_BCM2835_POWERMGT); + + /* SPI */ + object_initialize_child(obj, "bcm2835-spi0", &s->spi[0], + TYPE_BCM2835_SPI); + + /* I2C */ + object_initialize_child(obj, "bcm2835-i2c0", &s->i2c[0], + TYPE_BCM2835_I2C); + object_initialize_child(obj, "bcm2835-i2c1", &s->i2c[1], + TYPE_BCM2835_I2C); + object_initialize_child(obj, "bcm2835-i2c2", &s->i2c[2], + TYPE_BCM2835_I2C); + + object_initialize_child(obj, "orgated-i2c-irq", + &s->orgated_i2c_irq, TYPE_OR_IRQ); + object_property_set_int(OBJECT(&s->orgated_i2c_irq), "num-lines", + ORGATED_I2C_IRQ_COUNT, &error_abort); } static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) { + MemoryRegion *mphi_mr; BCM2835PeripheralState *s = 
BCM2835_PERIPHERALS(dev); + BCMSocPeripheralBaseState *s_base = BCM_SOC_PERIPHERALS_BASE(dev); + int n; + + bcm_soc_peripherals_common_realize(dev, errp); + + /* Extended Mass Media Controller */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s_base->sdhci), 0, + qdev_get_gpio_in_named(DEVICE(&s_base->ic), BCM2835_IC_GPU_IRQ, + INTERRUPT_ARASANSDIO)); + + /* Connect DMA 0-12 to the interrupt controller */ + for (n = 0; n <= SEPARATE_DMA_IRQ_MAX; n++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s_base->dma), n, + qdev_get_gpio_in_named(DEVICE(&s_base->ic), + BCM2835_IC_GPU_IRQ, + INTERRUPT_DMA0 + n)); + } + + if (!qdev_realize(DEVICE(&s_base->orgated_dma_irq), NULL, errp)) { + return; + } + for (n = 0; n < ORGATED_DMA_IRQ_COUNT; n++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s_base->dma), + SEPARATE_DMA_IRQ_MAX + 1 + n, + qdev_get_gpio_in(DEVICE(&s_base->orgated_dma_irq), n)); + } + qdev_connect_gpio_out(DEVICE(&s_base->orgated_dma_irq), 0, + qdev_get_gpio_in_named(DEVICE(&s_base->ic), + BCM2835_IC_GPU_IRQ, + INTERRUPT_DMA0 + SEPARATE_DMA_IRQ_MAX + 1)); + + /* Random Number Generator */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rng), errp)) { + return; + } + memory_region_add_subregion( + &s_base->peri_mr, RNG_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->rng), 0)); + + /* THERMAL */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->thermal), errp)) { + return; + } + memory_region_add_subregion(&s_base->peri_mr, THERMAL_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->thermal), 0)); + + /* Map MPHI to the peripherals memory map */ + mphi_mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s_base->mphi), 0); + memory_region_add_subregion(&s_base->peri_mr, MPHI_OFFSET, mphi_mr); + + /* GPIO */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { + return; + } + memory_region_add_subregion( + &s_base->peri_mr, GPIO_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gpio), 0)); + + object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->gpio), "sd-bus"); +} + +void bcm_soc_peripherals_common_realize(DeviceState *dev, Error **errp) +{ + BCMSocPeripheralBaseState *s = BCM_SOC_PERIPHERALS_BASE(dev); Object *obj; MemoryRegion *ram; Error *err = NULL; - uint64_t ram_size, vcram_size; + uint64_t ram_size, vcram_size, vcram_base; int n; obj = object_property_get_link(OBJECT(dev), "ram", &error_abort); @@ -256,11 +350,21 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) return; } - if (!object_property_set_uint(OBJECT(&s->fb), "vcram-base", - ram_size - vcram_size, errp)) { + vcram_base = object_property_get_uint(OBJECT(s), "vcram-base", &err); + if (err) { + error_propagate(errp, err); return; } + if (vcram_base == 0) { + vcram_base = ram_size - vcram_size; + } + vcram_base = MIN(vcram_base, UPPER_RAM_BASE - vcram_size); + + if (!object_property_set_uint(OBJECT(&s->fb), "vcram-base", vcram_base, + errp)) { + return; + } if (!sysbus_realize(SYS_BUS_DEVICE(&s->fb), errp)) { return; } @@ -281,14 +385,6 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->property), 0, qdev_get_gpio_in(DEVICE(&s->mboxes), MBOX_CHAN_PROPERTY)); - /* Random Number Generator */ - if (!sysbus_realize(SYS_BUS_DEVICE(&s->rng), errp)) { - return; - } - - memory_region_add_subregion(&s->peri_mr, RNG_OFFSET, - sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->rng), 0)); - /* Extended Mass Media Controller * * Compatible with: @@ -311,9 +407,6 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(&s->peri_mr, EMMC1_OFFSET, 
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->sdhci), 0)); - sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, - qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, - INTERRUPT_ARASANSDIO)); /* SDHOST */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhost), errp)) { @@ -336,49 +429,11 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(&s->peri_mr, DMA15_OFFSET, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dma), 1)); - for (n = 0; n <= SEPARATE_DMA_IRQ_MAX; n++) { - sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), n, - qdev_get_gpio_in_named(DEVICE(&s->ic), - BCM2835_IC_GPU_IRQ, - INTERRUPT_DMA0 + n)); - } - if (!qdev_realize(DEVICE(&s->orgated_dma_irq), NULL, errp)) { - return; - } - for (n = 0; n < ORGATED_DMA_IRQ_COUNT; n++) { - sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), - SEPARATE_DMA_IRQ_MAX + 1 + n, - qdev_get_gpio_in(DEVICE(&s->orgated_dma_irq), n)); - } - qdev_connect_gpio_out(DEVICE(&s->orgated_dma_irq), 0, - qdev_get_gpio_in_named(DEVICE(&s->ic), - BCM2835_IC_GPU_IRQ, - INTERRUPT_DMA0 + SEPARATE_DMA_IRQ_MAX + 1)); - - /* THERMAL */ - if (!sysbus_realize(SYS_BUS_DEVICE(&s->thermal), errp)) { - return; - } - memory_region_add_subregion(&s->peri_mr, THERMAL_OFFSET, - sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->thermal), 0)); - - /* GPIO */ - if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { - return; - } - - memory_region_add_subregion(&s->peri_mr, GPIO_OFFSET, - sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gpio), 0)); - - object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->gpio), "sd-bus"); - /* Mphi */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->mphi), errp)) { return; } - memory_region_add_subregion(&s->peri_mr, MPHI_OFFSET, - sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mphi), 0)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->mphi), 0, qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, INTERRUPT_HOSTPORT)); @@ -402,15 +457,49 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(&s->peri_mr, PM_OFFSET, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->powermgt), 0)); + /* SPI */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[0]), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, SPI0_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->spi[0]), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[0]), 0, + qdev_get_gpio_in_named(DEVICE(&s->ic), + BCM2835_IC_GPU_IRQ, + INTERRUPT_SPI)); + + /* I2C */ + for (n = 0; n < 3; n++) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c[n]), errp)) { + return; + } + } + + memory_region_add_subregion(&s->peri_mr, BSC0_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->i2c[0]), 0)); + memory_region_add_subregion(&s->peri_mr, BSC1_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->i2c[1]), 0)); + memory_region_add_subregion(&s->peri_mr, BSC2_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->i2c[2]), 0)); + + if (!qdev_realize(DEVICE(&s->orgated_i2c_irq), NULL, errp)) { + return; + } + for (n = 0; n < ORGATED_I2C_IRQ_COUNT; n++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[n]), 0, + qdev_get_gpio_in(DEVICE(&s->orgated_i2c_irq), n)); + } + qdev_connect_gpio_out(DEVICE(&s->orgated_i2c_irq), 0, + qdev_get_gpio_in_named(DEVICE(&s->ic), + BCM2835_IC_GPU_IRQ, + INTERRUPT_I2C)); + create_unimp(s, &s->txp, "bcm2835-txp", TXP_OFFSET, 0x1000); create_unimp(s, &s->armtmr, "bcm2835-sp804", ARMCTRL_TIMER0_1_OFFSET, 0x40); create_unimp(s, &s->i2s, "bcm2835-i2s", I2S_OFFSET, 0x100); create_unimp(s, &s->smi, "bcm2835-smi", SMI_OFFSET, 0x100); - create_unimp(s, 
&s->spi[0], "bcm2835-spi0", SPI0_OFFSET, 0x20); create_unimp(s, &s->bscsl, "bcm2835-spis", BSC_SL_OFFSET, 0x100); - create_unimp(s, &s->i2c[0], "bcm2835-i2c0", BSC0_OFFSET, 0x20); - create_unimp(s, &s->i2c[1], "bcm2835-i2c1", BSC1_OFFSET, 0x20); - create_unimp(s, &s->i2c[2], "bcm2835-i2c2", BSC2_OFFSET, 0x20); create_unimp(s, &s->otp, "bcm2835-otp", OTP_OFFSET, 0x80); create_unimp(s, &s->dbus, "bcm2835-dbus", DBUS_OFFSET, 0x8000); create_unimp(s, &s->ave0, "bcm2835-ave0", AVE0_OFFSET, 0x8000); @@ -421,21 +510,27 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) static void bcm2835_peripherals_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); + BCMSocPeripheralBaseClass *bc = BCM_SOC_PERIPHERALS_BASE_CLASS(oc); + bc->peri_size = 0x1000000; dc->realize = bcm2835_peripherals_realize; } -static const TypeInfo bcm2835_peripherals_type_info = { - .name = TYPE_BCM2835_PERIPHERALS, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(BCM2835PeripheralState), - .instance_init = bcm2835_peripherals_init, - .class_init = bcm2835_peripherals_class_init, +static const TypeInfo bcm2835_peripherals_types[] = { + { + .name = TYPE_BCM2835_PERIPHERALS, + .parent = TYPE_BCM_SOC_PERIPHERALS_BASE, + .instance_size = sizeof(BCM2835PeripheralState), + .instance_init = bcm2835_peripherals_init, + .class_init = bcm2835_peripherals_class_init, + }, { + .name = TYPE_BCM_SOC_PERIPHERALS_BASE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCMSocPeripheralBaseState), + .instance_init = raspi_peripherals_base_init, + .class_size = sizeof(BCMSocPeripheralBaseClass), + .abstract = true, + } }; -static void bcm2835_peripherals_register_types(void) -{ - type_register_static(&bcm2835_peripherals_type_info); -} - -type_init(bcm2835_peripherals_register_types) +DEFINE_TYPES(bcm2835_peripherals_types) diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c index 166dc896c09..db191661f29 100644 --- a/hw/arm/bcm2836.c +++ b/hw/arm/bcm2836.c @@ -15,6 +15,8 @@ #include "hw/arm/bcm2836.h" #include "hw/arm/raspi_platform.h" #include "hw/sysbus.h" +#include "target/arm/cpu-qom.h" +#include "target/arm/gtimer.h" struct BCM283XClass { /*< private >*/ @@ -29,12 +31,12 @@ struct BCM283XClass { }; static Property bcm2836_enabled_cores_property = - DEFINE_PROP_UINT32("enabled-cpus", BCM283XState, enabled_cpus, 0); + DEFINE_PROP_UINT32("enabled-cpus", BCM283XBaseState, enabled_cpus, 0); -static void bcm2836_init(Object *obj) +static void bcm283x_base_init(Object *obj) { - BCM283XState *s = BCM283X(obj); - BCM283XClass *bc = BCM283X_GET_CLASS(obj); + BCM283XBaseState *s = BCM283X_BASE(obj); + BCM283XBaseClass *bc = BCM283X_BASE_GET_CLASS(obj); int n; for (n = 0; n < bc->core_count; n++) { @@ -50,6 +52,11 @@ static void bcm2836_init(Object *obj) object_initialize_child(obj, "control", &s->control, TYPE_BCM2836_CONTROL); } +} + +static void bcm283x_init(Object *obj) +{ + BCM283XState *s = BCM283X(obj); object_initialize_child(obj, "peripherals", &s->peripherals, TYPE_BCM2835_PERIPHERALS); @@ -59,114 +66,116 @@ static void bcm2836_init(Object *obj) "command-line"); object_property_add_alias(obj, "vcram-size", OBJECT(&s->peripherals), "vcram-size"); + object_property_add_alias(obj, "vcram-base", OBJECT(&s->peripherals), + "vcram-base"); } -static bool bcm283x_common_realize(DeviceState *dev, Error **errp) +bool bcm283x_common_realize(DeviceState *dev, BCMSocPeripheralBaseState *ps, + Error **errp) { - BCM283XState *s = BCM283X(dev); - BCM283XClass *bc = BCM283X_GET_CLASS(dev); + 
BCM283XBaseState *s = BCM283X_BASE(dev); + BCM283XBaseClass *bc = BCM283X_BASE_GET_CLASS(dev); Object *obj; /* common peripherals from bcm2835 */ obj = object_property_get_link(OBJECT(dev), "ram", &error_abort); - object_property_add_const_link(OBJECT(&s->peripherals), "ram", obj); + object_property_add_const_link(OBJECT(ps), "ram", obj); - if (!sysbus_realize(SYS_BUS_DEVICE(&s->peripherals), errp)) { + if (!sysbus_realize(SYS_BUS_DEVICE(ps), errp)) { return false; } - object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->peripherals), - "sd-bus"); + object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(ps), "sd-bus"); - sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->peripherals), 0, - bc->peri_base, 1); + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(ps), 0, bc->peri_base, 1); return true; } static void bcm2835_realize(DeviceState *dev, Error **errp) { BCM283XState *s = BCM283X(dev); + BCM283XBaseState *s_base = BCM283X_BASE(dev); + BCMSocPeripheralBaseState *ps_base + = BCM_SOC_PERIPHERALS_BASE(&s->peripherals); - if (!bcm283x_common_realize(dev, errp)) { + if (!bcm283x_common_realize(dev, ps_base, errp)) { return; } - if (!qdev_realize(DEVICE(&s->cpu[0].core), NULL, errp)) { + if (!qdev_realize(DEVICE(&s_base->cpu[0].core), NULL, errp)) { return; } /* Connect irq/fiq outputs from the interrupt controller. */ sysbus_connect_irq(SYS_BUS_DEVICE(&s->peripherals), 0, - qdev_get_gpio_in(DEVICE(&s->cpu[0].core), ARM_CPU_IRQ)); + qdev_get_gpio_in(DEVICE(&s_base->cpu[0].core), ARM_CPU_IRQ)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->peripherals), 1, - qdev_get_gpio_in(DEVICE(&s->cpu[0].core), ARM_CPU_FIQ)); + qdev_get_gpio_in(DEVICE(&s_base->cpu[0].core), ARM_CPU_FIQ)); } static void bcm2836_realize(DeviceState *dev, Error **errp) { - BCM283XState *s = BCM283X(dev); - BCM283XClass *bc = BCM283X_GET_CLASS(dev); int n; + BCM283XState *s = BCM283X(dev); + BCM283XBaseState *s_base = BCM283X_BASE(dev); + BCM283XBaseClass *bc = BCM283X_BASE_GET_CLASS(dev); + BCMSocPeripheralBaseState *ps_base + = BCM_SOC_PERIPHERALS_BASE(&s->peripherals); - if (!bcm283x_common_realize(dev, errp)) { + if (!bcm283x_common_realize(dev, ps_base, errp)) { return; } /* bcm2836 interrupt controller (and mailboxes, etc.) 
*/ - if (!sysbus_realize(SYS_BUS_DEVICE(&s->control), errp)) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s_base->control), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->control), 0, bc->ctrl_base); + sysbus_mmio_map(SYS_BUS_DEVICE(&s_base->control), 0, bc->ctrl_base); sysbus_connect_irq(SYS_BUS_DEVICE(&s->peripherals), 0, - qdev_get_gpio_in_named(DEVICE(&s->control), "gpu-irq", 0)); + qdev_get_gpio_in_named(DEVICE(&s_base->control), "gpu-irq", 0)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->peripherals), 1, - qdev_get_gpio_in_named(DEVICE(&s->control), "gpu-fiq", 0)); + qdev_get_gpio_in_named(DEVICE(&s_base->control), "gpu-fiq", 0)); for (n = 0; n < BCM283X_NCPUS; n++) { - /* TODO: this should be converted to a property of ARM_CPU */ - s->cpu[n].core.mp_affinity = (bc->clusterid << 8) | n; + object_property_set_int(OBJECT(&s_base->cpu[n].core), "mp-affinity", + (bc->clusterid << 8) | n, &error_abort); /* set periphbase/CBAR value for CPU-local registers */ - if (!object_property_set_int(OBJECT(&s->cpu[n].core), "reset-cbar", - bc->peri_base, errp)) { - return; - } + object_property_set_int(OBJECT(&s_base->cpu[n].core), "reset-cbar", + bc->peri_base, &error_abort); /* start powered off if not enabled */ - if (!object_property_set_bool(OBJECT(&s->cpu[n].core), - "start-powered-off", - n >= s->enabled_cpus, - errp)) { - return; - } + object_property_set_bool(OBJECT(&s_base->cpu[n].core), + "start-powered-off", + n >= s_base->enabled_cpus, &error_abort); - if (!qdev_realize(DEVICE(&s->cpu[n].core), NULL, errp)) { + if (!qdev_realize(DEVICE(&s_base->cpu[n].core), NULL, errp)) { return; } /* Connect irq/fiq outputs from the interrupt controller. */ - qdev_connect_gpio_out_named(DEVICE(&s->control), "irq", n, - qdev_get_gpio_in(DEVICE(&s->cpu[n].core), ARM_CPU_IRQ)); - qdev_connect_gpio_out_named(DEVICE(&s->control), "fiq", n, - qdev_get_gpio_in(DEVICE(&s->cpu[n].core), ARM_CPU_FIQ)); + qdev_connect_gpio_out_named(DEVICE(&s_base->control), "irq", n, + qdev_get_gpio_in(DEVICE(&s_base->cpu[n].core), ARM_CPU_IRQ)); + qdev_connect_gpio_out_named(DEVICE(&s_base->control), "fiq", n, + qdev_get_gpio_in(DEVICE(&s_base->cpu[n].core), ARM_CPU_FIQ)); /* Connect timers from the CPU to the interrupt controller */ - qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_PHYS, - qdev_get_gpio_in_named(DEVICE(&s->control), "cntpnsirq", n)); - qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_VIRT, - qdev_get_gpio_in_named(DEVICE(&s->control), "cntvirq", n)); - qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_HYP, - qdev_get_gpio_in_named(DEVICE(&s->control), "cnthpirq", n)); - qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_SEC, - qdev_get_gpio_in_named(DEVICE(&s->control), "cntpsirq", n)); + qdev_connect_gpio_out(DEVICE(&s_base->cpu[n].core), GTIMER_PHYS, + qdev_get_gpio_in_named(DEVICE(&s_base->control), "cntpnsirq", n)); + qdev_connect_gpio_out(DEVICE(&s_base->cpu[n].core), GTIMER_VIRT, + qdev_get_gpio_in_named(DEVICE(&s_base->control), "cntvirq", n)); + qdev_connect_gpio_out(DEVICE(&s_base->cpu[n].core), GTIMER_HYP, + qdev_get_gpio_in_named(DEVICE(&s_base->control), "cnthpirq", n)); + qdev_connect_gpio_out(DEVICE(&s_base->cpu[n].core), GTIMER_SEC, + qdev_get_gpio_in_named(DEVICE(&s_base->control), "cntpsirq", n)); } } -static void bcm283x_class_init(ObjectClass *oc, void *data) +static void bcm283x_base_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -177,7 +186,7 @@ static void bcm283x_class_init(ObjectClass *oc, void *data) static void 
bcm2835_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); - BCM283XClass *bc = BCM283X_CLASS(oc); + BCM283XBaseClass *bc = BCM283X_BASE_CLASS(oc); bc->cpu_type = ARM_CPU_TYPE_NAME("arm1176"); bc->core_count = 1; @@ -188,7 +197,7 @@ static void bcm2835_class_init(ObjectClass *oc, void *data) static void bcm2836_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); - BCM283XClass *bc = BCM283X_CLASS(oc); + BCM283XBaseClass *bc = BCM283X_BASE_CLASS(oc); bc->cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); bc->core_count = BCM283X_NCPUS; @@ -202,7 +211,7 @@ static void bcm2836_class_init(ObjectClass *oc, void *data) static void bcm2837_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); - BCM283XClass *bc = BCM283X_CLASS(oc); + BCM283XBaseClass *bc = BCM283X_BASE_CLASS(oc); bc->cpu_type = ARM_CPU_TYPE_NAME("cortex-a53"); bc->core_count = BCM283X_NCPUS; @@ -230,11 +239,17 @@ static const TypeInfo bcm283x_types[] = { #endif }, { .name = TYPE_BCM283X, - .parent = TYPE_DEVICE, + .parent = TYPE_BCM283X_BASE, .instance_size = sizeof(BCM283XState), - .instance_init = bcm2836_init, - .class_size = sizeof(BCM283XClass), - .class_init = bcm283x_class_init, + .instance_init = bcm283x_init, + .abstract = true, + }, { + .name = TYPE_BCM283X_BASE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(BCM283XBaseState), + .instance_init = bcm283x_base_init, + .class_size = sizeof(BCM283XBaseClass), + .class_init = bcm283x_base_class_init, .abstract = true, } }; diff --git a/hw/arm/bcm2838.c b/hw/arm/bcm2838.c new file mode 100644 index 00000000000..ddb7c5f757a --- /dev/null +++ b/hw/arm/bcm2838.c @@ -0,0 +1,263 @@ +/* + * BCM2838 SoC emulation + * + * Copyright (C) 2022 Ovchinnikov Vitalii + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/arm/raspi_platform.h" +#include "hw/sysbus.h" +#include "hw/arm/bcm2838.h" +#include "trace.h" + +#define GIC400_MAINTENANCE_IRQ 9 +#define GIC400_TIMER_NS_EL2_IRQ 10 +#define GIC400_TIMER_VIRT_IRQ 11 +#define GIC400_LEGACY_FIQ 12 +#define GIC400_TIMER_S_EL1_IRQ 13 +#define GIC400_TIMER_NS_EL1_IRQ 14 +#define GIC400_LEGACY_IRQ 15 + +/* Number of external interrupt lines to configure the GIC with */ +#define GIC_NUM_IRQS 192 + +#define PPI(cpu, irq) (GIC_NUM_IRQS + (cpu) * GIC_INTERNAL + GIC_NR_SGIS + irq) + +#define GIC_BASE_OFS 0x0000 +#define GIC_DIST_OFS 0x1000 +#define GIC_CPU_OFS 0x2000 +#define GIC_VIFACE_THIS_OFS 0x4000 +#define GIC_VIFACE_OTHER_OFS(cpu) (0x5000 + (cpu) * 0x200) +#define GIC_VCPU_OFS 0x6000 + +#define VIRTUAL_PMU_IRQ 7 + +static void bcm2838_gic_set_irq(void *opaque, int irq, int level) +{ + BCM2838State *s = (BCM2838State *)opaque; + + trace_bcm2838_gic_set_irq(irq, level); + qemu_set_irq(qdev_get_gpio_in(DEVICE(&s->gic), irq), level); +} + +static void bcm2838_init(Object *obj) +{ + BCM2838State *s = BCM2838(obj); + + object_initialize_child(obj, "peripherals", &s->peripherals, + TYPE_BCM2838_PERIPHERALS); + object_property_add_alias(obj, "board-rev", OBJECT(&s->peripherals), + "board-rev"); + object_property_add_alias(obj, "vcram-size", OBJECT(&s->peripherals), + "vcram-size"); + object_property_add_alias(obj, "vcram-base", OBJECT(&s->peripherals), + "vcram-base"); + object_property_add_alias(obj, "command-line", OBJECT(&s->peripherals), + "command-line"); + + object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GIC); +} + +static void bcm2838_realize(DeviceState *dev, Error **errp) 
+{ + BCM2838State *s = BCM2838(dev); + BCM283XBaseState *s_base = BCM283X_BASE(dev); + BCM283XBaseClass *bc_base = BCM283X_BASE_GET_CLASS(dev); + BCM2838PeripheralState *ps = BCM2838_PERIPHERALS(&s->peripherals); + BCMSocPeripheralBaseState *ps_base = + BCM_SOC_PERIPHERALS_BASE(&s->peripherals); + + DeviceState *gicdev = NULL; + + if (!bcm283x_common_realize(dev, ps_base, errp)) { + return; + } + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(ps), 1, BCM2838_PERI_LOW_BASE, 1); + + /* bcm2836 interrupt controller (and mailboxes, etc.) */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s_base->control), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s_base->control), 0, bc_base->ctrl_base); + + /* Create cores */ + for (int n = 0; n < bc_base->core_count; n++) { + + object_property_set_int(OBJECT(&s_base->cpu[n].core), "mp-affinity", + (bc_base->clusterid << 8) | n, &error_abort); + + /* set periphbase/CBAR value for CPU-local registers */ + object_property_set_int(OBJECT(&s_base->cpu[n].core), "reset-cbar", + bc_base->peri_base, &error_abort); + + /* start powered off if not enabled */ + object_property_set_bool(OBJECT(&s_base->cpu[n].core), + "start-powered-off", + n >= s_base->enabled_cpus, &error_abort); + + if (!qdev_realize(DEVICE(&s_base->cpu[n].core), NULL, errp)) { + return; + } + } + + if (!object_property_set_uint(OBJECT(&s->gic), "revision", 2, errp)) { + return; + } + + if (!object_property_set_uint(OBJECT(&s->gic), "num-cpu", BCM283X_NCPUS, + errp)) { + return; + } + + if (!object_property_set_uint(OBJECT(&s->gic), "num-irq", + GIC_NUM_IRQS + GIC_INTERNAL, errp)) { + return; + } + + if (!object_property_set_bool(OBJECT(&s->gic), + "has-virtualization-extensions", true, + errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 0, + bc_base->ctrl_base + BCM2838_GIC_BASE + GIC_DIST_OFS); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 1, + bc_base->ctrl_base + BCM2838_GIC_BASE + GIC_CPU_OFS); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 2, + bc_base->ctrl_base + BCM2838_GIC_BASE + GIC_VIFACE_THIS_OFS); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 3, + bc_base->ctrl_base + BCM2838_GIC_BASE + GIC_VCPU_OFS); + + for (int n = 0; n < BCM283X_NCPUS; n++) { + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 4 + n, + bc_base->ctrl_base + BCM2838_GIC_BASE + + GIC_VIFACE_OTHER_OFS(n)); + } + + gicdev = DEVICE(&s->gic); + + for (int n = 0; n < BCM283X_NCPUS; n++) { + DeviceState *cpudev = DEVICE(&s_base->cpu[n]); + + /* Connect the GICv2 outputs to the CPU */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), n, + qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), n + BCM283X_NCPUS, + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), n + 2 * BCM283X_NCPUS, + qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), n + 3 * BCM283X_NCPUS, + qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), n + 4 * BCM283X_NCPUS, + qdev_get_gpio_in(gicdev, + PPI(n, GIC400_MAINTENANCE_IRQ))); + + /* Connect timers from the CPU to the interrupt controller */ + qdev_connect_gpio_out(cpudev, GTIMER_PHYS, + qdev_get_gpio_in(gicdev, PPI(n, GIC400_TIMER_NS_EL1_IRQ))); + qdev_connect_gpio_out(cpudev, GTIMER_VIRT, + qdev_get_gpio_in(gicdev, PPI(n, GIC400_TIMER_VIRT_IRQ))); + qdev_connect_gpio_out(cpudev, GTIMER_HYP, + qdev_get_gpio_in(gicdev, PPI(n, GIC400_TIMER_NS_EL2_IRQ))); + qdev_connect_gpio_out(cpudev, GTIMER_SEC, + 
qdev_get_gpio_in(gicdev, PPI(n, GIC400_TIMER_S_EL1_IRQ))); + /* PMU interrupt */ + qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0, + qdev_get_gpio_in(gicdev, PPI(n, VIRTUAL_PMU_IRQ))); + } + + /* Connect UART0 to the interrupt controller */ + sysbus_connect_irq(SYS_BUS_DEVICE(&ps_base->uart0), 0, + qdev_get_gpio_in(gicdev, GIC_SPI_INTERRUPT_UART0)); + + /* Connect AUX / UART1 to the interrupt controller */ + sysbus_connect_irq(SYS_BUS_DEVICE(&ps_base->aux), 0, + qdev_get_gpio_in(gicdev, GIC_SPI_INTERRUPT_AUX_UART1)); + + /* Connect VC mailbox to the interrupt controller */ + sysbus_connect_irq(SYS_BUS_DEVICE(&ps_base->mboxes), 0, + qdev_get_gpio_in(gicdev, GIC_SPI_INTERRUPT_MBOX)); + + /* Connect SD host to the interrupt controller */ + sysbus_connect_irq(SYS_BUS_DEVICE(&ps_base->sdhost), 0, + qdev_get_gpio_in(gicdev, GIC_SPI_INTERRUPT_SDHOST)); + + /* According to DTS, EMMC and EMMC2 share one irq */ + DeviceState *mmc_irq_orgate = DEVICE(&ps->mmc_irq_orgate); + + /* Connect EMMC and EMMC2 to the interrupt controller */ + qdev_connect_gpio_out(mmc_irq_orgate, 0, + qdev_get_gpio_in(gicdev, GIC_SPI_INTERRUPT_EMMC_EMMC2)); + + /* Connect USB OTG and MPHI to the interrupt controller */ + sysbus_connect_irq(SYS_BUS_DEVICE(&ps_base->mphi), 0, + qdev_get_gpio_in(gicdev, GIC_SPI_INTERRUPT_MPHI)); + sysbus_connect_irq(SYS_BUS_DEVICE(&ps_base->dwc2), 0, + qdev_get_gpio_in(gicdev, GIC_SPI_INTERRUPT_DWC2)); + + /* Connect DMA 0-6 to the interrupt controller */ + for (int n = GIC_SPI_INTERRUPT_DMA_0; n <= GIC_SPI_INTERRUPT_DMA_6; n++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&ps_base->dma), + n - GIC_SPI_INTERRUPT_DMA_0, + qdev_get_gpio_in(gicdev, n)); + } + + /* According to DTS, DMA 7 and 8 share one irq */ + DeviceState *dma_7_8_irq_orgate = DEVICE(&ps->dma_7_8_irq_orgate); + + /* Connect DMA 7-8 to the interrupt controller */ + qdev_connect_gpio_out(dma_7_8_irq_orgate, 0, + qdev_get_gpio_in(gicdev, GIC_SPI_INTERRUPT_DMA_7_8)); + + /* According to DTS, DMA 9 and 10 share one irq */ + DeviceState *dma_9_10_irq_orgate = DEVICE(&ps->dma_9_10_irq_orgate); + + /* Connect DMA 9-10 to the interrupt controller */ + qdev_connect_gpio_out(dma_9_10_irq_orgate, 0, + qdev_get_gpio_in(gicdev, GIC_SPI_INTERRUPT_DMA_9_10)); + + /* Pass through inbound GPIO lines to the GIC */ + qdev_init_gpio_in(dev, bcm2838_gic_set_irq, GIC_NUM_IRQS); + + /* Pass through outbound IRQ lines from the GIC */ + qdev_pass_gpios(DEVICE(&s->gic), DEVICE(&s->peripherals), NULL); +} + +static void bcm2838_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + BCM283XBaseClass *bc_base = BCM283X_BASE_CLASS(oc); + + bc_base->cpu_type = ARM_CPU_TYPE_NAME("cortex-a72"); + bc_base->core_count = BCM283X_NCPUS; + bc_base->peri_base = 0xfe000000; + bc_base->ctrl_base = 0xff800000; + bc_base->clusterid = 0x0; + dc->realize = bcm2838_realize; +} + +static const TypeInfo bcm2838_type = { + .name = TYPE_BCM2838, + .parent = TYPE_BCM283X_BASE, + .instance_size = sizeof(BCM2838State), + .instance_init = bcm2838_init, + .class_size = sizeof(BCM283XBaseClass), + .class_init = bcm2838_class_init, +}; + +static void bcm2838_register_types(void) +{ + type_register_static(&bcm2838_type); +} + +type_init(bcm2838_register_types); diff --git a/hw/arm/bcm2838_peripherals.c b/hw/arm/bcm2838_peripherals.c new file mode 100644 index 00000000000..e28bef4a37d --- /dev/null +++ b/hw/arm/bcm2838_peripherals.c @@ -0,0 +1,224 @@ +/* + * BCM2838 peripherals emulation + * + * Copyright (C) 2022 Ovchinnikov Vitalii + * + * 
SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/arm/raspi_platform.h" +#include "hw/arm/bcm2838_peripherals.h" + +#define CLOCK_ISP_OFFSET 0xc11000 +#define CLOCK_ISP_SIZE 0x100 + +/* Lower peripheral base address on the VC (GPU) system bus */ +#define BCM2838_VC_PERI_LOW_BASE 0x7c000000 + +/* Capabilities for SD controller: no DMA, high-speed, default clocks etc. */ +#define BCM2835_SDHC_CAPAREG 0x52134b4 + +static void bcm2838_peripherals_init(Object *obj) +{ + BCM2838PeripheralState *s = BCM2838_PERIPHERALS(obj); + BCM2838PeripheralClass *bc = BCM2838_PERIPHERALS_GET_CLASS(obj); + BCMSocPeripheralBaseState *s_base = BCM_SOC_PERIPHERALS_BASE(obj); + + /* Lower memory region for peripheral devices (exported to the Soc) */ + memory_region_init(&s->peri_low_mr, obj, "bcm2838-peripherals", + bc->peri_low_size); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->peri_low_mr); + + /* Extended Mass Media Controller 2 */ + object_initialize_child(obj, "emmc2", &s->emmc2, TYPE_SYSBUS_SDHCI); + + /* GPIO */ + object_initialize_child(obj, "gpio", &s->gpio, TYPE_BCM2838_GPIO); + + object_property_add_const_link(OBJECT(&s->gpio), "sdbus-sdhci", + OBJECT(&s_base->sdhci.sdbus)); + object_property_add_const_link(OBJECT(&s->gpio), "sdbus-sdhost", + OBJECT(&s_base->sdhost.sdbus)); + + object_initialize_child(obj, "mmc_irq_orgate", &s->mmc_irq_orgate, + TYPE_OR_IRQ); + object_property_set_int(OBJECT(&s->mmc_irq_orgate), "num-lines", 2, + &error_abort); + + object_initialize_child(obj, "dma_7_8_irq_orgate", &s->dma_7_8_irq_orgate, + TYPE_OR_IRQ); + object_property_set_int(OBJECT(&s->dma_7_8_irq_orgate), "num-lines", 2, + &error_abort); + + object_initialize_child(obj, "dma_9_10_irq_orgate", &s->dma_9_10_irq_orgate, + TYPE_OR_IRQ); + object_property_set_int(OBJECT(&s->dma_9_10_irq_orgate), "num-lines", 2, + &error_abort); +} + +static void bcm2838_peripherals_realize(DeviceState *dev, Error **errp) +{ + DeviceState *mmc_irq_orgate; + DeviceState *dma_7_8_irq_orgate; + DeviceState *dma_9_10_irq_orgate; + MemoryRegion *mphi_mr; + BCM2838PeripheralState *s = BCM2838_PERIPHERALS(dev); + BCMSocPeripheralBaseState *s_base = BCM_SOC_PERIPHERALS_BASE(dev); + int n; + + bcm_soc_peripherals_common_realize(dev, errp); + + /* Map lower peripherals into the GPU address space */ + memory_region_init_alias(&s->peri_low_mr_alias, OBJECT(s), + "bcm2838-peripherals", &s->peri_low_mr, 0, + memory_region_size(&s->peri_low_mr)); + memory_region_add_subregion_overlap(&s_base->gpu_bus_mr, + BCM2838_VC_PERI_LOW_BASE, + &s->peri_low_mr_alias, 1); + + /* Extended Mass Media Controller 2 */ + object_property_set_uint(OBJECT(&s->emmc2), "sd-spec-version", 3, + &error_abort); + object_property_set_uint(OBJECT(&s->emmc2), "capareg", + BCM2835_SDHC_CAPAREG, &error_abort); + object_property_set_bool(OBJECT(&s->emmc2), "pending-insert-quirk", true, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->emmc2), errp)) { + return; + } + + memory_region_add_subregion(&s_base->peri_mr, EMMC2_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->emmc2), + 0)); + + /* According to DTS, EMMC and EMMC2 share one irq */ + if (!qdev_realize(DEVICE(&s->mmc_irq_orgate), NULL, errp)) { + return; + } + + mmc_irq_orgate = DEVICE(&s->mmc_irq_orgate); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->emmc2), 0, + qdev_get_gpio_in(mmc_irq_orgate, 0)); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s_base->sdhci), 0, + qdev_get_gpio_in(mmc_irq_orgate, 1)); + + /* Connect EMMC and EMMC2 to the 
interrupt controller */ + qdev_connect_gpio_out(mmc_irq_orgate, 0, + qdev_get_gpio_in_named(DEVICE(&s_base->ic), + BCM2835_IC_GPU_IRQ, + INTERRUPT_ARASANSDIO)); + + /* Connect DMA 0-6 to the interrupt controller */ + for (n = 0; n < 7; n++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s_base->dma), n, + qdev_get_gpio_in_named(DEVICE(&s_base->ic), + BCM2835_IC_GPU_IRQ, + GPU_INTERRUPT_DMA0 + n)); + } + + /* According to DTS, DMA 7 and 8 share one irq */ + if (!qdev_realize(DEVICE(&s->dma_7_8_irq_orgate), NULL, errp)) { + return; + } + dma_7_8_irq_orgate = DEVICE(&s->dma_7_8_irq_orgate); + + /* Connect DMA 7-8 to the interrupt controller */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s_base->dma), 7, + qdev_get_gpio_in(dma_7_8_irq_orgate, 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s_base->dma), 8, + qdev_get_gpio_in(dma_7_8_irq_orgate, 1)); + + qdev_connect_gpio_out(dma_7_8_irq_orgate, 0, + qdev_get_gpio_in_named(DEVICE(&s_base->ic), + BCM2835_IC_GPU_IRQ, + GPU_INTERRUPT_DMA7_8)); + + /* According to DTS, DMA 9 and 10 share one irq */ + if (!qdev_realize(DEVICE(&s->dma_9_10_irq_orgate), NULL, errp)) { + return; + } + dma_9_10_irq_orgate = DEVICE(&s->dma_9_10_irq_orgate); + + /* Connect DMA 9-10 to the interrupt controller */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s_base->dma), 9, + qdev_get_gpio_in(dma_9_10_irq_orgate, 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s_base->dma), 10, + qdev_get_gpio_in(dma_9_10_irq_orgate, 1)); + + qdev_connect_gpio_out(dma_9_10_irq_orgate, 0, + qdev_get_gpio_in_named(DEVICE(&s_base->ic), + BCM2835_IC_GPU_IRQ, + GPU_INTERRUPT_DMA9_10)); + + /* Connect DMA 11-14 to the interrupt controller */ + for (n = 11; n < 15; n++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s_base->dma), n, + qdev_get_gpio_in_named(DEVICE(&s_base->ic), + BCM2835_IC_GPU_IRQ, + GPU_INTERRUPT_DMA11 + n + - 11)); + } + + /* + * Connect DMA 15 to the interrupt controller, it is physically removed + * from other DMA channels and exclusively used by the GPU + */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s_base->dma), 15, + qdev_get_gpio_in_named(DEVICE(&s_base->ic), + BCM2835_IC_GPU_IRQ, + GPU_INTERRUPT_DMA15)); + + /* Map MPHI to BCM2838 memory map */ + mphi_mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s_base->mphi), 0); + memory_region_init_alias(&s->mphi_mr_alias, OBJECT(s), "mphi", mphi_mr, 0, + BCM2838_MPHI_SIZE); + memory_region_add_subregion(&s_base->peri_mr, BCM2838_MPHI_OFFSET, + &s->mphi_mr_alias); + + create_unimp(s_base, &s->clkisp, "bcm2835-clkisp", CLOCK_ISP_OFFSET, + CLOCK_ISP_SIZE); + + /* GPIO */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { + return; + } + memory_region_add_subregion( + &s_base->peri_mr, GPIO_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gpio), 0)); + + object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->gpio), "sd-bus"); + + /* BCM2838 RPiVid ASB must be mapped to prevent kernel crash */ + create_unimp(s_base, &s->asb, "bcm2838-asb", BRDG_OFFSET, 0x24); +} + +static void bcm2838_peripherals_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + BCM2838PeripheralClass *bc = BCM2838_PERIPHERALS_CLASS(oc); + BCMSocPeripheralBaseClass *bc_base = BCM_SOC_PERIPHERALS_BASE_CLASS(oc); + + bc->peri_low_size = 0x2000000; + bc_base->peri_size = 0x1800000; + dc->realize = bcm2838_peripherals_realize; +} + +static const TypeInfo bcm2838_peripherals_type_info = { + .name = TYPE_BCM2838_PERIPHERALS, + .parent = TYPE_BCM_SOC_PERIPHERALS_BASE, + .instance_size = sizeof(BCM2838PeripheralState), + .instance_init = bcm2838_peripherals_init, + .class_size = 
sizeof(BCM2838PeripheralClass), + .class_init = bcm2838_peripherals_class_init, +}; + +static void bcm2838_peripherals_register_types(void) +{ + type_register_static(&bcm2838_peripherals_type_info); +} + +type_init(bcm2838_peripherals_register_types) diff --git a/hw/arm/collie.c b/hw/arm/collie.c index a0ad1b8dc7e..eaa5c52d45a 100644 --- a/hw/arm/collie.c +++ b/hw/arm/collie.c @@ -17,7 +17,6 @@ #include "hw/arm/boot.h" #include "hw/block/flash.h" #include "exec/address-spaces.h" -#include "cpu.h" #include "qom/object.h" #include "qemu/error-report.h" diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c index 29146f50181..b976727eefd 100644 --- a/hw/arm/cubieboard.c +++ b/hw/arm/cubieboard.c @@ -52,12 +52,6 @@ static void cubieboard_init(MachineState *machine) exit(1); } - /* Only allow Cortex-A8 for this board */ - if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a8")) != 0) { - error_report("This board can only be used with cortex-a8 CPU"); - exit(1); - } - a10 = AW_A10(object_new(TYPE_AW_A10)); object_property_add_child(OBJECT(machine), "soc", OBJECT(a10)); object_unref(OBJECT(a10)); @@ -114,8 +108,14 @@ static void cubieboard_init(MachineState *machine) static void cubieboard_machine_init(MachineClass *mc) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a8"), + NULL + }; + mc->desc = "cubietech cubieboard (Cortex-A8)"; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a8"); + mc->valid_cpu_types = valid_cpu_types; mc->default_ram_size = 1 * GiB; mc->init = cubieboard_init; mc->block_default_type = IF_IDE; diff --git a/hw/arm/exynos4210.c b/hw/arm/exynos4210.c index de39fb0ece8..e3f1de26317 100644 --- a/hw/arm/exynos4210.c +++ b/hw/arm/exynos4210.c @@ -23,6 +23,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "exec/tswap.h" #include "cpu.h" #include "hw/cpu/a9mpcore.h" #include "hw/irq.h" @@ -35,6 +36,7 @@ #include "hw/arm/exynos4210.h" #include "hw/sd/sdhci.h" #include "hw/usb/hcd-ehci.h" +#include "target/arm/cpu-qom.h" #define EXYNOS4210_CHIPID_ADDR 0x10000000 @@ -554,6 +556,7 @@ static void exynos4210_realize(DeviceState *socdev, Error **errp) for (n = 0; n < EXYNOS4210_NCPUS; n++) { Object *cpuobj = object_new(ARM_CPU_TYPE_NAME("cortex-a9")); + object_property_add_child(OBJECT(s), "cpu[*]", cpuobj); /* By default A9 CPUs have EL3 enabled. This board does not currently * support EL3 so the CPU EL3 property is disabled before realization. 
*/ @@ -766,11 +769,15 @@ static void exynos4210_realize(DeviceState *socdev, Error **errp) } /*** Display controller (FIMD) ***/ - sysbus_create_varargs("exynos4210.fimd", EXYNOS4210_FIMD0_BASE_ADDR, - s->irq_table[exynos4210_get_irq(11, 0)], - s->irq_table[exynos4210_get_irq(11, 1)], - s->irq_table[exynos4210_get_irq(11, 2)], - NULL); + dev = qdev_new("exynos4210.fimd"); + object_property_set_link(OBJECT(dev), "framebuffer-memory", + OBJECT(system_mem), &error_fatal); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, EXYNOS4210_FIMD0_BASE_ADDR); + for (n = 0; n < 3; n++) { + sysbus_connect_irq(busdev, n, s->irq_table[exynos4210_get_irq(11, n)]); + } sysbus_create_simple(TYPE_EXYNOS4210_EHCI, EXYNOS4210_EHCI_BASE_ADDR, s->irq_table[exynos4210_get_irq(28, 3)]); diff --git a/hw/arm/exynos4_boards.c b/hw/arm/exynos4_boards.c index b0e13eb4f00..2410e2a28e8 100644 --- a/hw/arm/exynos4_boards.c +++ b/hw/arm/exynos4_boards.c @@ -34,6 +34,7 @@ #include "hw/qdev-properties.h" #include "hw/boards.h" #include "hw/irq.h" +#include "target/arm/cpu-qom.h" #define SMDK_LAN9118_BASE_ADDR 0x05000000 @@ -76,10 +77,8 @@ static void lan9215_init(uint32_t base, qemu_irq irq) SysBusDevice *s; /* This should be a 9215 but the 9118 is close enough */ - if (nd_table[0].used) { - qemu_check_nic_model(&nd_table[0], "lan9118"); - dev = qdev_new(TYPE_LAN9118); - qdev_set_nic_properties(dev, &nd_table[0]); + dev = qemu_create_nic_device(TYPE_LAN9118, true, NULL); + if (dev) { qdev_prop_set_uint32(dev, "mode_16bit", 1); s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); @@ -150,12 +149,18 @@ static void smdkc210_init(MachineState *machine) arm_load_kernel(s->soc.cpu[0], machine, &exynos4_board_binfo); } +static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a9"), + NULL +}; + static void nuri_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->desc = "Samsung NURI board (Exynos4210)"; mc->init = nuri_init; + mc->valid_cpu_types = valid_cpu_types; mc->max_cpus = EXYNOS4210_NCPUS; mc->min_cpus = EXYNOS4210_NCPUS; mc->default_cpus = EXYNOS4210_NCPUS; @@ -174,6 +179,7 @@ static void smdkc210_class_init(ObjectClass *oc, void *data) mc->desc = "Samsung SMDKC210 board (Exynos4210)"; mc->init = smdkc210_init; + mc->valid_cpu_types = valid_cpu_types; mc->max_cpus = EXYNOS4210_NCPUS; mc->min_cpus = EXYNOS4210_NCPUS; mc->default_cpus = EXYNOS4210_NCPUS; diff --git a/hw/arm/fsl-imx25.c b/hw/arm/fsl-imx25.c index 9aabbf7f587..5ed87edfe4f 100644 --- a/hw/arm/fsl-imx25.c +++ b/hw/arm/fsl-imx25.c @@ -28,6 +28,7 @@ #include "sysemu/sysemu.h" #include "hw/qdev-properties.h" #include "chardev/char.h" +#include "target/arm/cpu-qom.h" #define IMX25_ESDHC_CAPABILITIES 0x07e20000 @@ -81,7 +82,6 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp) { FslIMX25State *s = FSL_IMX25(dev); uint8_t i; - Error *err = NULL; if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) { return; @@ -171,7 +171,7 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp) object_property_set_uint(OBJECT(&s->fec), "phy-num", s->phy_num, &error_abort); - qdev_set_nic_properties(DEVICE(&s->fec), &nd_table[0]); + qemu_configure_nic_device(DEVICE(&s->fec), true, NULL); if (!sysbus_realize(SYS_BUS_DEVICE(&s->fec), errp)) { return; @@ -281,28 +281,22 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp) FSL_IMX25_WDT_IRQ)); /* initialize 2 x 16 KB ROM */ - memory_region_init_rom(&s->rom[0], OBJECT(dev), "imx25.rom0", 
- FSL_IMX25_ROM0_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->rom[0], OBJECT(dev), "imx25.rom0", + FSL_IMX25_ROM0_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX25_ROM0_ADDR, &s->rom[0]); - memory_region_init_rom(&s->rom[1], OBJECT(dev), "imx25.rom1", - FSL_IMX25_ROM1_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->rom[1], OBJECT(dev), "imx25.rom1", + FSL_IMX25_ROM1_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX25_ROM1_ADDR, &s->rom[1]); /* initialize internal RAM (128 KB) */ - memory_region_init_ram(&s->iram, NULL, "imx25.iram", FSL_IMX25_IRAM_SIZE, - &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->iram, NULL, "imx25.iram", + FSL_IMX25_IRAM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX25_IRAM_ADDR, diff --git a/hw/arm/fsl-imx31.c b/hw/arm/fsl-imx31.c index def27bb9136..4b8d9b8e4fe 100644 --- a/hw/arm/fsl-imx31.c +++ b/hw/arm/fsl-imx31.c @@ -26,6 +26,7 @@ #include "exec/address-spaces.h" #include "hw/qdev-properties.h" #include "chardev/char.h" +#include "target/arm/cpu-qom.h" static void fsl_imx31_init(Object *obj) { @@ -63,7 +64,6 @@ static void fsl_imx31_realize(DeviceState *dev, Error **errp) { FslIMX31State *s = FSL_IMX31(dev); uint16_t i; - Error *err = NULL; if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) { return; @@ -188,30 +188,24 @@ static void fsl_imx31_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt), 0, FSL_IMX31_WDT_ADDR); /* On a real system, the first 16k is a `secure boot rom' */ - memory_region_init_rom(&s->secure_rom, OBJECT(dev), "imx31.secure_rom", - FSL_IMX31_SECURE_ROM_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->secure_rom, OBJECT(dev), "imx31.secure_rom", + FSL_IMX31_SECURE_ROM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX31_SECURE_ROM_ADDR, &s->secure_rom); /* There is also a 16k ROM */ - memory_region_init_rom(&s->rom, OBJECT(dev), "imx31.rom", - FSL_IMX31_ROM_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->rom, OBJECT(dev), "imx31.rom", + FSL_IMX31_ROM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX31_ROM_ADDR, &s->rom); /* initialize internal RAM (16 KB) */ - memory_region_init_ram(&s->iram, NULL, "imx31.iram", FSL_IMX31_IRAM_SIZE, - &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->iram, NULL, "imx31.iram", + FSL_IMX31_IRAM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX31_IRAM_ADDR, diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c index 7dc42cbfe64..85748cb233e 100644 --- a/hw/arm/fsl-imx6.c +++ b/hw/arm/fsl-imx6.c @@ -22,6 +22,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "hw/arm/fsl-imx6.h" +#include "hw/misc/unimp.h" #include "hw/usb/imx-usb-phy.h" #include "hw/boards.h" #include "hw/qdev-properties.h" @@ -29,6 +30,7 @@ #include "chardev/char.h" #include "qemu/error-report.h" #include "qemu/module.h" +#include "target/arm/cpu-qom.h" #define IMX6_ESDHC_CAPABILITIES 0x057834b4 @@ -102,6 +104,8 @@ static void fsl_imx6_init(Object *obj) object_initialize_child(obj, "eth", &s->eth, TYPE_IMX_ENET); + + object_initialize_child(obj, "pcie", &s->pcie, TYPE_DESIGNWARE_PCIE_HOST); } static void fsl_imx6_realize(DeviceState *dev, Error **errp) @@ -109,7 +113,7 @@ static void 
fsl_imx6_realize(DeviceState *dev, Error **errp) MachineState *ms = MACHINE(qdev_get_machine()); FslIMX6State *s = FSL_IMX6(dev); uint16_t i; - Error *err = NULL; + qemu_irq irq; unsigned int smp_cpus = ms->smp.cpus; if (smp_cpus > FSL_IMX6_NUM_CPUS) { @@ -155,6 +159,9 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_FIQ)); } + /* L2 cache controller */ + sysbus_create_simple("l2x0", FSL_IMX6_PL310_ADDR, NULL); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ccm), errp)) { return; } @@ -381,7 +388,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) object_property_set_uint(OBJECT(&s->eth), "phy-num", s->phy_num, &error_abort); - qdev_set_nic_properties(DEVICE(&s->eth), &nd_table[0]); + qemu_configure_nic_device(DEVICE(&s->eth), true, NULL); if (!sysbus_realize(SYS_BUS_DEVICE(&s->eth), errp)) { return; } @@ -422,31 +429,46 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) FSL_IMX6_WDOGn_IRQ[i])); } + /* + * PCIe + */ + sysbus_realize(SYS_BUS_DEVICE(&s->pcie), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie), 0, FSL_IMX6_PCIe_REG_ADDR); + + irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE1_IRQ); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 0, irq); + irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE2_IRQ); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 1, irq); + irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE3_IRQ); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 2, irq); + irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE4_IRQ); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3, irq); + + /* + * PCIe PHY + */ + create_unimplemented_device("pcie-phy", FSL_IMX6_PCIe_ADDR, + FSL_IMX6_PCIe_SIZE); + /* ROM memory */ - memory_region_init_rom(&s->rom, OBJECT(dev), "imx6.rom", - FSL_IMX6_ROM_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->rom, OBJECT(dev), "imx6.rom", + FSL_IMX6_ROM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX6_ROM_ADDR, &s->rom); /* CAAM memory */ - memory_region_init_rom(&s->caam, OBJECT(dev), "imx6.caam", - FSL_IMX6_CAAM_MEM_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->caam, OBJECT(dev), "imx6.caam", + FSL_IMX6_CAAM_MEM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX6_CAAM_MEM_ADDR, &s->caam); /* OCRAM memory */ - memory_region_init_ram(&s->ocram, NULL, "imx6.ocram", FSL_IMX6_OCRAM_SIZE, - &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->ocram, NULL, "imx6.ocram", + FSL_IMX6_OCRAM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX6_OCRAM_ADDR, diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c index e37b69a5e16..19f443570bf 100644 --- a/hw/arm/fsl-imx6ul.c +++ b/hw/arm/fsl-imx6ul.c @@ -25,6 +25,7 @@ #include "sysemu/sysemu.h" #include "qemu/error-report.h" #include "qemu/module.h" +#include "target/arm/cpu-qom.h" #define NAME_SIZE 20 @@ -192,6 +193,36 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) create_unimplemented_device("a7mpcore-dap", FSL_IMX6UL_A7MPCORE_DAP_ADDR, FSL_IMX6UL_A7MPCORE_DAP_SIZE); + /* + * MMDC + */ + create_unimplemented_device("a7mpcore-mmdc", FSL_IMX6UL_MMDC_CFG_ADDR, + FSL_IMX6UL_MMDC_CFG_SIZE); + + /* + * OCOTP + */ + create_unimplemented_device("a7mpcore-ocotp", FSL_IMX6UL_OCOTP_CTRL_ADDR, + FSL_IMX6UL_OCOTP_CTRL_SIZE); + + /* + * QSPI + */ + 
create_unimplemented_device("a7mpcore-qspi", FSL_IMX6UL_QSPI_ADDR, + FSL_IMX6UL_QSPI_SIZE); + + /* + * CAAM + */ + create_unimplemented_device("a7mpcore-qspi", FSL_IMX6UL_CAAM_ADDR, + FSL_IMX6UL_CAAM_SIZE); + + /* + * USBMISC + */ + create_unimplemented_device("a7mpcore-usbmisc", FSL_IMX6UL_USBO2_USBMISC_ADDR, + FSL_IMX6UL_USBO2_USBMISC_SIZE); + /* * GPTs */ @@ -442,7 +473,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) s->phy_num[i], &error_abort); object_property_set_uint(OBJECT(&s->eth[i]), "tx-ring-num", FSL_IMX6UL_ETH_NUM_TX_RINGS, &error_abort); - qdev_set_nic_properties(DEVICE(&s->eth[i]), &nd_table[i]); + qemu_configure_nic_device(DEVICE(&s->eth[i]), true, NULL); sysbus_realize(SYS_BUS_DEVICE(&s->eth[i]), &error_abort); sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth[i]), 0, diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c index 474cfdc87c6..9f2ef345557 100644 --- a/hw/arm/fsl-imx7.c +++ b/hw/arm/fsl-imx7.c @@ -26,6 +26,7 @@ #include "sysemu/sysemu.h" #include "qemu/error-report.h" #include "qemu/module.h" +#include "target/arm/cpu-qom.h" #define NAME_SIZE 20 @@ -446,7 +447,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) s->phy_num[i], &error_abort); object_property_set_uint(OBJECT(&s->eth[i]), "tx-ring-num", FSL_IMX7_ETH_NUM_TX_RINGS, &error_abort); - qdev_set_nic_properties(DEVICE(&s->eth[i]), &nd_table[i]); + qemu_configure_nic_device(DEVICE(&s->eth[i]), true, NULL); sysbus_realize(SYS_BUS_DEVICE(&s->eth[i]), &error_abort); sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth[i]), 0, FSL_IMX7_ENETn_ADDR[i]); diff --git a/hw/arm/gumstix.c b/hw/arm/gumstix.c index 2ca4140c9fc..91462691531 100644 --- a/hw/arm/gumstix.c +++ b/hw/arm/gumstix.c @@ -44,7 +44,6 @@ #include "hw/boards.h" #include "exec/address-spaces.h" #include "sysemu/qtest.h" -#include "cpu.h" #define CONNEX_FLASH_SIZE (16 * MiB) #define CONNEX_RAM_SIZE (64 * MiB) @@ -74,8 +73,7 @@ static void connex_init(MachineState *machine) FLASH_SECTOR_SIZE, 2, 0, 0, 0, 0, 0); /* Interrupt line of NIC is connected to GPIO line 36 */ - smc91c111_init(&nd_table[0], 0x04000300, - qdev_get_gpio_in(cpu->gpio, 36)); + smc91c111_init(0x04000300, qdev_get_gpio_in(cpu->gpio, 36)); } static void verdex_init(MachineState *machine) @@ -98,8 +96,7 @@ static void verdex_init(MachineState *machine) FLASH_SECTOR_SIZE, 2, 0, 0, 0, 0, 0); /* Interrupt line of NIC is connected to GPIO line 99 */ - smc91c111_init(&nd_table[0], 0x04000300, - qdev_get_gpio_in(cpu->gpio, 99)); + smc91c111_init(0x04000300, qdev_get_gpio_in(cpu->gpio, 99)); } static void connex_class_init(ObjectClass *oc, void *data) @@ -109,6 +106,7 @@ static void connex_class_init(ObjectClass *oc, void *data) mc->desc = "Gumstix Connex (PXA255)"; mc->init = connex_init; mc->ignore_memory_transaction_failures = true; + mc->deprecation_reason = "machine is old and unmaintained"; } static const TypeInfo connex_type = { @@ -124,6 +122,7 @@ static void verdex_class_init(ObjectClass *oc, void *data) mc->desc = "Gumstix Verdex Pro XL6P COMs (PXA270)"; mc->init = verdex_init; mc->ignore_memory_transaction_failures = true; + mc->deprecation_reason = "machine is old and unmaintained"; mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0"); } diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c index f12aacea6b8..c71b1a8db32 100644 --- a/hw/arm/highbank.c +++ b/hw/arm/highbank.c @@ -30,12 +30,13 @@ #include "hw/boards.h" #include "qemu/error-report.h" #include "hw/char/pl011.h" -#include "hw/ide/ahci.h" +#include "hw/ide/ahci-sysbus.h" #include "hw/cpu/a9mpcore.h" #include 
"hw/cpu/a15mpcore.h" #include "qemu/log.h" #include "qom/object.h" #include "cpu.h" +#include "target/arm/cpu-qom.h" #define SMP_BOOT_ADDR 0x100 #define SMP_BOOT_REG 0x40 @@ -112,7 +113,7 @@ static const VMStateDescription vmstate_highbank_regs = { .name = "highbank-regs", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, HighbankRegsState, NUM_REGS), VMSTATE_END_OF_LIST(), }, @@ -208,6 +209,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id) cpuobj = object_new(machine->cpu_type); cpu = ARM_CPU(cpuobj); + object_property_add_child(OBJECT(machine), "cpu[*]", cpuobj); object_property_set_int(cpuobj, "psci-conduit", QEMU_PSCI_CONDUIT_SMC, &error_abort); @@ -296,19 +298,17 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id) sysbus_create_simple(TYPE_SYSBUS_AHCI, 0xffe08000, pic[83]); - if (nd_table[0].used) { - qemu_check_nic_model(&nd_table[0], "xgmac"); - dev = qdev_new("xgmac"); - qdev_set_nic_properties(dev, &nd_table[0]); + dev = qemu_create_nic_device("xgmac", true, NULL); + if (dev) { sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xfff50000); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[77]); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, pic[78]); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, pic[79]); + } - qemu_check_nic_model(&nd_table[1], "xgmac"); - dev = qdev_new("xgmac"); - qdev_set_nic_properties(dev, &nd_table[1]); + dev = qemu_create_nic_device("xgmac", true, NULL); + if (dev) { sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xfff51000); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[80]); @@ -343,10 +343,15 @@ static void midway_init(MachineState *machine) static void highbank_class_init(ObjectClass *oc, void *data) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a9"), + NULL + }; MachineClass *mc = MACHINE_CLASS(oc); mc->desc = "Calxeda Highbank (ECX-1000)"; mc->init = highbank_init; + mc->valid_cpu_types = valid_cpu_types; mc->block_default_type = IF_IDE; mc->units_per_default_bus = 1; mc->max_cpus = 4; @@ -362,10 +367,15 @@ static const TypeInfo highbank_type = { static void midway_class_init(ObjectClass *oc, void *data) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a15"), + NULL + }; MachineClass *mc = MACHINE_CLASS(oc); mc->desc = "Calxeda Midway (ECX-2000)"; mc->init = midway_init; + mc->valid_cpu_types = valid_cpu_types; mc->block_default_type = IF_IDE; mc->units_per_default_bus = 1; mc->max_cpus = 4; diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c index d176e9af7ee..feb0dd63df5 100644 --- a/hw/arm/integratorcp.c +++ b/hw/arm/integratorcp.c @@ -9,7 +9,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "cpu.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "hw/boards.h" @@ -28,6 +27,7 @@ #include "hw/sd/sd.h" #include "qom/object.h" #include "audio/audio.h" +#include "target/arm/cpu-qom.h" #define TYPE_INTEGRATOR_CM "integrator_core" OBJECT_DECLARE_SIMPLE_TYPE(IntegratorCMState, INTEGRATOR_CM) @@ -63,7 +63,7 @@ static const VMStateDescription vmstate_integratorcm = { .name = "integratorcm", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cm_osc, IntegratorCMState), VMSTATE_UINT32(cm_ctrl, IntegratorCMState), VMSTATE_UINT32(cm_lock, 
IntegratorCMState), @@ -291,12 +291,9 @@ static void integratorcm_realize(DeviceState *d, Error **errp) { IntegratorCMState *s = INTEGRATOR_CM(d); SysBusDevice *dev = SYS_BUS_DEVICE(d); - Error *local_err = NULL; - memory_region_init_ram(&s->flash, OBJECT(d), "integrator.flash", 0x100000, - &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram(&s->flash, OBJECT(d), "integrator.flash", + 0x100000, errp)) { return; } @@ -346,7 +343,7 @@ static const VMStateDescription vmstate_icp_pic = { .name = "icp_pic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, icp_pic_state), VMSTATE_UINT32(irq_enabled, icp_pic_state), VMSTATE_UINT32(fiq_enabled, icp_pic_state), @@ -488,7 +485,7 @@ static const VMStateDescription vmstate_icp_control = { .name = "icp_control", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(intreg_state, ICPCtrlRegsState), VMSTATE_END_OF_LIST() } @@ -669,10 +666,16 @@ static void integratorcp_init(MachineState *machine) sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x1d000000); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[25]); - if (nd_table[0].used) - smc91c111_init(&nd_table[0], 0xc8000000, pic[27]); + if (qemu_find_nic_info("smc91c111", true, NULL)) { + smc91c111_init(0xc8000000, pic[27]); + } - sysbus_create_simple("pl110", 0xc0000000, pic[22]); + dev = qdev_new("pl110"); + object_property_set_link(OBJECT(dev), "framebuffer-memory", + OBJECT(address_space_mem), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xc0000000); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[22]); integrator_binfo.ram_size = ram_size; arm_load_kernel(cpu, machine, &integrator_binfo); diff --git a/hw/arm/kzm.c b/hw/arm/kzm.c index 9be91ebeaaf..2ccd6f8a768 100644 --- a/hw/arm/kzm.c +++ b/hw/arm/kzm.c @@ -113,8 +113,8 @@ static void kzm_init(MachineState *machine) alias_offset += ram[i].size; } - if (nd_table[0].used) { - lan9118_init(&nd_table[0], KZM_LAN9118_ADDR, + if (qemu_find_nic_info("lan9118", true, NULL)) { + lan9118_init(KZM_LAN9118_ADDR, qdev_get_gpio_in(DEVICE(&s->soc.avic), 52)); } diff --git a/hw/arm/mainstone.c b/hw/arm/mainstone.c index 68329c46178..3a6c22fddbc 100644 --- a/hw/arm/mainstone.c +++ b/hw/arm/mainstone.c @@ -23,7 +23,6 @@ #include "hw/block/flash.h" #include "hw/sysbus.h" #include "exec/address-spaces.h" -#include "cpu.h" /* Device addresses */ #define MST_FPGA_PHYS 0x08000000 @@ -153,8 +152,7 @@ static void mainstone_common_init(MachineState *machine, qdev_get_gpio_in(mst_irq, S1_IRQ), qdev_get_gpio_in(mst_irq, S1_CD_IRQ)); - smc91c111_init(&nd_table[0], MST_ETH_PHYS, - qdev_get_gpio_in(mst_irq, ETHERNET_IRQ)); + smc91c111_init(MST_ETH_PHYS, qdev_get_gpio_in(mst_irq, ETHERNET_IRQ)); mainstone_binfo.board_id = arm_id; arm_load_kernel(mpu->cpu, machine, &mainstone_binfo); @@ -171,6 +169,7 @@ static void mainstone2_machine_init(MachineClass *mc) mc->init = mainstone_init; mc->ignore_memory_transaction_failures = true; mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5"); + mc->deprecation_reason = "machine is old and unmaintained"; } DEFINE_MACHINE("mainstone", mainstone2_machine_init) diff --git a/hw/arm/meson.build b/hw/arm/meson.build index 68245d3ad10..6808135c1f7 100644 --- a/hw/arm/meson.build +++ b/hw/arm/meson.build @@ -8,24 +8,16 @@ arm_ss.add(when: 'CONFIG_HIGHBANK', if_true: files('highbank.c')) 
arm_ss.add(when: 'CONFIG_INTEGRATOR', if_true: files('integratorcp.c')) arm_ss.add(when: 'CONFIG_MAINSTONE', if_true: files('mainstone.c')) arm_ss.add(when: 'CONFIG_MICROBIT', if_true: files('microbit.c')) +arm_ss.add(when: 'CONFIG_MPS3R', if_true: files('mps3r.c')) arm_ss.add(when: 'CONFIG_MUSICPAL', if_true: files('musicpal.c')) -arm_ss.add(when: 'CONFIG_NETDUINO2', if_true: files('netduino2.c')) arm_ss.add(when: 'CONFIG_NETDUINOPLUS2', if_true: files('netduinoplus2.c')) arm_ss.add(when: 'CONFIG_OLIMEX_STM32_H405', if_true: files('olimex-stm32-h405.c')) arm_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx.c', 'npcm7xx_boards.c')) arm_ss.add(when: 'CONFIG_NSERIES', if_true: files('nseries.c')) -arm_ss.add(when: 'CONFIG_SX1', if_true: files('omap_sx1.c')) -arm_ss.add(when: 'CONFIG_CHEETAH', if_true: files('palm.c')) -arm_ss.add(when: 'CONFIG_GUMSTIX', if_true: files('gumstix.c')) -arm_ss.add(when: 'CONFIG_SPITZ', if_true: files('spitz.c')) -arm_ss.add(when: 'CONFIG_Z2', if_true: files('z2.c')) arm_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview.c')) arm_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa-ref.c')) arm_ss.add(when: 'CONFIG_STELLARIS', if_true: files('stellaris.c')) arm_ss.add(when: 'CONFIG_STM32VLDISCOVERY', if_true: files('stm32vldiscovery.c')) -arm_ss.add(when: 'CONFIG_COLLIE', if_true: files('collie.c')) -arm_ss.add(when: 'CONFIG_VERSATILE', if_true: files('versatilepb.c')) -arm_ss.add(when: 'CONFIG_VEXPRESS', if_true: files('vexpress.c')) arm_ss.add(when: 'CONFIG_ZYNQ', if_true: files('xilinx_zynq.c')) arm_ss.add(when: 'CONFIG_SABRELITE', if_true: files('sabrelite.c')) @@ -33,15 +25,17 @@ arm_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m.c')) arm_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210.c')) arm_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx.c', 'pxa2xx_gpio.c', 'pxa2xx_pic.c')) arm_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic.c')) -arm_ss.add(when: 'CONFIG_OMAP', if_true: files('omap1.c', 'omap2.c')) -arm_ss.add(when: 'CONFIG_STRONGARM', if_true: files('strongarm.c')) +arm_ss.add(when: 'CONFIG_OMAP', if_true: files('omap1.c')) arm_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('allwinner-a10.c', 'cubieboard.c')) arm_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3.c', 'orangepi.c')) arm_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40.c', 'bananapi_m2u.c')) arm_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2836.c', 'raspi.c')) +arm_ss.add(when: ['CONFIG_RASPI', 'TARGET_AARCH64'], if_true: files('bcm2838.c', 'raspi4b.c')) arm_ss.add(when: 'CONFIG_STM32F100_SOC', if_true: files('stm32f100_soc.c')) arm_ss.add(when: 'CONFIG_STM32F205_SOC', if_true: files('stm32f205_soc.c')) arm_ss.add(when: 'CONFIG_STM32F405_SOC', if_true: files('stm32f405_soc.c')) +arm_ss.add(when: 'CONFIG_B_L475E_IOT01A', if_true: files('b-l475e-iot01a.c')) +arm_ss.add(when: 'CONFIG_STM32L4X5_SOC', if_true: files('stm32l4x5_soc.c')) arm_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp.c', 'xlnx-zcu102.c')) arm_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal.c', 'xlnx-versal-virt.c')) arm_ss.add(when: 'CONFIG_FSL_IMX25', if_true: files('fsl-imx25.c', 'imx25_pdk.c')) @@ -67,8 +61,20 @@ arm_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_soc.c')) arm_ss.add(when: 'CONFIG_XEN', if_true: files('xen_arm.c')) system_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c')) +system_ss.add(when: 'CONFIG_CHEETAH', if_true: files('palm.c')) +system_ss.add(when: 
'CONFIG_COLLIE', if_true: files('collie.c')) system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4_boards.c')) +system_ss.add(when: 'CONFIG_GUMSTIX', if_true: files('gumstix.c')) +system_ss.add(when: 'CONFIG_NETDUINO2', if_true: files('netduino2.c')) +system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap2.c')) system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_peripherals.c')) +system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2838_peripherals.c')) +system_ss.add(when: 'CONFIG_SPITZ', if_true: files('spitz.c')) +system_ss.add(when: 'CONFIG_STRONGARM', if_true: files('strongarm.c')) +system_ss.add(when: 'CONFIG_SX1', if_true: files('omap_sx1.c')) system_ss.add(when: 'CONFIG_TOSA', if_true: files('tosa.c')) +system_ss.add(when: 'CONFIG_VERSATILE', if_true: files('versatilepb.c')) +system_ss.add(when: 'CONFIG_VEXPRESS', if_true: files('vexpress.c')) +system_ss.add(when: 'CONFIG_Z2', if_true: files('z2.c')) hw_arch += {'arm': arm_ss} diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c index 668db5ed619..a2d18afd792 100644 --- a/hw/arm/mps2-tz.c +++ b/hw/arm/mps2-tz.c @@ -503,14 +503,12 @@ static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque, const PPCExtraData *extradata) { SysBusDevice *s; - NICInfo *nd = &nd_table[0]; /* In hardware this is a LAN9220; the LAN9118 is software compatible * except that it doesn't support the checksum-offload feature. */ - qemu_check_nic_model(nd, "lan9118"); mms->lan9118 = qdev_new(TYPE_LAN9118); - qdev_set_nic_properties(mms->lan9118, nd); + qemu_configure_nic_device(mms->lan9118, true, NULL); s = SYS_BUS_DEVICE(mms->lan9118); sysbus_realize_and_unref(s, &error_fatal); @@ -528,7 +526,6 @@ static MemoryRegion *make_eth_usb(MPS2TZMachineState *mms, void *opaque, * irqs[] is the ethernet IRQ. */ SysBusDevice *s; - NICInfo *nd = &nd_table[0]; memory_region_init(&mms->eth_usb_container, OBJECT(mms), "mps2-tz-eth-usb-container", 0x200000); @@ -537,9 +534,8 @@ static MemoryRegion *make_eth_usb(MPS2TZMachineState *mms, void *opaque, * In hardware this is a LAN9220; the LAN9118 is software compatible * except that it doesn't support the checksum-offload feature. 
*/ - qemu_check_nic_model(nd, "lan9118"); mms->lan9118 = qdev_new(TYPE_LAN9118); - qdev_set_nic_properties(mms->lan9118, nd); + qemu_configure_nic_device(mms->lan9118, true, NULL); s = SYS_BUS_DEVICE(mms->lan9118); sysbus_realize_and_unref(s, &error_fatal); @@ -813,12 +809,6 @@ static void mps2tz_common_init(MachineState *machine) int num_ppcs; int i; - if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { - error_report("This board can only be used with CPU %s", - mc->default_cpu_type); - exit(1); - } - if (machine->ram_size != mc->default_ram_size) { char *sz = size_to_str(mc->default_ram_size); error_report("Invalid RAM size, should be %s", sz); @@ -1318,6 +1308,10 @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m33"), + NULL + }; mc->desc = "ARM MPS2 with AN505 FPGA image for Cortex-M33"; mc->default_cpus = 1; @@ -1325,6 +1319,7 @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data) mc->max_cpus = mc->default_cpus; mmc->fpga_type = FPGA_AN505; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41045050; mmc->sysclk_frq = 20 * 1000 * 1000; /* 20MHz */ mmc->apb_periph_frq = mmc->sysclk_frq; @@ -1347,6 +1342,10 @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m33"), + NULL + }; mc->desc = "ARM MPS2 with AN521 FPGA image for dual Cortex-M33"; mc->default_cpus = 2; @@ -1354,6 +1353,7 @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data) mc->max_cpus = mc->default_cpus; mmc->fpga_type = FPGA_AN521; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41045210; mmc->sysclk_frq = 20 * 1000 * 1000; /* 20MHz */ mmc->apb_periph_frq = mmc->sysclk_frq; @@ -1376,6 +1376,10 @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m33"), + NULL + }; mc->desc = "ARM MPS3 with AN524 FPGA image for dual Cortex-M33"; mc->default_cpus = 2; @@ -1383,6 +1387,7 @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data) mc->max_cpus = mc->default_cpus; mmc->fpga_type = FPGA_AN524; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41045240; mmc->sysclk_frq = 32 * 1000 * 1000; /* 32MHz */ mmc->apb_periph_frq = mmc->sysclk_frq; @@ -1410,6 +1415,10 @@ static void mps3tz_an547_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m55"), + NULL + }; mc->desc = "ARM MPS3 with AN547 FPGA image for Cortex-M55"; mc->default_cpus = 1; @@ -1417,6 +1426,7 @@ static void mps3tz_an547_class_init(ObjectClass *oc, void *data) mc->max_cpus = mc->default_cpus; mmc->fpga_type = FPGA_AN547; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m55"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41055470; mmc->sysclk_frq = 32 * 1000 * 1000; /* 32MHz */ mmc->apb_periph_frq = 25 * 1000 * 1000; /* 25MHz 
*/ diff --git a/hw/arm/mps2.c b/hw/arm/mps2.c index 292a180ad27..50919ee46d7 100644 --- a/hw/arm/mps2.c +++ b/hw/arm/mps2.c @@ -142,12 +142,6 @@ static void mps2_common_init(MachineState *machine) QList *oscclk; int i; - if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { - error_report("This board can only be used with CPU %s", - mc->default_cpu_type); - exit(1); - } - if (machine->ram_size != mc->default_ram_size) { char *sz = size_to_str(mc->default_ram_size); error_report("Invalid RAM size, should be %s", sz); @@ -462,7 +456,7 @@ static void mps2_common_init(MachineState *machine) /* In hardware this is a LAN9220; the LAN9118 is software compatible * except that it doesn't support the checksum-offload feature. */ - lan9118_init(&nd_table[0], mmc->ethernet_base, + lan9118_init(mmc->ethernet_base, qdev_get_gpio_in(armv7m, mmc->fpga_type == FPGA_AN511 ? 47 : 13)); @@ -484,10 +478,15 @@ static void mps2_an385_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m3"), + NULL + }; mc->desc = "ARM MPS2 with AN385 FPGA image for Cortex-M3"; mmc->fpga_type = FPGA_AN385; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41043850; mmc->psram_base = 0x21000000; mmc->ethernet_base = 0x40200000; @@ -498,10 +497,15 @@ static void mps2_an386_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m4"), + NULL + }; mc->desc = "ARM MPS2 with AN386 FPGA image for Cortex-M4"; mmc->fpga_type = FPGA_AN386; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m4"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41043860; mmc->psram_base = 0x21000000; mmc->ethernet_base = 0x40200000; @@ -512,10 +516,15 @@ static void mps2_an500_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m7"), + NULL + }; mc->desc = "ARM MPS2 with AN500 FPGA image for Cortex-M7"; mmc->fpga_type = FPGA_AN500; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m7"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41045000; mmc->psram_base = 0x60000000; mmc->ethernet_base = 0xa0000000; @@ -526,10 +535,15 @@ static void mps2_an511_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m3"), + NULL + }; mc->desc = "ARM MPS2 with AN511 DesignStart FPGA image for Cortex-M3"; mmc->fpga_type = FPGA_AN511; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41045110; mmc->psram_base = 0x21000000; mmc->ethernet_base = 0x40200000; diff --git a/hw/arm/mps3r.c b/hw/arm/mps3r.c new file mode 100644 index 00000000000..4d55a6564c6 --- /dev/null +++ b/hw/arm/mps3r.c @@ -0,0 +1,640 @@ +/* + * Arm MPS3 board emulation for Cortex-R-based FPGA images. + * (For M-profile images see mps2.c and mps2tz.c.) 
+ * + * Copyright (c) 2017 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* + * The MPS3 is an FPGA based dev board. This file handles FPGA images + * which use the Cortex-R CPUs. We model these separately from the + * M-profile images, because on M-profile the FPGA image is based on + * a "Subsystem for Embedded" which is similar to an SoC, whereas + * the R-profile FPGA images don't have that abstraction layer. + * + * We model the following FPGA images here: + * "mps3-an536" -- dual Cortex-R52 as documented in Arm Application Note AN536 + * + * Application Note AN536: + * https://developer.arm.com/documentation/dai0536/latest/ + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qapi/qmp/qlist.h" +#include "exec/address-spaces.h" +#include "cpu.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/or-irq.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/arm/boot.h" +#include "hw/arm/bsa.h" +#include "hw/char/cmsdk-apb-uart.h" +#include "hw/i2c/arm_sbcon_i2c.h" +#include "hw/intc/arm_gicv3.h" +#include "hw/misc/mps2-scc.h" +#include "hw/misc/mps2-fpgaio.h" +#include "hw/misc/unimp.h" +#include "hw/net/lan9118.h" +#include "hw/rtc/pl031.h" +#include "hw/ssi/pl022.h" +#include "hw/timer/cmsdk-apb-dualtimer.h" +#include "hw/watchdog/cmsdk-apb-watchdog.h" + +/* Define the layout of RAM and ROM in a board */ +typedef struct RAMInfo { + const char *name; + hwaddr base; + hwaddr size; + int mrindex; /* index into rams[]; -1 for the system RAM block */ + int flags; +} RAMInfo; + +/* + * The MPS3 DDR is 3GiB, but on a 32-bit host QEMU doesn't permit + * emulation of that much guest RAM, so artificially make it smaller. + */ +#if HOST_LONG_BITS == 32 +#define MPS3_DDR_SIZE (1 * GiB) +#else +#define MPS3_DDR_SIZE (3 * GiB) +#endif + +/* + * Flag values: + * IS_MAIN: this is the main machine RAM + * IS_ROM: this area is read-only + */ +#define IS_MAIN 1 +#define IS_ROM 2 + +#define MPS3R_RAM_MAX 9 +#define MPS3R_CPU_MAX 2 +#define MPS3R_UART_MAX 4 /* shared UART count */ + +#define PERIPHBASE 0xf0000000 +#define NUM_SPIS 96 + +typedef enum MPS3RFPGAType { + FPGA_AN536, +} MPS3RFPGAType; + +struct MPS3RMachineClass { + MachineClass parent; + MPS3RFPGAType fpga_type; + const RAMInfo *raminfo; + hwaddr loader_start; +}; + +struct MPS3RMachineState { + MachineState parent; + struct arm_boot_info bootinfo; + MemoryRegion ram[MPS3R_RAM_MAX]; + Object *cpu[MPS3R_CPU_MAX]; + MemoryRegion cpu_sysmem[MPS3R_CPU_MAX]; + MemoryRegion sysmem_alias[MPS3R_CPU_MAX]; + MemoryRegion cpu_ram[MPS3R_CPU_MAX]; + GICv3State gic; + /* per-CPU UARTs followed by the shared UARTs */ + CMSDKAPBUART uart[MPS3R_CPU_MAX + MPS3R_UART_MAX]; + OrIRQState cpu_uart_oflow[MPS3R_CPU_MAX]; + OrIRQState uart_oflow; + CMSDKAPBWatchdog watchdog; + CMSDKAPBDualTimer dualtimer; + ArmSbconI2CState i2c[5]; + PL022State spi[3]; + MPS2SCC scc; + MPS2FPGAIO fpgaio; + UnimplementedDeviceState i2s_audio; + PL031State rtc; + Clock *clk; +}; + +#define TYPE_MPS3R_MACHINE "mps3r" +#define TYPE_MPS3R_AN536_MACHINE MACHINE_TYPE_NAME("mps3-an536") + +OBJECT_DECLARE_TYPE(MPS3RMachineState, MPS3RMachineClass, MPS3R_MACHINE) + +/* + * Main clock frequency CLK in Hz (50MHz). 
In the image there are also + * ACLK, MCLK, GPUCLK and PERIPHCLK at the same frequency; for our + * model we just roll them all into one. + */ +#define CLK_FRQ 50000000 + +static const RAMInfo an536_raminfo[] = { + { + .name = "ATCM", + .base = 0x00000000, + .size = 0x00008000, + .mrindex = 0, + }, { + /* We model the QSPI flash as simple ROM for now */ + .name = "QSPI", + .base = 0x08000000, + .size = 0x00800000, + .flags = IS_ROM, + .mrindex = 1, + }, { + .name = "BRAM", + .base = 0x10000000, + .size = 0x00080000, + .mrindex = 2, + }, { + .name = "DDR", + .base = 0x20000000, + .size = MPS3_DDR_SIZE, + .mrindex = -1, + }, { + .name = "ATCM0", + .base = 0xee000000, + .size = 0x00008000, + .mrindex = 3, + }, { + .name = "BTCM0", + .base = 0xee100000, + .size = 0x00008000, + .mrindex = 4, + }, { + .name = "CTCM0", + .base = 0xee200000, + .size = 0x00008000, + .mrindex = 5, + }, { + .name = "ATCM1", + .base = 0xee400000, + .size = 0x00008000, + .mrindex = 6, + }, { + .name = "BTCM1", + .base = 0xee500000, + .size = 0x00008000, + .mrindex = 7, + }, { + .name = "CTCM1", + .base = 0xee600000, + .size = 0x00008000, + .mrindex = 8, + }, { + .name = NULL, + } +}; + +static const int an536_oscclk[] = { + 24000000, /* 24MHz reference for RTC and timers */ + 50000000, /* 50MHz ACLK */ + 50000000, /* 50MHz MCLK */ + 50000000, /* 50MHz GPUCLK */ + 24576000, /* 24.576MHz AUDCLK */ + 23750000, /* 23.75MHz HDLCDCLK */ + 100000000, /* 100MHz DDR4_REF_CLK */ +}; + +static MemoryRegion *mr_for_raminfo(MPS3RMachineState *mms, + const RAMInfo *raminfo) +{ + /* Return an initialized MemoryRegion for the RAMInfo. */ + MemoryRegion *ram; + + if (raminfo->mrindex < 0) { + /* Means this RAMInfo is for QEMU's "system memory" */ + MachineState *machine = MACHINE(mms); + assert(!(raminfo->flags & IS_ROM)); + return machine->ram; + } + + assert(raminfo->mrindex < MPS3R_RAM_MAX); + ram = &mms->ram[raminfo->mrindex]; + + memory_region_init_ram(ram, NULL, raminfo->name, + raminfo->size, &error_fatal); + if (raminfo->flags & IS_ROM) { + memory_region_set_readonly(ram, true); + } + return ram; +} + +/* + * There is no defined secondary boot protocol for Linux for the AN536, + * because real hardware has a restriction that atomic operations between + * the two CPUs do not function correctly, and so true SMP is not + * possible. Therefore for cases where the user is directly booting + * a kernel, we treat the system as essentially uniprocessor, and + * put the secondary CPU into power-off state (as if the user on the + * real hardware had configured the secondary to be halted via the + * SCC config registers). + * + * Note that the default secondary boot code would not work here anyway + * as it assumes a GICv2, and we have a GICv3. + */ +static void mps3r_write_secondary_boot(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + /* + * Power the secondary CPU off. This means we don't need to write any + * boot code into guest memory. Note that the 'cpu' argument to this + * function is the primary CPU we passed to arm_load_kernel(), not + * the secondary. Loop around all the other CPUs, as the boot.c + * code does for the "disable secondaries if PSCI is enabled" case. 
+ */ + for (CPUState *cs = first_cpu; cs; cs = CPU_NEXT(cs)) { + if (cs != first_cpu) { + object_property_set_bool(OBJECT(cs), "start-powered-off", true, + &error_abort); + } + } +} + +static void mps3r_secondary_cpu_reset(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + /* We don't need to do anything here because the CPU will be off */ +} + +static void create_gic(MPS3RMachineState *mms, MemoryRegion *sysmem) +{ + MachineState *machine = MACHINE(mms); + DeviceState *gicdev; + QList *redist_region_count; + + object_initialize_child(OBJECT(mms), "gic", &mms->gic, TYPE_ARM_GICV3); + gicdev = DEVICE(&mms->gic); + qdev_prop_set_uint32(gicdev, "num-cpu", machine->smp.cpus); + qdev_prop_set_uint32(gicdev, "num-irq", NUM_SPIS + GIC_INTERNAL); + redist_region_count = qlist_new(); + qlist_append_int(redist_region_count, machine->smp.cpus); + qdev_prop_set_array(gicdev, "redist-region-count", redist_region_count); + object_property_set_link(OBJECT(&mms->gic), "sysmem", + OBJECT(sysmem), &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(&mms->gic), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->gic), 0, PERIPHBASE); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->gic), 1, PERIPHBASE + 0x100000); + /* + * Wire the outputs from each CPU's generic timer and the GICv3 + * maintenance interrupt signal to the appropriate GIC PPI inputs, + * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs. + */ + for (int i = 0; i < machine->smp.cpus; i++) { + DeviceState *cpudev = DEVICE(mms->cpu[i]); + SysBusDevice *gicsbd = SYS_BUS_DEVICE(&mms->gic); + int intidbase = NUM_SPIS + i * GIC_INTERNAL; + int irq; + /* + * Mapping from the output timer irq lines from the CPU to the + * GIC PPI inputs used for this board. This isn't a BSA board, + * but it uses the standard convention for the PPI numbers. + */ + const int timer_irq[] = { + [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ, + [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, + [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, + }; + + for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { + qdev_connect_gpio_out(cpudev, irq, + qdev_get_gpio_in(gicdev, + intidbase + timer_irq[irq])); + } + + qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0, + qdev_get_gpio_in(gicdev, + intidbase + ARCH_GIC_MAINT_IRQ)); + + qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0, + qdev_get_gpio_in(gicdev, + intidbase + VIRTUAL_PMU_IRQ)); + + sysbus_connect_irq(gicsbd, i, + qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); + sysbus_connect_irq(gicsbd, i + machine->smp.cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); + sysbus_connect_irq(gicsbd, i + 2 * machine->smp.cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); + sysbus_connect_irq(gicsbd, i + 3 * machine->smp.cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); + } +} + +/* + * Create UART uartno, and map it into the MemoryRegion mem at address baseaddr. + * The qemu_irq arguments are where we connect the various IRQs from the UART. 
+ */ +static void create_uart(MPS3RMachineState *mms, int uartno, MemoryRegion *mem, + hwaddr baseaddr, qemu_irq txirq, qemu_irq rxirq, + qemu_irq txoverirq, qemu_irq rxoverirq, + qemu_irq combirq) +{ + g_autofree char *s = g_strdup_printf("uart%d", uartno); + SysBusDevice *sbd; + + assert(uartno < ARRAY_SIZE(mms->uart)); + object_initialize_child(OBJECT(mms), s, &mms->uart[uartno], + TYPE_CMSDK_APB_UART); + qdev_prop_set_uint32(DEVICE(&mms->uart[uartno]), "pclk-frq", CLK_FRQ); + qdev_prop_set_chr(DEVICE(&mms->uart[uartno]), "chardev", serial_hd(uartno)); + sbd = SYS_BUS_DEVICE(&mms->uart[uartno]); + sysbus_realize(sbd, &error_fatal); + memory_region_add_subregion(mem, baseaddr, + sysbus_mmio_get_region(sbd, 0)); + sysbus_connect_irq(sbd, 0, txirq); + sysbus_connect_irq(sbd, 1, rxirq); + sysbus_connect_irq(sbd, 2, txoverirq); + sysbus_connect_irq(sbd, 3, rxoverirq); + sysbus_connect_irq(sbd, 4, combirq); +} + +static void mps3r_common_init(MachineState *machine) +{ + MPS3RMachineState *mms = MPS3R_MACHINE(machine); + MPS3RMachineClass *mmc = MPS3R_MACHINE_GET_CLASS(mms); + MemoryRegion *sysmem = get_system_memory(); + DeviceState *gicdev; + QList *oscclk; + + mms->clk = clock_new(OBJECT(machine), "CLK"); + clock_set_hz(mms->clk, CLK_FRQ); + + for (const RAMInfo *ri = mmc->raminfo; ri->name; ri++) { + MemoryRegion *mr = mr_for_raminfo(mms, ri); + memory_region_add_subregion(sysmem, ri->base, mr); + } + + assert(machine->smp.cpus <= MPS3R_CPU_MAX); + for (int i = 0; i < machine->smp.cpus; i++) { + g_autofree char *sysmem_name = g_strdup_printf("cpu-%d-memory", i); + g_autofree char *ramname = g_strdup_printf("cpu-%d-memory", i); + g_autofree char *alias_name = g_strdup_printf("sysmem-alias-%d", i); + + /* + * Each CPU has some private RAM/peripherals, so create the container + * which will house those, with the whole-machine system memory being + * used where there's no CPU-specific device. Note that we need the + * sysmem_alias aliases because we can't put one MR (the original + * 'sysmem') into more than one other MR. 
+ */ + memory_region_init(&mms->cpu_sysmem[i], OBJECT(machine), + sysmem_name, UINT64_MAX); + memory_region_init_alias(&mms->sysmem_alias[i], OBJECT(machine), + alias_name, sysmem, 0, UINT64_MAX); + memory_region_add_subregion_overlap(&mms->cpu_sysmem[i], 0, + &mms->sysmem_alias[i], -1); + + mms->cpu[i] = object_new(machine->cpu_type); + object_property_set_link(mms->cpu[i], "memory", + OBJECT(&mms->cpu_sysmem[i]), &error_abort); + object_property_set_int(mms->cpu[i], "reset-cbar", + PERIPHBASE, &error_abort); + qdev_realize(DEVICE(mms->cpu[i]), NULL, &error_fatal); + object_unref(mms->cpu[i]); + + /* Per-CPU RAM */ + memory_region_init_ram(&mms->cpu_ram[i], NULL, ramname, + 0x1000, &error_fatal); + memory_region_add_subregion(&mms->cpu_sysmem[i], 0xe7c01000, + &mms->cpu_ram[i]); + } + + create_gic(mms, sysmem); + gicdev = DEVICE(&mms->gic); + + /* + * UARTs 0 and 1 are per-CPU; their interrupts are wired to + * the relevant CPU's PPI 0..3, aka INTID 16..19 + */ + for (int i = 0; i < machine->smp.cpus; i++) { + int intidbase = NUM_SPIS + i * GIC_INTERNAL; + g_autofree char *s = g_strdup_printf("cpu-uart-oflow-orgate%d", i); + DeviceState *orgate; + + /* The two overflow IRQs from the UART are ORed together into PPI 3 */ + object_initialize_child(OBJECT(mms), s, &mms->cpu_uart_oflow[i], + TYPE_OR_IRQ); + orgate = DEVICE(&mms->cpu_uart_oflow[i]); + qdev_prop_set_uint32(orgate, "num-lines", 2); + qdev_realize(orgate, NULL, &error_fatal); + qdev_connect_gpio_out(orgate, 0, + qdev_get_gpio_in(gicdev, intidbase + 19)); + + create_uart(mms, i, &mms->cpu_sysmem[i], 0xe7c00000, + qdev_get_gpio_in(gicdev, intidbase + 17), /* tx */ + qdev_get_gpio_in(gicdev, intidbase + 16), /* rx */ + qdev_get_gpio_in(orgate, 0), /* txover */ + qdev_get_gpio_in(orgate, 1), /* rxover */ + qdev_get_gpio_in(gicdev, intidbase + 18) /* combined */); + } + /* + * UARTs 2 to 5 are whole-system; all overflow IRQs are ORed + * together into IRQ 17 + */ + object_initialize_child(OBJECT(mms), "uart-oflow-orgate", + &mms->uart_oflow, TYPE_OR_IRQ); + qdev_prop_set_uint32(DEVICE(&mms->uart_oflow), "num-lines", + MPS3R_UART_MAX * 2); + qdev_realize(DEVICE(&mms->uart_oflow), NULL, &error_fatal); + qdev_connect_gpio_out(DEVICE(&mms->uart_oflow), 0, + qdev_get_gpio_in(gicdev, 17)); + + for (int i = 0; i < MPS3R_UART_MAX; i++) { + hwaddr baseaddr = 0xe0205000 + i * 0x1000; + int rxirq = 5 + i * 2, txirq = 6 + i * 2, combirq = 13 + i; + + create_uart(mms, i + MPS3R_CPU_MAX, sysmem, baseaddr, + qdev_get_gpio_in(gicdev, txirq), + qdev_get_gpio_in(gicdev, rxirq), + qdev_get_gpio_in(DEVICE(&mms->uart_oflow), i * 2), + qdev_get_gpio_in(DEVICE(&mms->uart_oflow), i * 2 + 1), + qdev_get_gpio_in(gicdev, combirq)); + } + + for (int i = 0; i < 4; i++) { + /* CMSDK GPIO controllers */ + g_autofree char *s = g_strdup_printf("gpio%d", i); + create_unimplemented_device(s, 0xe0000000 + i * 0x1000, 0x1000); + } + + object_initialize_child(OBJECT(mms), "watchdog", &mms->watchdog, + TYPE_CMSDK_APB_WATCHDOG); + qdev_connect_clock_in(DEVICE(&mms->watchdog), "WDOGCLK", mms->clk); + sysbus_realize(SYS_BUS_DEVICE(&mms->watchdog), &error_fatal); + sysbus_connect_irq(SYS_BUS_DEVICE(&mms->watchdog), 0, + qdev_get_gpio_in(gicdev, 0)); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->watchdog), 0, 0xe0100000); + + object_initialize_child(OBJECT(mms), "dualtimer", &mms->dualtimer, + TYPE_CMSDK_APB_DUALTIMER); + qdev_connect_clock_in(DEVICE(&mms->dualtimer), "TIMCLK", mms->clk); + sysbus_realize(SYS_BUS_DEVICE(&mms->dualtimer), &error_fatal); + 
sysbus_connect_irq(SYS_BUS_DEVICE(&mms->dualtimer), 0, + qdev_get_gpio_in(gicdev, 3)); + sysbus_connect_irq(SYS_BUS_DEVICE(&mms->dualtimer), 1, + qdev_get_gpio_in(gicdev, 1)); + sysbus_connect_irq(SYS_BUS_DEVICE(&mms->dualtimer), 2, + qdev_get_gpio_in(gicdev, 2)); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->dualtimer), 0, 0xe0101000); + + for (int i = 0; i < ARRAY_SIZE(mms->i2c); i++) { + static const hwaddr i2cbase[] = {0xe0102000, /* Touch */ + 0xe0103000, /* Audio */ + 0xe0107000, /* Shield0 */ + 0xe0108000, /* Shield1 */ + 0xe0109000}; /* DDR4 EEPROM */ + g_autofree char *s = g_strdup_printf("i2c%d", i); + + object_initialize_child(OBJECT(mms), s, &mms->i2c[i], + TYPE_ARM_SBCON_I2C); + sysbus_realize(SYS_BUS_DEVICE(&mms->i2c[i]), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->i2c[i]), 0, i2cbase[i]); + if (i != 2 && i != 3) { + /* + * internal-only bus: mark it full to avoid user-created + * i2c devices being plugged into it. + */ + qbus_mark_full(qdev_get_child_bus(DEVICE(&mms->i2c[i]), "i2c")); + } + } + + for (int i = 0; i < ARRAY_SIZE(mms->spi); i++) { + g_autofree char *s = g_strdup_printf("spi%d", i); + hwaddr baseaddr = 0xe0104000 + i * 0x1000; + + object_initialize_child(OBJECT(mms), s, &mms->spi[i], TYPE_PL022); + sysbus_realize(SYS_BUS_DEVICE(&mms->spi[i]), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->spi[i]), 0, baseaddr); + sysbus_connect_irq(SYS_BUS_DEVICE(&mms->spi[i]), 0, + qdev_get_gpio_in(gicdev, 22 + i)); + } + + object_initialize_child(OBJECT(mms), "scc", &mms->scc, TYPE_MPS2_SCC); + qdev_prop_set_uint32(DEVICE(&mms->scc), "scc-cfg0", 0); + qdev_prop_set_uint32(DEVICE(&mms->scc), "scc-cfg4", 0x2); + qdev_prop_set_uint32(DEVICE(&mms->scc), "scc-aid", 0x00200008); + qdev_prop_set_uint32(DEVICE(&mms->scc), "scc-id", 0x41055360); + oscclk = qlist_new(); + for (int i = 0; i < ARRAY_SIZE(an536_oscclk); i++) { + qlist_append_int(oscclk, an536_oscclk[i]); + } + qdev_prop_set_array(DEVICE(&mms->scc), "oscclk", oscclk); + sysbus_realize(SYS_BUS_DEVICE(&mms->scc), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->scc), 0, 0xe0200000); + + create_unimplemented_device("i2s-audio", 0xe0201000, 0x1000); + + object_initialize_child(OBJECT(mms), "fpgaio", &mms->fpgaio, + TYPE_MPS2_FPGAIO); + qdev_prop_set_uint32(DEVICE(&mms->fpgaio), "prescale-clk", an536_oscclk[1]); + qdev_prop_set_uint32(DEVICE(&mms->fpgaio), "num-leds", 10); + qdev_prop_set_bit(DEVICE(&mms->fpgaio), "has-switches", true); + qdev_prop_set_bit(DEVICE(&mms->fpgaio), "has-dbgctrl", false); + sysbus_realize(SYS_BUS_DEVICE(&mms->fpgaio), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->fpgaio), 0, 0xe0202000); + + create_unimplemented_device("clcd", 0xe0209000, 0x1000); + + object_initialize_child(OBJECT(mms), "rtc", &mms->rtc, TYPE_PL031); + sysbus_realize(SYS_BUS_DEVICE(&mms->rtc), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->rtc), 0, 0xe020a000); + sysbus_connect_irq(SYS_BUS_DEVICE(&mms->rtc), 0, + qdev_get_gpio_in(gicdev, 4)); + + /* + * In hardware this is a LAN9220; the LAN9118 is software compatible + * except that it doesn't support the checksum-offload feature. 
+ */ + lan9118_init(0xe0300000, + qdev_get_gpio_in(gicdev, 18)); + + create_unimplemented_device("usb", 0xe0301000, 0x1000); + create_unimplemented_device("qspi-write-config", 0xe0600000, 0x1000); + + mms->bootinfo.ram_size = machine->ram_size; + mms->bootinfo.board_id = -1; + mms->bootinfo.loader_start = mmc->loader_start; + mms->bootinfo.write_secondary_boot = mps3r_write_secondary_boot; + mms->bootinfo.secondary_cpu_reset_hook = mps3r_secondary_cpu_reset; + arm_load_kernel(ARM_CPU(mms->cpu[0]), machine, &mms->bootinfo); +} + +static void mps3r_set_default_ram_info(MPS3RMachineClass *mmc) +{ + /* + * Set mc->default_ram_size and default_ram_id from the + * information in mmc->raminfo. + */ + MachineClass *mc = MACHINE_CLASS(mmc); + const RAMInfo *p; + + for (p = mmc->raminfo; p->name; p++) { + if (p->mrindex < 0) { + /* Found the entry for "system memory" */ + mc->default_ram_size = p->size; + mc->default_ram_id = p->name; + mmc->loader_start = p->base; + return; + } + } + g_assert_not_reached(); +} + +static void mps3r_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->init = mps3r_common_init; +} + +static void mps3r_an536_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MPS3RMachineClass *mmc = MPS3R_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-r52"), + NULL + }; + + mc->desc = "ARM MPS3 with AN536 FPGA image for Cortex-R52"; + /* + * In the real FPGA image there are always two cores, but the standard + * initial setting for the SCC SYSCON 0x000 register is 0x21, meaning + * that the second core is held in reset and halted. Many images built for + * the board do not expect the second core to run at startup (especially + * since on the real FPGA image it is not possible to use LDREX/STREX + * in RAM between the two cores, so a true SMP setup isn't supported). + * + * As QEMU's equivalent of this, we support both -smp 1 and -smp 2, + * with the default being -smp 1. This seems a more intuitive UI for + * QEMU users than, for instance, having a machine property to allow + * the user to set the initial value of the SYSCON 0x000 register. 
+ */ + mc->default_cpus = 1; + mc->min_cpus = 1; + mc->max_cpus = 2; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-r52"); + mc->valid_cpu_types = valid_cpu_types; + mmc->raminfo = an536_raminfo; + mps3r_set_default_ram_info(mmc); +} + +static const TypeInfo mps3r_machine_types[] = { + { + .name = TYPE_MPS3R_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(MPS3RMachineState), + .class_size = sizeof(MPS3RMachineClass), + .class_init = mps3r_class_init, + }, { + .name = TYPE_MPS3R_AN536_MACHINE, + .parent = TYPE_MPS3R_MACHINE, + .class_init = mps3r_an536_class_init, + }, +}; + +DEFINE_TYPES(mps3r_machine_types); diff --git a/hw/arm/msf2-soc.c b/hw/arm/msf2-soc.c index b5fe9f364d5..a94a10adcca 100644 --- a/hw/arm/msf2-soc.c +++ b/hw/arm/msf2-soc.c @@ -134,7 +134,7 @@ static void m2sxxx_soc_realize(DeviceState *dev_soc, Error **errp) armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 81); - qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type); + qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m3")); qdev_prop_set_bit(armv7m, "enable-bitband", true); qdev_connect_clock_in(armv7m, "cpuclk", s->m3clk); qdev_connect_clock_in(armv7m, "refclk", s->refclk); @@ -197,12 +197,8 @@ static void m2sxxx_soc_realize(DeviceState *dev_soc, Error **errp) g_free(bus_name); } - /* FIXME use qdev NIC properties instead of nd_table[] */ - if (nd_table[0].used) { - qemu_check_nic_model(&nd_table[0], TYPE_MSS_EMAC); - qdev_set_nic_properties(DEVICE(&s->emac), &nd_table[0]); - } dev = DEVICE(&s->emac); + qemu_configure_nic_device(dev, true, NULL); object_property_set_link(OBJECT(&s->emac), "ahb-bus", OBJECT(get_system_memory()), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->emac), errp)) { @@ -231,7 +227,6 @@ static Property m2sxxx_soc_properties[] = { * part name specifies the type of SmartFusion2 device variant(this * property is for information purpose only. 
*/ - DEFINE_PROP_STRING("cpu-type", MSF2State, cpu_type), DEFINE_PROP_STRING("part-name", MSF2State, part_name), DEFINE_PROP_UINT64("eNVM-size", MSF2State, envm_size, MSF2_ENVM_MAX_SIZE), DEFINE_PROP_UINT64("eSRAM-size", MSF2State, esram_size, diff --git a/hw/arm/msf2-som.c b/hw/arm/msf2-som.c index 7b3106c790c..5c415abe852 100644 --- a/hw/arm/msf2-som.c +++ b/hw/arm/msf2-som.c @@ -47,7 +47,6 @@ static void emcraft_sf2_s2s010_init(MachineState *machine) DeviceState *dev; DeviceState *spi_flash; MSF2State *soc; - MachineClass *mc = MACHINE_GET_CLASS(machine); DriveInfo *dinfo = drive_get(IF_MTD, 0, 0); qemu_irq cs_line; BusState *spi_bus; @@ -55,20 +54,13 @@ static void emcraft_sf2_s2s010_init(MachineState *machine) MemoryRegion *ddr = g_new(MemoryRegion, 1); Clock *m3clk; - if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { - error_report("This board can only be used with CPU %s", - mc->default_cpu_type); - exit(1); - } - memory_region_init_ram(ddr, NULL, "ddr-ram", DDR_SIZE, &error_fatal); memory_region_add_subregion(sysmem, DDR_BASE_ADDRESS, ddr); dev = qdev_new(TYPE_MSF2_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); qdev_prop_set_string(dev, "part-name", "M2S010"); - qdev_prop_set_string(dev, "cpu-type", mc->default_cpu_type); - qdev_prop_set_uint64(dev, "eNVM-size", M2S010_ENVM_SIZE); qdev_prop_set_uint64(dev, "eSRAM-size", M2S010_ESRAM_SIZE); @@ -106,9 +98,14 @@ static void emcraft_sf2_s2s010_init(MachineState *machine) static void emcraft_sf2_machine_init(MachineClass *mc) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m3"), + NULL + }; + mc->desc = "SmartFusion2 SOM kit from Emcraft (M2S010)"; mc->init = emcraft_sf2_s2s010_init; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); + mc->valid_cpu_types = valid_cpu_types; } DEFINE_MACHINE("emcraft-sf2", emcraft_sf2_machine_init) diff --git a/hw/arm/musca.c b/hw/arm/musca.c index 6eeee57c9dd..e2c9d49af58 100644 --- a/hw/arm/musca.c +++ b/hw/arm/musca.c @@ -355,7 +355,6 @@ static void musca_init(MachineState *machine) { MuscaMachineState *mms = MUSCA_MACHINE(machine); MuscaMachineClass *mmc = MUSCA_MACHINE_GET_CLASS(mms); - MachineClass *mc = MACHINE_GET_CLASS(machine); MemoryRegion *system_memory = get_system_memory(); DeviceState *ssedev; DeviceState *dev_splitter; @@ -366,12 +365,6 @@ static void musca_init(MachineState *machine) assert(mmc->num_irqs <= MUSCA_NUMIRQ_MAX); assert(mmc->num_mpcs <= MUSCA_MPC_MAX); - if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { - error_report("This board can only be used with CPU %s", - mc->default_cpu_type); - exit(1); - } - mms->sysclk = clock_new(OBJECT(machine), "SYSCLK"); clock_set_hz(mms->sysclk, SYSCLK_FRQ); mms->s32kclk = clock_new(OBJECT(machine), "S32KCLK"); @@ -604,11 +597,15 @@ static void musca_init(MachineState *machine) static void musca_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m33"), + NULL + }; mc->default_cpus = 2; mc->min_cpus = mc->default_cpus; mc->max_cpus = mc->default_cpus; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + mc->valid_cpu_types = valid_cpu_types; mc->init = musca_init; } diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c index 9703bfb97fb..2020f73a576 100644 --- a/hw/arm/musicpal.c +++ b/hw/arm/musicpal.c @@ -12,7 +12,6 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "cpu.h" #include "hw/sysbus.h" #include 
"migration/vmstate.h" #include "hw/arm/boot.h" @@ -39,6 +38,7 @@ #include "hw/net/mv88w8618_eth.h" #include "audio/audio.h" #include "qemu/error-report.h" +#include "target/arm/cpu-qom.h" #define MP_MISC_BASE 0x80002000 #define MP_MISC_SIZE 0x00001000 @@ -275,7 +275,7 @@ static const VMStateDescription musicpal_lcd_vmsd = { .name = "musicpal_lcd", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(brightness, musicpal_lcd_state), VMSTATE_UINT32(mode, musicpal_lcd_state), VMSTATE_UINT32(irqctrl, musicpal_lcd_state), @@ -400,7 +400,7 @@ static const VMStateDescription mv88w8618_pic_vmsd = { .name = "mv88w8618_pic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, mv88w8618_pic_state), VMSTATE_UINT32(enabled, mv88w8618_pic_state), VMSTATE_END_OF_LIST() @@ -583,7 +583,7 @@ static const VMStateDescription mv88w8618_timer_vmsd = { .name = "timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(ptimer, mv88w8618_timer_state), VMSTATE_UINT32(limit, mv88w8618_timer_state), VMSTATE_END_OF_LIST() @@ -594,7 +594,7 @@ static const VMStateDescription mv88w8618_pit_vmsd = { .name = "mv88w8618_pit", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(timer, mv88w8618_pit_state, 4, 1, mv88w8618_timer_vmsd, mv88w8618_timer_state), VMSTATE_END_OF_LIST() @@ -681,7 +681,7 @@ static const VMStateDescription mv88w8618_flashcfg_vmsd = { .name = "mv88w8618_flashcfg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cfgr0, mv88w8618_flashcfg_state), VMSTATE_END_OF_LIST() } @@ -1015,7 +1015,7 @@ static const VMStateDescription musicpal_gpio_vmsd = { .name = "musicpal_gpio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(lcd_brightness, musicpal_gpio_state), VMSTATE_UINT32(out_state, musicpal_gpio_state), VMSTATE_UINT32(in_state, musicpal_gpio_state), @@ -1043,20 +1043,6 @@ static const TypeInfo musicpal_gpio_info = { }; /* Keyboard codes & masks */ -#define KEY_RELEASED 0x80 -#define KEY_CODE 0x7f - -#define KEYCODE_TAB 0x0f -#define KEYCODE_ENTER 0x1c -#define KEYCODE_F 0x21 -#define KEYCODE_M 0x32 - -#define KEYCODE_EXTENDED 0xe0 -#define KEYCODE_UP 0x48 -#define KEYCODE_DOWN 0x50 -#define KEYCODE_LEFT 0x4b -#define KEYCODE_RIGHT 0x4d - #define MP_KEY_WHEEL_VOL (1 << 0) #define MP_KEY_WHEEL_VOL_INV (1 << 1) #define MP_KEY_WHEEL_NAV (1 << 2) @@ -1074,67 +1060,66 @@ struct musicpal_key_state { SysBusDevice parent_obj; /*< public >*/ - uint32_t kbd_extended; uint32_t pressed_keys; qemu_irq out[8]; }; -static void musicpal_key_event(void *opaque, int keycode) +static void musicpal_key_event(DeviceState *dev, QemuConsole *src, + InputEvent *evt) { - musicpal_key_state *s = opaque; + musicpal_key_state *s = MUSICPAL_KEY(dev); + InputKeyEvent *key = evt->u.key.data; + int qcode = qemu_input_key_value_to_qcode(key->key); uint32_t event = 0; int i; - if (keycode == KEYCODE_EXTENDED) { - s->kbd_extended = 1; - return; - } + switch (qcode) { + case Q_KEY_CODE_UP: + event = MP_KEY_WHEEL_NAV | MP_KEY_WHEEL_NAV_INV; + break; - if (s->kbd_extended) { - switch (keycode & KEY_CODE) { - case KEYCODE_UP: - event = MP_KEY_WHEEL_NAV | MP_KEY_WHEEL_NAV_INV; - break; + case 
Q_KEY_CODE_DOWN: + event = MP_KEY_WHEEL_NAV; + break; - case KEYCODE_DOWN: - event = MP_KEY_WHEEL_NAV; - break; + case Q_KEY_CODE_LEFT: + event = MP_KEY_WHEEL_VOL | MP_KEY_WHEEL_VOL_INV; + break; - case KEYCODE_LEFT: - event = MP_KEY_WHEEL_VOL | MP_KEY_WHEEL_VOL_INV; - break; + case Q_KEY_CODE_RIGHT: + event = MP_KEY_WHEEL_VOL; + break; - case KEYCODE_RIGHT: - event = MP_KEY_WHEEL_VOL; - break; - } - } else { - switch (keycode & KEY_CODE) { - case KEYCODE_F: - event = MP_KEY_BTN_FAVORITS; - break; - - case KEYCODE_TAB: - event = MP_KEY_BTN_VOLUME; - break; - - case KEYCODE_ENTER: - event = MP_KEY_BTN_NAVIGATION; - break; - - case KEYCODE_M: - event = MP_KEY_BTN_MENU; - break; - } - /* Do not repeat already pressed buttons */ - if (!(keycode & KEY_RELEASED) && (s->pressed_keys & event)) { + case Q_KEY_CODE_F: + event = MP_KEY_BTN_FAVORITS; + break; + + case Q_KEY_CODE_TAB: + event = MP_KEY_BTN_VOLUME; + break; + + case Q_KEY_CODE_RET: + event = MP_KEY_BTN_NAVIGATION; + break; + + case Q_KEY_CODE_M: + event = MP_KEY_BTN_MENU; + break; + } + + /* + * We allow repeated wheel-events when the arrow keys are held down, + * but do not repeat already-pressed buttons for the other key inputs. + */ + if (!(event & (MP_KEY_WHEEL_NAV | MP_KEY_WHEEL_VOL))) { + if (key->down && (s->pressed_keys & event)) { event = 0; } } if (event) { /* Raise GPIO pin first if repeating a key */ - if (!(keycode & KEY_RELEASED) && (s->pressed_keys & event)) { + if (key->down && (s->pressed_keys & event)) { for (i = 0; i <= 7; i++) { if (event & (1 << i)) { qemu_set_irq(s->out[i], 1); @@ -1143,17 +1128,15 @@ static void musicpal_key_event(void *opaque, int keycode) } for (i = 0; i <= 7; i++) { if (event & (1 << i)) { - qemu_set_irq(s->out[i], !!(keycode & KEY_RELEASED)); + qemu_set_irq(s->out[i], !key->down); } } - if (keycode & KEY_RELEASED) { - s->pressed_keys &= ~event; - } else { + if (key->down) { s->pressed_keys |= event; + } else { + s->pressed_keys &= ~event; } } - - s->kbd_extended = 0; } static void musicpal_key_init(Object *obj) @@ -1162,20 +1145,27 @@ static void musicpal_key_init(Object *obj) DeviceState *dev = DEVICE(sbd); musicpal_key_state *s = MUSICPAL_KEY(dev); - s->kbd_extended = 0; s->pressed_keys = 0; qdev_init_gpio_out(dev, s->out, ARRAY_SIZE(s->out)); +} + +static const QemuInputHandler musicpal_key_handler = { + .name = "musicpal_key", + .mask = INPUT_EVENT_MASK_KEY, + .event = musicpal_key_event, +}; - qemu_add_kbd_event_handler(musicpal_key_event, s); +static void musicpal_key_realize(DeviceState *dev, Error **errp) +{ + qemu_input_handler_register(dev, &musicpal_key_handler); } static const VMStateDescription musicpal_key_vmsd = { .name = "musicpal_key", - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_UINT32(kbd_extended, musicpal_key_state), + .version_id = 2, + .minimum_version_id = 2, + .fields = (const VMStateField[]) { VMSTATE_UINT32(pressed_keys, musicpal_key_state), VMSTATE_END_OF_LIST() } @@ -1186,6 +1176,7 @@ static void musicpal_key_class_init(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &musicpal_key_vmsd; + dc->realize = musicpal_key_realize; } static const TypeInfo musicpal_key_info = { @@ -1286,9 +1277,8 @@ static void musicpal_init(MachineState *machine) } sysbus_create_simple(TYPE_MV88W8618_FLASHCFG, MP_FLASHCFG_BASE, NULL); - qemu_check_nic_model(&nd_table[0], "mv88w8618"); dev = qdev_new(TYPE_MV88W8618_ETH); - qdev_set_nic_properties(dev, &nd_table[0]); + qemu_configure_nic_device(dev, true, 
"mv88w8618"); object_property_set_link(OBJECT(dev), "dma-memory", OBJECT(get_system_memory()), &error_fatal); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); diff --git a/hw/arm/netduino2.c b/hw/arm/netduino2.c index 501f63a77f9..8b1a9a24379 100644 --- a/hw/arm/netduino2.c +++ b/hw/arm/netduino2.c @@ -44,6 +44,7 @@ static void netduino2_init(MachineState *machine) clock_set_hz(sysclk, SYSCLK_FRQ); dev = qdev_new(TYPE_STM32F205_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); diff --git a/hw/arm/netduinoplus2.c b/hw/arm/netduinoplus2.c index 2e589849478..bccd1003549 100644 --- a/hw/arm/netduinoplus2.c +++ b/hw/arm/netduinoplus2.c @@ -44,6 +44,7 @@ static void netduinoplus2_init(MachineState *machine) clock_set_hz(sysclk, SYSCLK_FRQ); dev = qdev_new(TYPE_STM32F405_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); diff --git a/hw/arm/npcm7xx.c b/hw/arm/npcm7xx.c index 15ff21d0472..cc68b5d8f12 100644 --- a/hw/arm/npcm7xx.c +++ b/hw/arm/npcm7xx.c @@ -26,6 +26,7 @@ #include "qapi/error.h" #include "qemu/units.h" #include "sysemu/sysemu.h" +#include "target/arm/cpu-qom.h" /* * This covers the whole MMIO space. We'll use this to catch any MMIO accesses @@ -83,8 +84,10 @@ enum NPCM7xxInterrupt { NPCM7XX_UART1_IRQ, NPCM7XX_UART2_IRQ, NPCM7XX_UART3_IRQ, + NPCM7XX_GMAC1_IRQ = 14, NPCM7XX_EMC1RX_IRQ = 15, NPCM7XX_EMC1TX_IRQ, + NPCM7XX_GMAC2_IRQ, NPCM7XX_MMC_IRQ = 26, NPCM7XX_PSPI2_IRQ = 28, NPCM7XX_PSPI1_IRQ = 31, @@ -228,6 +231,12 @@ static const hwaddr npcm7xx_pspi_addr[] = { 0xf0201000, }; +/* Register base address for each GMAC Module */ +static const hwaddr npcm7xx_gmac_addr[] = { + 0xf0802000, + 0xf0804000, +}; + static const struct { hwaddr regs_addr; uint32_t unconnected_pins; @@ -456,6 +465,10 @@ static void npcm7xx_init(Object *obj) object_initialize_child(obj, "pspi[*]", &s->pspi[i], TYPE_NPCM_PSPI); } + for (i = 0; i < ARRAY_SIZE(s->gmac); i++) { + object_initialize_child(obj, "gmac[*]", &s->gmac[i], TYPE_NPCM_GMAC); + } + object_initialize_child(obj, "mmc", &s->mmc, TYPE_NPCM7XX_SDHCI); } @@ -474,7 +487,7 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp) /* CPUs */ for (i = 0; i < nc->num_cpus; i++) { object_property_set_int(OBJECT(&s->cpu[i]), "mp-affinity", - arm_cpu_mp_affinity(i, NPCM7XX_MAX_NUM_CPUS), + arm_build_mp_affinity(i, NPCM7XX_MAX_NUM_CPUS), &error_abort); object_property_set_int(OBJECT(&s->cpu[i]), "reset-cbar", NPCM7XX_GIC_CPU_IF_ADDR, &error_abort); @@ -655,8 +668,9 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp) /* * EMC Modules. Cannot fail. - * The mapping of the device to its netdev backend works as follows: - * emc[i] = nd_table[i] + * Use the available NIC configurations in order, allowing 'emc0' and + * 'emc1' to by used as aliases for the model= parameter to override. + * * This works around the inability to specify the netdev property for the * emc device: it's not pluggable and thus the -device option can't be * used. 
@@ -664,12 +678,13 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp) QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_emc_addr) != ARRAY_SIZE(s->emc)); QEMU_BUILD_BUG_ON(ARRAY_SIZE(s->emc) != 2); for (i = 0; i < ARRAY_SIZE(s->emc); i++) { - s->emc[i].emc_num = i; SysBusDevice *sbd = SYS_BUS_DEVICE(&s->emc[i]); - if (nd_table[i].used) { - qemu_check_nic_model(&nd_table[i], TYPE_NPCM7XX_EMC); - qdev_set_nic_properties(DEVICE(sbd), &nd_table[i]); - } + char alias[6]; + + s->emc[i].emc_num = i; + snprintf(alias, sizeof(alias), "emc%u", i); + qemu_configure_nic_device(DEVICE(sbd), true, alias); + /* * The device exists regardless of whether it's connected to a QEMU * netdev backend. So always instantiate it even if there is no @@ -687,6 +702,30 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(sbd, 1, npcm7xx_irq(s, rx_irq)); } + /* + * GMAC Modules. Cannot fail. + */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_gmac_addr) != ARRAY_SIZE(s->gmac)); + QEMU_BUILD_BUG_ON(ARRAY_SIZE(s->gmac) != 2); + for (i = 0; i < ARRAY_SIZE(s->gmac); i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->gmac[i]); + + qemu_configure_nic_device(DEVICE(sbd), false, NULL); + /* + * The device exists regardless of whether it's connected to a QEMU + * netdev backend. So always instantiate it even if there is no + * backend. + */ + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, npcm7xx_gmac_addr[i]); + int irq = i == 0 ? NPCM7XX_GMAC1_IRQ : NPCM7XX_GMAC2_IRQ; + /* + * N.B. The values for the second argument sysbus_connect_irq are + * chosen to match the registration order in npcm7xx_emc_realize. + */ + sysbus_connect_irq(sbd, 0, npcm7xx_irq(s, irq)); + } + /* * Flash Interface Unit (FIU). Can fail if incorrect number of chip selects * specified, but this is a programming error. 
@@ -749,8 +788,6 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp) create_unimplemented_device("npcm7xx.siox[2]", 0xf0102000, 4 * KiB); create_unimplemented_device("npcm7xx.ahbpci", 0xf0400000, 1 * MiB); create_unimplemented_device("npcm7xx.mcphy", 0xf05f0000, 64 * KiB); - create_unimplemented_device("npcm7xx.gmac1", 0xf0802000, 8 * KiB); - create_unimplemented_device("npcm7xx.gmac2", 0xf0804000, 8 * KiB); create_unimplemented_device("npcm7xx.vcd", 0xf0810000, 64 * KiB); create_unimplemented_device("npcm7xx.ece", 0xf0820000, 8 * KiB); create_unimplemented_device("npcm7xx.vdma", 0xf0822000, 8 * KiB); diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c index 2aef579aacc..e229efb4472 100644 --- a/hw/arm/npcm7xx_boards.c +++ b/hw/arm/npcm7xx_boards.c @@ -121,15 +121,8 @@ static NPCM7xxState *npcm7xx_create_soc(MachineState *machine, uint32_t hw_straps) { NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_GET_CLASS(machine); - MachineClass *mc = MACHINE_CLASS(nmc); Object *obj; - if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { - error_report("This board can only be used with %s", - mc->default_cpu_type); - exit(1); - } - obj = object_new_with_props(nmc->soc_type, OBJECT(machine), "soc", &error_abort, NULL); object_property_set_uint(obj, "power-on-straps", hw_straps, &error_abort); @@ -463,12 +456,16 @@ static void npcm7xx_set_soc_type(NPCM7xxMachineClass *nmc, const char *type) static void npcm7xx_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a9"), + NULL + }; mc->no_floppy = 1; mc->no_cdrom = 1; mc->no_parallel = 1; mc->default_ram_id = "ram"; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a9"); + mc->valid_cpu_types = valid_cpu_types; } /* diff --git a/hw/arm/nrf51_soc.c b/hw/arm/nrf51_soc.c index 34da0d62f00..ac53441630f 100644 --- a/hw/arm/nrf51_soc.c +++ b/hw/arm/nrf51_soc.c @@ -58,7 +58,6 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp) { NRF51State *s = NRF51_SOC(dev_soc); MemoryRegion *mr; - Error *err = NULL; uint8_t i = 0; hwaddr base_addr = 0; @@ -92,10 +91,8 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp) memory_region_add_subregion_overlap(&s->container, 0, s->board_memory, -1); - memory_region_init_ram(&s->sram, OBJECT(s), "nrf51.sram", s->sram_size, - &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->sram, OBJECT(s), "nrf51.sram", s->sram_size, + errp)) { return; } memory_region_add_subregion(&s->container, NRF51_SRAM_BASE, &s->sram); diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c index 35aff46b4b4..35364312c73 100644 --- a/hw/arm/nseries.c +++ b/hw/arm/nseries.c @@ -1353,9 +1353,7 @@ static void n8x0_init(MachineState *machine, n8x0_spi_setup(s); n8x0_dss_setup(s); n8x0_cbus_setup(s); - if (machine_usb(machine)) { - n8x0_usb_setup(s); - } + n8x0_usb_setup(s); if (machine->kernel_filename) { /* Or at the linux loader. 
*/ @@ -1432,6 +1430,7 @@ static void n800_class_init(ObjectClass *oc, void *data) /* Actually two chips of 0x4000000 bytes each */ mc->default_ram_size = 0x08000000; mc->default_ram_id = "omap2.dram"; + mc->deprecation_reason = "machine is old and unmaintained"; machine_add_audiodev_property(mc); } @@ -1454,6 +1453,7 @@ static void n810_class_init(ObjectClass *oc, void *data) /* Actually two chips of 0x4000000 bytes each */ mc->default_ram_size = 0x08000000; mc->default_ram_id = "omap2.dram"; + mc->deprecation_reason = "machine is old and unmaintained"; machine_add_audiodev_property(mc); } diff --git a/hw/arm/olimex-stm32-h405.c b/hw/arm/olimex-stm32-h405.c index d793de7c97f..4ad7b043be0 100644 --- a/hw/arm/olimex-stm32-h405.c +++ b/hw/arm/olimex-stm32-h405.c @@ -47,6 +47,7 @@ static void olimex_stm32_h405_init(MachineState *machine) clock_set_hz(sysclk, SYSCLK_FRQ); dev = qdev_new(TYPE_STM32F405_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c index d5438156ee9..86ee336e599 100644 --- a/hw/arm/omap1.c +++ b/hw/arm/omap1.c @@ -40,6 +40,7 @@ #include "hw/sysbus.h" #include "qemu/cutils.h" #include "qemu/bcd.h" +#include "target/arm/cpu-qom.h" static inline void omap_log_badwidth(const char *funcname, hwaddr addr, int sz) { diff --git a/hw/arm/omap2.c b/hw/arm/omap2.c index f170728e7ec..d9683276c68 100644 --- a/hw/arm/omap2.c +++ b/hw/arm/omap2.c @@ -21,7 +21,6 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "qapi/error.h" -#include "cpu.h" #include "exec/address-spaces.h" #include "sysemu/blockdev.h" #include "sysemu/qtest.h" @@ -39,6 +38,7 @@ #include "hw/sysbus.h" #include "hw/boards.h" #include "audio/audio.h" +#include "target/arm/cpu-qom.h" /* Enhanced Audio Controller (CODEC only) */ struct omap_eac_s { diff --git a/hw/arm/omap_sx1.c b/hw/arm/omap_sx1.c index 4bf1579f8c1..62d7915fb8f 100644 --- a/hw/arm/omap_sx1.c +++ b/hw/arm/omap_sx1.c @@ -35,7 +35,6 @@ #include "hw/block/flash.h" #include "sysemu/qtest.h" #include "exec/address-spaces.h" -#include "cpu.h" #include "qemu/cutils.h" #include "qemu/error-report.h" diff --git a/hw/arm/orangepi.c b/hw/arm/orangepi.c index f3784d45caf..77e328191d7 100644 --- a/hw/arm/orangepi.c +++ b/hw/arm/orangepi.c @@ -49,12 +49,6 @@ static void orangepi_init(MachineState *machine) exit(1); } - /* Only allow Cortex-A7 for this board */ - if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a7")) != 0) { - error_report("This board can only be used with cortex-a7 CPU"); - exit(1); - } - h3 = AW_H3(object_new(TYPE_AW_H3)); object_property_add_child(OBJECT(machine), "soc", OBJECT(h3)); object_unref(OBJECT(h3)); @@ -111,6 +105,11 @@ static void orangepi_init(MachineState *machine) static void orangepi_machine_init(MachineClass *mc) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a7"), + NULL + }; + mc->desc = "Orange Pi PC (Cortex-A7)"; mc->init = orangepi_init; mc->block_default_type = IF_SD; @@ -119,6 +118,7 @@ static void orangepi_machine_init(MachineClass *mc) mc->max_cpus = AW_H3_NUM_CPUS; mc->default_cpus = AW_H3_NUM_CPUS; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); + mc->valid_cpu_types = valid_cpu_types; mc->default_ram_size = 1 * GiB; mc->default_ram_id = "orangepi.ram"; } diff --git a/hw/arm/palm.c b/hw/arm/palm.c index b86f2c331bb..e04ac92eb7f 100644 --- a/hw/arm/palm.c +++ b/hw/arm/palm.c @@ -29,7 +29,6 @@ 
#include "hw/input/tsc2xxx.h" #include "hw/irq.h" #include "hw/loader.h" -#include "cpu.h" #include "qemu/cutils.h" #include "qom/object.h" #include "qemu/error-report.h" @@ -310,6 +309,7 @@ static void palmte_machine_init(MachineClass *mc) mc->default_cpu_type = ARM_CPU_TYPE_NAME("ti925t"); mc->default_ram_size = 0x02000000; mc->default_ram_id = "omap1.dram"; + mc->deprecation_reason = "machine is old and unmaintained"; machine_add_audiodev_property(mc); } diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c index f0bf407e664..6b2e54473b3 100644 --- a/hw/arm/pxa2xx.c +++ b/hw/arm/pxa2xx.c @@ -168,7 +168,7 @@ static const VMStateDescription vmstate_pxa2xx_pm = { .name = "pxa2xx_pm", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(pm_regs, PXA2xxState, 0x40), VMSTATE_END_OF_LIST() } @@ -238,7 +238,7 @@ static const VMStateDescription vmstate_pxa2xx_cm = { .name = "pxa2xx_cm", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(cm_regs, PXA2xxState, 4), VMSTATE_UINT32(clkcfg, PXA2xxState), VMSTATE_UINT32(pmnc, PXA2xxState), @@ -465,7 +465,7 @@ static const VMStateDescription vmstate_pxa2xx_mm = { .name = "pxa2xx_mm", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(mm_regs, PXA2xxState, 0x1a), VMSTATE_END_OF_LIST() } @@ -510,7 +510,7 @@ static const VMStateDescription vmstate_pxa2xx_ssp = { .name = "pxa2xx-ssp", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(enable, PXA2xxSSPState), VMSTATE_UINT32_ARRAY(sscr, PXA2xxSSPState, 2), VMSTATE_UINT32(sspsp, PXA2xxSSPState), @@ -1200,7 +1200,7 @@ static const VMStateDescription vmstate_pxa2xx_rtc_regs = { .minimum_version_id = 0, .pre_save = pxa2xx_rtc_pre_save, .post_load = pxa2xx_rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(rttr, PXA2xxRTCState), VMSTATE_UINT32(rtsr, PXA2xxRTCState), VMSTATE_UINT32(rtar, PXA2xxRTCState), @@ -1464,7 +1464,7 @@ static const VMStateDescription vmstate_pxa2xx_i2c_slave = { .name = "pxa2xx_i2c_slave", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(parent_obj, PXA2xxI2CSlaveState), VMSTATE_END_OF_LIST() } @@ -1474,7 +1474,7 @@ static const VMStateDescription vmstate_pxa2xx_i2c = { .name = "pxa2xx_i2c", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(control, PXA2xxI2CState), VMSTATE_UINT16(status, PXA2xxI2CState), VMSTATE_UINT8(ibmr, PXA2xxI2CState), @@ -1728,7 +1728,7 @@ static const VMStateDescription vmstate_pxa2xx_i2s = { .name = "pxa2xx_i2s", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(control, PXA2xxI2SState, 2), VMSTATE_UINT32(status, PXA2xxI2SState), VMSTATE_UINT32(mask, PXA2xxI2SState), @@ -2027,7 +2027,7 @@ static const VMStateDescription pxa2xx_fir_vmsd = { .name = "pxa2xx-fir", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(enable, PXA2xxFIrState), VMSTATE_UINT8_ARRAY(control, PXA2xxFIrState, 3), VMSTATE_UINT8_ARRAY(status, PXA2xxFIrState, 2), diff --git a/hw/arm/pxa2xx_gpio.c b/hw/arm/pxa2xx_gpio.c index c8db5e8e2b7..41dca036fbb 
100644 --- a/hw/arm/pxa2xx_gpio.c +++ b/hw/arm/pxa2xx_gpio.c @@ -320,7 +320,7 @@ static const VMStateDescription vmstate_pxa2xx_gpio_regs = { .name = "pxa2xx-gpio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(ilevel, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), VMSTATE_UINT32_ARRAY(olevel, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), VMSTATE_UINT32_ARRAY(dir, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), diff --git a/hw/arm/pxa2xx_pic.c b/hw/arm/pxa2xx_pic.c index 1373a0d275f..f54546cd4df 100644 --- a/hw/arm/pxa2xx_pic.c +++ b/hw/arm/pxa2xx_pic.c @@ -316,7 +316,7 @@ static const VMStateDescription vmstate_pxa2xx_pic_regs = { .version_id = 0, .minimum_version_id = 0, .post_load = pxa2xx_pic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(int_enabled, PXA2xxPICState, 2), VMSTATE_UINT32_ARRAY(int_pending, PXA2xxPICState, 2), VMSTATE_UINT32_ARRAY(is_fiq, PXA2xxPICState, 2), diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c index cc4c4ec9bfc..a7a662f40db 100644 --- a/hw/arm/raspi.c +++ b/hw/arm/raspi.c @@ -18,6 +18,8 @@ #include "qapi/error.h" #include "hw/arm/boot.h" #include "hw/arm/bcm2836.h" +#include "hw/arm/bcm2838.h" +#include "hw/arm/raspi_platform.h" #include "hw/registerfields.h" #include "qemu/error-report.h" #include "hw/boards.h" @@ -25,6 +27,9 @@ #include "hw/arm/boot.h" #include "qom/object.h" +#define TYPE_RASPI_MACHINE MACHINE_TYPE_NAME("raspi-common") +OBJECT_DECLARE_SIMPLE_TYPE(RaspiMachineState, RASPI_MACHINE) + #define SMPBOOT_ADDR 0x300 /* this should leave enough space for ATAGS */ #define MVBAR_ADDR 0x400 /* secure vectors */ #define BOARDSETUP_ADDR (MVBAR_ADDR + 0x20) /* board setup code */ @@ -32,30 +37,12 @@ #define FIRMWARE_ADDR_3 0x80000 /* Pi 3 loads kernel.img here by default */ #define SPINTABLE_ADDR 0xd8 /* Pi 3 bootloader spintable */ -/* Registered machine type (matches RPi Foundation bootloader and U-Boot) */ -#define MACH_TYPE_BCM2708 3138 - struct RaspiMachineState { /*< private >*/ - MachineState parent_obj; + RaspiBaseMachineState parent_obj; /*< public >*/ BCM283XState soc; - struct arm_boot_info binfo; -}; -typedef struct RaspiMachineState RaspiMachineState; - -struct RaspiMachineClass { - /*< private >*/ - MachineClass parent_obj; - /*< public >*/ - uint32_t board_rev; }; -typedef struct RaspiMachineClass RaspiMachineClass; - -#define TYPE_RASPI_MACHINE MACHINE_TYPE_NAME("raspi-common") -DECLARE_OBJ_CHECKERS(RaspiMachineState, RaspiMachineClass, - RASPI_MACHINE, TYPE_RASPI_MACHINE) - /* * Board revision codes: @@ -72,6 +59,7 @@ typedef enum RaspiProcessorId { PROCESSOR_ID_BCM2835 = 0, PROCESSOR_ID_BCM2836 = 1, PROCESSOR_ID_BCM2837 = 2, + PROCESSOR_ID_BCM2838 = 3, } RaspiProcessorId; static const struct { @@ -81,9 +69,10 @@ static const struct { [PROCESSOR_ID_BCM2835] = {TYPE_BCM2835, 1}, [PROCESSOR_ID_BCM2836] = {TYPE_BCM2836, BCM283X_NCPUS}, [PROCESSOR_ID_BCM2837] = {TYPE_BCM2837, BCM283X_NCPUS}, + [PROCESSOR_ID_BCM2838] = {TYPE_BCM2838, BCM283X_NCPUS}, }; -static uint64_t board_ram_size(uint32_t board_rev) +uint64_t board_ram_size(uint32_t board_rev) { assert(FIELD_EX32(board_rev, REV_CODE, STYLE)); /* Only new style */ return 256 * MiB << FIELD_EX32(board_rev, REV_CODE, MEMORY_SIZE); @@ -99,7 +88,7 @@ static RaspiProcessorId board_processor_id(uint32_t board_rev) return proc_id; } -static const char *board_soc_type(uint32_t board_rev) +const char *board_soc_type(uint32_t board_rev) { return soc_property[board_processor_id(board_rev)].type; } @@ 
-200,13 +189,12 @@ static void reset_secondary(ARMCPU *cpu, const struct arm_boot_info *info) cpu_set_pc(cs, info->smp_loader_start); } -static void setup_boot(MachineState *machine, RaspiProcessorId processor_id, - size_t ram_size) +static void setup_boot(MachineState *machine, ARMCPU *cpu, + RaspiProcessorId processor_id, size_t ram_size) { - RaspiMachineState *s = RASPI_MACHINE(machine); + RaspiBaseMachineState *s = RASPI_BASE_MACHINE(machine); int r; - s->binfo.board_id = MACH_TYPE_BCM2708; s->binfo.ram_size = ram_size; if (processor_id <= PROCESSOR_ID_BCM2836) { @@ -252,16 +240,17 @@ static void setup_boot(MachineState *machine, RaspiProcessorId processor_id, s->binfo.firmware_loaded = true; } - arm_load_kernel(&s->soc.cpu[0].core, machine, &s->binfo); + arm_load_kernel(cpu, machine, &s->binfo); } -static void raspi_machine_init(MachineState *machine) +void raspi_base_machine_init(MachineState *machine, + BCM283XBaseState *soc) { - RaspiMachineClass *mc = RASPI_MACHINE_GET_CLASS(machine); - RaspiMachineState *s = RASPI_MACHINE(machine); + RaspiBaseMachineClass *mc = RASPI_BASE_MACHINE_GET_CLASS(machine); uint32_t board_rev = mc->board_rev; uint64_t ram_size = board_ram_size(board_rev); - uint32_t vcram_size; + uint32_t vcram_base, vcram_size; + size_t boot_ram_size; DriveInfo *di; BlockBackend *blk; BusState *bus; @@ -279,19 +268,17 @@ static void raspi_machine_init(MachineState *machine) machine->ram, 0); /* Setup the SOC */ - object_initialize_child(OBJECT(machine), "soc", &s->soc, - board_soc_type(board_rev)); - object_property_add_const_link(OBJECT(&s->soc), "ram", OBJECT(machine->ram)); - object_property_set_int(OBJECT(&s->soc), "board-rev", board_rev, + object_property_add_const_link(OBJECT(soc), "ram", OBJECT(machine->ram)); + object_property_set_int(OBJECT(soc), "board-rev", board_rev, &error_abort); - object_property_set_str(OBJECT(&s->soc), "command-line", + object_property_set_str(OBJECT(soc), "command-line", machine->kernel_cmdline, &error_abort); - qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); + qdev_realize(DEVICE(soc), NULL, &error_fatal); /* Create and plug in the SD cards */ di = drive_get(IF_SD, 0, 0); blk = di ? 
blk_by_legacy_dinfo(di) : NULL; - bus = qdev_get_child_bus(DEVICE(&s->soc), "sd-bus"); + bus = qdev_get_child_bus(DEVICE(soc), "sd-bus"); if (bus == NULL) { error_report("No SD bus found in SOC object"); exit(1); @@ -300,19 +287,40 @@ static void raspi_machine_init(MachineState *machine) qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); qdev_realize_and_unref(carddev, bus, &error_fatal); - vcram_size = object_property_get_uint(OBJECT(&s->soc), "vcram-size", + vcram_size = object_property_get_uint(OBJECT(soc), "vcram-size", &error_abort); - setup_boot(machine, board_processor_id(mc->board_rev), - machine->ram_size - vcram_size); + vcram_base = object_property_get_uint(OBJECT(soc), "vcram-base", + &error_abort); + + if (vcram_base == 0) { + vcram_base = ram_size - vcram_size; + } + boot_ram_size = MIN(vcram_base, UPPER_RAM_BASE - vcram_size); + + setup_boot(machine, &soc->cpu[0].core, board_processor_id(board_rev), + boot_ram_size); } -static void raspi_machine_class_common_init(MachineClass *mc, - uint32_t board_rev) +void raspi_machine_init(MachineState *machine) +{ + RaspiMachineState *s = RASPI_MACHINE(machine); + RaspiBaseMachineState *s_base = RASPI_BASE_MACHINE(machine); + RaspiBaseMachineClass *mc = RASPI_BASE_MACHINE_GET_CLASS(machine); + BCM283XState *soc = &s->soc; + + s_base->binfo.board_id = MACH_TYPE_BCM2708; + + object_initialize_child(OBJECT(machine), "soc", soc, + board_soc_type(mc->board_rev)); + raspi_base_machine_init(machine, &soc->parent_obj); +} + +void raspi_machine_class_common_init(MachineClass *mc, + uint32_t board_rev) { mc->desc = g_strdup_printf("Raspberry Pi %s (revision 1.%u)", board_type(board_rev), FIELD_EX32(board_rev, REV_CODE, REVISION)); - mc->init = raspi_machine_init; mc->block_default_type = IF_SD; mc->no_parallel = 1; mc->no_floppy = 1; @@ -322,50 +330,57 @@ static void raspi_machine_class_common_init(MachineClass *mc, mc->default_ram_id = "ram"; }; +static void raspi_machine_class_init(MachineClass *mc, + uint32_t board_rev) +{ + raspi_machine_class_common_init(mc, board_rev); + mc->init = raspi_machine_init; +}; + static void raspi0_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); - RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); + RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); rmc->board_rev = 0x920092; /* Revision 1.2 */ - raspi_machine_class_common_init(mc, rmc->board_rev); + raspi_machine_class_init(mc, rmc->board_rev); }; static void raspi1ap_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); - RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); + RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); rmc->board_rev = 0x900021; /* Revision 1.1 */ - raspi_machine_class_common_init(mc, rmc->board_rev); + raspi_machine_class_init(mc, rmc->board_rev); }; static void raspi2b_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); - RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); + RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); rmc->board_rev = 0xa21041; - raspi_machine_class_common_init(mc, rmc->board_rev); + raspi_machine_class_init(mc, rmc->board_rev); }; #ifdef TARGET_AARCH64 static void raspi3ap_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); - RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); + RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); rmc->board_rev = 0x9020e0; /* Revision 1.0 */ - raspi_machine_class_common_init(mc, rmc->board_rev); + 
raspi_machine_class_init(mc, rmc->board_rev); }; static void raspi3b_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); - RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); + RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); rmc->board_rev = 0xa02082; - raspi_machine_class_common_init(mc, rmc->board_rev); + raspi_machine_class_init(mc, rmc->board_rev); }; #endif /* TARGET_AARCH64 */ @@ -394,9 +409,14 @@ static const TypeInfo raspi_machine_types[] = { #endif }, { .name = TYPE_RASPI_MACHINE, - .parent = TYPE_MACHINE, + .parent = TYPE_RASPI_BASE_MACHINE, .instance_size = sizeof(RaspiMachineState), - .class_size = sizeof(RaspiMachineClass), + .abstract = true, + }, { + .name = TYPE_RASPI_BASE_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(RaspiBaseMachineState), + .class_size = sizeof(RaspiBaseMachineClass), .abstract = true, } }; diff --git a/hw/arm/raspi4b.c b/hw/arm/raspi4b.c new file mode 100644 index 00000000000..85877880fc7 --- /dev/null +++ b/hw/arm/raspi4b.c @@ -0,0 +1,136 @@ +/* + * Raspberry Pi 4B emulation + * + * Copyright (C) 2022 Ovchinnikov Vitalii + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "hw/arm/raspi_platform.h" +#include "hw/display/bcm2835_fb.h" +#include "hw/registerfields.h" +#include "qemu/error-report.h" +#include "sysemu/device_tree.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/arm/boot.h" +#include "qom/object.h" +#include "hw/arm/bcm2838.h" +#include <libfdt.h> + +#define TYPE_RASPI4B_MACHINE MACHINE_TYPE_NAME("raspi4b") +OBJECT_DECLARE_SIMPLE_TYPE(Raspi4bMachineState, RASPI4B_MACHINE) +struct Raspi4bMachineState { + RaspiBaseMachineState parent_obj; + BCM2838State soc; +}; + +/* + * Add second memory region if board RAM amount exceeds VC base address + * (see https://datasheets.raspberrypi.com/bcm2711/bcm2711-peripherals.pdf + * 1.2 Address Map) + */ +static int raspi_add_memory_node(void *fdt, hwaddr mem_base, hwaddr mem_len) +{ + int ret; + uint32_t acells, scells; + char *nodename = g_strdup_printf("/memory@%" PRIx64, mem_base); + + acells = qemu_fdt_getprop_cell(fdt, "/", "#address-cells", + NULL, &error_fatal); + scells = qemu_fdt_getprop_cell(fdt, "/", "#size-cells", + NULL, &error_fatal); + if (acells == 0 || scells == 0) { + fprintf(stderr, "dtb file invalid (#address-cells or #size-cells 0)\n"); + ret = -1; + } else { + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory"); + ret = qemu_fdt_setprop_sized_cells(fdt, nodename, "reg", + acells, mem_base, + scells, mem_len); + } + + g_free(nodename); + return ret; +} + +static void raspi4_modify_dtb(const struct arm_boot_info *info, void *fdt) +{ + uint64_t ram_size; + + /* Temporarily disable following devices until they are implemented */ + const char *nodes_to_remove[] = { + "brcm,bcm2711-pcie", + "brcm,bcm2711-rng200", + "brcm,bcm2711-thermal", + "brcm,bcm2711-genet-v5", + }; + + for (int i = 0; i < ARRAY_SIZE(nodes_to_remove); i++) { + const char *dev_str = nodes_to_remove[i]; + + int offset = fdt_node_offset_by_compatible(fdt, -1, dev_str); + if (offset >= 0) { + if (!fdt_nop_node(fdt, offset)) { + warn_report("bcm2711 dtc: %s has been disabled!", dev_str); + } + } + } + + ram_size = board_ram_size(info->board_id); + + if (info->ram_size > UPPER_RAM_BASE) { + raspi_add_memory_node(fdt, UPPER_RAM_BASE, ram_size - UPPER_RAM_BASE); + } +} +
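/*
 * Editorial aside, not part of the patch: the RAM split done by setup_boot()
 * (boot_ram_size = MIN(vcram_base, UPPER_RAM_BASE - vcram_size)) and the
 * extra /memory node added by raspi4_modify_dtb() can be hard to follow from
 * the diff alone.  The standalone sketch below reproduces that arithmetic
 * with illustrative values only: the UPPER_RAM_BASE of 1 GiB, the 2 GiB
 * board size and the 64 MiB VideoCore carve-out are assumptions for this
 * example, not the constants defined in the QEMU headers.
 */
#include <inttypes.h>
#include <stdio.h>

#define GiB             (1024ULL * 1024 * 1024)
#define MiB             (1024ULL * 1024)
#define UPPER_RAM_BASE  (1 * GiB)   /* placeholder value for this sketch */

int main(void)
{
    uint64_t ram_size   = 2 * GiB;    /* e.g. a 2 GiB board revision */
    uint64_t vcram_size = 64 * MiB;   /* VideoCore carve-out (placeholder) */
    uint64_t vcram_base = 0;          /* 0 = "not set, default to top of RAM" */

    if (vcram_base == 0) {
        vcram_base = ram_size - vcram_size;
    }
    /* RAM advertised to the boot code below the carve-out, as in setup_boot() */
    uint64_t boot_ram_size =
        vcram_base < UPPER_RAM_BASE - vcram_size ? vcram_base
                                                 : UPPER_RAM_BASE - vcram_size;

    printf("boot RAM:            0x0 .. 0x%" PRIx64 "\n", boot_ram_size);
    /* Second /memory node added when the board RAM exceeds UPPER_RAM_BASE */
    if (ram_size > UPPER_RAM_BASE) {
        printf("second /memory node: base 0x%llx size 0x%" PRIx64 "\n",
               (unsigned long long)UPPER_RAM_BASE, ram_size - UPPER_RAM_BASE);
    }
    return 0;
}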
+static void raspi4b_machine_init(MachineState *machine) +{ + Raspi4bMachineState *s = RASPI4B_MACHINE(machine); + RaspiBaseMachineState *s_base = RASPI_BASE_MACHINE(machine); + RaspiBaseMachineClass *mc = RASPI_BASE_MACHINE_GET_CLASS(machine); + BCM2838State *soc = &s->soc; + + s_base->binfo.modify_dtb = raspi4_modify_dtb; + s_base->binfo.board_id = mc->board_rev; + + object_initialize_child(OBJECT(machine), "soc", soc, + board_soc_type(mc->board_rev)); + + raspi_base_machine_init(machine, &soc->parent_obj); +} + +static void raspi4b_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); + +#if HOST_LONG_BITS == 32 + rmc->board_rev = 0xa03111; /* Revision 1.1, 1 Gb RAM */ +#else + rmc->board_rev = 0xb03115; /* Revision 1.5, 2 Gb RAM */ +#endif + raspi_machine_class_common_init(mc, rmc->board_rev); + mc->init = raspi4b_machine_init; +} + +static const TypeInfo raspi4b_machine_type = { + .name = TYPE_RASPI4B_MACHINE, + .parent = TYPE_RASPI_BASE_MACHINE, + .instance_size = sizeof(Raspi4bMachineState), + .class_init = raspi4b_machine_class_init, +}; + +static void raspi4b_machine_register_type(void) +{ + type_register_static(&raspi4b_machine_type); +} + +type_init(raspi4b_machine_register_type) diff --git a/hw/arm/realview.c b/hw/arm/realview.c index 132217b2edd..b186f965c68 100644 --- a/hw/arm/realview.c +++ b/hw/arm/realview.c @@ -30,6 +30,7 @@ #include "hw/i2c/arm_sbcon_i2c.h" #include "hw/sd/sd.h" #include "audio/audio.h" +#include "target/arm/cpu-qom.h" #define SMP_BOOT_ADDR 0xe0000000 #define SMP_BOOTREG_ADDR 0x10000030 @@ -84,12 +85,10 @@ static void realview_init(MachineState *machine, SysBusDevice *busdev; qemu_irq pic[64]; PCIBus *pci_bus = NULL; - NICInfo *nd; DriveInfo *dinfo; I2CBus *i2c; int n; unsigned int smp_cpus = machine->smp.cpus; - int done_nic = 0; qemu_irq cpu_irq[4]; int is_mpcore = 0; int is_pb = 0; @@ -239,7 +238,12 @@ static void realview_init(MachineState *machine, sysbus_create_simple("pl061", 0x10014000, pic[7]); gpio2 = sysbus_create_simple("pl061", 0x10015000, pic[8]); - sysbus_create_simple("pl111", 0x10020000, pic[23]); + dev = qdev_new("pl111"); + object_property_set_link(OBJECT(dev), "framebuffer-memory", + OBJECT(sysmem), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x10020000); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[23]); dev = sysbus_create_varargs("pl181", 0x10005000, pic[17], pic[18], NULL); /* Wire up MMC card detect and read-only signals. These have @@ -295,24 +299,19 @@ static void realview_init(MachineState *machine, n--; } } - for(n = 0; n < nb_nics; n++) { - nd = &nd_table[n]; - - if (!done_nic && (!nd->model || - strcmp(nd->model, is_pb ? "lan9118" : "smc91c111") == 0)) { - if (is_pb) { - lan9118_init(nd, 0x4e000000, pic[28]); - } else { - smc91c111_init(nd, 0x4e000000, pic[28]); - } - done_nic = 1; + + if (qemu_find_nic_info(is_pb ? 
"lan9118" : "smc91c111", true, NULL)) { + if (is_pb) { + lan9118_init(0x4e000000, pic[28]); } else { - if (pci_bus) { - pci_nic_init_nofail(nd, pci_bus, "rtl8139", NULL); - } + smc91c111_init(0x4e000000, pic[28]); } } + if (pci_bus) { + pci_init_nic_devices(pci_bus, "rtl8139"); + } + dev = sysbus_create_simple(TYPE_ARM_SBCON_I2C, 0x10002000, NULL); i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c"); i2c_slave_create_simple(i2c, "ds1338", 0x68); diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c index f3c97046939..f5709d6c141 100644 --- a/hw/arm/sbsa-ref.c +++ b/hw/arm/sbsa-ref.c @@ -36,8 +36,8 @@ #include "hw/arm/smmuv3.h" #include "hw/block/flash.h" #include "hw/boards.h" -#include "hw/ide/internal.h" -#include "hw/ide/ahci_internal.h" +#include "hw/ide/ide-bus.h" +#include "hw/ide/ahci-sysbus.h" #include "hw/intc/arm_gicv3_common.h" #include "hw/intc/arm_gicv3_its_common.h" #include "hw/loader.h" @@ -50,6 +50,8 @@ #include "net/net.h" #include "qapi/qmp/qlist.h" #include "qom/object.h" +#include "target/arm/cpu-qom.h" +#include "target/arm/gtimer.h" #define RAMLIMIT_GB 8192 #define RAMLIMIT_BYTES (RAMLIMIT_GB * GiB) @@ -145,31 +147,10 @@ static const int sbsa_ref_irqmap[] = { [SBSA_GWDT_WS0] = 16, }; -static const char * const valid_cpus[] = { - ARM_CPU_TYPE_NAME("cortex-a57"), - ARM_CPU_TYPE_NAME("cortex-a72"), - ARM_CPU_TYPE_NAME("neoverse-n1"), - ARM_CPU_TYPE_NAME("neoverse-v1"), - ARM_CPU_TYPE_NAME("neoverse-n2"), - ARM_CPU_TYPE_NAME("max"), -}; - -static bool cpu_type_valid(const char *cpu) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(valid_cpus); i++) { - if (strcmp(cpu, valid_cpus[i]) == 0) { - return true; - } - } - return false; -} - static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx) { uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER; - return arm_cpu_mp_affinity(idx, clustersz); + return arm_build_mp_affinity(idx, clustersz); } static void sbsa_fdt_add_gic_node(SBSAMachineState *sms) @@ -589,8 +570,6 @@ static void create_ahci(const SBSAMachineState *sms) DeviceState *dev; DriveInfo *hd[NUM_SATA_PORTS]; SysbusAHCIState *sysahci; - AHCIState *ahci; - int i; dev = qdev_new("sysbus-ahci"); qdev_prop_set_uint32(dev, "num-ports", NUM_SATA_PORTS); @@ -599,14 +578,8 @@ static void create_ahci(const SBSAMachineState *sms) sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(sms->gic, irq)); sysahci = SYSBUS_AHCI(dev); - ahci = &sysahci->ahci; ide_drive_get(hd, ARRAY_SIZE(hd)); - for (i = 0; i < ahci->ports; i++) { - if (hd[i] == NULL) { - continue; - } - ide_bus_create_drive(&ahci->dev[i].port, 0, hd[i]); - } + ahci_ide_create_devs(&sysahci->ahci, hd); } static void create_xhci(const SBSAMachineState *sms) @@ -691,11 +664,8 @@ static void create_pcie(SBSAMachineState *sms) } pci = PCI_HOST_BRIDGE(dev); - if (pci->bus) { - for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci->bus, mc->default_nic, NULL); - } - } + + pci_init_nic_devices(pci->bus, mc->default_nic); pci_create_simple(pci->bus, -1, "bochs-display"); @@ -733,11 +703,6 @@ static void sbsa_ref_init(MachineState *machine) const CPUArchIdList *possible_cpus; int n, sbsa_max_cpus; - if (!cpu_type_valid(machine->cpu_type)) { - error_report("sbsa-ref: CPU type %s not supported", machine->cpu_type); - exit(1); - } - if (kvm_enabled()) { error_report("sbsa-ref: KVM is not supported for this machine"); exit(1); @@ -898,10 +863,20 @@ static void sbsa_ref_instance_init(Object *obj) static void sbsa_ref_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); + static 
const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a57"), + ARM_CPU_TYPE_NAME("cortex-a72"), + ARM_CPU_TYPE_NAME("neoverse-n1"), + ARM_CPU_TYPE_NAME("neoverse-v1"), + ARM_CPU_TYPE_NAME("neoverse-n2"), + ARM_CPU_TYPE_NAME("max"), + NULL, + }; mc->init = sbsa_ref_init; mc->desc = "QEMU 'SBSA Reference' ARM Virtual Machine"; mc->default_cpu_type = ARM_CPU_TYPE_NAME("neoverse-n1"); + mc->valid_cpu_types = valid_cpu_types; mc->max_cpus = 512; mc->pci_allow_0_address = true; mc->minimum_page_bits = 12; diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index f58261bb81e..c4b540656c1 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -197,7 +197,7 @@ void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid) g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid); } -inline void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid) +void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid) { trace_smmu_iotlb_inv_vmid(vmid); g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid); @@ -364,6 +364,17 @@ static int smmu_ptw_64_s1(SMMUTransCfg *cfg, pte_addr, pte, iova, gpa, block_size >> 20); } + + /* + * QEMU does not currently implement HTTU, so if AFFD and PTE.AF + * are 0 we take an Access flag fault. (5.4. Context Descriptor) + * An Access flag fault takes priority over a Permission fault. + */ + if (!PTE_AF(pte) && !cfg->affd) { + info->type = SMMU_PTW_ERR_ACCESS; + goto error; + } + ap = PTE_AP(pte); if (is_permission_fault(ap, perm)) { info->type = SMMU_PTW_ERR_PERMISSION; diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h index 6076025ad6a..e4dd11e1e62 100644 --- a/hw/arm/smmuv3-internal.h +++ b/hw/arm/smmuv3-internal.h @@ -21,6 +21,7 @@ #ifndef HW_ARM_SMMUV3_INTERNAL_H #define HW_ARM_SMMUV3_INTERNAL_H +#include "hw/registerfields.h" #include "hw/arm/smmu-common.h" typedef enum SMMUTranslationStatus { @@ -623,6 +624,7 @@ static inline int pa_range(STE *ste) #define CD_EPD(x, sel) extract32((x)->word[0], (16 * (sel)) + 14, 1) #define CD_ENDI(x) extract32((x)->word[0], 15, 1) #define CD_IPS(x) extract32((x)->word[1], 0 , 3) +#define CD_AFFD(x) extract32((x)->word[1], 3 , 1) #define CD_TBI(x) extract32((x)->word[1], 6 , 2) #define CD_HD(x) extract32((x)->word[1], 10 , 1) #define CD_HA(x) extract32((x)->word[1], 11 , 1) diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index c3871ae067f..9eb56a70f39 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -684,6 +684,7 @@ static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event) cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas); cfg->tbi = CD_TBI(cd); cfg->asid = CD_ASID(cd); + cfg->affd = CD_AFFD(cd); trace_smmuv3_decode_cd(cfg->oas); @@ -1768,7 +1769,7 @@ static const VMStateDescription vmstate_smmuv3_queue = { .name = "smmuv3_queue", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(base, SMMUQueue), VMSTATE_UINT32(prod, SMMUQueue), VMSTATE_UINT32(cons, SMMUQueue), @@ -1790,7 +1791,7 @@ static const VMStateDescription vmstate_gbpa = { .version_id = 1, .minimum_version_id = 1, .needed = smmuv3_gbpa_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(gbpa, SMMUv3State), VMSTATE_END_OF_LIST() } @@ -1801,7 +1802,7 @@ static const VMStateDescription vmstate_smmuv3 = { .version_id = 1, .minimum_version_id = 1, .priority = MIG_PRI_IOMMU, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(features, SMMUv3State), VMSTATE_UINT8(sid_size, 
SMMUv3State), VMSTATE_UINT8(sid_split, SMMUv3State), @@ -1826,7 +1827,7 @@ static const VMStateDescription vmstate_smmuv3 = { VMSTATE_END_OF_LIST(), }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_gbpa, NULL } @@ -1857,8 +1858,8 @@ static void smmuv3_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_smmuv3; resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL, &c->parent_phases); - c->parent_realize = dc->realize; - dc->realize = smmu_realize; + device_class_set_parent_realize(dc, smmu_realize, + &c->parent_realize); device_class_set_props(dc, smmuv3_properties); } diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c index cc268c6ac0b..62cd55ba914 100644 --- a/hw/arm/spitz.c +++ b/hw/arm/spitz.c @@ -33,7 +33,6 @@ #include "hw/adc/max111x.h" #include "migration/vmstate.h" #include "exec/address-spaces.h" -#include "cpu.h" #include "qom/object.h" #include "audio/audio.h" @@ -1042,6 +1041,7 @@ static void spitz_common_class_init(ObjectClass *oc, void *data) mc->block_default_type = IF_IDE; mc->ignore_memory_transaction_failures = true; mc->init = spitz_common_init; + mc->deprecation_reason = "machine is old and unmaintained"; machine_add_audiodev_property(mc); } @@ -1143,7 +1143,7 @@ static const VMStateDescription vmstate_sl_nand_info = { .name = "sl-nand", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(ctl, SLNANDState), VMSTATE_STRUCT(ecc, SLNANDState, 0, vmstate_ecc_state, ECCState), VMSTATE_END_OF_LIST(), @@ -1180,7 +1180,7 @@ static const VMStateDescription vmstate_spitz_kbd = { .version_id = 1, .minimum_version_id = 0, .post_load = spitz_keyboard_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(sense_state, SpitzKeyboardState), VMSTATE_UINT16(strobe_state, SpitzKeyboardState), VMSTATE_UNUSED_TEST(is_version_0, 5), @@ -1208,7 +1208,7 @@ static const VMStateDescription vmstate_corgi_ssp_regs = { .name = "corgi-ssp", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SSI_PERIPHERAL(ssidev, CorgiSSPState), VMSTATE_UINT32_ARRAY(enable, CorgiSSPState, 3), VMSTATE_END_OF_LIST(), @@ -1236,7 +1236,7 @@ static const VMStateDescription vmstate_spitz_lcdtg_regs = { .name = "spitz-lcdtg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SSI_PERIPHERAL(ssidev, SpitzLCDTG), VMSTATE_UINT32(bl_intensity, SpitzLCDTG), VMSTATE_UINT32(bl_power, SpitzLCDTG), diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c index dd90f686bfa..a2f998bf9e2 100644 --- a/hw/arm/stellaris.c +++ b/hw/arm/stellaris.c @@ -47,6 +47,7 @@ #define BP_GAMEPAD 0x04 #define NUM_IRQ_LINES 64 +#define NUM_PRIO_BITS 3 typedef const struct { const char *name; @@ -419,7 +420,7 @@ static const VMStateDescription vmstate_stellaris_sys = { .version_id = 2, .minimum_version_id = 1, .post_load = stellaris_sys_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(pborctl, ssys_state), VMSTATE_UINT32(ldopctl, ssys_state), VMSTATE_UINT32(int_mask, ssys_state), @@ -461,7 +462,10 @@ static void stellaris_sys_instance_init(Object *obj) s->sysclk = qdev_init_clock_out(DEVICE(s), "SYSCLK"); } -/* I2C controller. */ +/* + * I2C controller. + * ??? For now we only implement the master interface. 
+ */ #define TYPE_STELLARIS_I2C "stellaris-i2c" OBJECT_DECLARE_SIMPLE_TYPE(stellaris_i2c_state, STELLARIS_I2C) @@ -606,10 +610,17 @@ static void stellaris_i2c_write(void *opaque, hwaddr offset, stellaris_i2c_update(s); } -static void stellaris_i2c_reset(stellaris_i2c_state *s) +static void stellaris_i2c_reset_enter(Object *obj, ResetType type) { + stellaris_i2c_state *s = STELLARIS_I2C(obj); + if (s->mcs & STELLARIS_I2C_MCS_BUSBSY) i2c_end_transfer(s->bus); +} + +static void stellaris_i2c_reset_hold(Object *obj) +{ + stellaris_i2c_state *s = STELLARIS_I2C(obj); s->msa = 0; s->mcs = 0; @@ -618,6 +629,12 @@ static void stellaris_i2c_reset(stellaris_i2c_state *s) s->mimr = 0; s->mris = 0; s->mcr = 0; +} + +static void stellaris_i2c_reset_exit(Object *obj) +{ + stellaris_i2c_state *s = STELLARIS_I2C(obj); + stellaris_i2c_update(s); } @@ -631,7 +648,7 @@ static const VMStateDescription vmstate_stellaris_i2c = { .name = "stellaris_i2c", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(msa, stellaris_i2c_state), VMSTATE_UINT32(mcs, stellaris_i2c_state), VMSTATE_UINT32(mdr, stellaris_i2c_state), @@ -657,8 +674,6 @@ static void stellaris_i2c_init(Object *obj) memory_region_init_io(&s->iomem, obj, &stellaris_i2c_ops, s, "i2c", 0x1000); sysbus_init_mmio(sbd, &s->iomem); - /* ??? For now we only implement the master interface. */ - stellaris_i2c_reset(s); } /* Analogue to Digital Converter. This is only partially implemented, @@ -772,8 +787,9 @@ static void stellaris_adc_trigger(void *opaque, int irq, int level) } } -static void stellaris_adc_reset(StellarisADCState *s) +static void stellaris_adc_reset_hold(Object *obj) { + StellarisADCState *s = STELLARIS_ADC(obj); int n; for (n = 0; n < 4; n++) { @@ -901,7 +917,7 @@ static const VMStateDescription vmstate_stellaris_adc = { .name = "stellaris_adc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(actss, StellarisADCState), VMSTATE_UINT32(ris, StellarisADCState), VMSTATE_UINT32(im, StellarisADCState), @@ -945,7 +961,6 @@ static void stellaris_adc_init(Object *obj) memory_region_init_io(&s->iomem, obj, &stellaris_adc_ops, s, "adc", 0x1000); sysbus_init_mmio(sbd, &s->iomem); - stellaris_adc_reset(s); qdev_init_gpio_in(dev, stellaris_adc_trigger, 1); } @@ -1016,6 +1031,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) * 400fe000 system control */ + Object *soc_container; DeviceState *gpio_dev[7], *nvic; qemu_irq gpio_in[7][8]; qemu_irq gpio_out[7][8]; @@ -1027,7 +1043,8 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) DeviceState *ssys_dev; int i; int j; - const uint8_t *macaddr; + NICInfo *nd; + MACAddr mac; MemoryRegion *sram = g_new(MemoryRegion, 1); MemoryRegion *flash = g_new(MemoryRegion, 1); @@ -1036,6 +1053,9 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) flash_size = (((board->dc0 & 0xffff) + 1) << 1) * 1024; sram_size = ((board->dc0 >> 18) + 1) * 1024; + soc_container = object_new("container"); + object_property_add_child(OBJECT(ms), "soc", soc_container); + /* Flash programming is done via the SCU, so pretend it is ROM. */ memory_region_init_rom(flash, NULL, "stellaris.flash", flash_size, &error_fatal); @@ -1050,12 +1070,23 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) * need its sysclk output. 
*/ ssys_dev = qdev_new(TYPE_STELLARIS_SYS); - /* Most devices come preprogrammed with a MAC address in the user data. */ - macaddr = nd_table[0].macaddr.a; + object_property_add_child(soc_container, "sys", OBJECT(ssys_dev)); + + /* + * Most devices come preprogrammed with a MAC address in the user data. + * Generate a MAC address now, if there isn't a matching -nic for it. + */ + nd = qemu_find_nic_info("stellaris_enet", true, "stellaris"); + if (nd) { + memcpy(mac.a, nd->macaddr.a, sizeof(mac.a)); + } else { + qemu_macaddr_default_if_unset(&mac); + } + qdev_prop_set_uint32(ssys_dev, "user0", - macaddr[0] | (macaddr[1] << 8) | (macaddr[2] << 16)); + mac.a[0] | (mac.a[1] << 8) | (mac.a[2] << 16)); qdev_prop_set_uint32(ssys_dev, "user1", - macaddr[3] | (macaddr[4] << 8) | (macaddr[5] << 16)); + mac.a[3] | (mac.a[4] << 8) | (mac.a[5] << 16)); qdev_prop_set_uint32(ssys_dev, "did0", board->did0); qdev_prop_set_uint32(ssys_dev, "did1", board->did1); qdev_prop_set_uint32(ssys_dev, "dc0", board->dc0); @@ -1066,7 +1097,9 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) sysbus_realize_and_unref(SYS_BUS_DEVICE(ssys_dev), &error_fatal); nvic = qdev_new(TYPE_ARMV7M); + object_property_add_child(soc_container, "v7m", OBJECT(nvic)); qdev_prop_set_uint32(nvic, "num-irq", NUM_IRQ_LINES); + qdev_prop_set_uint8(nvic, "num-prio-bits", NUM_PRIO_BITS); qdev_prop_set_string(nvic, "cpu-type", ms->cpu_type); qdev_prop_set_bit(nvic, "enable-bitband", true); qdev_connect_clock_in(nvic, "cpuclk", @@ -1098,6 +1131,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) dev = qdev_new(TYPE_STELLARIS_GPTM); sbd = SYS_BUS_DEVICE(dev); + object_property_add_child(soc_container, "gptm[*]", OBJECT(dev)); qdev_connect_clock_in(dev, "clk", qdev_get_clock_out(ssys_dev, "SYSCLK")); sysbus_realize_and_unref(sbd, &error_fatal); @@ -1111,7 +1145,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) if (board->dc1 & (1 << 3)) { /* watchdog present */ dev = qdev_new(TYPE_LUMINARY_WATCHDOG); - + object_property_add_child(soc_container, "wdg", OBJECT(dev)); qdev_connect_clock_in(dev, "WDOGCLK", qdev_get_clock_out(ssys_dev, "SYSCLK")); @@ -1151,6 +1185,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) SysBusDevice *sbd; dev = qdev_new("pl011_luminary"); + object_property_add_child(soc_container, "uart[*]", OBJECT(dev)); sbd = SYS_BUS_DEVICE(dev); qdev_prop_set_chr(dev, "chardev", serial_hd(i)); sysbus_realize_and_unref(sbd, &error_fatal); @@ -1244,10 +1279,13 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) &error_fatal); ssddev = qdev_new("ssd0323"); + object_property_add_child(OBJECT(ms), "oled", OBJECT(ssddev)); qdev_prop_set_uint8(ssddev, "cs", 1); qdev_realize_and_unref(ssddev, bus, &error_fatal); gpio_d_splitter = qdev_new(TYPE_SPLIT_IRQ); + object_property_add_child(OBJECT(ms), "splitter", + OBJECT(gpio_d_splitter)); qdev_prop_set_uint32(gpio_d_splitter, "num-lines", 2); qdev_realize_and_unref(gpio_d_splitter, NULL, &error_fatal); qdev_connect_gpio_out( @@ -1267,10 +1305,14 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) if (board->dc4 & (1 << 28)) { DeviceState *enet; - qemu_check_nic_model(&nd_table[0], "stellaris"); - enet = qdev_new("stellaris_enet"); - qdev_set_nic_properties(enet, &nd_table[0]); + object_property_add_child(soc_container, "enet", OBJECT(enet)); + if (nd) { + qdev_set_nic_properties(enet, nd); + } else { + qdev_prop_set_macaddr(enet, "mac", mac.a); 
+ } + sysbus_realize_and_unref(SYS_BUS_DEVICE(enet), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(enet), 0, 0x40048000); sysbus_connect_irq(SYS_BUS_DEVICE(enet), 0, qdev_get_gpio_in(nvic, 42)); @@ -1284,6 +1326,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) DeviceState *gpad; gpad = qdev_new(TYPE_STELLARIS_GAMEPAD); + object_property_add_child(OBJECT(ms), "gamepad", OBJECT(gpad)); for (i = 0; i < ARRAY_SIZE(gpad_keycode); i++) { qlist_append_int(gpad_keycode_list, gpad_keycode[i]); } @@ -1380,7 +1423,11 @@ type_init(stellaris_machine_init) static void stellaris_i2c_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + rc->phases.enter = stellaris_i2c_reset_enter; + rc->phases.hold = stellaris_i2c_reset_hold; + rc->phases.exit = stellaris_i2c_reset_exit; dc->vmsd = &vmstate_stellaris_i2c; } @@ -1395,7 +1442,9 @@ static const TypeInfo stellaris_i2c_info = { static void stellaris_adc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + rc->phases.hold = stellaris_adc_reset_hold; dc->vmsd = &vmstate_stellaris_adc; } diff --git a/hw/arm/stm32f100_soc.c b/hw/arm/stm32f100_soc.c index b90d440d7aa..808b783515d 100644 --- a/hw/arm/stm32f100_soc.c +++ b/hw/arm/stm32f100_soc.c @@ -115,6 +115,7 @@ static void stm32f100_soc_realize(DeviceState *dev_soc, Error **errp) /* Init ARMv7m */ armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 61); + qdev_prop_set_uint8(armv7m, "num-prio-bits", 4); qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m3")); qdev_prop_set_bit(armv7m, "enable-bitband", true); qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); diff --git a/hw/arm/stm32f205_soc.c b/hw/arm/stm32f205_soc.c index 1a548646f6e..a451e21f59c 100644 --- a/hw/arm/stm32f205_soc.c +++ b/hw/arm/stm32f205_soc.c @@ -127,6 +127,7 @@ static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp) armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 96); + qdev_prop_set_uint8(armv7m, "num-prio-bits", 4); qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m3")); qdev_prop_set_bit(armv7m, "enable-bitband", true); qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); diff --git a/hw/arm/stm32f405_soc.c b/hw/arm/stm32f405_soc.c index a65bbe298d2..2ad5b79a069 100644 --- a/hw/arm/stm32f405_soc.c +++ b/hw/arm/stm32f405_soc.c @@ -149,6 +149,7 @@ static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp) armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 96); + qdev_prop_set_uint8(armv7m, "num-prio-bits", 4); qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m4")); qdev_prop_set_bit(armv7m, "enable-bitband", true); qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); diff --git a/hw/arm/stm32l4x5_soc.c b/hw/arm/stm32l4x5_soc.c new file mode 100644 index 00000000000..40e294f838f --- /dev/null +++ b/hw/arm/stm32l4x5_soc.c @@ -0,0 +1,421 @@ +/* + * STM32L4x5 SoC family + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This work is heavily inspired by the stm32f405_soc by Alistair Francis. 
+ * Original code is licensed under the MIT License: + * + * Copyright (c) 2014 Alistair Francis + */ + +/* + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. + * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "exec/address-spaces.h" +#include "sysemu/sysemu.h" +#include "hw/or-irq.h" +#include "hw/arm/stm32l4x5_soc.h" +#include "hw/gpio/stm32l4x5_gpio.h" +#include "hw/qdev-clock.h" +#include "hw/misc/unimp.h" + +#define FLASH_BASE_ADDRESS 0x08000000 +#define SRAM1_BASE_ADDRESS 0x20000000 +#define SRAM1_SIZE (96 * KiB) +#define SRAM2_BASE_ADDRESS 0x10000000 +#define SRAM2_SIZE (32 * KiB) + +#define EXTI_ADDR 0x40010400 +#define SYSCFG_ADDR 0x40010000 + +#define NUM_EXTI_IRQ 40 +/* Match exti line connections with their CPU IRQ number */ +/* See Vector Table (Reference Manual p.396) */ +/* + * Some IRQs are connected to the same CPU IRQ (denoted by -1) + * and require an intermediary OR gate to function correctly. + */ +static const int exti_irq[NUM_EXTI_IRQ] = { + 6, /* GPIO[0] */ + 7, /* GPIO[1] */ + 8, /* GPIO[2] */ + 9, /* GPIO[3] */ + 10, /* GPIO[4] */ + -1, -1, -1, -1, -1, /* GPIO[5..9] OR gate 23 */ + -1, -1, -1, -1, -1, -1, /* GPIO[10..15] OR gate 40 */ + -1, /* PVD OR gate 1 */ + 67, /* OTG_FS_WKUP, Direct */ + 41, /* RTC_ALARM */ + 2, /* RTC_TAMP_STAMP2/CSS_LSE */ + 3, /* RTC wakeup timer */ + -1, -1, /* COMP[1..2] OR gate 63 */ + 31, /* I2C1 wakeup, Direct */ + 33, /* I2C2 wakeup, Direct */ + 72, /* I2C3 wakeup, Direct */ + 37, /* USART1 wakeup, Direct */ + 38, /* USART2 wakeup, Direct */ + 39, /* USART3 wakeup, Direct */ + 52, /* UART4 wakeup, Direct */ + 53, /* UART4 wakeup, Direct */ + 70, /* LPUART1 wakeup, Direct */ + 65, /* LPTIM1, Direct */ + 66, /* LPTIM2, Direct */ + 76, /* SWPMI1 wakeup, Direct */ + -1, -1, -1, -1, /* PVM[1..4] OR gate 1 */ + 78 /* LCD wakeup, Direct */ +}; +#define RCC_BASE_ADDRESS 0x40021000 +#define RCC_IRQ 5 + +static const int exti_or_gates_out[NUM_EXTI_OR_GATES] = { + 23, 40, 63, 1, +}; + +static const int exti_or_gates_num_lines_in[NUM_EXTI_OR_GATES] = { + 5, 6, 2, 5, +}; + +/* 3 OR gates with consecutive inputs */ +#define NUM_EXTI_SIMPLE_OR_GATES 3 +static const int exti_or_gates_first_line_in[NUM_EXTI_SIMPLE_OR_GATES] = { + 5, 10, 21, +}; + +/* 1 OR gate with non-consecutive inputs */ +#define EXTI_OR_GATE1_NUM_LINES_IN 5 +static const int exti_or_gate1_lines_in[EXTI_OR_GATE1_NUM_LINES_IN] = { + 16, 35, 36, 37, 38, +}; + +static const struct { + uint32_t addr; + uint32_t moder_reset; + uint32_t ospeedr_reset; + uint32_t pupdr_reset; +} stm32l4x5_gpio_cfg[NUM_GPIOS] = { + { 0x48000000, 0xABFFFFFF, 0x0C000000, 0x64000000 }, + { 0x48000400, 0xFFFFFEBF, 0x00000000, 0x00000100 }, + { 0x48000800, 0xFFFFFFFF, 0x00000000, 0x00000000 }, + { 0x48000C00, 0xFFFFFFFF, 0x00000000, 0x00000000 }, + { 0x48001000, 0xFFFFFFFF, 0x00000000, 0x00000000 }, + { 0x48001400, 0xFFFFFFFF, 0x00000000, 0x00000000 }, + { 0x48001800, 0xFFFFFFFF, 0x00000000, 0x00000000 }, + { 0x48001C00, 0x0000000F, 0x00000000, 0x00000000 }, +}; + +static void stm32l4x5_soc_initfn(Object *obj) +{ + Stm32l4x5SocState *s = STM32L4X5_SOC(obj); + + object_initialize_child(obj, "exti", &s->exti, TYPE_STM32L4X5_EXTI); + for (unsigned i = 0; i < NUM_EXTI_OR_GATES; i++) { + object_initialize_child(obj, "exti_or_gates[*]", &s->exti_or_gates[i], + TYPE_OR_IRQ); + } + 
object_initialize_child(obj, "syscfg", &s->syscfg, TYPE_STM32L4X5_SYSCFG); + object_initialize_child(obj, "rcc", &s->rcc, TYPE_STM32L4X5_RCC); + + for (unsigned i = 0; i < NUM_GPIOS; i++) { + g_autofree char *name = g_strdup_printf("gpio%c", 'a' + i); + object_initialize_child(obj, name, &s->gpio[i], TYPE_STM32L4X5_GPIO); + } +} + +static void stm32l4x5_soc_realize(DeviceState *dev_soc, Error **errp) +{ + ERRP_GUARD(); + Stm32l4x5SocState *s = STM32L4X5_SOC(dev_soc); + const Stm32l4x5SocClass *sc = STM32L4X5_SOC_GET_CLASS(dev_soc); + MemoryRegion *system_memory = get_system_memory(); + DeviceState *armv7m, *dev; + SysBusDevice *busdev; + uint32_t pin_index; + + if (!memory_region_init_rom(&s->flash, OBJECT(dev_soc), "flash", + sc->flash_size, errp)) { + return; + } + memory_region_init_alias(&s->flash_alias, OBJECT(dev_soc), + "flash_boot_alias", &s->flash, 0, + sc->flash_size); + + memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, &s->flash); + memory_region_add_subregion(system_memory, 0, &s->flash_alias); + + if (!memory_region_init_ram(&s->sram1, OBJECT(dev_soc), "SRAM1", SRAM1_SIZE, + errp)) { + return; + } + memory_region_add_subregion(system_memory, SRAM1_BASE_ADDRESS, &s->sram1); + + if (!memory_region_init_ram(&s->sram2, OBJECT(dev_soc), "SRAM2", SRAM2_SIZE, + errp)) { + return; + } + memory_region_add_subregion(system_memory, SRAM2_BASE_ADDRESS, &s->sram2); + + object_initialize_child(OBJECT(dev_soc), "armv7m", &s->armv7m, TYPE_ARMV7M); + armv7m = DEVICE(&s->armv7m); + qdev_prop_set_uint32(armv7m, "num-irq", 96); + qdev_prop_set_uint32(armv7m, "num-prio-bits", 4); + qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m4")); + qdev_prop_set_bit(armv7m, "enable-bitband", true); + qdev_connect_clock_in(armv7m, "cpuclk", + qdev_get_clock_out(DEVICE(&(s->rcc)), "cortex-fclk-out")); + qdev_connect_clock_in(armv7m, "refclk", + qdev_get_clock_out(DEVICE(&(s->rcc)), "cortex-refclk-out")); + object_property_set_link(OBJECT(&s->armv7m), "memory", + OBJECT(system_memory), &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) { + return; + } + + /* GPIOs */ + for (unsigned i = 0; i < NUM_GPIOS; i++) { + g_autofree char *name = g_strdup_printf("%c", 'A' + i); + dev = DEVICE(&s->gpio[i]); + qdev_prop_set_string(dev, "name", name); + qdev_prop_set_uint32(dev, "mode-reset", + stm32l4x5_gpio_cfg[i].moder_reset); + qdev_prop_set_uint32(dev, "ospeed-reset", + stm32l4x5_gpio_cfg[i].ospeedr_reset); + qdev_prop_set_uint32(dev, "pupd-reset", + stm32l4x5_gpio_cfg[i].pupdr_reset); + busdev = SYS_BUS_DEVICE(&s->gpio[i]); + g_free(name); + name = g_strdup_printf("gpio%c-out", 'a' + i); + qdev_connect_clock_in(DEVICE(&s->gpio[i]), "clk", + qdev_get_clock_out(DEVICE(&(s->rcc)), name)); + if (!sysbus_realize(busdev, errp)) { + return; + } + sysbus_mmio_map(busdev, 0, stm32l4x5_gpio_cfg[i].addr); + } + + /* System configuration controller */ + busdev = SYS_BUS_DEVICE(&s->syscfg); + if (!sysbus_realize(busdev, errp)) { + return; + } + sysbus_mmio_map(busdev, 0, SYSCFG_ADDR); + + for (unsigned i = 0; i < NUM_GPIOS; i++) { + for (unsigned j = 0; j < GPIO_NUM_PINS; j++) { + pin_index = GPIO_NUM_PINS * i + j; + qdev_connect_gpio_out(DEVICE(&s->gpio[i]), j, + qdev_get_gpio_in(DEVICE(&s->syscfg), + pin_index)); + } + } + + /* EXTI device */ + busdev = SYS_BUS_DEVICE(&s->exti); + if (!sysbus_realize(busdev, errp)) { + return; + } + sysbus_mmio_map(busdev, 0, EXTI_ADDR); + + /* IRQs with fan-in that require an OR gate */ + for (unsigned i = 0; i < NUM_EXTI_OR_GATES; i++) { + 
if (!object_property_set_int(OBJECT(&s->exti_or_gates[i]), "num-lines", + exti_or_gates_num_lines_in[i], errp)) { + return; + } + if (!qdev_realize(DEVICE(&s->exti_or_gates[i]), NULL, errp)) { + return; + } + + qdev_connect_gpio_out(DEVICE(&s->exti_or_gates[i]), 0, + qdev_get_gpio_in(armv7m, exti_or_gates_out[i])); + + if (i < NUM_EXTI_SIMPLE_OR_GATES) { + /* consecutive inputs for OR gates 23, 40, 63 */ + for (unsigned j = 0; j < exti_or_gates_num_lines_in[i]; j++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->exti), + exti_or_gates_first_line_in[i] + j, + qdev_get_gpio_in(DEVICE(&s->exti_or_gates[i]), j)); + } + } else { + /* non-consecutive inputs for OR gate 1 */ + for (unsigned j = 0; j < EXTI_OR_GATE1_NUM_LINES_IN; j++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->exti), + exti_or_gate1_lines_in[j], + qdev_get_gpio_in(DEVICE(&s->exti_or_gates[i]), j)); + } + } + } + + /* IRQs that don't require fan-in */ + for (unsigned i = 0; i < NUM_EXTI_IRQ; i++) { + if (exti_irq[i] != -1) { + sysbus_connect_irq(busdev, i, + qdev_get_gpio_in(armv7m, exti_irq[i])); + } + } + + for (unsigned i = 0; i < GPIO_NUM_PINS; i++) { + qdev_connect_gpio_out(DEVICE(&s->syscfg), i, + qdev_get_gpio_in(DEVICE(&s->exti), i)); + } + + /* RCC device */ + busdev = SYS_BUS_DEVICE(&s->rcc); + if (!sysbus_realize(busdev, errp)) { + return; + } + sysbus_mmio_map(busdev, 0, RCC_BASE_ADDRESS); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m, RCC_IRQ)); + + /* APB1 BUS */ + create_unimplemented_device("TIM2", 0x40000000, 0x400); + create_unimplemented_device("TIM3", 0x40000400, 0x400); + create_unimplemented_device("TIM4", 0x40000800, 0x400); + create_unimplemented_device("TIM5", 0x40000C00, 0x400); + create_unimplemented_device("TIM6", 0x40001000, 0x400); + create_unimplemented_device("TIM7", 0x40001400, 0x400); + /* RESERVED: 0x40001800, 0x1000 */ + create_unimplemented_device("RTC", 0x40002800, 0x400); + create_unimplemented_device("WWDG", 0x40002C00, 0x400); + create_unimplemented_device("IWDG", 0x40003000, 0x400); + /* RESERVED: 0x40001800, 0x400 */ + create_unimplemented_device("SPI2", 0x40003800, 0x400); + create_unimplemented_device("SPI3", 0x40003C00, 0x400); + /* RESERVED: 0x40004000, 0x400 */ + create_unimplemented_device("USART2", 0x40004400, 0x400); + create_unimplemented_device("USART3", 0x40004800, 0x400); + create_unimplemented_device("UART4", 0x40004C00, 0x400); + create_unimplemented_device("UART5", 0x40005000, 0x400); + create_unimplemented_device("I2C1", 0x40005400, 0x400); + create_unimplemented_device("I2C2", 0x40005800, 0x400); + create_unimplemented_device("I2C3", 0x40005C00, 0x400); + /* RESERVED: 0x40006000, 0x400 */ + create_unimplemented_device("CAN1", 0x40006400, 0x400); + /* RESERVED: 0x40006800, 0x400 */ + create_unimplemented_device("PWR", 0x40007000, 0x400); + create_unimplemented_device("DAC1", 0x40007400, 0x400); + create_unimplemented_device("OPAMP", 0x40007800, 0x400); + create_unimplemented_device("LPTIM1", 0x40007C00, 0x400); + create_unimplemented_device("LPUART1", 0x40008000, 0x400); + /* RESERVED: 0x40008400, 0x400 */ + create_unimplemented_device("SWPMI1", 0x40008800, 0x400); + /* RESERVED: 0x40008C00, 0x800 */ + create_unimplemented_device("LPTIM2", 0x40009400, 0x400); + /* RESERVED: 0x40009800, 0x6800 */ + + /* APB2 BUS */ + create_unimplemented_device("VREFBUF", 0x40010030, 0x1D0); + create_unimplemented_device("COMP", 0x40010200, 0x200); + /* RESERVED: 0x40010800, 0x1400 */ + create_unimplemented_device("FIREWALL", 0x40011C00, 0x400); + /* RESERVED: 0x40012000, 0x800 */ + 
create_unimplemented_device("SDMMC1", 0x40012800, 0x400); + create_unimplemented_device("TIM1", 0x40012C00, 0x400); + create_unimplemented_device("SPI1", 0x40013000, 0x400); + create_unimplemented_device("TIM8", 0x40013400, 0x400); + create_unimplemented_device("USART1", 0x40013800, 0x400); + /* RESERVED: 0x40013C00, 0x400 */ + create_unimplemented_device("TIM15", 0x40014000, 0x400); + create_unimplemented_device("TIM16", 0x40014400, 0x400); + create_unimplemented_device("TIM17", 0x40014800, 0x400); + /* RESERVED: 0x40014C00, 0x800 */ + create_unimplemented_device("SAI1", 0x40015400, 0x400); + create_unimplemented_device("SAI2", 0x40015800, 0x400); + /* RESERVED: 0x40015C00, 0x400 */ + create_unimplemented_device("DFSDM1", 0x40016000, 0x400); + /* RESERVED: 0x40016400, 0x9C00 */ + + /* AHB1 BUS */ + create_unimplemented_device("DMA1", 0x40020000, 0x400); + create_unimplemented_device("DMA2", 0x40020400, 0x400); + /* RESERVED: 0x40020800, 0x800 */ + /* RESERVED: 0x40021400, 0xC00 */ + create_unimplemented_device("FLASH", 0x40022000, 0x400); + /* RESERVED: 0x40022400, 0xC00 */ + create_unimplemented_device("CRC", 0x40023000, 0x400); + /* RESERVED: 0x40023400, 0x400 */ + create_unimplemented_device("TSC", 0x40024000, 0x400); + + /* RESERVED: 0x40024400, 0x7FDBC00 */ + + /* AHB2 BUS */ + /* RESERVED: 0x48002000, 0x7FDBC00 */ + create_unimplemented_device("OTG_FS", 0x50000000, 0x40000); + create_unimplemented_device("ADC", 0x50040000, 0x400); + /* RESERVED: 0x50040400, 0x20400 */ + create_unimplemented_device("RNG", 0x50060800, 0x400); + + /* AHB3 BUS */ + create_unimplemented_device("FMC", 0xA0000000, 0x1000); + create_unimplemented_device("QUADSPI", 0xA0001000, 0x400); +} + +static void stm32l4x5_soc_class_init(ObjectClass *klass, void *data) +{ + + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = stm32l4x5_soc_realize; + /* Reason: Mapped at fixed location on the system bus */ + dc->user_creatable = false; + /* No vmstate or reset required: device has no internal state */ +} + +static void stm32l4x5xc_soc_class_init(ObjectClass *oc, void *data) +{ + Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc); + + ssc->flash_size = 256 * KiB; +} + +static void stm32l4x5xe_soc_class_init(ObjectClass *oc, void *data) +{ + Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc); + + ssc->flash_size = 512 * KiB; +} + +static void stm32l4x5xg_soc_class_init(ObjectClass *oc, void *data) +{ + Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc); + + ssc->flash_size = 1 * MiB; +} + +static const TypeInfo stm32l4x5_soc_types[] = { + { + .name = TYPE_STM32L4X5XC_SOC, + .parent = TYPE_STM32L4X5_SOC, + .class_init = stm32l4x5xc_soc_class_init, + }, { + .name = TYPE_STM32L4X5XE_SOC, + .parent = TYPE_STM32L4X5_SOC, + .class_init = stm32l4x5xe_soc_class_init, + }, { + .name = TYPE_STM32L4X5XG_SOC, + .parent = TYPE_STM32L4X5_SOC, + .class_init = stm32l4x5xg_soc_class_init, + }, { + .name = TYPE_STM32L4X5_SOC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Stm32l4x5SocState), + .instance_init = stm32l4x5_soc_initfn, + .class_size = sizeof(Stm32l4x5SocClass), + .class_init = stm32l4x5_soc_class_init, + .abstract = true, + } +}; + +DEFINE_TYPES(stm32l4x5_soc_types) diff --git a/hw/arm/stm32vldiscovery.c b/hw/arm/stm32vldiscovery.c index 190db6118b9..cc419351605 100644 --- a/hw/arm/stm32vldiscovery.c +++ b/hw/arm/stm32vldiscovery.c @@ -47,6 +47,7 @@ static void stm32vldiscovery_init(MachineState *machine) clock_set_hz(sysclk, SYSCLK_FRQ); dev = qdev_new(TYPE_STM32F100_SOC); + 
object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c index cc73145053a..823b4931b0a 100644 --- a/hw/arm/strongarm.c +++ b/hw/arm/strongarm.c @@ -28,7 +28,6 @@ */ #include "qemu/osdep.h" -#include "cpu.h" #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" @@ -46,8 +45,8 @@ #include "qemu/cutils.h" #include "qemu/log.h" #include "qom/object.h" - -//#define DEBUG +#include "target/arm/cpu-qom.h" +#include "trace.h" /* TODO @@ -66,12 +65,6 @@ - Enhance UART with modem signals */ -#ifdef DEBUG -# define DPRINTF(format, ...) printf(format , ## __VA_ARGS__) -#else -# define DPRINTF(format, ...) do { } while (0) -#endif - static struct { hwaddr io_base; int irq; @@ -151,8 +144,9 @@ static uint64_t strongarm_pic_mem_read(void *opaque, hwaddr offset, case ICPR: return s->pending; default: - printf("%s: Bad register offset 0x" HWADDR_FMT_plx "\n", - __func__, offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad register offset 0x"HWADDR_FMT_plx"\n", + __func__, offset); return 0; } } @@ -173,8 +167,9 @@ static void strongarm_pic_mem_write(void *opaque, hwaddr offset, s->int_idle = (value & 1) ? 0 : ~0; break; default: - printf("%s: Bad register offset 0x" HWADDR_FMT_plx "\n", - __func__, offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad register offset 0x"HWADDR_FMT_plx"\n", + __func__, offset); break; } strongarm_pic_update(s); @@ -211,7 +206,7 @@ static const VMStateDescription vmstate_strongarm_pic_regs = { .version_id = 0, .minimum_version_id = 0, .post_load = strongarm_pic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(pending, StrongARMPICState), VMSTATE_UINT32(enabled, StrongARMPICState), VMSTATE_UINT32(is_fiq, StrongARMPICState), @@ -333,7 +328,9 @@ static uint64_t strongarm_rtc_read(void *opaque, hwaddr addr, ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) / (1000 * ((s->rttr & 0xffff) + 1)); default: - printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad rtc register read 0x"HWADDR_FMT_plx"\n", + __func__, addr); return 0; } } @@ -375,7 +372,9 @@ static void strongarm_rtc_write(void *opaque, hwaddr addr, break; default: - printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad rtc register write 0x"HWADDR_FMT_plx"\n", + __func__, addr); } } @@ -439,7 +438,7 @@ static const VMStateDescription vmstate_strongarm_rtc_regs = { .minimum_version_id = 0, .pre_save = strongarm_rtc_pre_save, .post_load = strongarm_rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(rttr, StrongARMRTCState), VMSTATE_UINT32(rtsr, StrongARMRTCState), VMSTATE_UINT32(rtar, StrongARMRTCState), @@ -556,12 +555,12 @@ static uint64_t strongarm_gpio_read(void *opaque, hwaddr offset, case GPSR: /* GPIO Pin-Output Set registers */ qemu_log_mask(LOG_GUEST_ERROR, - "strongarm GPIO: read from write only register GPSR\n"); + "%s: read from write only register GPSR\n", __func__); return 0; case GPCR: /* GPIO Pin-Output Clear registers */ qemu_log_mask(LOG_GUEST_ERROR, - "strongarm GPIO: read from write only register GPCR\n"); + "%s: read from write only register GPCR\n", __func__); return 0; case GRER: /* GPIO Rising-Edge Detect Enable registers */ @@ -581,7 +580,9 @@ static uint64_t strongarm_gpio_read(void *opaque, 
hwaddr offset, return s->status; default: - printf("%s: Bad offset 0x" HWADDR_FMT_plx "\n", __func__, offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad gpio read offset 0x"HWADDR_FMT_plx"\n", + __func__, offset); } return 0; @@ -626,7 +627,9 @@ static void strongarm_gpio_write(void *opaque, hwaddr offset, break; default: - printf("%s: Bad offset 0x" HWADDR_FMT_plx "\n", __func__, offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad write offset 0x"HWADDR_FMT_plx"\n", + __func__, offset); } } @@ -677,7 +680,7 @@ static const VMStateDescription vmstate_strongarm_gpio_regs = { .name = "strongarm-gpio", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ilevel, StrongARMGPIOInfo), VMSTATE_UINT32(olevel, StrongARMGPIOInfo), VMSTATE_UINT32(dir, StrongARMGPIOInfo), @@ -782,7 +785,9 @@ static uint64_t strongarm_ppc_read(void *opaque, hwaddr offset, return s->ppfr | ~0x7f001; default: - printf("%s: Bad offset 0x" HWADDR_FMT_plx "\n", __func__, offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad ppc read offset 0x"HWADDR_FMT_plx "\n", + __func__, offset); } return 0; @@ -817,7 +822,9 @@ static void strongarm_ppc_write(void *opaque, hwaddr offset, break; default: - printf("%s: Bad offset 0x" HWADDR_FMT_plx "\n", __func__, offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad ppc write offset 0x"HWADDR_FMT_plx"\n", + __func__, offset); } } @@ -846,7 +853,7 @@ static const VMStateDescription vmstate_strongarm_ppc_regs = { .name = "strongarm-ppc", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ilevel, StrongARMPPCInfo), VMSTATE_UINT32(olevel, StrongARMPPCInfo), VMSTATE_UINT32(dir, StrongARMPPCInfo), @@ -1029,8 +1036,13 @@ static void strongarm_uart_update_parameters(StrongARMUARTState *s) s->char_transmit_time = (NANOSECONDS_PER_SECOND / speed) * frame_size; qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp); - DPRINTF(stderr, "%s speed=%d parity=%c data=%d stop=%d\n", s->chr->label, - speed, parity, data_bits, stop_bits); + trace_strongarm_uart_update_parameters((s->chr.chr ? 
+ s->chr.chr->label : "NULL") ?: + "NULL", + speed, + parity, + data_bits, + stop_bits); } static void strongarm_uart_rx_to(void *opaque) @@ -1164,7 +1176,9 @@ static uint64_t strongarm_uart_read(void *opaque, hwaddr addr, return s->utsr1; default: - printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad uart register read 0x"HWADDR_FMT_plx"\n", + __func__, addr); return 0; } } @@ -1221,7 +1235,9 @@ static void strongarm_uart_write(void *opaque, hwaddr addr, break; default: - printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad uart register write 0x"HWADDR_FMT_plx"\n", + __func__, addr); } } @@ -1300,7 +1316,7 @@ static const VMStateDescription vmstate_strongarm_uart_regs = { .version_id = 0, .minimum_version_id = 0, .post_load = strongarm_uart_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(utcr0, StrongARMUARTState), VMSTATE_UINT16(brd, StrongARMUARTState), VMSTATE_UINT8(utcr3, StrongARMUARTState), @@ -1434,7 +1450,7 @@ static uint64_t strongarm_ssp_read(void *opaque, hwaddr addr, return 0xffffffff; } if (s->rx_level < 1) { - printf("%s: SSP Rx Underrun\n", __func__); + trace_strongarm_ssp_read_underrun(); return 0xffffffff; } s->rx_level--; @@ -1443,7 +1459,9 @@ static uint64_t strongarm_ssp_read(void *opaque, hwaddr addr, strongarm_ssp_fifo_update(s); return retval; default: - printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad ssp register read 0x"HWADDR_FMT_plx"\n", + __func__, addr); break; } return 0; @@ -1458,8 +1476,8 @@ static void strongarm_ssp_write(void *opaque, hwaddr addr, case SSCR0: s->sscr[0] = value & 0xffbf; if ((s->sscr[0] & SSCR0_SSE) && SSCR0_DSS(value) < 4) { - printf("%s: Wrong data size: %i bits\n", __func__, - (int)SSCR0_DSS(value)); + qemu_log_mask(LOG_GUEST_ERROR, "%s: Wrong data size: %i bits\n", + __func__, (int)SSCR0_DSS(value)); } if (!(value & SSCR0_SSE)) { s->sssr = 0; @@ -1471,7 +1489,9 @@ static void strongarm_ssp_write(void *opaque, hwaddr addr, case SSCR1: s->sscr[1] = value & 0x2f; if (value & SSCR1_LBM) { - printf("%s: Attempt to use SSP LBM mode\n", __func__); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Attempt to use SSP LBM mode\n", + __func__); } strongarm_ssp_fifo_update(s); break; @@ -1509,7 +1529,9 @@ static void strongarm_ssp_write(void *opaque, hwaddr addr, break; default: - printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad ssp register write 0x"HWADDR_FMT_plx"\n", + __func__, addr); break; } } @@ -1558,7 +1580,7 @@ static const VMStateDescription vmstate_strongarm_ssp_regs = { .version_id = 0, .minimum_version_id = 0, .post_load = strongarm_ssp_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16_ARRAY(sscr, StrongARMSSPState, 2), VMSTATE_UINT16(sssr, StrongARMSSPState), VMSTATE_UINT16_ARRAY(rx_fifo, StrongARMSSPState, 8), diff --git a/hw/arm/tosa.c b/hw/arm/tosa.c index 3ca2e4459ca..5891f6064f9 100644 --- a/hw/arm/tosa.c +++ b/hw/arm/tosa.c @@ -270,6 +270,7 @@ static void tosapda_machine_init(MachineClass *mc) mc->init = tosa_init; mc->block_default_type = IF_IDE; mc->ignore_memory_transaction_failures = true; + mc->deprecation_reason = "machine is old and unmaintained"; } DEFINE_MACHINE("tosa", tosapda_machine_init) diff --git a/hw/arm/trace-events b/hw/arm/trace-events index cdc1ea06a81..f1a54a02dfc 100644 --- 
a/hw/arm/trace-events +++ b/hw/arm/trace-events @@ -55,3 +55,21 @@ smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s" smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint64_t num_pages) "iommu mr=%s asid=%d vmid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64 +# strongarm.c +strongarm_uart_update_parameters(const char *label, int speed, char parity, int data_bits, int stop_bits) "%s speed=%d parity=%c data=%d stop=%d" +strongarm_ssp_read_underrun(void) "SSP rx underrun" + +# z2.c +z2_lcd_reg_update(uint8_t cur, uint8_t i_0, uint8_t i_1, uint8_t i_2, uint32_t value) "cur_reg = 0x%x, buf = [0x%x, 0x%x, 0x%x], value = 0x%x" +z2_lcd_enable_disable_result(const char *result) "LCD %s" +z2_aer915_send_too_long(int8_t msg) "message too long (%i bytes)" +z2_aer915_send(uint8_t reg, uint8_t value) "reg %d value 0x%02x" +z2_aer915_event(int8_t event, int8_t len) "i2c event =0x%x len=%d bytes" + +# xen_arm.c +xen_create_virtio_mmio_devices(int i, int irq, uint64_t base) "Created virtio-mmio device %d: irq %d base 0x%"PRIx64 +xen_init_ram(uint64_t machine_ram_size) "Initialized xen ram with size 0x%"PRIx64 +xen_enable_tpm(uint64_t addr) "Connected tpmdev at address 0x%"PRIx64 + +# bcm2838.c +bcm2838_gic_set_irq(int irq, int level) "gic irq:%d lvl:%d" diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c index 2f22dc890f4..d48235453e4 100644 --- a/hw/arm/versatilepb.c +++ b/hw/arm/versatilepb.c @@ -9,7 +9,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "cpu.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "hw/arm/boot.h" @@ -27,6 +26,7 @@ #include "hw/sd/sd.h" #include "qom/object.h" #include "audio/audio.h" +#include "target/arm/cpu-qom.h" #define VERSATILE_FLASH_ADDR 0x34000000 #define VERSATILE_FLASH_SIZE (64 * 1024 * 1024) @@ -52,7 +52,7 @@ static const VMStateDescription vmstate_vpb_sic = { .name = "versatilepb_sic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, vpb_sic_state), VMSTATE_UINT32(mask, vpb_sic_state), VMSTATE_UINT32(pic_enable, vpb_sic_state), @@ -192,10 +192,8 @@ static void versatile_init(MachineState *machine, int board_id) SysBusDevice *busdev; DeviceState *pl041; PCIBus *pci_bus; - NICInfo *nd; I2CBus *i2c; int n; - int done_smc = 0; DriveInfo *dinfo; if (machine->ram_size > 0x10000000) { @@ -263,16 +261,11 @@ static void versatile_init(MachineState *machine, int board_id) sysbus_connect_irq(busdev, 3, sic[30]); pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci"); - for(n = 0; n < nb_nics; n++) { - nd = &nd_table[n]; - - if (!done_smc && (!nd->model || strcmp(nd->model, "smc91c111") == 0)) { - smc91c111_init(nd, 0x10010000, sic[25]); - done_smc = 1; - } else { - pci_nic_init_nofail(nd, pci_bus, "rtl8139", NULL); - } + if (qemu_find_nic_info("smc91c111", true, NULL)) { + smc91c111_init(0x10010000, sic[25]); } + pci_init_nic_devices(pci_bus, "rtl8139"); + if (machine_usb(machine)) { pci_create_simple(pci_bus, -1, "pci-ohci"); } @@ -306,7 +299,13 @@ static void versatile_init(MachineState *machine, int board_id) /* The versatile/PB actually has a modified Color LCD controller that includes hardware cursor support from the PL111. 
*/ - dev = sysbus_create_simple("pl110_versatile", 0x10120000, pic[16]); + dev = qdev_new("pl110_versatile"); + object_property_set_link(OBJECT(dev), "framebuffer-memory", + OBJECT(sysmem), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x10120000); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[16]); + /* Wire up the mux control signals from the SYS_CLCD register */ qdev_connect_gpio_out(sysctl, 0, qdev_get_gpio_in(dev, 0)); diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c index fd981f4c33e..de815d84cc6 100644 --- a/hw/arm/vexpress.c +++ b/hw/arm/vexpress.c @@ -24,7 +24,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/datadir.h" -#include "cpu.h" #include "hw/sysbus.h" #include "hw/arm/boot.h" #include "hw/arm/primecell.h" @@ -46,6 +45,7 @@ #include "qapi/qmp/qlist.h" #include "qom/object.h" #include "audio/audio.h" +#include "target/arm/cpu-qom.h" #define VEXPRESS_BOARD_ID 0x8e0 #define VEXPRESS_FLASH_SIZE (64 * 1024 * 1024) @@ -276,6 +276,7 @@ static void a9_daughterboard_init(VexpressMachineState *vms, { MachineState *machine = MACHINE(vms); MemoryRegion *sysmem = get_system_memory(); + DeviceState *dev; if (ram_size > 0x40000000) { /* 1GB is the maximum the address space permits */ @@ -297,7 +298,12 @@ static void a9_daughterboard_init(VexpressMachineState *vms, /* Daughterboard peripherals : 0x10020000 .. 0x20000000 */ /* 0x10020000 PL111 CLCD (daughterboard) */ - sysbus_create_simple("pl111", 0x10020000, pic[44]); + dev = qdev_new("pl111"); + object_property_set_link(OBJECT(dev), "framebuffer-memory", + OBJECT(sysmem), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x10020000); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[44]); /* 0x10060000 AXI RAM */ /* 0x100e0000 PL341 Dynamic Memory Controller */ @@ -650,7 +656,12 @@ static void vexpress_common_init(MachineState *machine) /* VE_COMPACTFLASH: not modelled */ - sysbus_create_simple("pl111", map[VE_CLCD], pic[14]); + dev = qdev_new("pl111"); + object_property_set_link(OBJECT(dev), "framebuffer-memory", + OBJECT(sysmem), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, map[VE_CLCD]); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[14]); dinfo = drive_get(IF_PFLASH, 0, 0); pflash0 = ve_pflash_cfi01_register(map[VE_NORFLASH0], "vexpress.flash0", @@ -679,8 +690,8 @@ static void vexpress_common_init(MachineState *machine) memory_region_add_subregion(sysmem, map[VE_VIDEORAM], &vms->vram); /* 0x4e000000 LAN9118 Ethernet */ - if (nd_table[0].used) { - lan9118_init(&nd_table[0], map[VE_ETHERNET], pic[15]); + if (qemu_find_nic_info("lan9118", true, NULL)) { + lan9118_init(map[VE_ETHERNET], pic[15]); } /* VE_USB: not modelled */ @@ -783,22 +794,30 @@ static void vexpress_class_init(ObjectClass *oc, void *data) static void vexpress_a9_class_init(ObjectClass *oc, void *data) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a9"), + NULL + }; MachineClass *mc = MACHINE_CLASS(oc); VexpressMachineClass *vmc = VEXPRESS_MACHINE_CLASS(oc); mc->desc = "ARM Versatile Express for Cortex-A9"; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a9"); + mc->valid_cpu_types = valid_cpu_types; vmc->daughterboard = &a9_daughterboard; } static void vexpress_a15_class_init(ObjectClass *oc, void *data) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a15"), + 
NULL + }; MachineClass *mc = MACHINE_CLASS(oc); VexpressMachineClass *vmc = VEXPRESS_MACHINE_CLASS(oc); mc->desc = "ARM Versatile Express for Cortex-A15"; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a15"); + mc->valid_cpu_types = valid_cpu_types; vmc->daughterboard = &a15_daughterboard; diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index 8bc35a483c9..c3ccfef026f 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -32,10 +32,9 @@ #include "qemu/error-report.h" #include "trace.h" #include "hw/core/cpu.h" -#include "target/arm/cpu.h" #include "hw/acpi/acpi-defs.h" #include "hw/acpi/acpi.h" -#include "hw/nvram/fw_cfg.h" +#include "hw/nvram/fw_cfg_acpi.h" #include "hw/acpi/bios-linker-loader.h" #include "hw/acpi/aml-build.h" #include "hw/acpi/utils.h" @@ -58,6 +57,9 @@ #include "migration/vmstate.h" #include "hw/acpi/ghes.h" #include "hw/acpi/viot.h" +#include "hw/acpi/acpi_generic_initiator.h" +#include "hw/virtio/virtio-acpi.h" +#include "target/arm/multiprocessing.h" #define ARM_SPI_BASE 32 @@ -94,21 +96,6 @@ static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap, aml_append(scope, dev); } -static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap) -{ - Aml *dev = aml_device("FWCF"); - aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); - /* device present, functioning, decoding, not shown in UI */ - aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); - aml_append(dev, aml_name_decl("_CCA", aml_int(1))); - - Aml *crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base, - fw_cfg_memmap->size, AML_READ_WRITE)); - aml_append(dev, aml_name_decl("_CRS", crs)); - aml_append(scope, dev); -} - static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap) { Aml *dev, *crs; @@ -133,32 +120,6 @@ static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap) aml_append(scope, dev); } -static void acpi_dsdt_add_virtio(Aml *scope, - const MemMapEntry *virtio_mmio_memmap, - uint32_t mmio_irq, int num) -{ - hwaddr base = virtio_mmio_memmap->base; - hwaddr size = virtio_mmio_memmap->size; - int i; - - for (i = 0; i < num; i++) { - uint32_t irq = mmio_irq + i; - Aml *dev = aml_device("VR%02u", i); - aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005"))); - aml_append(dev, aml_name_decl("_UID", aml_int(i))); - aml_append(dev, aml_name_decl("_CCA", aml_int(1))); - - Aml *crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE)); - aml_append(crs, - aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, - AML_EXCLUSIVE, &irq, 1)); - aml_append(dev, aml_name_decl("_CRS", crs)); - aml_append(scope, dev); - base += size; - } -} - static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap, uint32_t irq, VirtMachineState *vms) { @@ -471,48 +432,34 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) * Rev: 1.07 */ static void -build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +spcr_setup(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { - AcpiTable table = { .sig = "SPCR", .rev = 2, .oem_id = vms->oem_id, - .oem_table_id = vms->oem_table_id }; - - acpi_table_begin(&table, table_data); - - /* Interface Type */ - build_append_int_noprefix(table_data, 3, 1); /* ARM PL011 UART */ - build_append_int_noprefix(table_data, 0, 3); /* Reserved */ - /* Base Address */ - build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 32, 0, 3, - 
vms->memmap[VIRT_UART].base); - /* Interrupt Type */ - build_append_int_noprefix(table_data, - (1 << 3) /* Bit[3] ARMH GIC interrupt */, 1); - build_append_int_noprefix(table_data, 0, 1); /* IRQ */ - /* Global System Interrupt */ - build_append_int_noprefix(table_data, - vms->irqmap[VIRT_UART] + ARM_SPI_BASE, 4); - build_append_int_noprefix(table_data, 3 /* 9600 */, 1); /* Baud Rate */ - build_append_int_noprefix(table_data, 0 /* No Parity */, 1); /* Parity */ - /* Stop Bits */ - build_append_int_noprefix(table_data, 1 /* 1 Stop bit */, 1); - /* Flow Control */ - build_append_int_noprefix(table_data, - (1 << 1) /* RTS/CTS hardware flow control */, 1); - /* Terminal Type */ - build_append_int_noprefix(table_data, 0 /* VT100 */, 1); - build_append_int_noprefix(table_data, 0, 1); /* Language */ - /* PCI Device ID */ - build_append_int_noprefix(table_data, 0xffff /* not a PCI device*/, 2); - /* PCI Vendor ID */ - build_append_int_noprefix(table_data, 0xffff /* not a PCI device*/, 2); - build_append_int_noprefix(table_data, 0, 1); /* PCI Bus Number */ - build_append_int_noprefix(table_data, 0, 1); /* PCI Device Number */ - build_append_int_noprefix(table_data, 0, 1); /* PCI Function Number */ - build_append_int_noprefix(table_data, 0, 4); /* PCI Flags */ - build_append_int_noprefix(table_data, 0, 1); /* PCI Segment */ - build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + AcpiSpcrData serial = { + .interface_type = 3, /* ARM PL011 UART */ + .base_addr.id = AML_AS_SYSTEM_MEMORY, + .base_addr.width = 32, + .base_addr.offset = 0, + .base_addr.size = 3, + .base_addr.addr = vms->memmap[VIRT_UART].base, + .interrupt_type = (1 << 3),/* Bit[3] ARMH GIC interrupt*/ + .pc_interrupt = 0, /* IRQ */ + .interrupt = (vms->irqmap[VIRT_UART] + ARM_SPI_BASE), + .baud_rate = 3, /* 9600 */ + .parity = 0, /* No Parity */ + .stop_bits = 1, /* 1 Stop bit */ + .flow_control = 1 << 1, /* RTS/CTS hardware flow control */ + .terminal_type = 0, /* VT100 */ + .language = 0, /* Language */ + .pci_device_id = 0xffff, /* not a PCI device*/ + .pci_vendor_id = 0xffff, /* not a PCI device*/ + .pci_bus = 0, + .pci_device = 0, + .pci_function = 0, + .pci_flags = 0, + .pci_segment = 0, + }; - acpi_table_end(linker, &table); + build_spcr(table_data, linker, &serial, 2, vms->oem_id, vms->oem_table_id); } /* @@ -558,6 +505,8 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) } } + build_srat_generic_pci_initiator(table_data); + if (ms->nvdimms_state->is_enabled) { nvdimm_build_srat(table_data); } @@ -573,8 +522,8 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) } /* - * ACPI spec, Revision 5.1 - * 5.2.24 Generic Timer Description Table (GTDT) + * ACPI spec, Revision 6.5 + * 5.2.25 Generic Timer Description Table (GTDT) */ static void build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) @@ -588,7 +537,7 @@ build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) uint32_t irqflags = vmc->claim_edge_triggered_timers ? 
1 : /* Interrupt is Edge triggered */ 0; /* Interrupt is Level triggered */ - AcpiTable table = { .sig = "GTDT", .rev = 2, .oem_id = vms->oem_id, + AcpiTable table = { .sig = "GTDT", .rev = 3, .oem_id = vms->oem_id, .oem_table_id = vms->oem_table_id }; acpi_table_begin(&table, table_data); @@ -624,7 +573,15 @@ build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) build_append_int_noprefix(table_data, 0, 4); /* Platform Timer Offset */ build_append_int_noprefix(table_data, 0, 4); - + if (vms->ns_el2_virt_timer_irq) { + /* Virtual EL2 Timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL2_VIRT_IRQ, 4); + /* Virtual EL2 Timer Flags */ + build_append_int_noprefix(table_data, irqflags, 4); + } else { + build_append_int_noprefix(table_data, 0, 4); + build_append_int_noprefix(table_data, 0, 4); + } acpi_table_end(linker, &table); } @@ -760,7 +717,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) build_append_int_noprefix(table_data, vgic_interrupt, 4); build_append_int_noprefix(table_data, 0, 8); /* GICR Base Address*/ /* MPIDR */ - build_append_int_noprefix(table_data, armcpu->mp_affinity, 8); + build_append_int_noprefix(table_data, arm_cpu_mp_affinity(armcpu), 8); /* Processor Power Efficiency Class */ build_append_int_noprefix(table_data, 0, 1); /* Reserved */ @@ -811,10 +768,10 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) static void build_fadt_rev6(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms, unsigned dsdt_tbl_offset) { - /* ACPI v6.0 */ + /* ACPI v6.3 */ AcpiFadtData fadt = { .rev = 6, - .minor_ver = 0, + .minor_ver = 3, .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI, .xdsdt_tbl_offset = &dsdt_tbl_offset, }; @@ -864,9 +821,10 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) if (vmc->acpi_expose_flash) { acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]); } - acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]); - acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO], - (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS); + fw_cfg_acpi_dsdt_add(scope, &memmap[VIRT_FW_CFG]); + virtio_acpi_dsdt_add(scope, memmap[VIRT_MMIO].base, memmap[VIRT_MMIO].size, + (irqmap[VIRT_MMIO] + ARM_SPI_BASE), + 0, NUM_VIRTIO_TRANSPORTS); acpi_dsdt_add_pci(scope, memmap, irqmap[VIRT_PCIE] + ARM_SPI_BASE, vms); if (vms->acpi_dev) { build_ged_aml(scope, "\\_SB."GED_DEVICE, @@ -969,7 +927,7 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) } acpi_add_table(table_offsets, tables_blob); - build_spcr(tables_blob, tables->linker, vms); + spcr_setup(tables_blob, tables->linker, vms); acpi_add_table(table_offsets, tables_blob); build_dbg2(tables_blob, tables->linker, vms); @@ -1047,7 +1005,7 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) " migration may not work", tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2); error_printf("Try removing CPUs, NUMA nodes, memory slots" - " or PCI bridges."); + " or PCI bridges.\n"); } acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE); @@ -1100,7 +1058,7 @@ static const VMStateDescription vmstate_virt_acpi_build = { .name = "virt_acpi_build", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(patched, AcpiBuildState), VMSTATE_END_OF_LIST() }, diff --git a/hw/arm/virt.c b/hw/arm/virt.c index be2856c018a..a9a913aeadb 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -73,7 +73,10 @@ #include "standard-headers/linux/input.h" #include 
"hw/arm/smmuv3.h" #include "hw/acpi/acpi.h" +#include "target/arm/cpu-qom.h" #include "target/arm/internals.h" +#include "target/arm/multiprocessing.h" +#include "target/arm/gtimer.h" #include "hw/mem/pc-dimm.h" #include "hw/mem/nvdimm.h" #include "hw/acpi/generic_event_device.h" @@ -82,11 +85,28 @@ #include "hw/char/pl011.h" #include "qemu/guest-random.h" +static GlobalProperty arm_virt_compat[] = { + { TYPE_VIRTIO_IOMMU_PCI, "aw-bits", "48" }, +}; +static const size_t arm_virt_compat_len = G_N_ELEMENTS(arm_virt_compat); + +/* + * This cannot be called from the virt_machine_class_init() because + * TYPE_VIRT_MACHINE is abstract and mc->compat_props g_ptr_array_new() + * only is called on virt non abstract class init. + */ +static void arm_virt_compat_set(MachineClass *mc) +{ + compat_props_add(mc->compat_props, arm_virt_compat, + arm_virt_compat_len); +} + #define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \ static void virt_##major##_##minor##_class_init(ObjectClass *oc, \ void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ + arm_virt_compat_set(mc); \ virt_machine_##major##_##minor##_options(mc); \ mc->desc = "QEMU " # major "." # minor " ARM Virtual Machine"; \ if (latest) { \ @@ -204,38 +224,6 @@ static const int a15irqmap[] = { [VIRT_PLATFORM_BUS] = 112, /* ...to 112 + PLATFORM_BUS_NUM_IRQS -1 */ }; -static const char *valid_cpus[] = { -#ifdef CONFIG_TCG - ARM_CPU_TYPE_NAME("cortex-a7"), - ARM_CPU_TYPE_NAME("cortex-a15"), - ARM_CPU_TYPE_NAME("cortex-a35"), - ARM_CPU_TYPE_NAME("cortex-a55"), - ARM_CPU_TYPE_NAME("cortex-a72"), - ARM_CPU_TYPE_NAME("cortex-a76"), - ARM_CPU_TYPE_NAME("cortex-a710"), - ARM_CPU_TYPE_NAME("a64fx"), - ARM_CPU_TYPE_NAME("neoverse-n1"), - ARM_CPU_TYPE_NAME("neoverse-v1"), - ARM_CPU_TYPE_NAME("neoverse-n2"), -#endif - ARM_CPU_TYPE_NAME("cortex-a53"), - ARM_CPU_TYPE_NAME("cortex-a57"), - ARM_CPU_TYPE_NAME("host"), - ARM_CPU_TYPE_NAME("max"), -}; - -static bool cpu_type_valid(const char *cpu) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(valid_cpus); i++) { - if (strcmp(cpu, valid_cpus[i]) == 0) { - return true; - } - } - return false; -} - static void create_randomness(MachineState *ms, const char *node) { struct { @@ -250,6 +238,20 @@ static void create_randomness(MachineState *ms, const char *node) qemu_fdt_setprop(ms->fdt, node, "rng-seed", seed.rng, sizeof(seed.rng)); } +/* + * The CPU object always exposes the NS EL2 virt timer IRQ line, + * but we don't want to advertise it to the guest in the dtb or ACPI + * table unless it's really going to do something. 
+ */ +static bool ns_el2_virt_timer_present(void) +{ + ARMCPU *cpu = ARM_CPU(qemu_get_cpu(0)); + CPUARMState *env = &cpu->env; + + return arm_feature(env, ARM_FEATURE_AARCH64) && + arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu); +} + static void create_fdt(VirtMachineState *vms) { MachineState *ms = MACHINE(vms); @@ -367,15 +369,29 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms) "arm,armv7-timer"); } qemu_fdt_setprop(ms->fdt, "/timer", "always-on", NULL, 0); - qemu_fdt_setprop_cells(ms->fdt, "/timer", "interrupts", - GIC_FDT_IRQ_TYPE_PPI, - INTID_TO_PPI(ARCH_TIMER_S_EL1_IRQ), irqflags, - GIC_FDT_IRQ_TYPE_PPI, - INTID_TO_PPI(ARCH_TIMER_NS_EL1_IRQ), irqflags, - GIC_FDT_IRQ_TYPE_PPI, - INTID_TO_PPI(ARCH_TIMER_VIRT_IRQ), irqflags, - GIC_FDT_IRQ_TYPE_PPI, - INTID_TO_PPI(ARCH_TIMER_NS_EL2_IRQ), irqflags); + if (vms->ns_el2_virt_timer_irq) { + qemu_fdt_setprop_cells(ms->fdt, "/timer", "interrupts", + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_S_EL1_IRQ), irqflags, + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_NS_EL1_IRQ), irqflags, + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_VIRT_IRQ), irqflags, + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_NS_EL2_IRQ), irqflags, + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_NS_EL2_VIRT_IRQ), irqflags); + } else { + qemu_fdt_setprop_cells(ms->fdt, "/timer", "interrupts", + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_S_EL1_IRQ), irqflags, + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_NS_EL1_IRQ), irqflags, + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_VIRT_IRQ), irqflags, + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_NS_EL2_IRQ), irqflags); + } } static void fdt_add_cpu_nodes(const VirtMachineState *vms) @@ -402,7 +418,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) for (cpu = 0; cpu < smp_cpus; cpu++) { ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu)); - if (armcpu->mp_affinity & ARM_AFF3_MASK) { + if (arm_cpu_mp_affinity(armcpu) & ARM_AFF3_MASK) { addr_cells = 2; break; } @@ -429,10 +445,10 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) if (addr_cells == 2) { qemu_fdt_setprop_u64(ms->fdt, nodename, "reg", - armcpu->mp_affinity); + arm_cpu_mp_affinity(armcpu)); } else { qemu_fdt_setprop_cell(ms->fdt, nodename, "reg", - armcpu->mp_affinity); + arm_cpu_mp_affinity(armcpu)); } if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) { @@ -818,6 +834,7 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem) [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, + [GTIMER_HYPVIRT] = ARCH_TIMER_NS_EL2_VIRT_IRQ, }; for (unsigned irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { @@ -1486,9 +1503,7 @@ static void create_pcie(VirtMachineState *vms) pci->bypass_iommu = vms->default_bus_bypass_iommu; vms->bus = pci->bus; if (vms->bus) { - for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci->bus, mc->default_nic, NULL); - } + pci_init_nic_devices(pci->bus, mc->default_nic); } nodename = vms->pciehb_nodename = g_strdup_printf("/pcie@%" PRIx64, base); @@ -1635,14 +1650,14 @@ static void virt_build_smbios(VirtMachineState *vms) } smbios_set_defaults("QEMU", product, - vmc->smbios_old_sys_ver ? "1.0" : mc->name, false, - true, SMBIOS_ENTRY_POINT_TYPE_64); + vmc->smbios_old_sys_ver ? 
"1.0" : mc->name, + true); /* build the array of physical mem area from base_memmap */ mem_array.address = vms->memmap[VIRT_MEM].base; mem_array.length = ms->ram_size; - smbios_get_tables(ms, &mem_array, 1, + smbios_get_tables(ms, SMBIOS_ENTRY_POINT_TYPE_64, &mem_array, 1, &smbios_tables, &smbios_tables_len, &smbios_anchor, &smbios_anchor_len, &error_fatal); @@ -1708,7 +1723,7 @@ static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx) clustersz = GICV3_TARGETLIST_BITS; } } - return arm_cpu_mp_affinity(idx, clustersz); + return arm_build_mp_affinity(idx, clustersz); } static inline bool *virt_get_high_memmap_enabled(VirtMachineState *vms, @@ -1803,8 +1818,8 @@ static void virt_set_memmap(VirtMachineState *vms, int pa_bits) /* Base address of the high IO region */ memtop = base = device_memory_base + ROUND_UP(device_memory_size, GiB); if (memtop > BIT_ULL(pa_bits)) { - error_report("Addressing limited to %d bits, but memory exceeds it by %llu bytes\n", - pa_bits, memtop - BIT_ULL(pa_bits)); + error_report("Addressing limited to %d bits, but memory exceeds it by %llu bytes", + pa_bits, memtop - BIT_ULL(pa_bits)); exit(EXIT_FAILURE); } if (base < device_memory_base) { @@ -1998,13 +2013,14 @@ static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem) if (pmu) { assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU)); if (kvm_irqchip_in_kernel()) { - kvm_arm_pmu_set_irq(cpu, VIRTUAL_PMU_IRQ); + kvm_arm_pmu_set_irq(ARM_CPU(cpu), VIRTUAL_PMU_IRQ); } - kvm_arm_pmu_init(cpu); + kvm_arm_pmu_init(ARM_CPU(cpu)); } if (steal_time) { - kvm_arm_pvtime_init(cpu, pvtime_reg_base + - cpu->cpu_index * PVTIME_SIZE_PER_CPU); + kvm_arm_pvtime_init(ARM_CPU(cpu), pvtime_reg_base + + cpu->cpu_index + * PVTIME_SIZE_PER_CPU); } } } else { @@ -2039,11 +2055,6 @@ static void machvirt_init(MachineState *machine) unsigned int smp_cpus = machine->smp.cpus; unsigned int max_cpus = machine->smp.max_cpus; - if (!cpu_type_valid(machine->cpu_type)) { - error_report("mach-virt: CPU type %s not supported", machine->cpu_type); - exit(1); - } - possible_cpus = mc->possible_cpu_arch_ids(machine); /* @@ -2257,6 +2268,11 @@ static void machvirt_init(MachineState *machine) qdev_realize(DEVICE(cpuobj), NULL, &error_fatal); object_unref(cpuobj); } + + /* Now we've created the CPUs we can see if they have the hypvirt timer */ + vms->ns_el2_virt_timer_irq = ns_el2_virt_timer_present() && + !vmc->no_ns_el2_virt_timer_irq; + fdt_add_timer_nodes(vms); fdt_add_cpu_nodes(vms); @@ -2937,6 +2953,32 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + static const char * const valid_cpu_types[] = { +#ifdef CONFIG_TCG + ARM_CPU_TYPE_NAME("cortex-a7"), + ARM_CPU_TYPE_NAME("cortex-a15"), +#ifdef TARGET_AARCH64 + ARM_CPU_TYPE_NAME("cortex-a35"), + ARM_CPU_TYPE_NAME("cortex-a55"), + ARM_CPU_TYPE_NAME("cortex-a72"), + ARM_CPU_TYPE_NAME("cortex-a76"), + ARM_CPU_TYPE_NAME("cortex-a710"), + ARM_CPU_TYPE_NAME("a64fx"), + ARM_CPU_TYPE_NAME("neoverse-n1"), + ARM_CPU_TYPE_NAME("neoverse-v1"), + ARM_CPU_TYPE_NAME("neoverse-n2"), +#endif /* TARGET_AARCH64 */ +#endif /* CONFIG_TCG */ +#ifdef TARGET_AARCH64 + ARM_CPU_TYPE_NAME("cortex-a53"), + ARM_CPU_TYPE_NAME("cortex-a57"), +#if defined(CONFIG_KVM) || defined(CONFIG_HVF) + ARM_CPU_TYPE_NAME("host"), +#endif /* CONFIG_KVM || CONFIG_HVF */ +#endif /* TARGET_AARCH64 */ + ARM_CPU_TYPE_NAME("max"), + NULL + }; mc->init = machvirt_init; /* Start with max_cpus set to 512, which 
is the maximum supported by KVM. @@ -2963,6 +3005,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) #else mc->default_cpu_type = ARM_CPU_TYPE_NAME("max"); #endif + mc->valid_cpu_types = valid_cpu_types; mc->get_default_cpu_node_id = virt_get_default_cpu_node_id; mc->kvm_type = virt_kvm_type; assert(!mc->get_hotplug_handler); @@ -3180,10 +3223,25 @@ static void machvirt_machine_init(void) } type_init(machvirt_machine_init); +static void virt_machine_9_0_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE_AS_LATEST(9, 0) + static void virt_machine_8_2_options(MachineClass *mc) { + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); + + virt_machine_9_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); + /* + * Don't expose NS_EL2_VIRT timer IRQ in DTB or ACPI on 8.2 and + * earlier machines. (Exposing it tickles a bug in older EDK2 + * guest BIOS binaries.) + */ + vmc->no_ns_el2_virt_timer_irq = true; } -DEFINE_VIRT_MACHINE_AS_LATEST(8, 2) +DEFINE_VIRT_MACHINE(8, 2) static void virt_machine_8_1_options(MachineClass *mc) { diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c index a5631529d09..15fa7dfa84f 100644 --- a/hw/arm/xen_arm.c +++ b/hw/arm/xen_arm.c @@ -34,6 +34,7 @@ #include "hw/xen/xen-hvm-common.h" #include "sysemu/tpm.h" #include "hw/xen/arch_hvm.h" +#include "trace.h" #define TYPE_XEN_ARM MACHINE_TYPE_NAME("xenpvh") OBJECT_DECLARE_SIMPLE_TYPE(XenArmState, XEN_ARM) @@ -91,8 +92,9 @@ static void xen_create_virtio_mmio_devices(XenArmState *xam) sysbus_create_simple("virtio-mmio", base, irq); - DPRINTF("Created virtio-mmio device %d: irq %d base 0x%lx\n", - i, GUEST_VIRTIO_MMIO_SPI_FIRST + i, base); + trace_xen_create_virtio_mmio_devices(i, + GUEST_VIRTIO_MMIO_SPI_FIRST + i, + base); } } @@ -101,6 +103,7 @@ static void xen_init_ram(MachineState *machine) MemoryRegion *sysmem = get_system_memory(); ram_addr_t block_len, ram_size[GUEST_RAM_BANKS]; + trace_xen_init_ram(machine->ram_size); if (machine->ram_size <= GUEST_RAM0_SIZE) { ram_size[0] = machine->ram_size; ram_size[1] = 0; @@ -111,21 +114,16 @@ static void xen_init_ram(MachineState *machine) block_len = GUEST_RAM1_BASE + ram_size[1]; } - memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len, + memory_region_init_ram(&xen_memory, NULL, "xen.ram", block_len, &error_fatal); - memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", &ram_memory, + memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", &xen_memory, GUEST_RAM0_BASE, ram_size[0]); memory_region_add_subregion(sysmem, GUEST_RAM0_BASE, &ram_lo); - DPRINTF("Initialized region xen.ram.lo: base 0x%llx size 0x%lx\n", - GUEST_RAM0_BASE, ram_size[0]); - if (ram_size[1] > 0) { - memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", &ram_memory, + memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", &xen_memory, GUEST_RAM1_BASE, ram_size[1]); memory_region_add_subregion(sysmem, GUEST_RAM1_BASE, &ram_hi); - DPRINTF("Initialized region xen.ram.hi: base 0x%llx size 0x%lx\n", - GUEST_RAM1_BASE, ram_size[1]); } } @@ -158,7 +156,7 @@ static void xen_enable_tpm(XenArmState *xam) TPMBackend *be = qemu_find_tpm_be("tpm0"); if (be == NULL) { - DPRINTF("Couldn't fine the backend for tpm0\n"); + error_report("Couldn't find tpm0 backend"); return; } dev = qdev_new(TYPE_TPM_TIS_SYSBUS); @@ -168,7 +166,7 @@ static void xen_enable_tpm(XenArmState *xam) sysbus_realize_and_unref(busdev, &error_fatal); sysbus_mmio_map(busdev, 0, xam->cfg.tpm_base_addr); - DPRINTF("Connected tpmdev at address 0x%lx\n", xam->cfg.tpm_base_addr); +
trace_xen_enable_tpm(xam->cfg.tpm_base_addr); } #endif @@ -179,8 +177,9 @@ static void xen_arm_init(MachineState *machine) xam->state = g_new0(XenIOState, 1); if (machine->ram_size == 0) { - DPRINTF("ram_size not specified. QEMU machine started without IOREQ" - "(no emulated devices including Virtio)\n"); + warn_report("%s non-zero ram size not specified. QEMU machine started" + " without IOREQ (no emulated devices including virtio)", + MACHINE_CLASS(object_get_class(OBJECT(machine)))->desc); return; } @@ -194,7 +193,7 @@ static void xen_arm_init(MachineState *machine) if (xam->cfg.tpm_base_addr) { xen_enable_tpm(xam); } else { - DPRINTF("tpm-base-addr is not provided. TPM will not be enabled\n"); + warn_report("tpm-base-addr is not provided. TPM will not be enabled"); } #endif } diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c index dbb9793aa13..fc3abcbe88b 100644 --- a/hw/arm/xilinx_zynq.c +++ b/hw/arm/xilinx_zynq.c @@ -18,7 +18,6 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "cpu.h" #include "hw/sysbus.h" #include "hw/arm/boot.h" #include "net/net.h" @@ -37,6 +36,8 @@ #include "hw/qdev-clock.h" #include "sysemu/reset.h" #include "qom/object.h" +#include "exec/tswap.h" +#include "target/arm/cpu-qom.h" #define TYPE_ZYNQ_MACHINE MACHINE_TYPE_NAME("xilinx-zynq-a9") OBJECT_DECLARE_SIMPLE_TYPE(ZynqMachineState, ZYNQ_MACHINE) @@ -108,16 +109,13 @@ static void zynq_write_board_setup(ARMCPU *cpu, static struct arm_boot_info zynq_binfo = {}; -static void gem_init(NICInfo *nd, uint32_t base, qemu_irq irq) +static void gem_init(uint32_t base, qemu_irq irq) { DeviceState *dev; SysBusDevice *s; dev = qdev_new(TYPE_CADENCE_GEM); - if (nd->used) { - qemu_check_nic_model(nd, TYPE_CADENCE_GEM); - qdev_set_nic_properties(dev, nd); - } + qemu_configure_nic_device(dev, true, NULL); object_property_set_int(OBJECT(dev), "phy-addr", 7, &error_abort); s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); @@ -245,6 +243,8 @@ static void zynq_init(MachineState *machine) sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE); sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ)); + sysbus_connect_irq(busdev, 1, + qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_FIQ)); for (n = 0; n < 64; n++) { pic[n] = qdev_get_gpio_in(dev, n); @@ -279,8 +279,8 @@ static void zynq_init(MachineState *machine) sysbus_create_varargs("cadence_ttc", 0xF8002000, pic[69-IRQ_OFFSET], pic[70-IRQ_OFFSET], pic[71-IRQ_OFFSET], NULL); - gem_init(&nd_table[0], 0xE000B000, pic[54-IRQ_OFFSET]); - gem_init(&nd_table[1], 0xE000C000, pic[77-IRQ_OFFSET]); + gem_init(0xE000B000, pic[54 - IRQ_OFFSET]); + gem_init(0xE000C000, pic[77 - IRQ_OFFSET]); for (n = 0; n < 2; n++) { int hci_irq = n ? 
79 : 56; @@ -354,13 +354,17 @@ static void zynq_init(MachineState *machine) static void zynq_machine_class_init(ObjectClass *oc, void *data) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a9"), + NULL + }; MachineClass *mc = MACHINE_CLASS(oc); mc->desc = "Xilinx Zynq Platform Baseboard for Cortex-A9"; mc->init = zynq_init; mc->max_cpus = 1; mc->no_sdcard = 1; mc->ignore_memory_transaction_failures = true; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a9"); + mc->valid_cpu_types = valid_cpu_types; mc->default_ram_id = "zynq.ext_ram"; } diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c index 537118224fb..962f98fee2e 100644 --- a/hw/arm/xlnx-versal-virt.c +++ b/hw/arm/xlnx-versal-virt.c @@ -13,13 +13,14 @@ #include "qemu/error-report.h" #include "qapi/error.h" #include "sysemu/device_tree.h" +#include "hw/block/flash.h" #include "hw/boards.h" #include "hw/sysbus.h" #include "hw/arm/fdt.h" -#include "cpu.h" #include "hw/qdev-properties.h" #include "hw/arm/xlnx-versal.h" #include "hw/arm/boot.h" +#include "target/arm/multiprocessing.h" #include "qom/object.h" #define TYPE_XLNX_VERSAL_VIRT_MACHINE MACHINE_TYPE_NAME("xlnx-versal-virt") @@ -49,6 +50,7 @@ struct VersalVirt { struct { bool secure; } cfg; + char *ospi_model; }; static void fdt_create(VersalVirt *s) @@ -107,7 +109,8 @@ static void fdt_add_cpu_nodes(VersalVirt *s, uint32_t psci_conduit) ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i)); qemu_fdt_add_subnode(s->fdt, name); - qemu_fdt_setprop_cell(s->fdt, name, "reg", armcpu->mp_affinity); + qemu_fdt_setprop_cell(s->fdt, name, "reg", + arm_cpu_mp_affinity(armcpu)); if (psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) { qemu_fdt_setprop_string(s->fdt, name, "enable-method", "psci"); } @@ -637,6 +640,22 @@ static void sd_plugin_card(SDHCIState *sd, DriveInfo *di) &error_fatal); } +static char *versal_get_ospi_model(Object *obj, Error **errp) +{ + VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(obj); + + return g_strdup(s->ospi_model); +} + +static void versal_set_ospi_model(Object *obj, const char *value, Error **errp) +{ + VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(obj); + + g_free(s->ospi_model); + s->ospi_model = g_strdup(value); +} + + static void versal_virt_init(MachineState *machine) { VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(machine); @@ -731,12 +750,25 @@ static void versal_virt_init(MachineState *machine) for (i = 0; i < XLNX_VERSAL_NUM_OSPI_FLASH; i++) { BusState *spi_bus; DeviceState *flash_dev; + ObjectClass *flash_klass; qemu_irq cs_line; DriveInfo *dinfo = drive_get(IF_MTD, 0, i); spi_bus = qdev_get_child_bus(DEVICE(&s->soc.pmc.iou.ospi), "spi0"); - flash_dev = qdev_new("mt35xu01g"); + if (s->ospi_model) { + flash_klass = object_class_by_name(s->ospi_model); + if (!flash_klass || + object_class_is_abstract(flash_klass) || + !object_class_dynamic_cast(flash_klass, TYPE_M25P80)) { + error_setg(&error_fatal, "'%s' is either abstract or" + " not a subtype of m25p80", s->ospi_model); + return; + } + } + + flash_dev = qdev_new(s->ospi_model ? 
s->ospi_model : "mt35xu01g"); + if (dinfo) { qdev_prop_set_drive_err(flash_dev, "drive", blk_by_legacy_dinfo(dinfo), &error_fatal); @@ -769,6 +801,13 @@ static void versal_virt_machine_instance_init(Object *obj) 0); } +static void versal_virt_machine_finalize(Object *obj) +{ + VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(obj); + + g_free(s->ospi_model); +} + static void versal_virt_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -780,6 +819,10 @@ static void versal_virt_machine_class_init(ObjectClass *oc, void *data) mc->default_cpus = XLNX_VERSAL_NR_ACPUS + XLNX_VERSAL_NR_RCPUS; mc->no_cdrom = true; mc->default_ram_id = "ddr"; + object_class_property_add_str(oc, "ospi-flash", versal_get_ospi_model, + versal_set_ospi_model); + object_class_property_set_description(oc, "ospi-flash", + "Change the OSPI Flash model"); } static const TypeInfo versal_virt_machine_init_typeinfo = { @@ -788,6 +831,7 @@ static const TypeInfo versal_virt_machine_init_typeinfo = { .class_init = versal_virt_machine_class_init, .instance_init = versal_virt_machine_instance_init, .instance_size = sizeof(VersalVirt), + .instance_finalize = versal_virt_machine_finalize, }; static void versal_virt_machine_init_register_types(void) diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c index 9600551c442..50cb0606cbf 100644 --- a/hw/arm/xlnx-versal.c +++ b/hw/arm/xlnx-versal.c @@ -23,6 +23,8 @@ #include "hw/misc/unimp.h" #include "hw/arm/xlnx-versal.h" #include "qemu/log.h" +#include "target/arm/cpu-qom.h" +#include "target/arm/gtimer.h" #define XLNX_VERSAL_ACPU_TYPE ARM_CPU_TYPE_NAME("cortex-a72") #define XLNX_VERSAL_RCPU_TYPE ARM_CPU_TYPE_NAME("cortex-r5f") @@ -254,18 +256,13 @@ static void versal_create_gems(Versal *s, qemu_irq *pic) static const int irqs[] = { VERSAL_GEM0_IRQ_0, VERSAL_GEM1_IRQ_0}; static const uint64_t addrs[] = { MM_GEM0, MM_GEM1 }; char *name = g_strdup_printf("gem%d", i); - NICInfo *nd = &nd_table[i]; DeviceState *dev; MemoryRegion *mr; object_initialize_child(OBJECT(s), name, &s->lpd.iou.gem[i], TYPE_CADENCE_GEM); dev = DEVICE(&s->lpd.iou.gem[i]); - /* FIXME use qdev NIC properties instead of nd_table[] */ - if (nd->used) { - qemu_check_nic_model(nd, "cadence_gem"); - qdev_set_nic_properties(dev, nd); - } + qemu_configure_nic_device(dev, true, NULL); object_property_set_int(OBJECT(dev), "phy-addr", 23, &error_abort); object_property_set_int(OBJECT(dev), "num-priority-queues", 2, &error_abort); diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index 5905a330151..afeb3f88f81 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -25,6 +25,8 @@ #include "sysemu/kvm.h" #include "sysemu/sysemu.h" #include "kvm_arm.h" +#include "target/arm/cpu-qom.h" +#include "target/arm/gtimer.h" #define GIC_NUM_SPI_INTR 160 @@ -616,13 +618,7 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) } for (i = 0; i < XLNX_ZYNQMP_NUM_GEMS; i++) { - NICInfo *nd = &nd_table[i]; - - /* FIXME use qdev NIC properties instead of nd_table[] */ - if (nd->used) { - qemu_check_nic_model(nd, TYPE_CADENCE_GEM); - qdev_set_nic_properties(DEVICE(&s->gem[i]), nd); - } + qemu_configure_nic_device(DEVICE(&s->gem[i]), true, NULL); object_property_set_int(OBJECT(&s->gem[i]), "revision", GEM_REVISION, &error_abort); object_property_set_int(OBJECT(&s->gem[i]), "phy-addr", 23, diff --git a/hw/arm/z2.c b/hw/arm/z2.c index d9a08fa67b2..fc5672e7ab2 100644 --- a/hw/arm/z2.c +++ b/hw/arm/z2.c @@ -25,16 +25,9 @@ #include "hw/audio/wm8750.h" #include "audio/audio.h" #include 
"exec/address-spaces.h" -#include "cpu.h" #include "qom/object.h" #include "qapi/error.h" - -#ifdef DEBUG_Z2 -#define DPRINTF(fmt, ...) \ - printf(fmt, ## __VA_ARGS__) -#else -#define DPRINTF(fmt, ...) -#endif +#include "trace.h" static const struct keymap map[0x100] = { [0 ... 0xff] = { -1, -1 }, @@ -120,6 +113,8 @@ static uint32_t zipit_lcd_transfer(SSIPeripheral *dev, uint32_t value) { ZipitLCD *z = ZIPIT_LCD(dev); uint16_t val; + + trace_z2_lcd_reg_update(z->cur_reg, z->buf[0], z->buf[1], z->buf[2], value); if (z->selected) { z->buf[z->pos] = value & 0xff; z->pos++; @@ -127,22 +122,19 @@ static uint32_t zipit_lcd_transfer(SSIPeripheral *dev, uint32_t value) if (z->pos == 3) { switch (z->buf[0]) { case 0x74: - DPRINTF("%s: reg: 0x%.2x\n", __func__, z->buf[2]); z->cur_reg = z->buf[2]; break; case 0x76: val = z->buf[1] << 8 | z->buf[2]; - DPRINTF("%s: value: 0x%.4x\n", __func__, val); if (z->cur_reg == 0x22 && val == 0x0000) { z->enabled = 1; - printf("%s: LCD enabled\n", __func__); + trace_z2_lcd_enable_disable_result("enabled"); } else if (z->cur_reg == 0x10 && val == 0x0000) { z->enabled = 0; - printf("%s: LCD disabled\n", __func__); + trace_z2_lcd_enable_disable_result("disabled"); } break; default: - DPRINTF("%s: unknown command!\n", __func__); break; } z->pos = 0; @@ -168,7 +160,7 @@ static const VMStateDescription vmstate_zipit_lcd_state = { .name = "zipit-lcd", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SSI_PERIPHERAL(ssidev, ZipitLCD), VMSTATE_INT32(selected, ZipitLCD), VMSTATE_INT32(enabled, ZipitLCD), @@ -212,14 +204,12 @@ static int aer915_send(I2CSlave *i2c, uint8_t data) s->buf[s->len] = data; if (s->len++ > 2) { - DPRINTF("%s: message too long (%i bytes)\n", - __func__, s->len); + trace_z2_aer915_send_too_long(s->len); return 1; } if (s->len == 2) { - DPRINTF("%s: reg %d value 0x%02x\n", __func__, - s->buf[0], s->buf[1]); + trace_z2_aer915_send(s->buf[0], s->buf[1]); } return 0; @@ -229,14 +219,12 @@ static int aer915_event(I2CSlave *i2c, enum i2c_event event) { AER915State *s = AER915(i2c); + trace_z2_aer915_event(s->len, event); switch (event) { case I2C_START_SEND: s->len = 0; break; case I2C_START_RECV: - if (s->len != 1) { - DPRINTF("%s: short message!?\n", __func__); - } break; case I2C_FINISH: break; @@ -274,7 +262,7 @@ static const VMStateDescription vmstate_aer915_state = { .name = "aer915", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(len, AER915State), VMSTATE_BUFFER(buf, AER915State), VMSTATE_END_OF_LIST(), @@ -359,6 +347,7 @@ static void z2_machine_init(MachineClass *mc) mc->init = z2_init; mc->ignore_memory_transaction_failures = true; mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5"); + mc->deprecation_reason = "machine is old and unmaintained"; machine_add_audiodev_property(mc); } diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c index 6a7a2dc80c4..3f0053f94de 100644 --- a/hw/audio/ac97.c +++ b/hw/audio/ac97.c @@ -1094,7 +1094,7 @@ static const VMStateDescription vmstate_ac97_bm_regs = { .name = "ac97_bm_regs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(bdbar, AC97BusMasterRegs), VMSTATE_UINT8(civ, AC97BusMasterRegs), VMSTATE_UINT8(lvi, AC97BusMasterRegs), @@ -1142,7 +1142,7 @@ static const VMStateDescription vmstate_ac97 = { .version_id = 3, .minimum_version_id = 2, .post_load = ac97_post_load, - .fields = (VMStateField[]) { 
+ .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, AC97LinkState), VMSTATE_UINT32(glob_cnt, AC97LinkState), VMSTATE_UINT32(glob_sta, AC97LinkState), diff --git a/hw/audio/asc.c b/hw/audio/asc.c index 0f36b4ce9b6..87b56243262 100644 --- a/hw/audio/asc.c +++ b/hw/audio/asc.c @@ -555,7 +555,7 @@ static const VMStateDescription vmstate_asc_fifo = { .name = "apple-sound-chip.fifo", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(fifo, ASCFIFOState, ASC_FIFO_SIZE), VMSTATE_UINT8(int_status, ASCFIFOState), VMSTATE_INT32(cnt, ASCFIFOState), @@ -575,7 +575,7 @@ static const VMStateDescription vmstate_asc = { .version_id = 0, .minimum_version_id = 0, .post_load = asc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(fifos, ASCState, 2, 0, vmstate_asc_fifo, ASCFIFOState), VMSTATE_UINT8_ARRAY(regs, ASCState, ASC_REG_SIZE), diff --git a/hw/audio/cs4231.c b/hw/audio/cs4231.c index aefc3edea18..967caa7fcbd 100644 --- a/hw/audio/cs4231.c +++ b/hw/audio/cs4231.c @@ -142,7 +142,7 @@ static const VMStateDescription vmstate_cs4231 = { .name ="cs4231", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, CSState, CS_REGS), VMSTATE_UINT8_ARRAY(dregs, CSState, CS_DREGS), VMSTATE_END_OF_LIST() diff --git a/hw/audio/cs4231a.c b/hw/audio/cs4231a.c index 3aa105748d3..9ef57f042d1 100644 --- a/hw/audio/cs4231a.c +++ b/hw/audio/cs4231a.c @@ -637,7 +637,7 @@ static const VMStateDescription vmstate_cs4231a = { .minimum_version_id = 1, .pre_load = cs4231a_pre_load, .post_load = cs4231a_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY (regs, CSState, CS_REGS), VMSTATE_BUFFER (dregs, CSState), VMSTATE_INT32 (dma_running, CSState), diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c index fad55412119..4ab61d3b9da 100644 --- a/hw/audio/es1370.c +++ b/hw/audio/es1370.c @@ -765,7 +765,7 @@ static const VMStateDescription vmstate_es1370_channel = { .name = "es1370_channel", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32 (shift, struct chan), VMSTATE_UINT32 (leftover, struct chan), VMSTATE_UINT32 (scount, struct chan), @@ -808,7 +808,7 @@ static const VMStateDescription vmstate_es1370 = { .version_id = 2, .minimum_version_id = 2, .post_load = es1370_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE (dev, ES1370State), VMSTATE_STRUCT_ARRAY (chan, ES1370State, NB_CHANNELS, 2, vmstate_es1370_channel, struct chan), diff --git a/hw/audio/gus.c b/hw/audio/gus.c index 6c2b586ca71..4beb3fd74e2 100644 --- a/hw/audio/gus.c +++ b/hw/audio/gus.c @@ -209,7 +209,7 @@ static const VMStateDescription vmstate_gus = { .name = "gus", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32 (pos, GUSState), VMSTATE_INT32 (left, GUSState), VMSTATE_INT32 (shift, GUSState), diff --git a/hw/audio/hda-codec.c b/hw/audio/hda-codec.c index 0bc20d49f6c..b22e486fda9 100644 --- a/hw/audio/hda-codec.c +++ b/hw/audio/hda-codec.c @@ -812,7 +812,7 @@ static const VMStateDescription vmstate_hda_audio_stream_buf = { .name = "hda-audio-stream/buffer", .version_id = 1, .needed = vmstate_hda_audio_stream_buf_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(buf, 
HDAAudioStream), VMSTATE_INT64(rpos, HDAAudioStream), VMSTATE_INT64(wpos, HDAAudioStream), @@ -825,7 +825,7 @@ static const VMStateDescription vmstate_hda_audio_stream_buf = { static const VMStateDescription vmstate_hda_audio_stream = { .name = "hda-audio-stream", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(stream, HDAAudioStream), VMSTATE_UINT32(channel, HDAAudioStream), VMSTATE_UINT32(format, HDAAudioStream), @@ -837,7 +837,7 @@ static const VMStateDescription vmstate_hda_audio_stream = { VMSTATE_BUFFER(compat_buf, HDAAudioStream), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_hda_audio_stream_buf, NULL } @@ -847,7 +847,7 @@ static const VMStateDescription vmstate_hda_audio = { .name = "hda-audio", .version_id = 2, .post_load = hda_audio_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(st, HDAAudioState, 4, 0, vmstate_hda_audio_stream, HDAAudioStream), diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c index 78ff9f9a680..9c54e60b718 100644 --- a/hw/audio/intel-hda.c +++ b/hw/audio/intel-hda.c @@ -1158,7 +1158,7 @@ static int intel_hda_post_load(void *opaque, int version) static const VMStateDescription vmstate_intel_hda_stream = { .name = "intel-hda-stream", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctl, IntelHDAStream), VMSTATE_UINT32(lpib, IntelHDAStream), VMSTATE_UINT32(cbl, IntelHDAStream), @@ -1174,7 +1174,7 @@ static const VMStateDescription vmstate_intel_hda = { .name = "intel-hda", .version_id = 1, .post_load = intel_hda_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pci, IntelHDAState), /* registers */ diff --git a/hw/audio/lm4549.c b/hw/audio/lm4549.c index e7bfcc4b9fe..a4a77c8dc6a 100644 --- a/hw/audio/lm4549.c +++ b/hw/audio/lm4549.c @@ -329,7 +329,7 @@ const VMStateDescription vmstate_lm4549_state = { .version_id = 1, .minimum_version_id = 1, .post_load = lm4549_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(voice_is_active, lm4549_state), VMSTATE_UINT16_ARRAY(regfile, lm4549_state, 128), VMSTATE_UINT16_ARRAY(buffer, lm4549_state, LM4549_BUFFER_SIZE), diff --git a/hw/audio/marvell_88w8618.c b/hw/audio/marvell_88w8618.c index e6c09bdb8e3..cc285444bce 100644 --- a/hw/audio/marvell_88w8618.c +++ b/hw/audio/marvell_88w8618.c @@ -273,7 +273,7 @@ static const VMStateDescription mv88w8618_audio_vmsd = { .name = "mv88w8618_audio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(playback_mode, mv88w8618_audio_state), VMSTATE_UINT32(status, mv88w8618_audio_state), VMSTATE_UINT32(irq_enable, mv88w8618_audio_state), diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c index fe7f07ced21..a4b89f17682 100644 --- a/hw/audio/pcspk.c +++ b/hw/audio/pcspk.c @@ -208,7 +208,7 @@ static const VMStateDescription vmstate_spk = { .version_id = 1, .minimum_version_id = 1, .needed = migrate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(data_on, PCSpkState), VMSTATE_UINT8(dummy_refresh_clock, PCSpkState), VMSTATE_END_OF_LIST() diff --git a/hw/audio/pl041.c b/hw/audio/pl041.c index 868dffbfd32..b435208c242 100644 --- a/hw/audio/pl041.c +++ b/hw/audio/pl041.c @@ -571,7 +571,7 @@ static const VMStateDescription vmstate_pl041_regfile 
= { .name = "pl041_regfile", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { #define REGISTER(name, offset) VMSTATE_UINT32(name, pl041_regfile), #include "pl041.hx" #undef REGISTER @@ -583,7 +583,7 @@ static const VMStateDescription vmstate_pl041_fifo = { .name = "pl041_fifo", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, pl041_fifo), VMSTATE_UINT32_ARRAY(data, pl041_fifo, MAX_FIFO_DEPTH), VMSTATE_END_OF_LIST() @@ -594,7 +594,7 @@ static const VMStateDescription vmstate_pl041_channel = { .name = "pl041_channel", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(tx_fifo, pl041_channel, 0, vmstate_pl041_fifo, pl041_fifo), VMSTATE_UINT8(tx_enabled, pl041_channel), @@ -613,7 +613,7 @@ static const VMStateDescription vmstate_pl041 = { .name = "pl041", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(fifo_depth, PL041State), VMSTATE_STRUCT(regs, PL041State, 0, vmstate_pl041_regfile, pl041_regfile), diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c index 18f6d252db3..fd76e78d180 100644 --- a/hw/audio/sb16.c +++ b/hw/audio/sb16.c @@ -1324,12 +1324,12 @@ static const VMStateDescription vmstate_sb16 = { .version_id = 1, .minimum_version_id = 1, .post_load = sb16_post_load, - .fields = (VMStateField[]) { - VMSTATE_UINT32 (irq, SB16State), - VMSTATE_UINT32 (dma, SB16State), - VMSTATE_UINT32 (hdma, SB16State), - VMSTATE_UINT32 (port, SB16State), - VMSTATE_UINT32 (ver, SB16State), + .fields = (const VMStateField[]) { + VMSTATE_UNUSED( 4 /* irq */ + + 4 /* dma */ + + 4 /* hdma */ + + 4 /* port */ + + 4 /* ver */), VMSTATE_INT32 (in_index, SB16State), VMSTATE_INT32 (out_data_len, SB16State), VMSTATE_INT32 (fmt_stereo, SB16State), diff --git a/hw/audio/virtio-snd.c b/hw/audio/virtio-snd.c index 137fa77a01c..c80b58bf5dc 100644 --- a/hw/audio/virtio-snd.c +++ b/hw/audio/virtio-snd.c @@ -72,7 +72,7 @@ static const VMStateDescription vmstate_virtio_snd = { .unmigratable = 1, .minimum_version_id = VIRTIO_SOUND_VM_VERSION, .version_id = VIRTIO_SOUND_VM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, @@ -243,12 +243,13 @@ static void virtio_snd_handle_pcm_info(VirtIOSound *s, memset(&pcm_info[i].padding, 0, 5); } + cmd->payload_size = sizeof(virtio_snd_pcm_info) * count; cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_OK); iov_from_buf(cmd->elem->in_sg, cmd->elem->in_num, sizeof(virtio_snd_hdr), pcm_info, - sizeof(virtio_snd_pcm_info) * count); + cmd->payload_size); } /* @@ -455,7 +456,6 @@ static uint32_t virtio_snd_pcm_prepare(VirtIOSound *s, uint32_t stream_id) stream->s = s; qemu_mutex_init(&stream->queue_mutex); QSIMPLEQ_INIT(&stream->queue); - QSIMPLEQ_INIT(&stream->invalid); /* * stream_id >= s->snd_conf.streams was checked before so this is @@ -610,9 +610,6 @@ static size_t virtio_snd_pcm_get_io_msgs_count(VirtIOSoundPCMStream *stream) QSIMPLEQ_FOREACH_SAFE(buffer, &stream->queue, entry, next) { count += 1; } - QSIMPLEQ_FOREACH_SAFE(buffer, &stream->invalid, entry, next) { - count += 1; - } } return count; } @@ -749,7 +746,8 @@ process_cmd(VirtIOSound *s, virtio_snd_ctrl_command *cmd) 0, &cmd->resp, sizeof(virtio_snd_hdr)); - virtqueue_push(cmd->vq, cmd->elem, sizeof(virtio_snd_hdr)); + virtqueue_push(cmd->vq, cmd->elem, + sizeof(virtio_snd_hdr) 
+ cmd->payload_size); virtio_notify(VIRTIO_DEVICE(s), cmd->vq); } @@ -808,6 +806,7 @@ static void virtio_snd_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) cmd->elem = elem; cmd->vq = vq; cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_OK); + /* implicit cmd->payload_size = 0; */ QTAILQ_INSERT_TAIL(&s->cmdq, cmd, next); elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); } @@ -828,47 +827,36 @@ static void virtio_snd_handle_event(VirtIODevice *vdev, VirtQueue *vq) trace_virtio_snd_handle_event(); } +/* + * Must only be called if vsnd->invalid is not empty. + */ static inline void empty_invalid_queue(VirtIODevice *vdev, VirtQueue *vq) { VirtIOSoundPCMBuffer *buffer = NULL; - VirtIOSoundPCMStream *stream = NULL; virtio_snd_pcm_status resp = { 0 }; VirtIOSound *vsnd = VIRTIO_SND(vdev); - bool any = false; - for (uint32_t i = 0; i < vsnd->snd_conf.streams; i++) { - stream = vsnd->pcm->streams[i]; - if (stream) { - any = false; - WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) { - while (!QSIMPLEQ_EMPTY(&stream->invalid)) { - buffer = QSIMPLEQ_FIRST(&stream->invalid); - if (buffer->vq != vq) { - break; - } - any = true; - resp.status = cpu_to_le32(VIRTIO_SND_S_BAD_MSG); - iov_from_buf(buffer->elem->in_sg, - buffer->elem->in_num, - 0, - &resp, - sizeof(virtio_snd_pcm_status)); - virtqueue_push(vq, - buffer->elem, - sizeof(virtio_snd_pcm_status)); - QSIMPLEQ_REMOVE_HEAD(&stream->invalid, entry); - virtio_snd_pcm_buffer_free(buffer); - } - if (any) { - /* - * Notify vq about virtio_snd_pcm_status responses. - * Buffer responses must be notified separately later. - */ - virtio_notify(vdev, vq); - } - } - } + g_assert(!QSIMPLEQ_EMPTY(&vsnd->invalid)); + + while (!QSIMPLEQ_EMPTY(&vsnd->invalid)) { + buffer = QSIMPLEQ_FIRST(&vsnd->invalid); + /* If buffer->vq != vq, our logic is fundamentally wrong, so bail out */ + g_assert(buffer->vq == vq); + + resp.status = cpu_to_le32(VIRTIO_SND_S_BAD_MSG); + iov_from_buf(buffer->elem->in_sg, + buffer->elem->in_num, + 0, + &resp, + sizeof(virtio_snd_pcm_status)); + virtqueue_push(vq, + buffer->elem, + sizeof(virtio_snd_pcm_status)); + QSIMPLEQ_REMOVE_HEAD(&vsnd->invalid, entry); + virtio_snd_pcm_buffer_free(buffer); } + /* Notify vq about virtio_snd_pcm_status responses. */ + virtio_notify(vdev, vq); } /* @@ -880,15 +868,14 @@ static inline void empty_invalid_queue(VirtIODevice *vdev, VirtQueue *vq) */ static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq) { - VirtIOSound *s = VIRTIO_SND(vdev); - VirtIOSoundPCMStream *stream = NULL; + VirtIOSound *vsnd = VIRTIO_SND(vdev); VirtIOSoundPCMBuffer *buffer; VirtQueueElement *elem; size_t msg_sz, size; virtio_snd_pcm_xfer hdr; uint32_t stream_id; /* - * If any of the I/O messages are invalid, put them in stream->invalid and + * If any of the I/O messages are invalid, put them in vsnd->invalid and * return them after the for loop. 
*/ bool must_empty_invalid_queue = false; @@ -899,6 +886,8 @@ static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq) trace_virtio_snd_handle_tx_xfer(); for (;;) { + VirtIOSoundPCMStream *stream; + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); if (!elem) { break; @@ -914,12 +903,12 @@ static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq) } stream_id = le32_to_cpu(hdr.stream_id); - if (stream_id >= s->snd_conf.streams - || s->pcm->streams[stream_id] == NULL) { + if (stream_id >= vsnd->snd_conf.streams + || vsnd->pcm->streams[stream_id] == NULL) { goto tx_err; } - stream = s->pcm->streams[stream_id]; + stream = vsnd->pcm->streams[stream_id]; if (stream->info.direction != VIRTIO_SND_D_OUTPUT) { goto tx_err; } @@ -939,13 +928,11 @@ static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq) continue; tx_err: - WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) { - must_empty_invalid_queue = true; - buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer)); - buffer->elem = elem; - buffer->vq = vq; - QSIMPLEQ_INSERT_TAIL(&stream->invalid, buffer, entry); - } + must_empty_invalid_queue = true; + buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer)); + buffer->elem = elem; + buffer->vq = vq; + QSIMPLEQ_INSERT_TAIL(&vsnd->invalid, buffer, entry); } if (must_empty_invalid_queue) { @@ -962,15 +949,14 @@ static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq) */ static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq) { - VirtIOSound *s = VIRTIO_SND(vdev); - VirtIOSoundPCMStream *stream = NULL; + VirtIOSound *vsnd = VIRTIO_SND(vdev); VirtIOSoundPCMBuffer *buffer; VirtQueueElement *elem; size_t msg_sz, size; virtio_snd_pcm_xfer hdr; uint32_t stream_id; /* - * if any of the I/O messages are invalid, put them in stream->invalid and + * if any of the I/O messages are invalid, put them in vsnd->invalid and * return them after the for loop. 
*/ bool must_empty_invalid_queue = false; @@ -981,6 +967,8 @@ static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq) trace_virtio_snd_handle_rx_xfer(); for (;;) { + VirtIOSoundPCMStream *stream; + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); if (!elem) { break; @@ -996,12 +984,12 @@ static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq) } stream_id = le32_to_cpu(hdr.stream_id); - if (stream_id >= s->snd_conf.streams - || !s->pcm->streams[stream_id]) { + if (stream_id >= vsnd->snd_conf.streams + || !vsnd->pcm->streams[stream_id]) { goto rx_err; } - stream = s->pcm->streams[stream_id]; + stream = vsnd->pcm->streams[stream_id]; if (stream == NULL || stream->info.direction != VIRTIO_SND_D_INPUT) { goto rx_err; } @@ -1018,13 +1006,11 @@ static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq) continue; rx_err: - WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) { - must_empty_invalid_queue = true; - buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer)); - buffer->elem = elem; - buffer->vq = vq; - QSIMPLEQ_INSERT_TAIL(&stream->invalid, buffer, entry); - } + must_empty_invalid_queue = true; + buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer)); + buffer->elem = elem; + buffer->vq = vq; + QSIMPLEQ_INSERT_TAIL(&vsnd->invalid, buffer, entry); } if (must_empty_invalid_queue) { @@ -1124,6 +1110,7 @@ static void virtio_snd_realize(DeviceState *dev, Error **errp) virtio_add_queue(vdev, 64, virtio_snd_handle_rx_xfer); qemu_mutex_init(&vsnd->cmdq_mutex); QTAILQ_INIT(&vsnd->cmdq); + QSIMPLEQ_INIT(&vsnd->invalid); for (uint32_t i = 0; i < vsnd->snd_conf.streams; i++) { status = virtio_snd_set_pcm_params(vsnd, i, &default_params); @@ -1373,13 +1360,20 @@ static void virtio_snd_unrealize(DeviceState *dev) static void virtio_snd_reset(VirtIODevice *vdev) { - VirtIOSound *s = VIRTIO_SND(vdev); + VirtIOSound *vsnd = VIRTIO_SND(vdev); virtio_snd_ctrl_command *cmd; - WITH_QEMU_LOCK_GUARD(&s->cmdq_mutex) { - while (!QTAILQ_EMPTY(&s->cmdq)) { - cmd = QTAILQ_FIRST(&s->cmdq); - QTAILQ_REMOVE(&s->cmdq, cmd, next); + /* + * Sanity check that the invalid buffer message queue is emptied at the end + * of every virtio_snd_handle_tx_xfer/virtio_snd_handle_rx_xfer call, and + * must be empty otherwise. + */ + g_assert(QSIMPLEQ_EMPTY(&vsnd->invalid)); + + WITH_QEMU_LOCK_GUARD(&vsnd->cmdq_mutex) { + while (!QTAILQ_EMPTY(&vsnd->cmdq)) { + cmd = QTAILQ_FIRST(&vsnd->cmdq); + QTAILQ_REMOVE(&vsnd->cmdq, cmd, next); virtio_snd_ctrl_cmd_free(cmd); } } diff --git a/hw/audio/wm8750.c b/hw/audio/wm8750.c index 57954a63144..ec2c4e13743 100644 --- a/hw/audio/wm8750.c +++ b/hw/audio/wm8750.c @@ -592,7 +592,7 @@ static const VMStateDescription vmstate_wm8750 = { .minimum_version_id = 0, .pre_save = wm8750_pre_save, .post_load = wm8750_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(i2c_data, WM8750State, 2), VMSTATE_INT32(i2c_len, WM8750State), VMSTATE_INT32(enable, WM8750State), diff --git a/hw/block/block.c b/hw/block/block.c index 9f52ee6e728..3ceca7dce69 100644 --- a/hw/block/block.c +++ b/hw/block/block.c @@ -30,7 +30,7 @@ static int blk_pread_nonzeroes(BlockBackend *blk, hwaddr size, void *buf) BlockDriverState *bs = blk_bs(blk); for (;;) { - bytes = MIN(size - offset, BDRV_REQUEST_MAX_SECTORS); + bytes = MIN(size - offset, BDRV_REQUEST_MAX_BYTES); if (bytes <= 0) { return 0; } @@ -54,29 +54,30 @@ static int blk_pread_nonzeroes(BlockBackend *blk, hwaddr size, void *buf) * BDRV_REQUEST_MAX_BYTES. * On success, return true. 
* On failure, store an error through @errp and return false. - * Note that the error messages do not identify the block backend. - * TODO Since callers don't either, this can result in confusing - * errors. + * * This function not intended for actual block devices, which read on * demand. It's for things like memory devices that (ab)use a block * backend to provide persistence. */ -bool blk_check_size_and_read_all(BlockBackend *blk, void *buf, hwaddr size, - Error **errp) +bool blk_check_size_and_read_all(BlockBackend *blk, DeviceState *dev, + void *buf, hwaddr size, Error **errp) { int64_t blk_len; int ret; + g_autofree char *dev_id = NULL; blk_len = blk_getlength(blk); if (blk_len < 0) { error_setg_errno(errp, -blk_len, - "can't get size of block backend"); + "can't get size of %s block backend", blk_name(blk)); return false; } if (blk_len != size) { - error_setg(errp, "device requires %" HWADDR_PRIu " bytes, " - "block backend provides %" PRIu64 " bytes", - size, blk_len); + dev_id = qdev_get_human_name(dev); + error_setg(errp, "%s device '%s' requires %" HWADDR_PRIu + " bytes, %s block backend provides %" PRIu64 " bytes", + object_get_typename(OBJECT(dev)), dev_id, size, + blk_name(blk), blk_len); return false; } @@ -89,7 +90,11 @@ bool blk_check_size_and_read_all(BlockBackend *blk, void *buf, hwaddr size, assert(size <= BDRV_REQUEST_MAX_BYTES); ret = blk_pread_nonzeroes(blk, size, buf); if (ret < 0) { - error_setg_errno(errp, -ret, "can't read block backend"); + dev_id = qdev_get_human_name(dev); + error_setg_errno(errp, -ret, "can't read %s block backend" + " for %s device '%s'", + blk_name(blk), object_get_typename(OBJECT(dev)), + dev_id); return false; } return true; diff --git a/hw/block/dataplane/meson.build b/hw/block/dataplane/meson.build index 025b3b061b6..11a5eba2f4c 100644 --- a/hw/block/dataplane/meson.build +++ b/hw/block/dataplane/meson.build @@ -1,2 +1 @@ -system_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c')) specific_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-block.c')) diff --git a/hw/block/dataplane/trace-events b/hw/block/dataplane/trace-events deleted file mode 100644 index 38fc3e75071..00000000000 --- a/hw/block/dataplane/trace-events +++ /dev/null @@ -1,5 +0,0 @@ -# See docs/devel/tracing.rst for syntax documentation. - -# virtio-blk.c -virtio_blk_data_plane_start(void *s) "dataplane %p" -virtio_blk_data_plane_stop(void *s) "dataplane %p" diff --git a/hw/block/dataplane/trace.h b/hw/block/dataplane/trace.h deleted file mode 100644 index 240cc598348..00000000000 --- a/hw/block/dataplane/trace.h +++ /dev/null @@ -1 +0,0 @@ -#include "trace/trace-hw_block_dataplane.h" diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c deleted file mode 100644 index f83bb0f116b..00000000000 --- a/hw/block/dataplane/virtio-blk.c +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Dedicated thread for virtio-blk I/O processing - * - * Copyright 2012 IBM, Corp. - * Copyright 2012 Red Hat, Inc. and/or its affiliates - * - * Authors: - * Stefan Hajnoczi - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- * - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "trace.h" -#include "qemu/iov.h" -#include "qemu/main-loop.h" -#include "qemu/thread.h" -#include "qemu/error-report.h" -#include "hw/virtio/virtio-blk.h" -#include "virtio-blk.h" -#include "block/aio.h" -#include "hw/virtio/virtio-bus.h" -#include "qom/object_interfaces.h" - -struct VirtIOBlockDataPlane { - bool starting; - bool stopping; - - VirtIOBlkConf *conf; - VirtIODevice *vdev; - - /* Note that these EventNotifiers are assigned by value. This is - * fine as long as you do not call event_notifier_cleanup on them - * (because you don't own the file descriptor or handle; you just - * use it). - */ - IOThread *iothread; - AioContext *ctx; -}; - -/* Raise an interrupt to signal guest, if necessary */ -void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq) -{ - virtio_notify_irqfd(s->vdev, vq); -} - -/* Context: QEMU global mutex held */ -bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, - VirtIOBlockDataPlane **dataplane, - Error **errp) -{ - VirtIOBlockDataPlane *s; - BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - - *dataplane = NULL; - - if (conf->iothread) { - if (!k->set_guest_notifiers || !k->ioeventfd_assign) { - error_setg(errp, - "device is incompatible with iothread " - "(transport does not support notifiers)"); - return false; - } - if (!virtio_device_ioeventfd_enabled(vdev)) { - error_setg(errp, "ioeventfd is required for iothread"); - return false; - } - - /* If dataplane is (re-)enabled while the guest is running there could - * be block jobs that can conflict. - */ - if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) { - error_prepend(errp, "cannot start virtio-blk dataplane: "); - return false; - } - } - /* Don't try if transport does not support notifiers. */ - if (!virtio_device_ioeventfd_enabled(vdev)) { - return false; - } - - s = g_new0(VirtIOBlockDataPlane, 1); - s->vdev = vdev; - s->conf = conf; - - if (conf->iothread) { - s->iothread = conf->iothread; - object_ref(OBJECT(s->iothread)); - s->ctx = iothread_get_aio_context(s->iothread); - } else { - s->ctx = qemu_get_aio_context(); - } - - *dataplane = s; - - return true; -} - -/* Context: QEMU global mutex held */ -void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s) -{ - VirtIOBlock *vblk; - - if (!s) { - return; - } - - vblk = VIRTIO_BLK(s->vdev); - assert(!vblk->dataplane_started); - if (s->iothread) { - object_unref(OBJECT(s->iothread)); - } - g_free(s); -} - -/* Context: QEMU global mutex held */ -int virtio_blk_data_plane_start(VirtIODevice *vdev) -{ - VirtIOBlock *vblk = VIRTIO_BLK(vdev); - VirtIOBlockDataPlane *s = vblk->dataplane; - BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk))); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - AioContext *old_context; - unsigned i; - unsigned nvqs = s->conf->num_queues; - Error *local_err = NULL; - int r; - - if (vblk->dataplane_started || s->starting) { - return 0; - } - - s->starting = true; - - /* Set up guest notifier (irq) */ - r = k->set_guest_notifiers(qbus->parent, nvqs, true); - if (r != 0) { - error_report("virtio-blk failed to set guest notifier (%d), " - "ensure -accel kvm is set.", r); - goto fail_guest_notifiers; - } - - /* - * Batch all the host notifiers in a single transaction to avoid - * quadratic time complexity in address_space_update_ioeventfds(). 
- */ - memory_region_transaction_begin(); - - /* Set up virtqueue notify */ - for (i = 0; i < nvqs; i++) { - r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true); - if (r != 0) { - int j = i; - - fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r); - while (i--) { - virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); - } - - /* - * The transaction expects the ioeventfds to be open when it - * commits. Do it now, before the cleanup loop. - */ - memory_region_transaction_commit(); - - while (j--) { - virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j); - } - goto fail_host_notifiers; - } - } - - memory_region_transaction_commit(); - - trace_virtio_blk_data_plane_start(s); - - old_context = blk_get_aio_context(s->conf->conf.blk); - aio_context_acquire(old_context); - r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err); - aio_context_release(old_context); - if (r < 0) { - error_report_err(local_err); - goto fail_aio_context; - } - - /* Kick right away to begin processing requests already in vring */ - for (i = 0; i < nvqs; i++) { - VirtQueue *vq = virtio_get_queue(s->vdev, i); - - event_notifier_set(virtio_queue_get_host_notifier(vq)); - } - - /* - * These fields must be visible to the IOThread when it processes the - * virtqueue, otherwise it will think dataplane has not started yet. - * - * Make sure ->dataplane_started is false when blk_set_aio_context() is - * called above so that draining does not cause the host notifier to be - * detached/attached prematurely. - */ - s->starting = false; - vblk->dataplane_started = true; - smp_wmb(); /* paired with aio_notify_accept() on the read side */ - - /* Get this show started by hooking up our callbacks */ - if (!blk_in_drain(s->conf->conf.blk)) { - aio_context_acquire(s->ctx); - for (i = 0; i < nvqs; i++) { - VirtQueue *vq = virtio_get_queue(s->vdev, i); - - virtio_queue_aio_attach_host_notifier(vq, s->ctx); - } - aio_context_release(s->ctx); - } - return 0; - - fail_aio_context: - memory_region_transaction_begin(); - - for (i = 0; i < nvqs; i++) { - virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); - } - - memory_region_transaction_commit(); - - for (i = 0; i < nvqs; i++) { - virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i); - } - fail_host_notifiers: - k->set_guest_notifiers(qbus->parent, nvqs, false); - fail_guest_notifiers: - vblk->dataplane_disabled = true; - s->starting = false; - return -ENOSYS; -} - -/* Stop notifications for new requests from guest. - * - * Context: BH in IOThread - */ -static void virtio_blk_data_plane_stop_bh(void *opaque) -{ - VirtIOBlockDataPlane *s = opaque; - unsigned i; - - for (i = 0; i < s->conf->num_queues; i++) { - VirtQueue *vq = virtio_get_queue(s->vdev, i); - EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq); - - virtio_queue_aio_detach_host_notifier(vq, s->ctx); - - /* - * Test and clear notifier after disabling event, in case poll callback - * didn't have time to run. - */ - virtio_queue_host_notifier_read(host_notifier); - } -} - -/* Context: QEMU global mutex held */ -void virtio_blk_data_plane_stop(VirtIODevice *vdev) -{ - VirtIOBlock *vblk = VIRTIO_BLK(vdev); - VirtIOBlockDataPlane *s = vblk->dataplane; - BusState *qbus = qdev_get_parent_bus(DEVICE(vblk)); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - unsigned i; - unsigned nvqs = s->conf->num_queues; - - if (!vblk->dataplane_started || s->stopping) { - return; - } - - /* Better luck next time. 
*/ - if (vblk->dataplane_disabled) { - vblk->dataplane_disabled = false; - vblk->dataplane_started = false; - return; - } - s->stopping = true; - trace_virtio_blk_data_plane_stop(s); - - if (!blk_in_drain(s->conf->conf.blk)) { - aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s); - } - - /* - * Batch all the host notifiers in a single transaction to avoid - * quadratic time complexity in address_space_update_ioeventfds(). - */ - memory_region_transaction_begin(); - - for (i = 0; i < nvqs; i++) { - virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); - } - - /* - * The transaction expects the ioeventfds to be open when it - * commits. Do it now, before the cleanup loop. - */ - memory_region_transaction_commit(); - - for (i = 0; i < nvqs; i++) { - virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i); - } - - /* - * Set ->dataplane_started to false before draining so that host notifiers - * are not detached/attached anymore. - */ - vblk->dataplane_started = false; - - aio_context_acquire(s->ctx); - - /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */ - blk_drain(s->conf->conf.blk); - - /* - * Try to switch bs back to the QEMU main loop. If other users keep the - * BlockBackend in the iothread, that's ok - */ - blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL); - - aio_context_release(s->ctx); - - /* Clean up guest notifier (irq) */ - k->set_guest_notifiers(qbus->parent, nvqs, false); - - s->stopping = false; -} diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h deleted file mode 100644 index 5e18bb99aeb..00000000000 --- a/hw/block/dataplane/virtio-blk.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Dedicated thread for virtio-blk I/O processing - * - * Copyright 2012 IBM, Corp. - * Copyright 2012 Red Hat, Inc. and/or its affiliates - * - * Authors: - * Stefan Hajnoczi - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#ifndef HW_DATAPLANE_VIRTIO_BLK_H -#define HW_DATAPLANE_VIRTIO_BLK_H - -#include "hw/virtio/virtio.h" - -typedef struct VirtIOBlockDataPlane VirtIOBlockDataPlane; - -bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, - VirtIOBlockDataPlane **dataplane, - Error **errp); -void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s); -void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq); - -int virtio_blk_data_plane_start(VirtIODevice *vdev); -void virtio_blk_data_plane_stop(VirtIODevice *vdev); - -#endif /* HW_DATAPLANE_VIRTIO_BLK_H */ diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c index c4bb28c66fe..98501e6885e 100644 --- a/hw/block/dataplane/xen-block.c +++ b/hw/block/dataplane/xen-block.c @@ -260,8 +260,6 @@ static void xen_block_complete_aio(void *opaque, int ret) XenBlockRequest *request = opaque; XenBlockDataPlane *dataplane = request->dataplane; - aio_context_acquire(dataplane->ctx); - if (ret != 0) { error_report("%s I/O error", request->req.operation == BLKIF_OP_READ ? 
@@ -273,10 +271,10 @@ static void xen_block_complete_aio(void *opaque, int ret) if (request->presync) { request->presync = 0; xen_block_do_aio(request); - goto done; + return; } if (request->aio_inflight > 0) { - goto done; + return; } switch (request->req.operation) { @@ -318,9 +316,6 @@ static void xen_block_complete_aio(void *opaque, int ret) if (dataplane->more_work) { qemu_bh_schedule(dataplane->bh); } - -done: - aio_context_release(dataplane->ctx); } static bool xen_block_split_discard(XenBlockRequest *request, @@ -601,9 +596,7 @@ static void xen_block_dataplane_bh(void *opaque) { XenBlockDataPlane *dataplane = opaque; - aio_context_acquire(dataplane->ctx); xen_block_handle_requests(dataplane); - aio_context_release(dataplane->ctx); } static bool xen_block_dataplane_event(void *opaque) @@ -703,10 +696,8 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane) xen_block_dataplane_detach(dataplane); } - aio_context_acquire(dataplane->ctx); /* Xen doesn't have multiple users for nodes, so this can't fail */ blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort); - aio_context_release(dataplane->ctx); /* * Now that the context has been moved onto the main thread, cancel @@ -752,7 +743,6 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane, { ERRP_GUARD(); XenDevice *xendev = dataplane->xendev; - AioContext *old_context; unsigned int ring_size; unsigned int i; @@ -836,11 +826,8 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane, goto stop; } - old_context = blk_get_aio_context(dataplane->blk); - aio_context_acquire(old_context); /* If other users keep the BlockBackend in the iothread, that's ok */ blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL); - aio_context_release(old_context); if (!blk_in_drain(dataplane->blk)) { xen_block_dataplane_attach(dataplane); diff --git a/hw/block/ecc.c b/hw/block/ecc.c index 6e0d63842c1..ed889a4184f 100644 --- a/hw/block/ecc.c +++ b/hw/block/ecc.c @@ -82,7 +82,7 @@ const VMStateDescription vmstate_ecc_state = { .name = "ecc-state", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(cp, ECCState), VMSTATE_UINT16_ARRAY(lp, ECCState, 2), VMSTATE_UINT16(count, ECCState), diff --git a/hw/block/fdc-internal.h b/hw/block/fdc-internal.h index 036392e9fc1..e219623dc7a 100644 --- a/hw/block/fdc-internal.h +++ b/hw/block/fdc-internal.h @@ -25,8 +25,6 @@ #ifndef HW_BLOCK_FDC_INTERNAL_H #define HW_BLOCK_FDC_INTERNAL_H -#include "exec/memory.h" -#include "exec/ioport.h" #include "hw/block/block.h" #include "hw/block/fdc.h" #include "qapi/qapi-types-block.h" @@ -92,7 +90,6 @@ typedef struct FDrive { } FDrive; struct FDCtrl { - MemoryRegion iomem; qemu_irq irq; /* Controller state */ QEMUTimer *result_timer; @@ -140,7 +137,6 @@ struct FDCtrl { /* Timers state */ uint8_t timer0; uint8_t timer1; - PortioList portio_list; }; extern const FDFormat fd_formats[]; diff --git a/hw/block/fdc-isa.c b/hw/block/fdc-isa.c index 7ec075e470a..e43dc532af8 100644 --- a/hw/block/fdc-isa.c +++ b/hw/block/fdc-isa.c @@ -42,6 +42,7 @@ #include "sysemu/block-backend.h" #include "sysemu/blockdev.h" #include "sysemu/sysemu.h" +#include "exec/ioport.h" #include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/module.h" @@ -60,6 +61,7 @@ struct FDCtrlISABus { uint32_t irq; uint32_t dma; struct FDCtrl state; + PortioList portio_list; int32_t bootindexA; int32_t bootindexB; }; @@ -91,7 +93,7 @@ static void isabus_fdc_realize(DeviceState *dev, Error **errp) FDCtrl 
*fdctrl = &isa->state; Error *err = NULL; - isa_register_portio_list(isadev, &fdctrl->portio_list, + isa_register_portio_list(isadev, &isa->portio_list, isa->iobase, fdc_portio_list, fdctrl, "fdc"); @@ -190,6 +192,20 @@ static Aml *build_fdinfo_aml(int idx, FloppyDriveType type) return dev; } +void isa_fdc_set_iobase(ISADevice *fdc, hwaddr iobase) +{ + FDCtrlISABus *isa = ISA_FDC(fdc); + + fdc->ioport_id = iobase; + isa->iobase = iobase; + portio_list_set_address(&isa->portio_list, isa->iobase); +} + +void isa_fdc_set_enabled(ISADevice *fdc, bool enabled) +{ + portio_list_set_enabled(&ISA_FDC(fdc)->portio_list, enabled); +} + int cmos_get_fd_drive_type(FloppyDriveType fd0) { int val; @@ -259,7 +275,7 @@ static const VMStateDescription vmstate_isa_fdc = { .name = "fdc", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, FDCtrlISABus, 0, vmstate_fdc, FDCtrl), VMSTATE_END_OF_LIST() } diff --git a/hw/block/fdc-sysbus.c b/hw/block/fdc-sysbus.c index 86ea51d0034..035bc089752 100644 --- a/hw/block/fdc-sysbus.c +++ b/hw/block/fdc-sysbus.c @@ -26,6 +26,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qom/object.h" +#include "exec/memory.h" #include "hw/sysbus.h" #include "hw/block/fdc.h" #include "migration/vmstate.h" @@ -52,6 +53,7 @@ struct FDCtrlSysBus { /*< public >*/ struct FDCtrl state; + MemoryRegion iomem; }; static uint64_t fdctrl_read_mem(void *opaque, hwaddr reg, unsigned ize) @@ -146,11 +148,11 @@ static void sysbus_fdc_common_instance_init(Object *obj) qdev_set_legacy_instance_id(dev, 0 /* io */, 2); /* FIXME */ - memory_region_init_io(&fdctrl->iomem, obj, + memory_region_init_io(&sys->iomem, obj, sbdc->use_strict_io ? &fdctrl_mem_strict_ops : &fdctrl_mem_ops, fdctrl, "fdc", 0x08); - sysbus_init_mmio(sbd, &fdctrl->iomem); + sysbus_init_mmio(sbd, &sys->iomem); sysbus_init_irq(sbd, &fdctrl->irq); qdev_init_gpio_in(dev, fdctrl_handle_tc, 1); @@ -168,7 +170,7 @@ static const VMStateDescription vmstate_sysbus_fdc = { .name = "fdc", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, FDCtrlSysBus, 0, vmstate_fdc, FDCtrl), VMSTATE_END_OF_LIST() } diff --git a/hw/block/fdc.c b/hw/block/fdc.c index d7cc4d3ec19..6dd94e98bc3 100644 --- a/hw/block/fdc.c +++ b/hw/block/fdc.c @@ -854,7 +854,7 @@ static const VMStateDescription vmstate_fdrive_media_changed = { .version_id = 1, .minimum_version_id = 1, .needed = fdrive_media_changed_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(media_changed, FDrive), VMSTATE_END_OF_LIST() } @@ -864,7 +864,7 @@ static const VMStateDescription vmstate_fdrive_media_rate = { .name = "fdrive/media_rate", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(media_rate, FDrive), VMSTATE_END_OF_LIST() } @@ -882,7 +882,7 @@ static const VMStateDescription vmstate_fdrive_perpendicular = { .version_id = 1, .minimum_version_id = 1, .needed = fdrive_perpendicular_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(perpendicular, FDrive), VMSTATE_END_OF_LIST() } @@ -899,13 +899,13 @@ static const VMStateDescription vmstate_fdrive = { .version_id = 1, .minimum_version_id = 1, .post_load = fdrive_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(head, FDrive), VMSTATE_UINT8(track, FDrive), VMSTATE_UINT8(sect, FDrive), 
VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fdrive_media_changed, &vmstate_fdrive_media_rate, &vmstate_fdrive_perpendicular, @@ -977,7 +977,7 @@ static const VMStateDescription vmstate_fdc_reset_sensei = { .version_id = 1, .minimum_version_id = 1, .needed = fdc_reset_sensei_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(reset_sensei, FDCtrl), VMSTATE_END_OF_LIST() } @@ -995,7 +995,7 @@ static const VMStateDescription vmstate_fdc_result_timer = { .version_id = 1, .minimum_version_id = 1, .needed = fdc_result_timer_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(result_timer, FDCtrl), VMSTATE_END_OF_LIST() } @@ -1013,7 +1013,7 @@ static const VMStateDescription vmstate_fdc_phase = { .version_id = 1, .minimum_version_id = 1, .needed = fdc_phase_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(phase, FDCtrl), VMSTATE_END_OF_LIST() } @@ -1026,7 +1026,7 @@ const VMStateDescription vmstate_fdc = { .pre_save = fdc_pre_save, .pre_load = fdc_pre_load, .post_load = fdc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Controller State */ VMSTATE_UINT8(sra, FDCtrl), VMSTATE_UINT8(srb, FDCtrl), @@ -1057,7 +1057,7 @@ const VMStateDescription vmstate_fdc = { vmstate_fdrive, FDrive), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fdc_reset_sensei, &vmstate_fdc_result_timer, &vmstate_fdc_phase, diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c index afc3fdf4d60..8dec134832a 100644 --- a/hw/block/m25p80.c +++ b/hw/block/m25p80.c @@ -267,6 +267,9 @@ static const FlashPartInfo known_devices[] = { { INFO("mt25ql512ab", 0x20ba20, 0x1044, 64 << 10, 1024, ER_4K | ER_32K) }, { INFO_STACKED("mt35xu01g", 0x2c5b1b, 0x104100, 128 << 10, 1024, ER_4K | ER_32K, 2) }, + { INFO_STACKED("mt35xu02gbba", 0x2c5b1c, 0x104100, 128 << 10, 2048, + ER_4K | ER_32K, 4), + .sfdp_read = m25p80_sfdp_mt35xu02g }, { INFO_STACKED("n25q00", 0x20ba21, 0x1000, 64 << 10, 2048, ER_4K, 4) }, { INFO_STACKED("n25q00a", 0x20bb21, 0x1000, 64 << 10, 2048, ER_4K, 4) }, { INFO_STACKED("mt25ql01g", 0x20ba21, 0x1040, 64 << 10, 2048, ER_4K, 2) }, @@ -512,7 +515,6 @@ struct M25P80Class { FlashPartInfo *pi; }; -#define TYPE_M25P80 "m25p80-generic" OBJECT_DECLARE_TYPE(Flash, M25P80Class, M25P80) static inline Manufacturer get_man(Flash *s) @@ -1617,7 +1619,8 @@ static void m25p80_realize(SSIPeripheral *ss, Error **errp) trace_m25p80_binding(s); s->storage = blk_blockalign(s->blk, s->size); - if (!blk_check_size_and_read_all(s->blk, s->storage, s->size, errp)) { + if (!blk_check_size_and_read_all(s->blk, DEVICE(s), + s->storage, s->size, errp)) { return; } } else { @@ -1684,7 +1687,7 @@ static const VMStateDescription vmstate_m25p80_data_read_loop = { .version_id = 1, .minimum_version_id = 1, .needed = m25p80_data_read_loop_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(data_read_loop, Flash), VMSTATE_END_OF_LIST() } @@ -1702,7 +1705,7 @@ static const VMStateDescription vmstate_m25p80_aai_enable = { .version_id = 1, .minimum_version_id = 1, .needed = m25p80_aai_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(aai_enable, Flash), VMSTATE_END_OF_LIST() } @@ -1720,7 +1723,7 @@ static const VMStateDescription 
vmstate_m25p80_write_protect = { .version_id = 1, .minimum_version_id = 1, .needed = m25p80_wp_level_srwd_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(wp_level, Flash), VMSTATE_BOOL(status_register_write_disabled, Flash), VMSTATE_END_OF_LIST() @@ -1743,7 +1746,7 @@ static const VMStateDescription vmstate_m25p80_block_protect = { .version_id = 1, .minimum_version_id = 1, .needed = m25p80_block_protect_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(block_protect0, Flash), VMSTATE_BOOL(block_protect1, Flash), VMSTATE_BOOL(block_protect2, Flash), @@ -1759,7 +1762,7 @@ static const VMStateDescription vmstate_m25p80 = { .minimum_version_id = 0, .pre_save = m25p80_pre_save, .pre_load = m25p80_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(state, Flash), VMSTATE_UINT8_ARRAY(data, Flash, M25P80_INTERNAL_DATA_BUFFER_SZ), VMSTATE_UINT32(len, Flash), @@ -1781,7 +1784,7 @@ static const VMStateDescription vmstate_m25p80 = { VMSTATE_UINT8(spansion_cr4nv, Flash), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_m25p80_data_read_loop, &vmstate_m25p80_aai_enable, &vmstate_m25p80_write_protect, diff --git a/hw/block/m25p80_sfdp.c b/hw/block/m25p80_sfdp.c index b33811a4f5e..6ee2cfaf119 100644 --- a/hw/block/m25p80_sfdp.c +++ b/hw/block/m25p80_sfdp.c @@ -57,6 +57,42 @@ static const uint8_t sfdp_n25q256a[] = { }; define_sfdp_read(n25q256a); +static const uint8_t sfdp_mt35xu02g[] = { + 0x53, 0x46, 0x44, 0x50, 0x06, 0x01, 0x01, 0xff, + 0x00, 0x06, 0x01, 0x10, 0x30, 0x00, 0x00, 0xff, + 0x84, 0x00, 0x01, 0x02, 0x80, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0x8a, 0xff, 0xff, 0xff, 0xff, 0x7f, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0x00, 0x00, 0x0c, 0x20, 0x11, 0xd8, + 0x0f, 0x52, 0x00, 0x00, 0x24, 0x5a, 0x99, 0x00, + 0x8b, 0x8e, 0x03, 0xe1, 0xac, 0x01, 0x27, 0x38, + 0x7a, 0x75, 0x7a, 0x75, 0xfb, 0xbd, 0xd5, 0x5c, + 0x00, 0x00, 0x70, 0xff, 0x81, 0xb0, 0x38, 0x36, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x43, 0x0e, 0xff, 0xff, 0x21, 0xdc, 0x5c, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; + +define_sfdp_read(mt35xu02g); /* * Matronix diff --git a/hw/block/m25p80_sfdp.h b/hw/block/m25p80_sfdp.h index 011a880f66a..1733b569508 100644 --- a/hw/block/m25p80_sfdp.h +++ b/hw/block/m25p80_sfdp.h @@ -16,6 +16,7 @@ #define M25P80_SFDP_MAX_SIZE (1 << 24) uint8_t m25p80_sfdp_n25q256a(uint32_t addr); +uint8_t 
m25p80_sfdp_mt35xu02g(uint32_t addr); uint8_t m25p80_sfdp_mx25l25635e(uint32_t addr); uint8_t m25p80_sfdp_mx25l25635f(uint32_t addr); diff --git a/hw/block/nand.c b/hw/block/nand.c index 9c1b89cfa66..e2433c25bdc 100644 --- a/hw/block/nand.c +++ b/hw/block/nand.c @@ -84,7 +84,11 @@ struct NANDFlashState { void (*blk_write)(NANDFlashState *s); void (*blk_erase)(NANDFlashState *s); - void (*blk_load)(NANDFlashState *s, uint64_t addr, int offset); + /* + * Returns %true when block containing (@addr + @offset) is + * successfully loaded, otherwise %false. + */ + bool (*blk_load)(NANDFlashState *s, uint64_t addr, unsigned offset); uint32_t ioaddr_vmstate; }; @@ -243,9 +247,30 @@ static inline void nand_pushio_byte(NANDFlashState *s, uint8_t value) } } +/* + * nand_load_block: Load block containing (s->addr + @offset). + * Returns length of data available at @offset in this block. + */ +static unsigned nand_load_block(NANDFlashState *s, unsigned offset) +{ + unsigned iolen; + + if (!s->blk_load(s, s->addr, offset)) { + return 0; + } + + iolen = (1 << s->page_shift); + if (s->gnd) { + iolen += 1 << s->oob_shift; + } + assert(offset <= iolen); + iolen -= offset; + + return iolen; +} + static void nand_command(NANDFlashState *s) { - unsigned int offset; switch (s->cmd) { case NAND_CMD_READ0: s->iolen = 0; @@ -271,12 +296,7 @@ static void nand_command(NANDFlashState *s) case NAND_CMD_NOSERIALREAD2: if (!(nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP)) break; - offset = s->addr & ((1 << s->addr_shift) - 1); - s->blk_load(s, s->addr, offset); - if (s->gnd) - s->iolen = (1 << s->page_shift) - offset; - else - s->iolen = (1 << s->page_shift) + (1 << s->oob_shift) - offset; + s->iolen = nand_load_block(s, s->addr & ((1 << s->addr_shift) - 1)); break; case NAND_CMD_RESET: @@ -345,7 +365,7 @@ static const VMStateDescription vmstate_nand = { .minimum_version_id = 1, .pre_save = nand_pre_save, .post_load = nand_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(cle, NANDFlashState), VMSTATE_UINT8(ale, NANDFlashState), VMSTATE_UINT8(ce, NANDFlashState), @@ -597,12 +617,7 @@ uint32_t nand_getio(DeviceState *dev) if (!s->iolen && s->cmd == NAND_CMD_READ0) { offset = (int) (s->addr & ((1 << s->addr_shift) - 1)) + s->offset; s->offset = 0; - - s->blk_load(s, s->addr, offset); - if (s->gnd) - s->iolen = (1 << s->page_shift) - offset; - else - s->iolen = (1 << s->page_shift) + (1 << s->oob_shift) - offset; + s->iolen = nand_load_block(s, offset); } if (s->ce || s->iolen <= 0) { @@ -763,11 +778,15 @@ static void glue(nand_blk_erase_, NAND_PAGE_SIZE)(NANDFlashState *s) } } -static void glue(nand_blk_load_, NAND_PAGE_SIZE)(NANDFlashState *s, - uint64_t addr, int offset) +static bool glue(nand_blk_load_, NAND_PAGE_SIZE)(NANDFlashState *s, + uint64_t addr, unsigned offset) { if (PAGE(addr) >= s->pages) { - return; + return false; + } + + if (offset > NAND_PAGE_SIZE + OOB_SIZE) { + return false; } if (s->blk) { @@ -795,6 +814,8 @@ static void glue(nand_blk_load_, NAND_PAGE_SIZE)(NANDFlashState *s, offset, NAND_PAGE_SIZE + OOB_SIZE - offset); s->ioaddr = s->io; } + + return true; } static void glue(nand_init_, NAND_PAGE_SIZE)(NANDFlashState *s) diff --git a/hw/block/onenand.c b/hw/block/onenand.c index 50d3d1c9856..d8a6944027a 100644 --- a/hw/block/onenand.c +++ b/hw/block/onenand.c @@ -179,7 +179,7 @@ static const VMStateDescription vmstate_onenand = { .minimum_version_id = 1, .pre_save = onenand_pre_save, .post_load = onenand_post_load, - .fields = (VMStateField[]) { + 
.fields = (const VMStateField[]) { VMSTATE_UINT8(current_direction, OneNANDState), VMSTATE_INT32(cycle, OneNANDState), VMSTATE_INT32(otpmode, OneNANDState), diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c index 5e848a9b4c1..1bda8424b90 100644 --- a/hw/block/pflash_cfi01.c +++ b/hw/block/pflash_cfi01.c @@ -118,14 +118,14 @@ static const VMStateDescription vmstate_pflash = { .version_id = 1, .minimum_version_id = 1, .post_load = pflash_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(wcycle, PFlashCFI01), VMSTATE_UINT8(cmd, PFlashCFI01), VMSTATE_UINT8(status, PFlashCFI01), VMSTATE_UINT64(counter, PFlashCFI01), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_pflash_blk_write, NULL } @@ -848,8 +848,8 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp) } if (pfl->blk) { - if (!blk_check_size_and_read_all(pfl->blk, pfl->storage, total_len, - errp)) { + if (!blk_check_size_and_read_all(pfl->blk, dev, pfl->storage, + total_len, errp)) { vmstate_unregister_ram(&pfl->mem, DEVICE(pfl)); return; } diff --git a/hw/block/pflash_cfi02.c b/hw/block/pflash_cfi02.c index 6fa56f14c02..2314142373f 100644 --- a/hw/block/pflash_cfi02.c +++ b/hw/block/pflash_cfi02.c @@ -902,7 +902,7 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp) } if (pfl->blk) { - if (!blk_check_size_and_read_all(pfl->blk, pfl->storage, + if (!blk_check_size_and_read_all(pfl->blk, dev, pfl->storage, pfl->chip_len, errp)) { vmstate_unregister_ram(&pfl->orig_mem, DEVICE(pfl)); return; diff --git a/hw/block/swim.c b/hw/block/swim.c index fd65c59f8a1..44761c11cbc 100644 --- a/hw/block/swim.c +++ b/hw/block/swim.c @@ -516,7 +516,7 @@ static const VMStateDescription vmstate_fdrive = { .name = "fdrive", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() }, }; @@ -525,7 +525,7 @@ static const VMStateDescription vmstate_swim = { .name = "swim", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(mode, SWIMCtrl), /* IWM mode */ VMSTATE_INT32(iwm_switch, SWIMCtrl), @@ -545,7 +545,7 @@ static const VMStateDescription vmstate_swim = { static const VMStateDescription vmstate_sysbus_swim = { .name = "SWIM", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(ctrl, Swim, 0, vmstate_swim, SWIMCtrl), VMSTATE_END_OF_LIST() } diff --git a/hw/block/tc58128.c b/hw/block/tc58128.c index d350126b274..0984e37417b 100644 --- a/hw/block/tc58128.c +++ b/hw/block/tc58128.c @@ -202,6 +202,9 @@ static sh7750_io_device tc58128 = { int tc58128_init(struct SH7750State *s, const char *zone1, const char *zone2) { + if (!qtest_enabled()) { + warn_report_once("The TC58128 flash device is deprecated"); + } init_dev(&tc58128_devs[0], zone1); init_dev(&tc58128_devs[1], zone2); return sh7750_register_io_device(s, &tc58128); diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index 2863d80d150..9e6bbc6950d 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -91,7 +91,6 @@ static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config) static int vhost_user_blk_handle_config_change(struct vhost_dev *dev) { int ret; - struct virtio_blk_config blkcfg; VirtIODevice *vdev = dev->vdev; VHostUserBlk *s = VHOST_USER_BLK(dev->vdev); Error *local_err = NULL; 
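A note on the many .fields hunks throughout these files: in each one the compound literal that initializes .fields gains a const qualifier (and .subsections literals become const VMStateDescription * const []), which allows the descriptor arrays to be placed in read-only memory. A minimal sketch of the resulting shape; ExampleState and its reg field are made up purely for illustration and are not part of this series:

#include "qemu/osdep.h"
#include "migration/vmstate.h"

typedef struct ExampleState {            /* made-up state, illustration only */
    uint32_t reg;
} ExampleState;

static const VMStateDescription vmstate_example = {
    .name = "example",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {   /* previously: (VMStateField[]) */
        VMSTATE_UINT32(reg, ExampleState),
        VMSTATE_END_OF_LIST()
    },
};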
@@ -100,19 +99,15 @@ static int vhost_user_blk_handle_config_change(struct vhost_dev *dev) return 0; } - ret = vhost_dev_get_config(dev, (uint8_t *)&blkcfg, + ret = vhost_dev_get_config(dev, (uint8_t *)&s->blkcfg, vdev->config_len, &local_err); if (ret < 0) { error_report_err(local_err); return ret; } - /* valid for resize only */ - if (blkcfg.capacity != s->blkcfg.capacity) { - s->blkcfg.capacity = blkcfg.capacity; - memcpy(dev->vdev->config, &s->blkcfg, vdev->config_len); - virtio_notify_config(dev->vdev); - } + memcpy(dev->vdev->config, &s->blkcfg, vdev->config_len); + virtio_notify_config(dev->vdev); return 0; } @@ -554,7 +549,7 @@ static const VMStateDescription vmstate_vhost_user_blk = { .name = "vhost-user-blk", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index 31aac145810..bb86e65f652 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -27,7 +27,6 @@ #include "sysemu/sysemu.h" #include "sysemu/runstate.h" #include "hw/virtio/virtio-blk.h" -#include "dataplane/virtio-blk.h" #include "scsi/constants.h" #ifdef __linux__ # include <scsi/sg.h> @@ -38,6 +37,8 @@ #include "hw/virtio/virtio-blk-common.h" #include "qemu/coroutine.h" +static void virtio_blk_ioeventfd_attach(VirtIOBlock *s); + static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, VirtIOBlockReq *req) { @@ -66,7 +67,7 @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) iov_discard_undo(&req->outhdr_undo); virtqueue_push(req->vq, &req->elem, req->in_len); if (qemu_in_iothread()) { - virtio_blk_data_plane_notify(s->dataplane, req->vq); + virtio_notify_irqfd(vdev, req->vq); } else { virtio_notify(vdev, req->vq); } @@ -82,8 +83,11 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, /* Break the link as the next request is going to be parsed from the * ring again. Otherwise we may end up doing a double completion!
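 *
 * s->rq is now reachable from several AioContexts (the per-virtqueue
 * IOThreads, the DMA-restart path, reset and save/load), which is why this
 * and every other access to the list is guarded by
 * WITH_QEMU_LOCK_GUARD(&s->rq_lock).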
*/ req->mr_next = NULL; - req->next = s->rq; - s->rq = req; + + WITH_QEMU_LOCK_GUARD(&s->rq_lock) { + req->next = s->rq; + s->rq = req; + } } else if (action == BLOCK_ERROR_ACTION_REPORT) { virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); if (acct_failed) { @@ -102,7 +106,6 @@ static void virtio_blk_rw_complete(void *opaque, int ret) VirtIOBlock *s = next->dev; VirtIODevice *vdev = VIRTIO_DEVICE(s); - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); while (next) { VirtIOBlockReq *req = next; next = req->mr_next; @@ -135,7 +138,6 @@ static void virtio_blk_rw_complete(void *opaque, int ret) block_acct_done(blk_get_stats(s->blk), &req->acct); virtio_blk_free_request(req); } - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); } static void virtio_blk_flush_complete(void *opaque, int ret) @@ -143,19 +145,13 @@ static void virtio_blk_flush_complete(void *opaque, int ret) VirtIOBlockReq *req = opaque; VirtIOBlock *s = req->dev; - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); - if (ret) { - if (virtio_blk_handle_rw_error(req, -ret, 0, true)) { - goto out; - } + if (ret && virtio_blk_handle_rw_error(req, -ret, 0, true)) { + return; } virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); block_acct_done(blk_get_stats(s->blk), &req->acct); virtio_blk_free_request(req); - -out: - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); } static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) @@ -165,11 +161,8 @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) & ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES; - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); - if (ret) { - if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) { - goto out; - } + if (ret && virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) { + return; } virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); @@ -177,9 +170,6 @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) block_acct_done(blk_get_stats(s->blk), &req->acct); } virtio_blk_free_request(req); - -out: - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); } #ifdef __linux__ @@ -226,10 +216,8 @@ static void virtio_blk_ioctl_complete(void *opaque, int status) virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len); out: - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); virtio_blk_req_complete(req, status); virtio_blk_free_request(req); - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); g_free(ioctl_req); } @@ -669,13 +657,15 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret) { ZoneCmdData *data = opaque; VirtIOBlockReq *req = data->req; - VirtIOBlock *s = req->dev; VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); struct iovec *in_iov = data->in_iov; unsigned in_num = data->in_num; int64_t zrp_size, n, j = 0; int64_t nz = data->zone_report_data.nr_zones; int8_t err_status = VIRTIO_BLK_S_OK; + struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) { + .nr_zones = cpu_to_le64(nz), + }; trace_virtio_blk_zone_report_complete(vdev, req, nz, ret); if (ret) { @@ -683,9 +673,6 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret) goto out; } - struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) { - .nr_zones = cpu_to_le64(nz), - }; zrp_size = sizeof(struct virtio_blk_zone_report) + sizeof(struct virtio_blk_zone_descriptor) * nz; n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, 
sizeof(zrp_hdr)); @@ -760,10 +747,8 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret) } out: - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); virtio_blk_req_complete(req, err_status); virtio_blk_free_request(req); - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); g_free(data->zone_report_data.zones); g_free(data); } @@ -783,7 +768,8 @@ static void virtio_blk_handle_zone_report(VirtIOBlockReq *req, sizeof(struct virtio_blk_zone_report) + sizeof(struct virtio_blk_zone_descriptor)) { virtio_error(vdev, "in buffer too small for zone report"); - return; + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + goto out; } /* start byte offset of the zone report */ @@ -826,10 +812,8 @@ static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; } - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); virtio_blk_req_complete(req, err_status); virtio_blk_free_request(req); - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); } static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) @@ -879,7 +863,6 @@ static void virtio_blk_zone_append_complete(void *opaque, int ret) { ZoneCmdData *data = opaque; VirtIOBlockReq *req = data->req; - VirtIOBlock *s = req->dev; VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); int64_t append_sector, n; uint8_t err_status = VIRTIO_BLK_S_OK; @@ -902,10 +885,8 @@ static void virtio_blk_zone_append_complete(void *opaque, int ret) trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret); out: - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); virtio_blk_req_complete(req, err_status); virtio_blk_free_request(req); - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); g_free(data); } @@ -920,13 +901,14 @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; int64_t len = iov_size(out_iov, out_num); + ZoneCmdData *data; trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS); if (!check_zoned_request(s, offset, len, true, &err_status)) { goto out; } - ZoneCmdData *data = g_malloc(sizeof(ZoneCmdData)); + data = g_malloc(sizeof(ZoneCmdData)); data->req = req; data->in_iov = in_iov; data->in_num = in_num; @@ -941,10 +923,8 @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, return 0; out: - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); virtio_blk_req_complete(req, err_status); virtio_blk_free_request(req); - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); return err_status; } @@ -1134,7 +1114,6 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) MultiReqBuffer mrb = {}; bool suppress_notifications = virtio_queue_get_notification(vq); - aio_context_acquire(blk_get_aio_context(s->blk)); defer_call_begin(); do { @@ -1160,35 +1139,32 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) } defer_call_end(); - aio_context_release(blk_get_aio_context(s->blk)); } static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) { VirtIOBlock *s = (VirtIOBlock *)vdev; - if (s->dataplane && !s->dataplane_started) { + if (!s->ioeventfd_disabled && !s->ioeventfd_started) { /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start - * dataplane here instead of waiting for .set_status(). + * ioeventfd here instead of waiting for .set_status(). 
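 *
 * If starting ioeventfd fails, virtio_blk_start_ioeventfd() sets
 * ->ioeventfd_disabled, so we do not return below and instead fall through
 * to processing the virtqueue directly from this (BQL) context.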
*/ virtio_device_start_ioeventfd(vdev); - if (!s->dataplane_disabled) { + if (!s->ioeventfd_disabled) { return; } } + virtio_blk_handle_vq(s, vq); } static void virtio_blk_dma_restart_bh(void *opaque) { - VirtIOBlock *s = opaque; + VirtIOBlockReq *req = opaque; + VirtIOBlock *s = req->dev; /* we're called with at least one request */ - VirtIOBlockReq *req = s->rq; MultiReqBuffer mrb = {}; - s->rq = NULL; - - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); while (req) { VirtIOBlockReq *next = req->next; if (virtio_blk_handle_request(req, &mrb)) { @@ -1212,48 +1188,79 @@ static void virtio_blk_dma_restart_bh(void *opaque) /* Paired with inc in virtio_blk_dma_restart_cb() */ blk_dec_in_flight(s->conf.conf.blk); - - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); } static void virtio_blk_dma_restart_cb(void *opaque, bool running, RunState state) { VirtIOBlock *s = opaque; + uint16_t num_queues = s->conf.num_queues; + g_autofree VirtIOBlockReq **vq_rq = NULL; + VirtIOBlockReq *rq; if (!running) { return; } - /* Paired with dec in virtio_blk_dma_restart_bh() */ - blk_inc_in_flight(s->conf.conf.blk); + /* Split the device-wide s->rq request list into per-vq request lists */ + vq_rq = g_new0(VirtIOBlockReq *, num_queues); + + WITH_QEMU_LOCK_GUARD(&s->rq_lock) { + rq = s->rq; + s->rq = NULL; + } + + while (rq) { + VirtIOBlockReq *next = rq->next; + uint16_t idx = virtio_get_queue_index(rq->vq); - aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk), - virtio_blk_dma_restart_bh, s); + /* Only num_queues vqs were created so vq_rq[idx] is within bounds */ + assert(idx < num_queues); + rq->next = vq_rq[idx]; + vq_rq[idx] = rq; + rq = next; + } + + /* Schedule a BH to submit the requests in each vq's AioContext */ + for (uint16_t i = 0; i < num_queues; i++) { + if (!vq_rq[i]) { + continue; + } + + /* Paired with dec in virtio_blk_dma_restart_bh() */ + blk_inc_in_flight(s->conf.conf.blk); + + aio_bh_schedule_oneshot(s->vq_aio_context[i], + virtio_blk_dma_restart_bh, + vq_rq[i]); + } } static void virtio_blk_reset(VirtIODevice *vdev) { VirtIOBlock *s = VIRTIO_BLK(vdev); - AioContext *ctx; VirtIOBlockReq *req; - ctx = blk_get_aio_context(s->blk); - aio_context_acquire(ctx); + /* Dataplane has stopped... */ + assert(!s->ioeventfd_started); + + /* ...but requests may still be in flight. */ blk_drain(s->blk); /* We drop queued requests after blk_drain() because blk_drain() itself can * produce them. */ - while (s->rq) { - req = s->rq; - s->rq = req->next; - virtqueue_detach_element(req->vq, &req->elem, 0); - virtio_blk_free_request(req); - } + WITH_QEMU_LOCK_GUARD(&s->rq_lock) { + while (s->rq) { + req = s->rq; + s->rq = req->next; - aio_context_release(ctx); + /* No other threads can access req->vq here */ + virtqueue_detach_element(req->vq, &req->elem, 0); + + virtio_blk_free_request(req); + } + } - assert(!s->dataplane_started); blk_set_enable_write_cache(s->blk, s->original_wce); } @@ -1268,10 +1275,6 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) uint64_t capacity; int64_t length; int blk_size = conf->logical_block_size; - AioContext *ctx; - - ctx = blk_get_aio_context(s->blk); - aio_context_acquire(ctx); blk_get_geometry(s->blk, &capacity); memset(&blkcfg, 0, sizeof(blkcfg)); @@ -1295,7 +1298,6 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) * per track (cylinder). 
*/ length = blk_getlength(s->blk); - aio_context_release(ctx); if (length > 0 && length / conf->heads / conf->secs % blk_size) { blkcfg.geometry.sectors = conf->secs & ~s->sector_mask; } else { @@ -1362,9 +1364,7 @@ static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config) memcpy(&blkcfg, config, s->config_size); - aio_context_acquire(blk_get_aio_context(s->blk)); blk_set_enable_write_cache(s->blk, blkcfg.wce != 0); - aio_context_release(blk_get_aio_context(s->blk)); } static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features, @@ -1409,7 +1409,7 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status) VirtIOBlock *s = VIRTIO_BLK(vdev); if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) { - assert(!s->dataplane_started); + assert(!s->ioeventfd_started); } if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) { @@ -1432,29 +1432,31 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status) * s->blk would erroneously be placed in writethrough mode. */ if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) { - aio_context_acquire(blk_get_aio_context(s->blk)); blk_set_enable_write_cache(s->blk, virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_WCE)); - aio_context_release(blk_get_aio_context(s->blk)); } } static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f) { VirtIOBlock *s = VIRTIO_BLK(vdev); - VirtIOBlockReq *req = s->rq; - while (req) { - qemu_put_sbyte(f, 1); + WITH_QEMU_LOCK_GUARD(&s->rq_lock) { + VirtIOBlockReq *req = s->rq; - if (s->conf.num_queues > 1) { - qemu_put_be32(f, virtio_get_queue_index(req->vq)); - } + while (req) { + qemu_put_sbyte(f, 1); + + if (s->conf.num_queues > 1) { + qemu_put_be32(f, virtio_get_queue_index(req->vq)); + } - qemu_put_virtqueue_element(vdev, f, &req->elem); - req = req->next; + qemu_put_virtqueue_element(vdev, f, &req->elem); + req = req->next; + } } + qemu_put_sbyte(f, 0); } @@ -1480,8 +1482,11 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f, req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq)); virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req); - req->next = s->rq; - s->rq = req; + + WITH_QEMU_LOCK_GUARD(&s->rq_lock) { + req->next = s->rq; + s->rq = req; + } } return 0; @@ -1500,44 +1505,50 @@ static void virtio_blk_resize(void *opaque) VirtIODevice *vdev = VIRTIO_DEVICE(opaque); /* - * virtio_notify_config() needs to acquire the global mutex, + * virtio_notify_config() needs to acquire the BQL, * so it can't be called from an iothread. Instead, schedule * it to be run in the main context BH. 
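 *
 * In the main loop the BQL can be taken, so virtio_notify_config() is safe
 * there. The same bounce-to-another-AioContext pattern, via
 * aio_bh_schedule_oneshot(), is what virtio_blk_dma_restart_cb() above uses
 * to submit restarted requests in each virtqueue's AioContext.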
*/ aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev); } -/* Suspend virtqueue ioeventfd processing during drain */ -static void virtio_blk_drained_begin(void *opaque) +static void virtio_blk_ioeventfd_detach(VirtIOBlock *s) { - VirtIOBlock *s = opaque; - VirtIODevice *vdev = VIRTIO_DEVICE(opaque); - AioContext *ctx = blk_get_aio_context(s->conf.conf.blk); + VirtIODevice *vdev = VIRTIO_DEVICE(s); - if (!s->dataplane || !s->dataplane_started) { - return; + for (uint16_t i = 0; i < s->conf.num_queues; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]); } +} + +static void virtio_blk_ioeventfd_attach(VirtIOBlock *s) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(s); for (uint16_t i = 0; i < s->conf.num_queues; i++) { VirtQueue *vq = virtio_get_queue(vdev, i); - virtio_queue_aio_detach_host_notifier(vq, ctx); + virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]); } } -/* Resume virtqueue ioeventfd processing after drain */ -static void virtio_blk_drained_end(void *opaque) +/* Suspend virtqueue ioeventfd processing during drain */ +static void virtio_blk_drained_begin(void *opaque) { VirtIOBlock *s = opaque; - VirtIODevice *vdev = VIRTIO_DEVICE(opaque); - AioContext *ctx = blk_get_aio_context(s->conf.conf.blk); - if (!s->dataplane || !s->dataplane_started) { - return; + if (s->ioeventfd_started) { + virtio_blk_ioeventfd_detach(s); } +} - for (uint16_t i = 0; i < s->conf.num_queues; i++) { - VirtQueue *vq = virtio_get_queue(vdev, i); - virtio_queue_aio_attach_host_notifier(vq, ctx); +/* Resume virtqueue ioeventfd processing after drain */ +static void virtio_blk_drained_end(void *opaque) +{ + VirtIOBlock *s = opaque; + + if (s->ioeventfd_started) { + virtio_blk_ioeventfd_attach(s); } } @@ -1547,11 +1558,413 @@ static const BlockDevOps virtio_block_ops = { .drained_end = virtio_blk_drained_end, }; +static bool +validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list, + uint16_t num_queues, Error **errp) +{ + g_autofree unsigned long *vqs = bitmap_new(num_queues); + g_autoptr(GHashTable) iothreads = + g_hash_table_new(g_str_hash, g_str_equal); + + for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) { + const char *name = node->value->iothread; + uint16List *vq; + + if (!iothread_by_id(name)) { + error_setg(errp, "IOThread \"%s\" object does not exist", name); + return false; + } + + if (!g_hash_table_add(iothreads, (gpointer)name)) { + error_setg(errp, + "duplicate IOThread name \"%s\" in iothread-vq-mapping", + name); + return false; + } + + if (node != list) { + if (!!node->value->vqs != !!list->value->vqs) { + error_setg(errp, "either all items in iothread-vq-mapping " + "must have vqs or none of them must have it"); + return false; + } + } + + for (vq = node->value->vqs; vq; vq = vq->next) { + if (vq->value >= num_queues) { + error_setg(errp, "vq index %u for IOThread \"%s\" must be " + "less than num_queues %u in iothread-vq-mapping", + vq->value, name, num_queues); + return false; + } + + if (test_and_set_bit(vq->value, vqs)) { + error_setg(errp, "cannot assign vq %u to IOThread \"%s\" " + "because it is already assigned", vq->value, name); + return false; + } + } + } + + if (list->value->vqs) { + for (uint16_t i = 0; i < num_queues; i++) { + if (!test_bit(i, vqs)) { + error_setg(errp, + "missing vq %u IOThread assignment in iothread-vq-mapping", + i); + return false; + } + } + } + + return true; +} + +/** + * apply_iothread_vq_mapping: + * @iothread_vq_mapping_list: 
The mapping of virtqueues to IOThreads. + * @vq_aio_context: The array of AioContext pointers to fill in. + * @num_queues: The length of @vq_aio_context. + * @errp: If an error occurs, a pointer to the area to store the error. + * + * Fill in the AioContext for each virtqueue in the @vq_aio_context array given + * the iothread-vq-mapping parameter in @iothread_vq_mapping_list. + * + * Returns: %true on success, %false on failure. + **/ +static bool apply_iothread_vq_mapping( + IOThreadVirtQueueMappingList *iothread_vq_mapping_list, + AioContext **vq_aio_context, + uint16_t num_queues, + Error **errp) +{ + IOThreadVirtQueueMappingList *node; + size_t num_iothreads = 0; + size_t cur_iothread = 0; + + if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list, + num_queues, errp)) { + return false; + } + + for (node = iothread_vq_mapping_list; node; node = node->next) { + num_iothreads++; + } + + for (node = iothread_vq_mapping_list; node; node = node->next) { + IOThread *iothread = iothread_by_id(node->value->iothread); + AioContext *ctx = iothread_get_aio_context(iothread); + + /* Released in virtio_blk_vq_aio_context_cleanup() */ + object_ref(OBJECT(iothread)); + + if (node->value->vqs) { + uint16List *vq; + + /* Explicit vq:IOThread assignment */ + for (vq = node->value->vqs; vq; vq = vq->next) { + assert(vq->value < num_queues); + vq_aio_context[vq->value] = ctx; + } + } else { + /* Round-robin vq:IOThread assignment */ + for (unsigned i = cur_iothread; i < num_queues; + i += num_iothreads) { + vq_aio_context[i] = ctx; + } + } + + cur_iothread++; + } + + return true; +} + +/* Context: BQL held */ +static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp) +{ + ERRP_GUARD(); + VirtIODevice *vdev = VIRTIO_DEVICE(s); + VirtIOBlkConf *conf = &s->conf; + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + if (conf->iothread && conf->iothread_vq_mapping_list) { + error_setg(errp, + "iothread and iothread-vq-mapping properties cannot be set " + "at the same time"); + return false; + } + + if (conf->iothread || conf->iothread_vq_mapping_list) { + if (!k->set_guest_notifiers || !k->ioeventfd_assign) { + error_setg(errp, + "device is incompatible with iothread " + "(transport does not support notifiers)"); + return false; + } + if (!virtio_device_ioeventfd_enabled(vdev)) { + error_setg(errp, "ioeventfd is required for iothread"); + return false; + } + + /* + * If ioeventfd is (re-)enabled while the guest is running there could + * be block jobs that can conflict. 
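 *
 * For reference, apply_iothread_vq_mapping() above behaves as follows: with
 * num_queues = 4 and an iothread-vq-mapping list naming two IOThreads
 * without explicit "vqs" lists, the virtqueues are assigned round-robin:
 *
 *   vq 0 -> first IOThread     vq 2 -> first IOThread
 *   vq 1 -> second IOThread    vq 3 -> second IOThread
 *
 * With explicit "vqs" lists, validate_iothread_vq_mapping_list() requires
 * every vq index 0..num_queues-1 to be assigned exactly once.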
+ */ + if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) { + error_prepend(errp, "cannot start virtio-blk ioeventfd: "); + return false; + } + } + + s->vq_aio_context = g_new(AioContext *, conf->num_queues); + + if (conf->iothread_vq_mapping_list) { + if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list, + s->vq_aio_context, + conf->num_queues, + errp)) { + g_free(s->vq_aio_context); + s->vq_aio_context = NULL; + return false; + } + } else if (conf->iothread) { + AioContext *ctx = iothread_get_aio_context(conf->iothread); + for (unsigned i = 0; i < conf->num_queues; i++) { + s->vq_aio_context[i] = ctx; + } + + /* Released in virtio_blk_vq_aio_context_cleanup() */ + object_ref(OBJECT(conf->iothread)); + } else { + AioContext *ctx = qemu_get_aio_context(); + for (unsigned i = 0; i < conf->num_queues; i++) { + s->vq_aio_context[i] = ctx; + } + } + + return true; +} + +/* Context: BQL held */ +static void virtio_blk_vq_aio_context_cleanup(VirtIOBlock *s) +{ + VirtIOBlkConf *conf = &s->conf; + + assert(!s->ioeventfd_started); + + if (conf->iothread_vq_mapping_list) { + IOThreadVirtQueueMappingList *node; + + for (node = conf->iothread_vq_mapping_list; node; node = node->next) { + IOThread *iothread = iothread_by_id(node->value->iothread); + object_unref(OBJECT(iothread)); + } + } + + if (conf->iothread) { + object_unref(OBJECT(conf->iothread)); + } + + g_free(s->vq_aio_context); + s->vq_aio_context = NULL; +} + +/* Context: BQL held */ +static int virtio_blk_start_ioeventfd(VirtIODevice *vdev) +{ + VirtIOBlock *s = VIRTIO_BLK(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + unsigned i; + unsigned nvqs = s->conf.num_queues; + Error *local_err = NULL; + int r; + + if (s->ioeventfd_started || s->ioeventfd_starting) { + return 0; + } + + s->ioeventfd_starting = true; + + /* Set up guest notifier (irq) */ + r = k->set_guest_notifiers(qbus->parent, nvqs, true); + if (r != 0) { + error_report("virtio-blk failed to set guest notifier (%d), " + "ensure -accel kvm is set.", r); + goto fail_guest_notifiers; + } + + /* + * Batch all the host notifiers in a single transaction to avoid + * quadratic time complexity in address_space_update_ioeventfds(). + */ + memory_region_transaction_begin(); + + /* Set up virtqueue notify */ + for (i = 0; i < nvqs; i++) { + r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true); + if (r != 0) { + int j = i; + + fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r); + while (i--) { + virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); + } + + /* + * The transaction expects the ioeventfds to be open when it + * commits. Do it now, before the cleanup loop. + */ + memory_region_transaction_commit(); + + while (j--) { + virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j); + } + goto fail_host_notifiers; + } + } + + memory_region_transaction_commit(); + + /* + * Try to change the AioContext so that block jobs and other operations can + * co-locate their activity in the same AioContext. If it fails, nevermind. + */ + assert(nvqs > 0); /* enforced during ->realize() */ + r = blk_set_aio_context(s->conf.conf.blk, s->vq_aio_context[0], + &local_err); + if (r < 0) { + warn_report_err(local_err); + } + + /* + * These fields must be visible to the IOThread when it processes the + * virtqueue, otherwise it will think ioeventfd has not started yet. 
+ * + * Make sure ->ioeventfd_started is false when blk_set_aio_context() is + * called above so that draining does not cause the host notifier to be + * detached/attached prematurely. + */ + s->ioeventfd_starting = false; + s->ioeventfd_started = true; + smp_wmb(); /* paired with aio_notify_accept() on the read side */ + + /* + * Get this show started by hooking up our callbacks. If drained now, + * virtio_blk_drained_end() will do this later. + * Attaching the notifier also kicks the virtqueues, processing any requests + * they may already have. + */ + if (!blk_in_drain(s->conf.conf.blk)) { + virtio_blk_ioeventfd_attach(s); + } + return 0; + + fail_host_notifiers: + k->set_guest_notifiers(qbus->parent, nvqs, false); + fail_guest_notifiers: + s->ioeventfd_disabled = true; + s->ioeventfd_starting = false; + return -ENOSYS; +} + +/* Stop notifications for new requests from guest. + * + * Context: BH in IOThread + */ +static void virtio_blk_ioeventfd_stop_vq_bh(void *opaque) +{ + VirtQueue *vq = opaque; + EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq); + + virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context()); + + /* + * Test and clear notifier after disabling event, in case poll callback + * didn't have time to run. + */ + virtio_queue_host_notifier_read(host_notifier); +} + +/* Context: BQL held */ +static void virtio_blk_stop_ioeventfd(VirtIODevice *vdev) +{ + VirtIOBlock *s = VIRTIO_BLK(vdev); + BusState *qbus = qdev_get_parent_bus(DEVICE(s)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + unsigned i; + unsigned nvqs = s->conf.num_queues; + + if (!s->ioeventfd_started || s->ioeventfd_stopping) { + return; + } + + /* Better luck next time. */ + if (s->ioeventfd_disabled) { + s->ioeventfd_disabled = false; + s->ioeventfd_started = false; + return; + } + s->ioeventfd_stopping = true; + + if (!blk_in_drain(s->conf.conf.blk)) { + for (i = 0; i < nvqs; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + AioContext *ctx = s->vq_aio_context[i]; + + aio_wait_bh_oneshot(ctx, virtio_blk_ioeventfd_stop_vq_bh, vq); + } + } + + /* + * Batch all the host notifiers in a single transaction to avoid + * quadratic time complexity in address_space_update_ioeventfds(). + */ + memory_region_transaction_begin(); + + for (i = 0; i < nvqs; i++) { + virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); + } + + /* + * The transaction expects the ioeventfds to be open when it + * commits. Do it now, before the cleanup loop. + */ + memory_region_transaction_commit(); + + for (i = 0; i < nvqs; i++) { + virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i); + } + + /* + * Set ->ioeventfd_started to false before draining so that host notifiers + * are not detached/attached anymore. + */ + s->ioeventfd_started = false; + + /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */ + blk_drain(s->conf.conf.blk); + + /* + * Try to switch bs back to the QEMU main loop. 
If other users keep the + * BlockBackend in the iothread, that's ok + */ + blk_set_aio_context(s->conf.conf.blk, qemu_get_aio_context(), NULL); + + /* Clean up guest notifier (irq) */ + k->set_guest_notifiers(qbus->parent, nvqs, false); + + s->ioeventfd_stopping = false; +} + static void virtio_blk_device_realize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIOBlock *s = VIRTIO_BLK(dev); VirtIOBlkConf *conf = &s->conf; + BlockDriverState *bs; Error *err = NULL; unsigned i; @@ -1597,7 +2010,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) return; } - BlockDriverState *bs = blk_bs(conf->conf.blk); + bs = blk_bs(conf->conf.blk); if (bs->bl.zoned != BLK_Z_NONE) { virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED); if (bs->bl.zoned == BLK_Z_HM) { @@ -1628,6 +2041,8 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) s->host_features); virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size); + qemu_mutex_init(&s->rq_lock); + s->blk = conf->conf.blk; s->rq = NULL; s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1; @@ -1636,7 +2051,13 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output); } qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2); - virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err); + + /* Don't start ioeventfd if transport does not support notifiers. */ + if (!virtio_device_ioeventfd_enabled(vdev)) { + s->ioeventfd_disabled = true; + } + + virtio_blk_vq_aio_context_init(s, &err); if (err != NULL) { error_propagate(errp, err); for (i = 0; i < conf->num_queues; i++) { @@ -1673,12 +2094,12 @@ static void virtio_blk_device_unrealize(DeviceState *dev) blk_drain(s->blk); del_boot_device_lchs(dev, "/disk@0,0"); - virtio_blk_data_plane_destroy(s->dataplane); - s->dataplane = NULL; + virtio_blk_vq_aio_context_cleanup(s); for (i = 0; i < conf->num_queues; i++) { virtio_del_queue(vdev, i); } qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2); + qemu_mutex_destroy(&s->rq_lock); blk_ram_registrar_destroy(&s->blk_ram_registrar); qemu_del_vm_change_state_handler(s->change); blockdev_mark_auto_del(s->blk); @@ -1698,7 +2119,7 @@ static const VMStateDescription vmstate_virtio_blk = { .name = "virtio-blk", .minimum_version_id = 2, .version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, @@ -1723,6 +2144,8 @@ static Property virtio_blk_properties[] = { DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true), DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD, IOThread *), + DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock, + conf.iothread_vq_mapping_list), DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features, VIRTIO_BLK_F_DISCARD, true), DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock, @@ -1755,8 +2178,8 @@ static void virtio_blk_class_init(ObjectClass *klass, void *data) vdc->reset = virtio_blk_reset; vdc->save = virtio_blk_save_device; vdc->load = virtio_blk_load_device; - vdc->start_ioeventfd = virtio_blk_data_plane_start; - vdc->stop_ioeventfd = virtio_blk_data_plane_stop; + vdc->start_ioeventfd = virtio_blk_start_ioeventfd; + vdc->stop_ioeventfd = virtio_blk_stop_ioeventfd; } static const TypeInfo virtio_blk_info = { diff --git a/hw/char/bcm2835_aux.c b/hw/char/bcm2835_aux.c index 96410b1ff86..83990e20f76 100644 
--- a/hw/char/bcm2835_aux.c +++ b/hw/char/bcm2835_aux.c @@ -260,7 +260,7 @@ static const VMStateDescription vmstate_bcm2835_aux = { .name = TYPE_BCM2835_AUX, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(read_fifo, BCM2835AuxState, BCM2835_AUX_RX_FIFO_LEN), VMSTATE_UINT8(read_pos, BCM2835AuxState), diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c index a2ac062b1ee..db31d7cc859 100644 --- a/hw/char/cadence_uart.c +++ b/hw/char/cadence_uart.c @@ -602,7 +602,7 @@ static const VMStateDescription vmstate_cadence_uart = { .minimum_version_id = 2, .pre_load = cadence_uart_pre_load, .post_load = cadence_uart_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(r, CadenceUARTState, CADENCE_UART_R_MAX), VMSTATE_UINT8_ARRAY(rx_fifo, CadenceUARTState, CADENCE_UART_RX_FIFO_SIZE), diff --git a/hw/char/cmsdk-apb-uart.c b/hw/char/cmsdk-apb-uart.c index d466cd93de4..d07cca1bd42 100644 --- a/hw/char/cmsdk-apb-uart.c +++ b/hw/char/cmsdk-apb-uart.c @@ -366,7 +366,7 @@ static const VMStateDescription cmsdk_apb_uart_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = cmsdk_apb_uart_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(state, CMSDKAPBUART), VMSTATE_UINT32(ctrl, CMSDKAPBUART), VMSTATE_UINT32(intstatus, CMSDKAPBUART), diff --git a/hw/char/digic-uart.c b/hw/char/digic-uart.c index 51d4e7db52f..ef2d7627262 100644 --- a/hw/char/digic-uart.c +++ b/hw/char/digic-uart.c @@ -165,7 +165,7 @@ static const VMStateDescription vmstate_digic_uart = { .name = "digic-uart", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(reg_rx, DigicUartState), VMSTATE_UINT32(reg_st, DigicUartState), VMSTATE_END_OF_LIST() diff --git a/hw/char/escc.c b/hw/char/escc.c index 48b30ee760a..d450d70eda1 100644 --- a/hw/char/escc.c +++ b/hw/char/escc.c @@ -766,7 +766,7 @@ static const VMStateDescription vmstate_escc_chn = { .name = "escc_chn", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(vmstate_dummy, ESCCChannelState), VMSTATE_UINT32(reg, ESCCChannelState), VMSTATE_UINT32(rxint, ESCCChannelState), @@ -785,7 +785,7 @@ static const VMStateDescription vmstate_escc = { .name = "escc", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(chn, ESCCState, 2, 2, vmstate_escc_chn, ESCCChannelState), VMSTATE_END_OF_LIST() diff --git a/hw/char/exynos4210_uart.c b/hw/char/exynos4210_uart.c index 7b7c56b6ef4..8cdd42e54fd 100644 --- a/hw/char/exynos4210_uart.c +++ b/hw/char/exynos4210_uart.c @@ -628,7 +628,7 @@ static const VMStateDescription vmstate_exynos4210_uart_fifo = { .name = "exynos4210.uart.fifo", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(sp, Exynos4210UartFIFO), VMSTATE_UINT32(rp, Exynos4210UartFIFO), VMSTATE_VBUFFER_UINT32(data, Exynos4210UartFIFO, 1, NULL, size), @@ -641,7 +641,7 @@ static const VMStateDescription vmstate_exynos4210_uart = { .version_id = 1, .minimum_version_id = 1, .post_load = exynos4210_uart_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(rx, Exynos4210UartState, 1, vmstate_exynos4210_uart_fifo, Exynos4210UartFIFO), VMSTATE_UINT32_ARRAY(reg, Exynos4210UartState, 
diff --git a/hw/char/goldfish_tty.c b/hw/char/goldfish_tty.c index 20b77885c18..f8ff043c396 100644 --- a/hw/char/goldfish_tty.c +++ b/hw/char/goldfish_tty.c @@ -232,7 +232,7 @@ static const VMStateDescription vmstate_goldfish_tty = { .name = "goldfish_tty", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(data_len, GoldfishTTYState), VMSTATE_UINT64(data_ptr, GoldfishTTYState), VMSTATE_BOOL(int_enabled, GoldfishTTYState), diff --git a/hw/char/grlib_apbuart.c b/hw/char/grlib_apbuart.c index 82ff40a530a..515b65bc070 100644 --- a/hw/char/grlib_apbuart.c +++ b/hw/char/grlib_apbuart.c @@ -1,7 +1,9 @@ /* * QEMU GRLIB APB UART Emulator * - * Copyright (c) 2010-2019 AdaCore + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2010-2024 AdaCore * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -26,7 +28,7 @@ #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" -#include "hw/sparc/grlib.h" +#include "hw/char/grlib_uart.h" #include "hw/sysbus.h" #include "qemu/module.h" #include "chardev/char-fe.h" diff --git a/hw/char/ibex_uart.c b/hw/char/ibex_uart.c index 51708c08363..63aae6dc2c7 100644 --- a/hw/char/ibex_uart.c +++ b/hw/char/ibex_uart.c @@ -488,7 +488,7 @@ static const VMStateDescription vmstate_ibex_uart = { .version_id = 1, .minimum_version_id = 1, .post_load = ibex_uart_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(tx_fifo, IbexUartState, IBEX_UART_TX_FIFO_SIZE), VMSTATE_UINT32(tx_level, IbexUartState), diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c index 377d1d97730..ba37be6faab 100644 --- a/hw/char/imx_serial.c +++ b/hw/char/imx_serial.c @@ -26,6 +26,7 @@ #include "migration/vmstate.h" #include "qemu/log.h" #include "qemu/module.h" +#include "qemu/fifo32.h" #ifndef DEBUG_IMX_UART #define DEBUG_IMX_UART 0 @@ -41,10 +42,11 @@ static const VMStateDescription vmstate_imx_serial = { .name = TYPE_IMX_SERIAL, - .version_id = 2, - .minimum_version_id = 2, - .fields = (VMStateField[]) { - VMSTATE_INT32(readbuff, IMXSerialState), + .version_id = 3, + .minimum_version_id = 3, + .fields = (const VMStateField[]) { + VMSTATE_FIFO32(rx_fifo, IMXSerialState), + VMSTATE_TIMER(ageing_timer, IMXSerialState), VMSTATE_UINT32(usr1, IMXSerialState), VMSTATE_UINT32(usr2, IMXSerialState), VMSTATE_UINT32(ucr1, IMXSerialState), @@ -71,6 +73,10 @@ static void imx_update(IMXSerialState *s) * following: */ usr1 = s->usr1 & s->ucr1 & (USR1_TRDY | USR1_RRDY); + /* + * Interrupt if AGTIM is set (ageing timer interrupt in RxFIFO) + */ + usr1 |= (s->ucr2 & UCR2_ATEN) ? (s->usr1 & USR1_AGTIM) : 0; /* * Bits that we want in USR2 are not as conveniently laid out, * unfortunately. @@ -78,15 +84,66 @@ static void imx_update(IMXSerialState *s) mask = (s->ucr1 & UCR1_TXMPTYEN) ? 
USR2_TXFE : 0; /* * TCEN and TXDC are both bit 3 + * ORE and OREN are both bit 1 * RDR and DREN are both bit 0 */ - mask |= s->ucr4 & (UCR4_WKEN | UCR4_TCEN | UCR4_DREN); + mask |= s->ucr4 & (UCR4_WKEN | UCR4_TCEN | UCR4_DREN | UCR4_OREN); usr2 = s->usr2 & mask; qemu_set_irq(s->irq, usr1 || usr2); } +static void imx_serial_rx_fifo_push(IMXSerialState *s, uint32_t value) +{ + uint32_t pushed_value = value; + if (fifo32_is_full(&s->rx_fifo)) { + /* Set ORE if FIFO is already full */ + s->usr2 |= USR2_ORE; + } else { + if (fifo32_num_used(&s->rx_fifo) == FIFO_SIZE - 1) { + /* Set OVRRUN on 32nd character in FIFO */ + pushed_value |= URXD_ERR | URXD_OVRRUN; + } + fifo32_push(&s->rx_fifo, pushed_value); + } +} + +static uint32_t imx_serial_rx_fifo_pop(IMXSerialState *s) +{ + if (fifo32_is_empty(&s->rx_fifo)) { + return 0; + } + return fifo32_pop(&s->rx_fifo); +} + +static void imx_serial_rx_fifo_ageing_timer_int(void *opaque) +{ + IMXSerialState *s = (IMXSerialState *) opaque; + s->usr1 |= USR1_AGTIM; + imx_update(s); +} + +static void imx_serial_rx_fifo_ageing_timer_restart(void *opaque) +{ + /* + * Ageing timer starts ticking when + * RX FIFO is non empty and below trigger level. + * Timer is reset if new character is received or + * a FIFO read occurs. + * Timer triggers an interrupt when duration of + * 8 characters has passed (assuming 115200 baudrate). + */ + IMXSerialState *s = (IMXSerialState *) opaque; + + if (!(s->usr1 & USR1_RRDY) && !(s->uts1 & UTS1_RXEMPTY)) { + timer_mod_ns(&s->ageing_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + AGE_DURATION_NS); + } else { + timer_del(&s->ageing_timer); + } +} + static void imx_serial_reset(IMXSerialState *s) { @@ -102,7 +159,9 @@ static void imx_serial_reset(IMXSerialState *s) s->ucr3 = 0x700; s->ubmr = 0; s->ubrc = 4; - s->readbuff = URXD_ERR; + + fifo32_reset(&s->rx_fifo); + timer_del(&s->ageing_timer); } static void imx_serial_reset_at_boot(DeviceState *dev) @@ -125,20 +184,28 @@ static uint64_t imx_serial_read(void *opaque, hwaddr offset, unsigned size) { IMXSerialState *s = (IMXSerialState *)opaque; - uint32_t c; + uint32_t c, rx_used; + uint8_t rxtl = s->ufcr & TL_MASK; DPRINTF("read(offset=0x%" HWADDR_PRIx ")\n", offset); switch (offset >> 2) { case 0x0: /* URXD */ - c = s->readbuff; + c = imx_serial_rx_fifo_pop(s); if (!(s->uts1 & UTS1_RXEMPTY)) { /* Character is valid */ c |= URXD_CHARRDY; - s->usr1 &= ~USR1_RRDY; - s->usr2 &= ~USR2_RDR; - s->uts1 |= UTS1_RXEMPTY; + rx_used = fifo32_num_used(&s->rx_fifo); + /* Clear RRDY if below threshold */ + if (rx_used < rxtl) { + s->usr1 &= ~USR1_RRDY; + } + if (rx_used == 0) { + s->usr2 &= ~USR2_RDR; + s->uts1 |= UTS1_RXEMPTY; + } imx_update(s); + imx_serial_rx_fifo_ageing_timer_restart(s); qemu_chr_fe_accept_input(&s->chr); } return c; @@ -300,19 +367,24 @@ static void imx_serial_write(void *opaque, hwaddr offset, static int imx_can_receive(void *opaque) { IMXSerialState *s = (IMXSerialState *)opaque; - return !(s->usr1 & USR1_RRDY); + return s->ucr2 & UCR2_RXEN && fifo32_num_used(&s->rx_fifo) < FIFO_SIZE; } static void imx_put_data(void *opaque, uint32_t value) { IMXSerialState *s = (IMXSerialState *)opaque; + uint8_t rxtl = s->ufcr & TL_MASK; DPRINTF("received char\n"); + imx_serial_rx_fifo_push(s, value); + if (fifo32_num_used(&s->rx_fifo) >= rxtl) { + s->usr1 |= USR1_RRDY; + } + + imx_serial_rx_fifo_ageing_timer_restart(s); - s->usr1 |= USR1_RRDY; s->usr2 |= USR2_RDR; s->uts1 &= ~UTS1_RXEMPTY; - s->readbuff = value; if (value & URXD_BRK) { s->usr2 |= USR2_BRCD; } @@ -345,6 +417,10 @@ 
static void imx_serial_realize(DeviceState *dev, Error **errp) { IMXSerialState *s = IMX_SERIAL(dev); + fifo32_create(&s->rx_fifo, FIFO_SIZE); + timer_init_ns(&s->ageing_timer, QEMU_CLOCK_VIRTUAL, + imx_serial_rx_fifo_ageing_timer_int, s); + DPRINTF("char dev for uart: %p\n", qemu_chr_fe_get_driver(&s->chr)); qemu_chr_fe_set_handlers(&s->chr, imx_can_receive, imx_receive, diff --git a/hw/char/ipoctal232.c b/hw/char/ipoctal232.c index 3311e0872c2..64be5226d4b 100644 --- a/hw/char/ipoctal232.c +++ b/hw/char/ipoctal232.c @@ -130,7 +130,7 @@ static const VMStateDescription vmstate_scc2698_channel = { .name = "scc2698_channel", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(rx_enabled, SCC2698Channel), VMSTATE_UINT8_ARRAY(mr, SCC2698Channel, 2), VMSTATE_UINT8(mr_idx, SCC2698Channel), @@ -146,7 +146,7 @@ static const VMStateDescription vmstate_scc2698_block = { .name = "scc2698_block", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(imr, SCC2698Block), VMSTATE_UINT8(isr, SCC2698Block), VMSTATE_END_OF_LIST() @@ -157,7 +157,7 @@ static const VMStateDescription vmstate_ipoctal = { .name = "ipoctal232", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_IPACK_DEVICE(parent_obj, IPOctalState), VMSTATE_STRUCT_ARRAY(ch, IPOctalState, N_CHANNELS, 1, vmstate_scc2698_channel, SCC2698Channel), diff --git a/hw/char/mchp_pfsoc_mmuart.c b/hw/char/mchp_pfsoc_mmuart.c index 22f3e78eb9e..e7908bbfb5d 100644 --- a/hw/char/mchp_pfsoc_mmuart.c +++ b/hw/char/mchp_pfsoc_mmuart.c @@ -114,7 +114,7 @@ static const VMStateDescription mchp_pfsoc_mmuart_vmstate = { .name = "mchp.pfsoc.uart", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, MchpPfSoCMMUartState, MCHP_PFSOC_MMUART_REG_COUNT), VMSTATE_END_OF_LIST() diff --git a/hw/char/nrf51_uart.c b/hw/char/nrf51_uart.c index dfe2276d711..c2cd6bb5e71 100644 --- a/hw/char/nrf51_uart.c +++ b/hw/char/nrf51_uart.c @@ -291,7 +291,7 @@ static int nrf51_uart_post_load(void *opaque, int version_id) static const VMStateDescription nrf51_uart_vmstate = { .name = "nrf51_soc.uart", .post_load = nrf51_uart_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, NRF51UARTState, 0x56C), VMSTATE_UINT8_ARRAY(rx_fifo, NRF51UARTState, UART_FIFO_LENGTH), VMSTATE_UINT32(rx_fifo_pos, NRF51UARTState), diff --git a/hw/char/parallel-isa.c b/hw/char/parallel-isa.c index ab0f879998d..a5ce6ee13a4 100644 --- a/hw/char/parallel-isa.c +++ b/hw/char/parallel-isa.c @@ -41,3 +41,17 @@ void parallel_hds_isa_init(ISABus *bus, int n) } } } + +void isa_parallel_set_iobase(ISADevice *parallel, hwaddr iobase) +{ + ISAParallelState *s = ISA_PARALLEL(parallel); + + parallel->ioport_id = iobase; + s->iobase = iobase; + portio_list_set_address(&s->portio_list, s->iobase); +} + +void isa_parallel_set_enabled(ISADevice *parallel, bool enabled) +{ + portio_list_set_enabled(&ISA_PARALLEL(parallel)->portio_list, enabled); +} diff --git a/hw/char/parallel.c b/hw/char/parallel.c index 147c900f0d6..c394635ada2 100644 --- a/hw/char/parallel.c +++ b/hw/char/parallel.c @@ -478,7 +478,7 @@ static const VMStateDescription vmstate_parallel_isa = { .name = "parallel_isa", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_UINT8(state.dataw, ISAParallelState), VMSTATE_UINT8(state.datar, ISAParallelState), VMSTATE_UINT8(state.status, ISAParallelState), @@ -532,7 +532,7 @@ static void parallel_isa_realizefn(DeviceState *dev, Error **errp) s->status = dummy; } - isa_register_portio_list(isadev, &s->portio_list, base, + isa_register_portio_list(isadev, &isa->portio_list, base, (s->hw_driver ? &isa_parallel_portio_hw_list[0] : &isa_parallel_portio_sw_list[0]), diff --git a/hw/char/pl011.c b/hw/char/pl011.c index 58edeb9ddb6..8753b84a842 100644 --- a/hw/char/pl011.c +++ b/hw/char/pl011.c @@ -49,10 +49,14 @@ DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr) } /* Flag Register, UARTFR */ +#define PL011_FLAG_RI 0x100 #define PL011_FLAG_TXFE 0x80 #define PL011_FLAG_RXFF 0x40 #define PL011_FLAG_TXFF 0x20 #define PL011_FLAG_RXFE 0x10 +#define PL011_FLAG_DCD 0x04 +#define PL011_FLAG_DSR 0x02 +#define PL011_FLAG_CTS 0x01 /* Data Register, UARTDR */ #define DR_BE (1 << 10) @@ -76,6 +80,13 @@ DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr) #define LCR_FEN (1 << 4) #define LCR_BRK (1 << 0) +/* Control Register, UARTCR */ +#define CR_OUT2 (1 << 13) +#define CR_OUT1 (1 << 12) +#define CR_RTS (1 << 11) +#define CR_DTR (1 << 10) +#define CR_LBE (1 << 7) + static const unsigned char pl011_id_arm[8] = { 0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }; static const unsigned char pl011_id_luminary[8] = @@ -251,6 +262,89 @@ static void pl011_trace_baudrate_change(const PL011State *s) s->ibrd, s->fbrd); } +static bool pl011_loopback_enabled(PL011State *s) +{ + return !!(s->cr & CR_LBE); +} + +static void pl011_loopback_mdmctrl(PL011State *s) +{ + uint32_t cr, fr, il; + + if (!pl011_loopback_enabled(s)) { + return; + } + + /* + * Loopback software-driven modem control outputs to modem status inputs: + * FR.RI <= CR.Out2 + * FR.DCD <= CR.Out1 + * FR.CTS <= CR.RTS + * FR.DSR <= CR.DTR + * + * The loopback happens immediately even if this call is triggered + * by setting only CR.LBE. + * + * CTS/RTS updates due to enabled hardware flow controls are not + * dealt with here. + */ + cr = s->cr; + fr = s->flags & ~(PL011_FLAG_RI | PL011_FLAG_DCD | + PL011_FLAG_DSR | PL011_FLAG_CTS); + fr |= (cr & CR_OUT2) ? PL011_FLAG_RI : 0; + fr |= (cr & CR_OUT1) ? PL011_FLAG_DCD : 0; + fr |= (cr & CR_RTS) ? PL011_FLAG_CTS : 0; + fr |= (cr & CR_DTR) ? PL011_FLAG_DSR : 0; + + /* Change interrupts based on updated FR */ + il = s->int_level & ~(INT_DSR | INT_DCD | INT_CTS | INT_RI); + il |= (fr & PL011_FLAG_DSR) ? INT_DSR : 0; + il |= (fr & PL011_FLAG_DCD) ? INT_DCD : 0; + il |= (fr & PL011_FLAG_CTS) ? INT_CTS : 0; + il |= (fr & PL011_FLAG_RI) ? INT_RI : 0; + + s->flags = fr; + s->int_level = il; + pl011_update(s); +} + +static void pl011_put_fifo(void *opaque, uint32_t value); + +static void pl011_loopback_tx(PL011State *s, uint32_t value) +{ + if (!pl011_loopback_enabled(s)) { + return; + } + + /* + * Caveat: + * + * In real hardware, TX loopback happens at the serial-bit level + * and then reassembled by the RX logics back into bytes and placed + * into the RX fifo. That is, loopback happens after TX fifo. + * + * Because the real hardware TX fifo is time-drained at the frame + * rate governed by the configured serial format, some loopback + * bytes in TX fifo may still be able to get into the RX fifo + * that could be full at times while being drained at software + * pace. 
+ * + * In such scenario, the RX draining pace is the major factor + * deciding which loopback bytes get into the RX fifo, unless + * hardware flow-control is enabled. + * + * For simplicity, the above described is not emulated. + */ + pl011_put_fifo(s, value); +} + +static void pl011_loopback_break(PL011State *s, int brk_enable) +{ + if (brk_enable) { + pl011_loopback_tx(s, DR_BE); + } +} + static void pl011_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { @@ -266,6 +360,7 @@ static void pl011_write(void *opaque, hwaddr offset, /* XXX this blocks entire thread. Rewrite to use * qemu_chr_fe_write and background I/O callbacks */ qemu_chr_fe_write_all(&s->chr, &ch, 1); + pl011_loopback_tx(s, ch); s->int_level |= INT_TX; pl011_update(s); break; @@ -295,13 +390,15 @@ static void pl011_write(void *opaque, hwaddr offset, int break_enable = value & LCR_BRK; qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_BREAK, &break_enable); + pl011_loopback_break(s, break_enable); } s->lcr = value; pl011_set_read_trigger(s); break; case 12: /* UARTCR */ - /* ??? Need to implement the enable and loopback bits. */ + /* ??? Need to implement the enable bit. */ s->cr = value; + pl011_loopback_mdmctrl(s); break; case 13: /* UARTIFS */ s->ifl = value; @@ -361,12 +458,21 @@ static void pl011_put_fifo(void *opaque, uint32_t value) static void pl011_receive(void *opaque, const uint8_t *buf, int size) { + /* + * In loopback mode, the RX input signal is internally disconnected + * from the entire receiving logics; thus, all inputs are ignored, + * and BREAK detection on RX input signal is also not performed. + */ + if (pl011_loopback_enabled(opaque)) { + return; + } + pl011_put_fifo(opaque, *buf); } static void pl011_event(void *opaque, QEMUChrEvent event) { - if (event == CHR_EVENT_BREAK) { + if (event == CHR_EVENT_BREAK && !pl011_loopback_enabled(opaque)) { pl011_put_fifo(opaque, DR_BE); } } @@ -398,7 +504,7 @@ static const VMStateDescription vmstate_pl011_clock = { .version_id = 1, .minimum_version_id = 1, .needed = pl011_clock_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clk, PL011State), VMSTATE_END_OF_LIST() } @@ -433,7 +539,7 @@ static const VMStateDescription vmstate_pl011 = { .version_id = 2, .minimum_version_id = 2, .post_load = pl011_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(readbuff, PL011State), VMSTATE_UINT32(flags, PL011State), VMSTATE_UINT32(lcr, PL011State), @@ -452,7 +558,7 @@ static const VMStateDescription vmstate_pl011 = { VMSTATE_INT32(read_trigger, PL011State), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_pl011_clock, NULL } diff --git a/hw/char/renesas_sci.c b/hw/char/renesas_sci.c index 1c634672905..5cb733545c4 100644 --- a/hw/char/renesas_sci.c +++ b/hw/char/renesas_sci.c @@ -302,7 +302,7 @@ static const VMStateDescription vmstate_rsci = { .name = "renesas-sci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(trtime, RSCIState), VMSTATE_INT64(rx_next, RSCIState), VMSTATE_UINT8(smr, RSCIState), diff --git a/hw/char/sclpconsole-lm.c b/hw/char/sclpconsole-lm.c index b9e9b2d4535..7719f438f68 100644 --- a/hw/char/sclpconsole-lm.c +++ b/hw/char/sclpconsole-lm.c @@ -292,7 +292,7 @@ static const VMStateDescription vmstate_sclplmconsole = { .name = "sclplmconsole", .version_id = 0, .minimum_version_id = 0, - .fields = 
(VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(event.event_pending, SCLPConsoleLM), VMSTATE_UINT32(write_errors, SCLPConsoleLM), VMSTATE_UINT32(length, SCLPConsoleLM), diff --git a/hw/char/sclpconsole.c b/hw/char/sclpconsole.c index c36b5722224..5d630b04bb9 100644 --- a/hw/char/sclpconsole.c +++ b/hw/char/sclpconsole.c @@ -206,7 +206,7 @@ static const VMStateDescription vmstate_sclpconsole = { .name = "sclpconsole", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(event.event_pending, SCLPConsole), VMSTATE_UINT8_ARRAY(iov, SCLPConsole, SIZE_BUFFER_VT220), VMSTATE_UINT32(iov_sclp, SCLPConsole), diff --git a/hw/char/serial-isa.c b/hw/char/serial-isa.c index 141a6cb1684..329b352b9a8 100644 --- a/hw/char/serial-isa.c +++ b/hw/char/serial-isa.c @@ -106,7 +106,7 @@ static const VMStateDescription vmstate_isa_serial = { .name = "serial", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, ISASerialState, 0, vmstate_serial, SerialState), VMSTATE_END_OF_LIST() } @@ -184,3 +184,17 @@ void serial_hds_isa_init(ISABus *bus, int from, int to) } } } + +void isa_serial_set_iobase(ISADevice *serial, hwaddr iobase) +{ + ISASerialState *s = ISA_SERIAL(serial); + + serial->ioport_id = iobase; + s->iobase = iobase; + memory_region_set_address(&s->state.io, s->iobase); +} + +void isa_serial_set_enabled(ISADevice *serial, bool enabled) +{ + memory_region_set_enabled(&ISA_SERIAL(serial)->state.io, enabled); +} diff --git a/hw/char/serial-pci-multi.c b/hw/char/serial-pci-multi.c index 5d65c534cb5..28b275709af 100644 --- a/hw/char/serial-pci-multi.c +++ b/hw/char/serial-pci-multi.c @@ -123,7 +123,7 @@ static const VMStateDescription vmstate_pci_multi_serial = { .name = "pci-serial-multi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCIMultiSerialState), VMSTATE_STRUCT_ARRAY(state, PCIMultiSerialState, PCI_SERIAL_MAX_PORTS, 0, vmstate_serial, SerialState), diff --git a/hw/char/serial-pci.c b/hw/char/serial-pci.c index 087da3059a4..f8a1a94d0c2 100644 --- a/hw/char/serial-pci.c +++ b/hw/char/serial-pci.c @@ -74,7 +74,7 @@ static const VMStateDescription vmstate_pci_serial = { .name = "pci-serial", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCISerialState), VMSTATE_STRUCT(state, PCISerialState, 0, vmstate_serial, SerialState), VMSTATE_END_OF_LIST() diff --git a/hw/char/serial.c b/hw/char/serial.c index a32eb25f581..d8b2db50829 100644 --- a/hw/char/serial.c +++ b/hw/char/serial.c @@ -707,7 +707,7 @@ static const VMStateDescription vmstate_serial_thr_ipending = { .version_id = 1, .minimum_version_id = 1, .needed = serial_thr_ipending_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(thr_ipending, SerialState), VMSTATE_END_OF_LIST() } @@ -724,7 +724,7 @@ static const VMStateDescription vmstate_serial_tsr = { .version_id = 1, .minimum_version_id = 1, .needed = serial_tsr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tsr_retry, SerialState), VMSTATE_UINT8(thr, SerialState), VMSTATE_UINT8(tsr, SerialState), @@ -744,7 +744,7 @@ static const VMStateDescription vmstate_serial_recv_fifo = { .version_id = 1, .minimum_version_id = 1, .needed = serial_recv_fifo_needed, - .fields = (VMStateField[]) 
{ + .fields = (const VMStateField[]) { VMSTATE_STRUCT(recv_fifo, SerialState, 1, vmstate_fifo8, Fifo8), VMSTATE_END_OF_LIST() } @@ -761,7 +761,7 @@ static const VMStateDescription vmstate_serial_xmit_fifo = { .version_id = 1, .minimum_version_id = 1, .needed = serial_xmit_fifo_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(xmit_fifo, SerialState, 1, vmstate_fifo8, Fifo8), VMSTATE_END_OF_LIST() } @@ -778,7 +778,7 @@ static const VMStateDescription vmstate_serial_fifo_timeout_timer = { .version_id = 1, .minimum_version_id = 1, .needed = serial_fifo_timeout_timer_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(fifo_timeout_timer, SerialState), VMSTATE_END_OF_LIST() } @@ -795,7 +795,7 @@ static const VMStateDescription vmstate_serial_timeout_ipending = { .version_id = 1, .minimum_version_id = 1, .needed = serial_timeout_ipending_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(timeout_ipending, SerialState), VMSTATE_END_OF_LIST() } @@ -812,7 +812,7 @@ static const VMStateDescription vmstate_serial_poll = { .version_id = 1, .needed = serial_poll_needed, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(poll_msl, SerialState), VMSTATE_TIMER_PTR(modem_status_poll, SerialState), VMSTATE_END_OF_LIST() @@ -826,7 +826,7 @@ const VMStateDescription vmstate_serial = { .pre_save = serial_pre_save, .pre_load = serial_pre_load, .post_load = serial_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16_V(divider, SerialState, 2), VMSTATE_UINT8(rbr, SerialState), VMSTATE_UINT8(ier, SerialState), @@ -839,7 +839,7 @@ const VMStateDescription vmstate_serial = { VMSTATE_UINT8_V(fcr_vmstate, SerialState, 3), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_serial_thr_ipending, &vmstate_serial_tsr, &vmstate_serial_recv_fifo, @@ -1056,7 +1056,7 @@ static const VMStateDescription vmstate_serial_mm = { .name = "serial", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(serial, SerialMM, 0, vmstate_serial, SerialState), VMSTATE_END_OF_LIST() } diff --git a/hw/char/sifive_uart.c b/hw/char/sifive_uart.c index f2684e57bcc..e8716c42523 100644 --- a/hw/char/sifive_uart.c +++ b/hw/char/sifive_uart.c @@ -224,7 +224,7 @@ static const VMStateDescription vmstate_sifive_uart = { .name = TYPE_SIFIVE_UART, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(rx_fifo, SiFiveUARTState, SIFIVE_UART_RX_FIFO_SIZE), VMSTATE_UINT8(rx_fifo_len, SiFiveUARTState), diff --git a/hw/char/spapr_vty.c b/hw/char/spapr_vty.c index 91eae1a5988..3e23d9cbab3 100644 --- a/hw/char/spapr_vty.c +++ b/hw/char/spapr_vty.c @@ -173,7 +173,7 @@ static const VMStateDescription vmstate_spapr_vty = { .name = "spapr_vty", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SPAPR_VIO(sdev, SpaprVioVty), VMSTATE_UINT32(in, SpaprVioVty), diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c index dd619f0731e..2094d213cdf 100644 --- a/hw/char/virtio-serial-bus.c +++ b/hw/char/virtio-serial-bus.c @@ -985,8 +985,7 @@ static void virtser_port_device_realize(DeviceState *dev, Error **errp) return; } - port->bh = 
qemu_bh_new_guarded(flush_queued_data_bh, port, - &dev->mem_reentrancy_guard); + port->bh = virtio_bh_new_guarded(dev, flush_queued_data_bh, port); port->elem = NULL; } @@ -1148,7 +1147,7 @@ static const VMStateDescription vmstate_virtio_console = { .name = "virtio-console", .minimum_version_id = 3, .version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c index 5cbee2f184d..683c92aca1c 100644 --- a/hw/char/xen_console.c +++ b/hw/char/xen_console.c @@ -206,6 +206,7 @@ static bool con_event(void *_xendev) static bool xen_console_connect(XenDevice *xendev, Error **errp) { + ERRP_GUARD(); XenConsole *con = XEN_CONSOLE_DEVICE(xendev); unsigned int port, limit; diff --git a/hw/core/bus.c b/hw/core/bus.c index c7831b5293b..b9d89495cdf 100644 --- a/hw/core/bus.c +++ b/hw/core/bus.c @@ -232,57 +232,6 @@ static char *default_bus_get_fw_dev_path(DeviceState *dev) return g_strdup(object_get_typename(OBJECT(dev))); } -/** - * bus_phases_reset: - * Transition reset method for buses to allow moving - * smoothly from legacy reset method to multi-phases - */ -static void bus_phases_reset(BusState *bus) -{ - ResettableClass *rc = RESETTABLE_GET_CLASS(bus); - - if (rc->phases.enter) { - rc->phases.enter(OBJECT(bus), RESET_TYPE_COLD); - } - if (rc->phases.hold) { - rc->phases.hold(OBJECT(bus)); - } - if (rc->phases.exit) { - rc->phases.exit(OBJECT(bus)); - } -} - -static void bus_transitional_reset(Object *obj) -{ - BusClass *bc = BUS_GET_CLASS(obj); - - /* - * This will call either @bus_phases_reset (for multi-phases transitioned - * buses) or a bus's specific method for not-yet transitioned buses. - * In both case, it does not reset children. - */ - if (bc->reset) { - bc->reset(BUS(obj)); - } -} - -/** - * bus_get_transitional_reset: - * check if the bus's class is ready for multi-phase - */ -static ResettableTrFunction bus_get_transitional_reset(Object *obj) -{ - BusClass *dc = BUS_GET_CLASS(obj); - if (dc->reset != bus_phases_reset) { - /* - * dc->reset has been overridden by a subclass, - * the bus is not ready for multi phase yet. - */ - return bus_transitional_reset; - } - return NULL; -} - static void bus_class_init(ObjectClass *class, void *data) { BusClass *bc = BUS_CLASS(class); @@ -293,22 +242,6 @@ static void bus_class_init(ObjectClass *class, void *data) rc->get_state = bus_get_reset_state; rc->child_foreach = bus_reset_child_foreach; - - /* - * @bus_phases_reset is put as the default reset method below, allowing - * to do the multi-phase transition from base classes to leaf classes. It - * allows a legacy-reset Bus class to extend a multi-phases-reset - * Bus class for the following reason: - * + If a base class B has been moved to multi-phase, then it does not - * override this default reset method and may have defined phase methods. - * + A child class C (extending class B) which uses - * bus_class_set_parent_reset() (or similar means) to override the - * reset method will still work as expected. @bus_phases_reset function - * will be registered as the parent reset method and effectively call - * parent reset phases. 
- */ - bc->reset = bus_phases_reset; - rc->get_transitional_function = bus_get_transitional_reset; } static void qbus_finalize(Object *obj) diff --git a/hw/core/clock-vmstate.c b/hw/core/clock-vmstate.c index 7eccb6d4eaa..e831fc596f8 100644 --- a/hw/core/clock-vmstate.c +++ b/hw/core/clock-vmstate.c @@ -41,7 +41,7 @@ const VMStateDescription vmstate_muldiv = { .version_id = 1, .minimum_version_id = 1, .needed = muldiv_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(multiplier, Clock), VMSTATE_UINT32(divider, Clock), VMSTATE_END_OF_LIST() @@ -53,11 +53,11 @@ const VMStateDescription vmstate_clock = { .version_id = 0, .minimum_version_id = 0, .pre_load = clock_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(period, Clock), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_muldiv, NULL }, diff --git a/hw/core/clock.c b/hw/core/clock.c index d82e44cd1aa..a19c7db7df9 100644 --- a/hw/core/clock.c +++ b/hw/core/clock.c @@ -143,14 +143,20 @@ char *clock_display_freq(Clock *clk) return freq_to_str(clock_get_hz(clk)); } -void clock_set_mul_div(Clock *clk, uint32_t multiplier, uint32_t divider) +bool clock_set_mul_div(Clock *clk, uint32_t multiplier, uint32_t divider) { assert(divider != 0); + if (clk->multiplier == multiplier && clk->divider == divider) { + return false; + } + trace_clock_set_mul_div(CLOCK_PATH(clk), clk->multiplier, multiplier, clk->divider, divider); clk->multiplier = multiplier; clk->divider = divider; + + return true; } static void clock_initfn(Object *obj) diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c index 82dae51a550..4bd9c70a83f 100644 --- a/hw/core/cpu-common.c +++ b/hw/core/cpu-common.c @@ -22,13 +22,10 @@ #include "qapi/error.h" #include "hw/core/cpu.h" #include "sysemu/hw_accel.h" -#include "qemu/notify.h" #include "qemu/log.h" #include "qemu/main-loop.h" #include "exec/log.h" -#include "exec/cpu-common.h" -#include "qemu/error-report.h" -#include "qemu/qemu-print.h" +#include "exec/gdbstub.h" #include "sysemu/tcg.h" #include "hw/boards.h" #include "hw/qdev-properties.h" @@ -70,14 +67,14 @@ CPUState *cpu_create(const char *typename) * BQL here if we need to. 
cpu_interrupt assumes it is held.*/ void cpu_reset_interrupt(CPUState *cpu, int mask) { - bool need_lock = !qemu_mutex_iothread_locked(); + bool need_lock = !bql_locked(); if (need_lock) { - qemu_mutex_lock_iothread(); + bql_lock(); } cpu->interrupt_request &= ~mask; if (need_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -154,10 +151,12 @@ ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model) assert(cc->class_by_name); assert(cpu_model); oc = cc->class_by_name(cpu_model); - if (oc == NULL || object_class_is_abstract(oc)) { - return NULL; + if (object_class_dynamic_cast(oc, typename) && + !object_class_is_abstract(oc)) { + return oc; } - return oc; + + return NULL; } static void cpu_common_parse_features(const char *typename, char *features, @@ -191,6 +190,13 @@ static void cpu_common_parse_features(const char *typename, char *features, } } +#ifdef CONFIG_PLUGIN +static void qemu_plugin_vcpu_init__async(CPUState *cpu, run_on_cpu_data unused) +{ + qemu_plugin_vcpu_init_hook(cpu); +} +#endif + static void cpu_common_realizefn(DeviceState *dev, Error **errp) { CPUState *cpu = CPU(dev); @@ -214,10 +220,13 @@ static void cpu_common_realizefn(DeviceState *dev, Error **errp) cpu_resume(cpu); } - /* Plugin initialization must wait until the cpu is fully realized. */ + /* Plugin initialization must wait until the cpu start executing code */ +#ifdef CONFIG_PLUGIN if (tcg_enabled()) { - qemu_plugin_vcpu_init_hook(cpu); + cpu->plugin_state = qemu_plugin_create_vcpu_state(); + async_run_on_cpu(cpu, qemu_plugin_vcpu_init__async, RUN_ON_CPU_NULL); } +#endif /* NOTE: latest generic point where the cpu is fully realized */ } @@ -238,11 +247,10 @@ static void cpu_common_unrealizefn(DeviceState *dev) static void cpu_common_initfn(Object *obj) { CPUState *cpu = CPU(obj); - CPUClass *cc = CPU_GET_CLASS(obj); + gdb_init_cpu(cpu); cpu->cpu_index = UNASSIGNED_CPU_INDEX; cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX; - cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs; /* user-mode doesn't have configurable SMP topology */ /* the default value is changed by qemu_init_vcpu() for system-mode */ cpu->nr_cores = 1; @@ -262,6 +270,7 @@ static void cpu_common_finalize(Object *obj) { CPUState *cpu = CPU(obj); + g_array_free(cpu->gdb_regs, TRUE); qemu_lockcnt_destroy(&cpu->in_ioctl_lock); qemu_mutex_destroy(&cpu->work_mutex); } @@ -271,7 +280,7 @@ static int64_t cpu_common_get_arch_id(CPUState *cpu) return cpu->cpu_index; } -static void cpu_class_init(ObjectClass *klass, void *data) +static void cpu_common_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -302,7 +311,7 @@ static const TypeInfo cpu_type_info = { .instance_finalize = cpu_common_finalize, .abstract = true, .class_size = sizeof(CPUClass), - .class_init = cpu_class_init, + .class_init = cpu_common_class_init, }; static void cpu_register_types(void) diff --git a/hw/core/loader-fit.c b/hw/core/loader-fit.c index b7c7b3ba94d..9f20007dbb5 100644 --- a/hw/core/loader-fit.c +++ b/hw/core/loader-fit.c @@ -120,6 +120,7 @@ static int fit_load_kernel(const struct fit_loader *ldr, const void *itb, int cfg, void *opaque, hwaddr *pend, Error **errp) { + ERRP_GUARD(); const char *name; const void *data; const void *load_data; @@ -178,6 +179,7 @@ static int fit_load_fdt(const struct fit_loader *ldr, const void *itb, int cfg, void *opaque, const void *match_data, hwaddr kernel_end, Error **errp) { + ERRP_GUARD(); Error *err = NULL; const char 
*name; const void *data; diff --git a/hw/core/loader.c b/hw/core/loader.c index e7a9b3775bb..b8e52f3fb0f 100644 --- a/hw/core/loader.c +++ b/hw/core/loader.c @@ -62,7 +62,7 @@ #include "hw/boards.h" #include "qemu/cutils.h" #include "sysemu/runstate.h" -#include "accel/tcg/debuginfo.h" +#include "tcg/debuginfo.h" #include diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c index 3860a50c3b7..4b72009cd3c 100644 --- a/hw/core/machine-qmp-cmds.c +++ b/hw/core/machine-qmp-cmds.c @@ -19,7 +19,6 @@ #include "qapi/qmp/qobject.h" #include "qapi/qobject-input-visitor.h" #include "qapi/type-helpers.h" -#include "qemu/main-loop.h" #include "qemu/uuid.h" #include "qom/qom-qobject.h" #include "sysemu/hostmem.h" diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c index 25019c91ee3..27864c95076 100644 --- a/hw/core/machine-smp.c +++ b/hw/core/machine-smp.c @@ -91,6 +91,7 @@ void machine_parse_smp_config(MachineState *ms, unsigned cores = config->has_cores ? config->cores : 0; unsigned threads = config->has_threads ? config->threads : 0; unsigned maxcpus = config->has_maxcpus ? config->maxcpus : 0; + unsigned total_cpus; /* * Specified CPU topology parameters must be greater than zero, @@ -105,36 +106,68 @@ void machine_parse_smp_config(MachineState *ms, (config->has_cores && config->cores == 0) || (config->has_threads && config->threads == 0) || (config->has_maxcpus && config->maxcpus == 0)) { - warn_report("Deprecated CPU topology (considered invalid): " - "CPU topology parameters must be greater than zero"); + error_setg(errp, "Invalid CPU topology: " + "CPU topology parameters must be greater than zero"); + return; } /* * If not supported by the machine, a topology parameter must be - * omitted or specified equal to 1. + * omitted. */ - if (!mc->smp_props.dies_supported && dies > 1) { - error_setg(errp, "dies not supported by this machine's CPU topology"); - return; - } - if (!mc->smp_props.clusters_supported && clusters > 1) { - error_setg(errp, "clusters not supported by this machine's CPU topology"); - return; + if (!mc->smp_props.clusters_supported && config->has_clusters) { + if (config->clusters > 1) { + error_setg(errp, "clusters not supported by this " + "machine's CPU topology"); + return; + } else { + /* Here clusters only equals 1 since we've checked zero case. */ + warn_report("Deprecated CPU topology (considered invalid): " + "Unsupported clusters parameter mustn't be " + "specified as 1"); + } } + clusters = clusters > 0 ? clusters : 1; + if (!mc->smp_props.dies_supported && config->has_dies) { + if (config->dies > 1) { + error_setg(errp, "dies not supported by this " + "machine's CPU topology"); + return; + } else { + /* Here dies only equals 1 since we've checked zero case. */ + warn_report("Deprecated CPU topology (considered invalid): " + "Unsupported dies parameter mustn't be " + "specified as 1"); + } + } dies = dies > 0 ? dies : 1; - clusters = clusters > 0 ? clusters : 1; - if (!mc->smp_props.books_supported && books > 1) { - error_setg(errp, "books not supported by this machine's CPU topology"); - return; + if (!mc->smp_props.books_supported && config->has_books) { + if (config->books > 1) { + error_setg(errp, "books not supported by this " + "machine's CPU topology"); + return; + } else { + /* Here books only equals 1 since we've checked zero case. */ + warn_report("Deprecated CPU topology (considered invalid): " + "Unsupported books parameter mustn't be " + "specified as 1"); + } } books = books > 0 ? 
books : 1; - if (!mc->smp_props.drawers_supported && drawers > 1) { - error_setg(errp, - "drawers not supported by this machine's CPU topology"); - return; + if (!mc->smp_props.drawers_supported && config->has_drawers) { + if (config->drawers > 1) { + error_setg(errp, "drawers not supported by this " + "machine's CPU topology"); + return; + } else { + /* Here drawers only equals 1 since we've checked zero case. */ + warn_report("Deprecated CPU topology (considered invalid): " + "Unsupported drawers parameter mustn't be " + "specified as 1"); + } } drawers = drawers > 0 ? drawers : 1; @@ -179,8 +212,8 @@ void machine_parse_smp_config(MachineState *ms, } } - maxcpus = maxcpus > 0 ? maxcpus : drawers * books * sockets * dies * - clusters * cores * threads; + total_cpus = drawers * books * sockets * dies * clusters * cores * threads; + maxcpus = maxcpus > 0 ? maxcpus : total_cpus; cpus = cpus > 0 ? cpus : maxcpus; ms->smp.cpus = cpus; @@ -196,8 +229,7 @@ void machine_parse_smp_config(MachineState *ms, mc->smp_props.has_clusters = config->has_clusters; /* sanity-check of the computed topology */ - if (drawers * books * sockets * dies * clusters * cores * threads != - maxcpus) { + if (total_cpus != maxcpus) { g_autofree char *topo_msg = cpu_hierarchy_to_string(ms); error_setg(errp, "Invalid CPU topology: " "product of the hierarchy must match maxcpus: " diff --git a/hw/core/machine.c b/hw/core/machine.c index 0c173981412..37ede0e7d4f 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -30,8 +30,16 @@ #include "exec/confidential-guest-support.h" #include "hw/virtio/virtio-pci.h" #include "hw/virtio/virtio-net.h" +#include "hw/virtio/virtio-iommu.h" #include "audio/audio.h" +GlobalProperty hw_compat_8_2[] = { + { "migration", "zero-page-detection", "legacy"}, + { TYPE_VIRTIO_IOMMU_PCI, "granule", "4k" }, + { TYPE_VIRTIO_IOMMU_PCI, "aw-bits", "64" }, +}; +const size_t hw_compat_8_2_len = G_N_ELEMENTS(hw_compat_8_2); + GlobalProperty hw_compat_8_1[] = { { TYPE_PCI_BRIDGE, "x-pci-express-writeable-slt-bug", "true" }, { "ramfb", "x-migrate", "off" }, @@ -97,6 +105,7 @@ GlobalProperty hw_compat_5_2[] = { { "PIIX4_PM", "smm-compat", "on"}, { "virtio-blk-device", "report-discard-granularity", "off" }, { "virtio-net-pci-base", "vectors", "3"}, + { "nvme", "msix-exclusive-bar", "on"}, }; const size_t hw_compat_5_2_len = G_N_ELEMENTS(hw_compat_5_2); @@ -713,7 +722,7 @@ HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine) mc->possible_cpu_arch_ids(machine); for (i = 0; i < machine->possible_cpus->len; i++) { - Object *cpu; + CPUState *cpu; HotpluggableCPU *cpu_item = g_new0(typeof(*cpu_item), 1); cpu_item->type = g_strdup(machine->possible_cpus->cpus[i].type); @@ -723,7 +732,7 @@ HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine) cpu = machine->possible_cpus->cpus[i].cpu; if (cpu) { - cpu_item->qom_path = object_get_canonical_path(cpu); + cpu_item->qom_path = object_get_canonical_path(OBJECT(cpu)); } QAPI_LIST_PREPEND(head, cpu_item); } @@ -1309,7 +1318,7 @@ static void validate_cpu_cluster_to_numa_boundary(MachineState *ms) const CPUArchId *cpus = possible_cpus->cpus; int i, j; - if (state->num_nodes <= 1 || possible_cpus->len <= 1) { + if (qtest_enabled() || state->num_nodes <= 1 || possible_cpus->len <= 1) { return; } @@ -1387,13 +1396,74 @@ static bool create_default_memdev(MachineState *ms, const char *path, Error **er return r; } +const char *machine_class_default_cpu_type(MachineClass *mc) +{ + if (mc->valid_cpu_types && !mc->valid_cpu_types[1]) { 
+ /* Only a single CPU type allowed: use it as default. */ + return mc->valid_cpu_types[0]; + } + return mc->default_cpu_type; +} + +static bool is_cpu_type_supported(const MachineState *machine, Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + ObjectClass *oc = object_class_by_name(machine->cpu_type); + CPUClass *cc; + int i; + + /* + * Check if the user specified CPU type is supported when the valid + * CPU types have been determined. Note that the user specified CPU + * type is provided through '-cpu' option. + */ + if (mc->valid_cpu_types) { + assert(mc->valid_cpu_types[0] != NULL); + for (i = 0; mc->valid_cpu_types[i]; i++) { + if (object_class_dynamic_cast(oc, mc->valid_cpu_types[i])) { + break; + } + } + + /* The user specified CPU type isn't valid */ + if (!mc->valid_cpu_types[i]) { + g_autofree char *requested = cpu_model_from_type(machine->cpu_type); + error_setg(errp, "Invalid CPU model: %s", requested); + if (!mc->valid_cpu_types[1]) { + g_autofree char *model = cpu_model_from_type( + mc->valid_cpu_types[0]); + error_append_hint(errp, "The only valid type is: %s\n", model); + } else { + error_append_hint(errp, "The valid models are: "); + for (i = 0; mc->valid_cpu_types[i]; i++) { + g_autofree char *model = cpu_model_from_type( + mc->valid_cpu_types[i]); + error_append_hint(errp, "%s%s", + model, + mc->valid_cpu_types[i + 1] ? ", " : ""); + } + error_append_hint(errp, "\n"); + } + + return false; + } + } + + /* Check if CPU type is deprecated and warn if so */ + cc = CPU_CLASS(oc); + assert(cc != NULL); + if (cc->deprecation_note) { + warn_report("CPU model %s is deprecated -- %s", + machine->cpu_type, cc->deprecation_note); + } + + return true; +} void machine_run_board_init(MachineState *machine, const char *mem_path, Error **errp) { ERRP_GUARD(); MachineClass *machine_class = MACHINE_GET_CLASS(machine); - ObjectClass *oc = object_class_by_name(machine->cpu_type); - CPUClass *cc; /* This checkpoint is required by replay to separate prior clock reading from the other reads, because timer polling functions query @@ -1448,41 +1518,9 @@ void machine_run_board_init(MachineState *machine, const char *mem_path, Error * machine->ram = machine_consume_memdev(machine, machine->memdev); } - /* If the machine supports the valid_cpu_types check and the user - * specified a CPU with -cpu check here that the user CPU is supported. - */ - if (machine_class->valid_cpu_types && machine->cpu_type) { - int i; - - for (i = 0; machine_class->valid_cpu_types[i]; i++) { - if (object_class_dynamic_cast(oc, - machine_class->valid_cpu_types[i])) { - /* The user specified CPU is in the valid field, we are - * good to go. 
- */ - break; - } - } - - if (!machine_class->valid_cpu_types[i]) { - /* The user specified CPU is not valid */ - error_report("Invalid CPU type: %s", machine->cpu_type); - error_printf("The valid types are: %s", - machine_class->valid_cpu_types[0]); - for (i = 1; machine_class->valid_cpu_types[i]; i++) { - error_printf(", %s", machine_class->valid_cpu_types[i]); - } - error_printf("\n"); - - exit(1); - } - } - - /* Check if CPU type is deprecated and warn if so */ - cc = CPU_CLASS(oc); - if (cc && cc->deprecation_note) { - warn_report("CPU model %s is deprecated -- %s", machine->cpu_type, - cc->deprecation_note); + /* Check if the CPU type is supported */ + if (machine->cpu_type && !is_cpu_type_supported(machine, errp)) { + return; } if (machine->cgs) { @@ -1545,14 +1583,13 @@ void qdev_machine_creation_done(void) /* TODO: once all bus devices are qdevified, this should be done * when bus is created by qdev.c */ /* - * TODO: If we had a main 'reset container' that the whole system - * lived in, we could reset that using the multi-phase reset - * APIs. For the moment, we just reset the sysbus, which will cause + * This is where we arrange for the sysbus to be reset when the + * whole simulation is reset. In turn, resetting the sysbus will cause * all devices hanging off it (and all their child buses, recursively) * to be reset. Note that this will *not* reset any Device objects * which are not attached to some part of the qbus tree! */ - qemu_register_reset(resettable_cold_reset_fn, sysbus_get_default()); + qemu_register_resettable(OBJECT(sysbus_get_default())); notifier_list_notify(&machine_init_done_notifiers, NULL); diff --git a/hw/core/meson.build b/hw/core/meson.build index 67dad04de55..e26f2e088c3 100644 --- a/hw/core/meson.build +++ b/hw/core/meson.build @@ -4,6 +4,7 @@ hwcore_ss.add(files( 'qdev-properties.c', 'qdev.c', 'reset.c', + 'resetcontainer.c', 'resettable.c', 'vmstate-if.c', # irq.c needed for qdev GPIO handling: diff --git a/hw/core/numa.c b/hw/core/numa.c index f08956ddb0f..f8ce332cfe9 100644 --- a/hw/core/numa.c +++ b/hw/core/numa.c @@ -28,7 +28,6 @@ #include "sysemu/numa.h" #include "exec/cpu-common.h" #include "exec/ramlist.h" -#include "qemu/bitmap.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "qapi/opts-visitor.h" @@ -36,7 +35,6 @@ #include "sysemu/qtest.h" #include "hw/core/cpu.h" #include "hw/mem/pc-dimm.h" -#include "migration/vmstate.h" #include "hw/boards.h" #include "hw/mem/memory-device.h" #include "qemu/option.h" @@ -229,7 +227,8 @@ void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node, node->target, numa_state->num_nodes); return; } - if (!numa_info[node->initiator].has_cpu) { + if (!numa_info[node->initiator].has_cpu && + !numa_info[node->initiator].has_gi) { error_setg(errp, "Invalid initiator=%d, it isn't an " "initiator proximity domain", node->initiator); return; diff --git a/hw/core/or-irq.c b/hw/core/or-irq.c index 1df4bc05a7c..13907df0266 100644 --- a/hw/core/or-irq.c +++ b/hw/core/or-irq.c @@ -94,7 +94,7 @@ static const VMStateDescription vmstate_or_irq_extras = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_extras_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT16_UNSAFE(levels, OrIRQState, num_lines, 0, vmstate_info_bool, bool), VMSTATE_END_OF_LIST(), @@ -105,11 +105,11 @@ static const VMStateDescription vmstate_or_irq = { .name = TYPE_OR_IRQ, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const 
VMStateField[]) { VMSTATE_BOOL_SUB_ARRAY(levels, OrIRQState, 0, OLD_MAX_OR_LINES), VMSTATE_END_OF_LIST(), }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_or_irq_extras, NULL }, diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c index e03165febf1..b1517592c6b 100644 --- a/hw/core/ptimer.c +++ b/hw/core/ptimer.c @@ -441,7 +441,7 @@ const VMStateDescription vmstate_ptimer = { .name = "ptimer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(enabled, ptimer_state), VMSTATE_UINT64(limit, ptimer_state), VMSTATE_UINT64(delta, ptimer_state), diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index 1473ab3d5e9..d79d6f4b534 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -18,6 +18,7 @@ #include "qapi/qapi-types-block.h" #include "qapi/qapi-types-machine.h" #include "qapi/qapi-types-migration.h" +#include "qapi/qapi-visit-virtio.h" #include "qapi/qmp/qerror.h" #include "qemu/ctype.h" #include "qemu/cutils.h" @@ -120,9 +121,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, "node"); } - aio_context_acquire(ctx); blk_replace_bs(blk, bs, errp); - aio_context_release(ctx); return; } @@ -148,10 +147,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, 0, BLK_PERM_ALL); blk_created = true; - aio_context_acquire(ctx); ret = blk_insert_bs(blk, bs, errp); - aio_context_release(ctx); - if (ret < 0) { goto fail; } @@ -207,12 +203,8 @@ static void release_drive(Object *obj, const char *name, void *opaque) BlockBackend **ptr = object_field_prop_ptr(obj, prop); if (*ptr) { - AioContext *ctx = blk_get_aio_context(*ptr); - - aio_context_acquire(ctx); blockdev_auto_del(*ptr); blk_detach_dev(*ptr, dev); - aio_context_release(ctx); } } @@ -250,6 +242,7 @@ static void get_chr(Object *obj, Visitor *v, const char *name, void *opaque, static void set_chr(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { + ERRP_GUARD(); Property *prop = opaque; CharBackend *be = object_field_prop_ptr(obj, prop); Chardev *s; @@ -687,6 +680,30 @@ const PropertyInfo qdev_prop_mig_mode = { .set_default_value = qdev_propinfo_set_default_value_enum, }; +/* --- GranuleMode --- */ + +QEMU_BUILD_BUG_ON(sizeof(GranuleMode) != sizeof(int)); + +const PropertyInfo qdev_prop_granule_mode = { + .name = "GranuleMode", + .description = "granule_mode values, " + "4k, 8k, 16k, 64k, host", + .enum_table = &GranuleMode_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +const PropertyInfo qdev_prop_zero_page_detection = { + .name = "ZeroPageDetection", + .description = "zero_page_detection values, " + "none,legacy,multifd", + .enum_table = &ZeroPageDetection_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + /* --- Reserved Region --- */ /* @@ -949,7 +966,7 @@ const PropertyInfo qdev_prop_off_auto_pcibar = { .set_default_value = qdev_propinfo_set_default_value_enum, }; -/* --- PCIELinkSpeed 2_5/5/8/16 -- */ +/* --- PCIELinkSpeed 2_5/5/8/16/32/64 -- */ static void get_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) @@ -971,6 +988,12 @@ static void get_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, case QEMU_PCI_EXP_LNK_16GT: speed = 
PCIE_LINK_SPEED_16; break; + case QEMU_PCI_EXP_LNK_32GT: + speed = PCIE_LINK_SPEED_32; + break; + case QEMU_PCI_EXP_LNK_64GT: + speed = PCIE_LINK_SPEED_64; + break; default: /* Unreachable */ abort(); @@ -1004,6 +1027,12 @@ static void set_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, case PCIE_LINK_SPEED_16: *p = QEMU_PCI_EXP_LNK_16GT; break; + case PCIE_LINK_SPEED_32: + *p = QEMU_PCI_EXP_LNK_32GT; + break; + case PCIE_LINK_SPEED_64: + *p = QEMU_PCI_EXP_LNK_64GT; + break; default: /* Unreachable */ abort(); @@ -1012,7 +1041,7 @@ static void set_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, const PropertyInfo qdev_prop_pcie_link_speed = { .name = "PCIELinkSpeed", - .description = "2_5/5/8/16", + .description = "2_5/5/8/16/32/64", .enum_table = &PCIELinkSpeed_lookup, .get = get_prop_pcielinkspeed, .set = set_prop_pcielinkspeed, @@ -1169,3 +1198,48 @@ const PropertyInfo qdev_prop_cpus390entitlement = { .set = qdev_propinfo_set_enum, .set_default_value = qdev_propinfo_set_default_value_enum, }; + +/* --- IOThreadVirtQueueMappingList --- */ + +static void get_iothread_vq_mapping_list(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + IOThreadVirtQueueMappingList **prop_ptr = + object_field_prop_ptr(obj, opaque); + + visit_type_IOThreadVirtQueueMappingList(v, name, prop_ptr, errp); +} + +static void set_iothread_vq_mapping_list(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + IOThreadVirtQueueMappingList **prop_ptr = + object_field_prop_ptr(obj, opaque); + IOThreadVirtQueueMappingList *list; + + if (!visit_type_IOThreadVirtQueueMappingList(v, name, &list, errp)) { + return; + } + + qapi_free_IOThreadVirtQueueMappingList(*prop_ptr); + *prop_ptr = list; +} + +static void release_iothread_vq_mapping_list(Object *obj, + const char *name, void *opaque) +{ + IOThreadVirtQueueMappingList **prop_ptr = + object_field_prop_ptr(obj, opaque); + + qapi_free_IOThreadVirtQueueMappingList(*prop_ptr); + *prop_ptr = NULL; +} + +const PropertyInfo qdev_prop_iothread_vq_mapping_list = { + .name = "IOThreadVirtQueueMappingList", + .description = "IOThread virtqueue mapping list [{\"iothread\":\"\", " + "\"vqs\":[1,2,3,...]},...]", + .get = get_iothread_vq_mapping_list, + .set = set_iothread_vq_mapping_list, + .release = release_iothread_vq_mapping_list, +}; diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c index 840006e953c..7d6fa726fdf 100644 --- a/hw/core/qdev-properties.c +++ b/hw/core/qdev-properties.c @@ -1076,16 +1076,18 @@ void device_class_set_props(DeviceClass *dc, Property *props) void qdev_alias_all_properties(DeviceState *target, Object *source) { ObjectClass *class; - Property *prop; + ObjectPropertyIterator iter; + ObjectProperty *prop; class = object_get_class(OBJECT(target)); - do { - DeviceClass *dc = DEVICE_CLASS(class); - for (prop = dc->props_; prop && prop->name; prop++) { - object_property_add_alias(source, prop->name, - OBJECT(target), prop->name); + object_class_property_iter_init(&iter, class); + while ((prop = object_property_iter_next(&iter))) { + if (object_property_find(source, prop->name)) { + continue; /* skip duplicate properties */ } - class = object_class_get_parent(class); - } while (class != object_class_by_name(TYPE_DEVICE)); + + object_property_add_alias(source, prop->name, + OBJECT(target), prop->name); + } } diff --git a/hw/core/qdev.c b/hw/core/qdev.c index 43d863b0c5b..c68d0f7c512 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -879,6 +879,14 @@ Object 
*qdev_get_machine(void) return dev; } +char *qdev_get_human_name(DeviceState *dev) +{ + g_assert(dev != NULL); + + return dev->id ? + g_strdup(dev->id) : object_get_canonical_path(OBJECT(dev)); +} + static MachineInitPhase machine_phase; bool phase_check(MachineInitPhase phase) diff --git a/hw/core/reset.c b/hw/core/reset.c index d3263b613e6..d50da7e3041 100644 --- a/hw/core/reset.c +++ b/hw/core/reset.c @@ -24,64 +24,164 @@ */ #include "qemu/osdep.h" -#include "qemu/queue.h" #include "sysemu/reset.h" +#include "hw/resettable.h" +#include "hw/core/resetcontainer.h" -/* reset/shutdown handler */ +/* + * Return a pointer to the singleton container that holds all the Resettable + * items that will be reset when qemu_devices_reset() is called. + */ +static ResettableContainer *get_root_reset_container(void) +{ + static ResettableContainer *root_reset_container; -typedef struct QEMUResetEntry { - QTAILQ_ENTRY(QEMUResetEntry) entry; + if (!root_reset_container) { + root_reset_container = + RESETTABLE_CONTAINER(object_new(TYPE_RESETTABLE_CONTAINER)); + } + return root_reset_container; +} + +/* + * Reason why the currently in-progress qemu_devices_reset() was called. + * If we made at least SHUTDOWN_CAUSE_SNAPSHOT_LOAD have a corresponding + * ResetType we could perhaps avoid the need for this global. + */ +static ShutdownCause device_reset_reason; + +/* + * This is an Object which implements Resettable simply to call the + * callback function in the hold phase. + */ +#define TYPE_LEGACY_RESET "legacy-reset" +OBJECT_DECLARE_SIMPLE_TYPE(LegacyReset, LEGACY_RESET) + +struct LegacyReset { + Object parent; + ResettableState reset_state; QEMUResetHandler *func; void *opaque; bool skip_on_snapshot_load; -} QEMUResetEntry; +}; + +OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(LegacyReset, legacy_reset, LEGACY_RESET, OBJECT, { TYPE_RESETTABLE_INTERFACE }, { }) + +static ResettableState *legacy_reset_get_state(Object *obj) +{ + LegacyReset *lr = LEGACY_RESET(obj); + return &lr->reset_state; +} + +static void legacy_reset_hold(Object *obj) +{ + LegacyReset *lr = LEGACY_RESET(obj); + + if (device_reset_reason == SHUTDOWN_CAUSE_SNAPSHOT_LOAD && + lr->skip_on_snapshot_load) { + return; + } + lr->func(lr->opaque); +} + +static void legacy_reset_init(Object *obj) +{ +} + +static void legacy_reset_finalize(Object *obj) +{ +} + +static void legacy_reset_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); -static QTAILQ_HEAD(, QEMUResetEntry) reset_handlers = - QTAILQ_HEAD_INITIALIZER(reset_handlers); + rc->get_state = legacy_reset_get_state; + rc->phases.hold = legacy_reset_hold; +} void qemu_register_reset(QEMUResetHandler *func, void *opaque) { - QEMUResetEntry *re = g_new0(QEMUResetEntry, 1); + Object *obj = object_new(TYPE_LEGACY_RESET); + LegacyReset *lr = LEGACY_RESET(obj); - re->func = func; - re->opaque = opaque; - QTAILQ_INSERT_TAIL(&reset_handlers, re, entry); + lr->func = func; + lr->opaque = opaque; + qemu_register_resettable(obj); } void qemu_register_reset_nosnapshotload(QEMUResetHandler *func, void *opaque) { - QEMUResetEntry *re = g_new0(QEMUResetEntry, 1); + Object *obj = object_new(TYPE_LEGACY_RESET); + LegacyReset *lr = LEGACY_RESET(obj); - re->func = func; - re->opaque = opaque; - re->skip_on_snapshot_load = true; - QTAILQ_INSERT_TAIL(&reset_handlers, re, entry); + lr->func = func; + lr->opaque = opaque; + lr->skip_on_snapshot_load = true; + qemu_register_resettable(obj); } -void qemu_unregister_reset(QEMUResetHandler *func, void *opaque) +typedef struct 
FindLegacyInfo { + QEMUResetHandler *func; + void *opaque; + LegacyReset *lr; +} FindLegacyInfo; + +static void find_legacy_reset_cb(Object *obj, void *opaque, ResetType type) { - QEMUResetEntry *re; - - QTAILQ_FOREACH(re, &reset_handlers, entry) { - if (re->func == func && re->opaque == opaque) { - QTAILQ_REMOVE(&reset_handlers, re, entry); - g_free(re); - return; - } + LegacyReset *lr; + FindLegacyInfo *fli = opaque; + + /* Not everything in the ResettableContainer will be a LegacyReset */ + lr = LEGACY_RESET(object_dynamic_cast(obj, TYPE_LEGACY_RESET)); + if (lr && lr->func == fli->func && lr->opaque == fli->opaque) { + fli->lr = lr; } } -void qemu_devices_reset(ShutdownCause reason) +static LegacyReset *find_legacy_reset(QEMUResetHandler *func, void *opaque) +{ + /* + * Find the LegacyReset with the specified func and opaque, + * by getting the ResettableContainer to call our callback for + * every item in it. + */ + ResettableContainer *rootcon = get_root_reset_container(); + ResettableClass *rc = RESETTABLE_GET_CLASS(rootcon); + FindLegacyInfo fli; + + fli.func = func; + fli.opaque = opaque; + fli.lr = NULL; + rc->child_foreach(OBJECT(rootcon), find_legacy_reset_cb, + &fli, RESET_TYPE_COLD); + return fli.lr; +} + +void qemu_unregister_reset(QEMUResetHandler *func, void *opaque) { - QEMUResetEntry *re, *nre; - - /* reset all devices */ - QTAILQ_FOREACH_SAFE(re, &reset_handlers, entry, nre) { - if (reason == SHUTDOWN_CAUSE_SNAPSHOT_LOAD && - re->skip_on_snapshot_load) { - continue; - } - re->func(re->opaque); + Object *obj = OBJECT(find_legacy_reset(func, opaque)); + + if (obj) { + qemu_unregister_resettable(obj); + object_unref(obj); } } +void qemu_register_resettable(Object *obj) +{ + resettable_container_add(get_root_reset_container(), obj); +} + +void qemu_unregister_resettable(Object *obj) +{ + resettable_container_remove(get_root_reset_container(), obj); +} + +void qemu_devices_reset(ShutdownCause reason) +{ + device_reset_reason = reason; + + /* Reset the simulation */ + resettable_reset(OBJECT(get_root_reset_container()), RESET_TYPE_COLD); +} diff --git a/hw/core/resetcontainer.c b/hw/core/resetcontainer.c new file mode 100644 index 00000000000..e4ece68e83a --- /dev/null +++ b/hw/core/resetcontainer.c @@ -0,0 +1,77 @@ +/* + * Reset container + * + * Copyright (c) 2024 Linaro, Ltd + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +/* + * The "reset container" is an object which implements the Resettable + * interface. It contains a list of arbitrary other objects which also + * implement Resettable. Resetting the reset container resets all the + * objects in it. 
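
Illustrative sketch, not part of this patch: with the container in place, the legacy callback API and the new Resettable API coexist. The names my_reset_cb, register_reset_demo and ctrl_reg below are hypothetical; the sketch only contrasts the two registration calls. qemu_register_reset() now wraps the callback in a LegacyReset object behind the scenes, while qemu_register_resettable() adds any object implementing the Resettable interface directly to the root container that qemu_devices_reset() cold-resets.

#include "qemu/osdep.h"
#include "sysemu/reset.h"

/* Hypothetical legacy handler; it runs in the hold phase via LegacyReset. */
static void my_reset_cb(void *opaque)
{
    uint32_t *ctrl_reg = opaque;

    *ctrl_reg = 0;
}

/* Hypothetical glue contrasting the two registration styles. */
static void register_reset_demo(Object *resettable_obj, uint32_t *ctrl_reg)
{
    /* Old API: still supported, internally wrapped in a LegacyReset object. */
    qemu_register_reset(my_reset_cb, ctrl_reg);

    /* New API: the object must implement TYPE_RESETTABLE_INTERFACE;
     * it is then reset whenever qemu_devices_reset() runs. */
    qemu_register_resettable(resettable_obj);
}
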
+ */ + +#include "qemu/osdep.h" +#include "hw/resettable.h" +#include "hw/core/resetcontainer.h" + +struct ResettableContainer { + Object parent; + ResettableState reset_state; + GPtrArray *children; +}; + +OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(ResettableContainer, resettable_container, RESETTABLE_CONTAINER, OBJECT, { TYPE_RESETTABLE_INTERFACE }, { }) + +void resettable_container_add(ResettableContainer *rc, Object *obj) +{ + INTERFACE_CHECK(void, obj, TYPE_RESETTABLE_INTERFACE); + g_ptr_array_add(rc->children, obj); +} + +void resettable_container_remove(ResettableContainer *rc, Object *obj) +{ + g_ptr_array_remove(rc->children, obj); +} + +static ResettableState *resettable_container_get_state(Object *obj) +{ + ResettableContainer *rc = RESETTABLE_CONTAINER(obj); + return &rc->reset_state; +} + +static void resettable_container_child_foreach(Object *obj, + ResettableChildCallback cb, + void *opaque, ResetType type) +{ + ResettableContainer *rc = RESETTABLE_CONTAINER(obj); + unsigned int len = rc->children->len; + + for (unsigned int i = 0; i < len; i++) { + cb(g_ptr_array_index(rc->children, i), opaque, type); + /* Detect callbacks trying to unregister themselves */ + assert(len == rc->children->len); + } +} + +static void resettable_container_init(Object *obj) +{ + ResettableContainer *rc = RESETTABLE_CONTAINER(obj); + + rc->children = g_ptr_array_new(); +} + +static void resettable_container_finalize(Object *obj) +{ +} + +static void resettable_container_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->get_state = resettable_container_get_state; + rc->child_foreach = resettable_container_child_foreach; +} diff --git a/hw/core/sysbus.c b/hw/core/sysbus.c index 35f902b582b..ad34fb73446 100644 --- a/hw/core/sysbus.c +++ b/hw/core/sysbus.c @@ -298,17 +298,6 @@ static char *sysbus_get_fw_dev_path(DeviceState *dev) return g_strdup(qdev_fw_name(dev)); } -void sysbus_add_io(SysBusDevice *dev, hwaddr addr, - MemoryRegion *mem) -{ - memory_region_add_subregion(get_system_io(), addr, mem); -} - -MemoryRegion *sysbus_address_space(SysBusDevice *dev) -{ - return get_system_memory(); -} - static void sysbus_device_class_init(ObjectClass *klass, void *data) { DeviceClass *k = DEVICE_CLASS(klass); diff --git a/hw/cpu/a15mpcore.c b/hw/cpu/a15mpcore.c index bfd8aa56448..967d8d3dd50 100644 --- a/hw/cpu/a15mpcore.c +++ b/hw/cpu/a15mpcore.c @@ -26,6 +26,7 @@ #include "hw/qdev-properties.h" #include "sysemu/kvm.h" #include "kvm_arm.h" +#include "target/arm/gtimer.h" static void a15mp_priv_set_irq(void *opaque, int irq, int level) { diff --git a/hw/cpu/a9mpcore.c b/hw/cpu/a9mpcore.c index d03f57e579b..c30ef72c669 100644 --- a/hw/cpu/a9mpcore.c +++ b/hw/cpu/a9mpcore.c @@ -15,7 +15,7 @@ #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/core/cpu.h" -#include "cpu.h" +#include "target/arm/cpu-qom.h" #define A9_GIC_NUM_PRIORITY_BITS 5 diff --git a/hw/cpu/cluster.c b/hw/cpu/cluster.c index e444b7c29d1..61289a840d4 100644 --- a/hw/cpu/cluster.c +++ b/hw/cpu/cluster.c @@ -19,12 +19,11 @@ */ #include "qemu/osdep.h" + +#include "hw/core/cpu.h" #include "hw/cpu/cluster.h" #include "hw/qdev-properties.h" -#include "hw/core/cpu.h" #include "qapi/error.h" -#include "qemu/module.h" -#include "qemu/cutils.h" static Property cpu_cluster_properties[] = { DEFINE_PROP_UINT32("cluster-id", CPUClusterState, cluster_id, 0), diff --git a/hw/cpu/core.c b/hw/cpu/core.c index 98760751557..495a5c30ffe 100644 --- a/hw/cpu/core.c +++ b/hw/cpu/core.c @@ -8,12 +8,11 
@@ */ #include "qemu/osdep.h" + +#include "hw/boards.h" #include "hw/cpu/core.h" -#include "qapi/visitor.h" -#include "qemu/module.h" #include "qapi/error.h" -#include "sysemu/cpus.h" -#include "hw/boards.h" +#include "qapi/visitor.h" static void core_prop_get_core_id(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) diff --git a/hw/cpu/meson.build b/hw/cpu/meson.build index 6d319947ca0..38cdcfbe572 100644 --- a/hw/cpu/meson.build +++ b/hw/cpu/meson.build @@ -2,5 +2,5 @@ system_ss.add(files('core.c', 'cluster.c')) system_ss.add(when: 'CONFIG_ARM11MPCORE', if_true: files('arm11mpcore.c')) system_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview_mpcore.c')) -specific_ss.add(when: 'CONFIG_A9MPCORE', if_true: files('a9mpcore.c')) +system_ss.add(when: 'CONFIG_A9MPCORE', if_true: files('a9mpcore.c')) specific_ss.add(when: 'CONFIG_A15MPCORE', if_true: files('a15mpcore.c')) diff --git a/hw/cris/axis_dev88.c b/hw/cris/axis_dev88.c index d82050d927d..55566349210 100644 --- a/hw/cris/axis_dev88.c +++ b/hw/cris/axis_dev88.c @@ -308,15 +308,14 @@ void axisdev88_init(MachineState *machine) /* Add the two ethernet blocks. */ dma_eth = g_malloc0(sizeof dma_eth[0] * 4); /* Allocate 4 channels. */ - etraxfs_eth_init(&nd_table[0], 0x30034000, 1, &dma_eth[0], &dma_eth[1]); - if (nb_nics > 1) { - etraxfs_eth_init(&nd_table[1], 0x30036000, 2, &dma_eth[2], &dma_eth[3]); - } + etraxfs_eth_init(0x30034000, 1, &dma_eth[0], &dma_eth[1]); /* The DMA Connector block is missing, hardwire things for now. */ etraxfs_dmac_connect_client(etraxfs_dmac, 0, &dma_eth[0]); etraxfs_dmac_connect_client(etraxfs_dmac, 1, &dma_eth[1]); - if (nb_nics > 1) { + + if (qemu_find_nic_info("etraxfs-eth", true, "fseth")) { + etraxfs_eth_init(0x30036000, 2, &dma_eth[2], &dma_eth[3]); etraxfs_dmac_connect_client(etraxfs_dmac, 6, &dma_eth[2]); etraxfs_dmac_connect_client(etraxfs_dmac, 7, &dma_eth[3]); } diff --git a/hw/cxl/cxl-cdat.c b/hw/cxl/cxl-cdat.c index 2fea975671f..551545f7823 100644 --- a/hw/cxl/cxl-cdat.c +++ b/hw/cxl/cxl-cdat.c @@ -114,7 +114,7 @@ static void ct3_build_cdat(CDATObject *cdat, Error **errp) static void ct3_load_cdat(CDATObject *cdat, Error **errp) { g_autofree CDATEntry *cdat_st = NULL; - g_autofree char *buf = NULL; + g_autofree uint8_t *buf = NULL; uint8_t sum = 0; int num_ent; int i = 0, ent = 1; @@ -171,7 +171,7 @@ static void ct3_load_cdat(CDATObject *cdat, Error **errp) cdat_st[ent].base = hdr; cdat_st[ent].length = hdr->length; - while (buf + i < (char *)cdat_st[ent].base + cdat_st[ent].length) { + while (buf + i < (uint8_t *)cdat_st[ent].base + cdat_st[ent].length) { assert(i < file_size); sum += buf[i++]; } diff --git a/hw/cxl/cxl-component-utils.c b/hw/cxl/cxl-component-utils.c index 9dfde6c0b32..cd116c04012 100644 --- a/hw/cxl/cxl-component-utils.c +++ b/hw/cxl/cxl-component-utils.c @@ -13,7 +13,7 @@ #include "hw/pci/pci.h" #include "hw/cxl/cxl.h" -/* CXL r3.0 Section 8.2.4.19.1 CXL HDM Decoder Capability Register */ +/* CXL r3.1 Section 8.2.4.20.1 CXL HDM Decoder Capability Register */ int cxl_decoder_count_enc(int count) { switch (count) { @@ -160,11 +160,11 @@ static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value, } /* - * 8.2.3 + * CXL r3.1 Section 8.2.3: Component Register Layout and Definition * The access restrictions specified in Section 8.2.2 also apply to CXL 2.0 * Component Registers. * - * 8.2.2 + * CXL r3.1 Section 8.2.2: Accessing Component Registers * • A 32 bit register shall be accessed as a 4 Bytes quantity. 
Partial * reads are not permitted. * • A 64 bit register shall be accessed as a 8 Bytes quantity. Partial @@ -197,7 +197,7 @@ void cxl_component_register_block_init(Object *obj, CXL2_COMPONENT_BLOCK_SIZE); /* io registers controls link which we don't care about in QEMU */ - memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io", + memory_region_init_io(&cregs->io, obj, NULL, NULL, ".io", CXL2_COMPONENT_IO_REGION_SIZE); memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cxl_cstate, ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE); @@ -243,6 +243,14 @@ static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk, ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1); ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 0); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 16_WAY, 0); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, UIO, 0); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, + UIO_DECODER_COUNT, 0); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, MEMDATA_NXM_CAP, 0); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, + SUPPORTED_COHERENCY_MODEL, 0); /* Unknown */ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL, HDM_DECODER_ENABLE, 0); write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3; @@ -289,6 +297,7 @@ void cxl_component_register_init_common(uint32_t *reg_state, caps = 3; break; case CXL2_ROOT_PORT: + case CXL2_RC: /* + Extended Security, + Snoop */ caps = 5; break; @@ -300,7 +309,8 @@ void cxl_component_register_init_common(uint32_t *reg_state, /* CXL Capability Header Register */ ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1); - ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1); + ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, + CXL_CAPABILITY_VERSION); ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1); ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps); @@ -317,24 +327,36 @@ void cxl_component_register_init_common(uint32_t *reg_state, CXL_##reg##_REGISTERS_OFFSET); \ } while (0) - init_cap_reg(RAS, 2, 2); - ras_init_common(reg_state, write_msk); + switch (type) { + case CXL2_DEVICE: + case CXL2_TYPE3_DEVICE: + case CXL2_LOGICAL_DEVICE: + case CXL2_ROOT_PORT: + case CXL2_UPSTREAM_PORT: + case CXL2_DOWNSTREAM_PORT: + init_cap_reg(RAS, 2, CXL_RAS_CAPABILITY_VERSION); + ras_init_common(reg_state, write_msk); + break; + default: + break; + } - init_cap_reg(LINK, 4, 2); + init_cap_reg(LINK, 4, CXL_LINK_CAPABILITY_VERSION); if (caps < 3) { return; } - init_cap_reg(HDM, 5, 1); - hdm_init_common(reg_state, write_msk, type); - + if (type != CXL2_ROOT_PORT) { + init_cap_reg(HDM, 5, CXL_HDM_CAPABILITY_VERSION); + hdm_init_common(reg_state, write_msk, type); + } if (caps < 5) { return; } - init_cap_reg(EXTSEC, 6, 1); - init_cap_reg(SNOOP, 8, 1); + init_cap_reg(EXTSEC, 6, CXL_EXTSEC_CAP_VERSION); + init_cap_reg(SNOOP, 8, CXL_SNOOP_CAP_VERSION); #undef init_cap_reg } @@ -459,7 +481,7 @@ void cxl_component_create_dvsec(CXLComponentState *cxl, cxl->dvsec_offset += length; } -/* CXL r3.0 Section 8.2.4.19.7 CXL HDM Decoder n Control Register */ +/* CXL r3.1 Section 8.2.4.20.7 CXL HDM Decoder n Control Register */ uint8_t cxl_interleave_ways_enc(int iw, Error **errp) { switch (iw) { diff --git a/hw/cxl/cxl-device-utils.c b/hw/cxl/cxl-device-utils.c index 40b619ffd9f..035d034f6dd 100644 --- a/hw/cxl/cxl-device-utils.c +++ 
b/hw/cxl/cxl-device-utils.c @@ -13,7 +13,7 @@ /* * Device registers have no restrictions per the spec, and so fall back to the - * default memory mapped register rules in 8.2: + * default memory mapped register rules in CXL r3.1 Section 8.2: * Software shall use CXL.io Memory Read and Write to access memory mapped * register defined in this section. Unless otherwise specified, software * shall restrict the accesses width based on the following: @@ -366,6 +366,10 @@ static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate) ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP, MSI_N, msi_n); cxl_dstate->mbox_msi_n = msi_n; + ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP, + MBOX_READY_TIME, 0); /* Not reported */ + ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP, + TYPE, 0); /* Inferred from class code */ } static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) @@ -389,13 +393,15 @@ void cxl_device_register_init_t3(CXLType3Dev *ct3d) ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1); ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count); - cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2); + cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, + CXL_DEVICE_STATUS_VERSION); device_reg_init_common(cxl_dstate); - cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1); + cxl_device_cap_init(cxl_dstate, MAILBOX, 2, CXL_DEV_MAILBOX_VERSION); mailbox_reg_init_common(cxl_dstate); - cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1); + cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, + CXL_MEM_DEV_STATUS_VERSION); memdev_reg_init_common(cxl_dstate); cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d), diff --git a/hw/cxl/cxl-events.c b/hw/cxl/cxl-events.c index bee6dfaf148..d397718b1bd 100644 --- a/hw/cxl/cxl-events.c +++ b/hw/cxl/cxl-events.c @@ -7,11 +7,9 @@ * COPYING file in the top-level directory. */ -#include - #include "qemu/osdep.h" + #include "qemu/bswap.h" -#include "qemu/typedefs.h" #include "qemu/error-report.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" @@ -206,7 +204,7 @@ CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds, * record that will not be cleared when Clear Event Records is executed, * the device shall return the Invalid Handle return code and shall not * clear any of the specified event records." 
- * -- CXL 3.0 8.2.9.2.3 + * -- CXL r3.1 Section 8.2.9.2.3: Clear Event Records (0101h) */ entry = cxl_event_get_head(log); for (nr = 0; entry && nr < pl->nr_recs; nr++) { diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c index 2aa776c79c7..c5f5fcfd64d 100644 --- a/hw/cxl/cxl-host.c +++ b/hw/cxl/cxl-host.c @@ -26,6 +26,7 @@ static void cxl_fixed_memory_window_config(CXLState *cxl_state, CXLFixedMemoryWindowOptions *object, Error **errp) { + ERRP_GUARD(); g_autofree CXLFixedWindow *fw = g_malloc0(sizeof(*fw)); strList *target; int i; diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index 6eff56fb1b3..e5eb97cb914 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -86,7 +86,7 @@ enum { #define MANAGEMENT_COMMAND 0x0 }; -/* CCI Message Format CXL r3.0 Figure 7-19 */ +/* CCI Message Format CXL r3.1 Figure 7-19 */ typedef struct CXLCCIMessage { uint8_t category; #define CXL_CCI_CAT_REQ 0 @@ -342,7 +342,7 @@ static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -/* CXL r3.0 section 8.2.9.1.1: Identify (Opcode 0001h) */ +/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */ static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -403,7 +403,7 @@ static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d, } } -/* CXL r3 8.2.9.1.1 */ +/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */ static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -455,7 +455,7 @@ static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -/* CXL r3.0 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */ +/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */ static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -463,14 +463,14 @@ static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd, size_t *len_out, CXLCCI *cci) { - /* CXL r3.0 Table 7-18: Get Physical Port State Request Payload */ + /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */ struct cxl_fmapi_get_phys_port_state_req_pl { uint8_t num_ports; uint8_t ports[]; } QEMU_PACKED *in; /* - * CXL r3.0 Table 7-20: Get Physical Port State Port Information Block + * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block * Format */ struct cxl_fmapi_port_state_info_block { @@ -491,7 +491,7 @@ static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd, uint8_t supported_ld_count; } QEMU_PACKED; - /* CXL r3.0 Table 7-19: Get Physical Port State Response Payload */ + /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */ struct cxl_fmapi_get_phys_port_state_resp_pl { uint8_t num_ports; uint8_t rsv1[3]; @@ -579,7 +579,7 @@ static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -/* CXL r3.0 8.2.9.1.2 */ +/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */ static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -609,7 +609,7 @@ static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -/* 8.2.9.2.1 */ +/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */ static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len, @@ -647,7 +647,7 @@ static CXLRetCode 
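
Illustrative sketch, not part of this patch: the "Invalid Input" rule described in the comment above corresponds to a bounds check of roughly the following shape near the top of the handler. The full body of cmd_logs_get_log is not visible in this hunk, so the exact spelling is an assumption; CXL_MBOX_INVALID_INPUT is taken to be the matching mailbox return code.

    if (get_log->offset + get_log->length > cci->payload_max) {
        /* The CEL always fits, so in practice this only trips when the
         * requested offset/length exceed the mailbox payload itself. */
        return CXL_MBOX_INVALID_INPUT;
    }
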
cmd_firmware_update_get_info(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -/* 8.2.9.3.1 */ +/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */ static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -664,7 +664,7 @@ static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -/* 8.2.9.3.2 */ +/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */ static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -683,13 +683,13 @@ static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -/* CXL 3.0 8.2.9.5.2.1 Command Effects Log (CEL) */ +/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */ static const QemuUUID cel_uuid = { .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17) }; -/* 8.2.9.4.1 */ +/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */ static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -715,7 +715,7 @@ static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -/* 8.2.9.4.2 */ +/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */ static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -732,14 +732,11 @@ static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd, get_log = (void *)payload_in; /* - * 8.2.9.4.2 - * The device shall return Invalid Parameter if the Offset or Length + * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) + * The device shall return Invalid Input if the Offset or Length * fields attempt to access beyond the size of the log as reported by Get * Supported Logs. * - * XXX: Spec is wrong, "Invalid Parameter" isn't a thing. - * XXX: Spec doesn't address incorrect UUID incorrectness. - * * The CEL buffer is large enough to fit all commands in the emulation, so * the only possible failure would be if the mailbox itself isn't big * enough. 
@@ -749,7 +746,7 @@ static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd, } if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) { - return CXL_MBOX_UNSUPPORTED; + return CXL_MBOX_INVALID_LOG; } /* Store off everything to local variables so we can wipe out the payload */ @@ -760,7 +757,7 @@ static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -/* 8.2.9.5.1.1 */ +/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */ static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -815,6 +812,7 @@ static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } +/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */ static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -851,6 +849,7 @@ static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } +/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */ static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -879,6 +878,7 @@ static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } +/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */ static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -940,7 +940,7 @@ static void __do_sanitization(CXLType3Dev *ct3d) } /* - * CXL 3.0 spec section 8.2.9.8.5.1 - Sanitize. + * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h) * * Once the Sanitize command has started successfully, the device shall be * placed in the media disabled state. If the command fails or is interrupted @@ -1001,15 +1001,8 @@ static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd, cxl_dev_disable_media(&ct3d->cxl_dstate); - if (secs > 2) { - /* sanitize when done */ - return CXL_MBOX_BG_STARTED; - } else { - __do_sanitization(ct3d); - cxl_dev_enable_media(&ct3d->cxl_dstate); - - return CXL_MBOX_SUCCESS; - } + /* sanitize when done */ + return CXL_MBOX_BG_STARTED; } static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd, @@ -1025,7 +1018,10 @@ static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd, *len_out = 4; return CXL_MBOX_SUCCESS; } + /* + * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h) + * * This is very inefficient, but good enough for now! * Also the payload will always fit, so no need to handle the MORE flag and * make this stateful. 
We may want to allow longer poison lists to aid @@ -1110,6 +1106,7 @@ static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } +/* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */ static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -1153,6 +1150,7 @@ static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } +/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h */ static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd, uint8_t *payload_in, size_t len_in, @@ -1387,27 +1385,21 @@ static void bg_timercb(void *opaque) cci->bg.complete_pct = 100; cci->bg.ret_code = ret; - if (ret == CXL_MBOX_SUCCESS) { - switch (cci->bg.opcode) { - case 0x4400: /* sanitize */ - { - CXLType3Dev *ct3d = CXL_TYPE3(cci->d); - - __do_sanitization(ct3d); - cxl_dev_enable_media(&ct3d->cxl_dstate); - } + switch (cci->bg.opcode) { + case 0x4400: /* sanitize */ + { + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + + __do_sanitization(ct3d); + cxl_dev_enable_media(&ct3d->cxl_dstate); + } + break; + case 0x4304: /* TODO: scan media */ + break; + default: + __builtin_unreachable(); break; - case 0x4304: /* TODO: scan media */ - break; - default: - __builtin_unreachable(); - break; - } } - - qemu_log("Background command %04xh finished: %s\n", - cci->bg.opcode, - ret == CXL_MBOX_SUCCESS ? "success" : "aborted"); } else { /* estimate only */ cci->bg.complete_pct = 100 * now / total_time; diff --git a/hw/cxl/meson.build b/hw/cxl/meson.build index ea0aebf6e3c..3e375f61a98 100644 --- a/hw/cxl/meson.build +++ b/hw/cxl/meson.build @@ -11,5 +11,3 @@ system_ss.add(when: 'CONFIG_CXL', if_false: files( 'cxl-host-stubs.c', )) - -system_ss.add(when: 'CONFIG_ALL', if_true: files('cxl-host-stubs.c')) diff --git a/hw/display/Kconfig b/hw/display/Kconfig index 1aafe1923d2..234c7de027c 100644 --- a/hw/display/Kconfig +++ b/hw/display/Kconfig @@ -55,7 +55,7 @@ config VGA_MMIO config VMWARE_VGA bool - default y if PCI_DEVICES && PC_PCI + default y if PCI_DEVICES && (PC_PCI || MIPS) depends on PCI select VGA @@ -77,6 +77,7 @@ config SM501 select I2C select DDC select SERIAL + select USB_OHCI_SYSBUS config TCX bool diff --git a/hw/display/artist.c b/hw/display/artist.c index fde050c882b..d9134532fb7 100644 --- a/hw/display/artist.c +++ b/hw/display/artist.c @@ -1435,7 +1435,7 @@ static const VMStateDescription vmstate_artist = { .version_id = 2, .minimum_version_id = 2, .post_load = vmstate_artist_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(height, ARTISTState), VMSTATE_UINT16(width, ARTISTState), VMSTATE_UINT16(depth, ARTISTState), diff --git a/hw/display/ati.c b/hw/display/ati.c index 569b8f61650..8d2501bd821 100644 --- a/hw/display/ati.c +++ b/hw/display/ati.c @@ -991,7 +991,7 @@ static void ati_vga_realize(PCIDevice *dev, Error **errp) } vga_init(vga, OBJECT(s), pci_address_space(dev), pci_address_space_io(dev), true); - vga->con = graphic_console_init(DEVICE(s), 0, s->vga.hw_ops, &s->vga); + vga->con = graphic_console_init(DEVICE(s), 0, s->vga.hw_ops, vga); if (s->cursor_guest_mode) { vga->cursor_invalidate = ati_cursor_invalidate; vga->cursor_draw_line = ati_cursor_draw_line; diff --git a/hw/display/bcm2835_fb.c b/hw/display/bcm2835_fb.c index a05277674f2..e40ed2d2e18 100644 --- a/hw/display/bcm2835_fb.c +++ b/hw/display/bcm2835_fb.c @@ -355,7 +355,7 @@ static const VMStateDescription vmstate_bcm2835_fb = { .name = TYPE_BCM2835_FB, 
.version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(lock, BCM2835FBState), VMSTATE_BOOL(invalidate, BCM2835FBState), VMSTATE_BOOL(pending, BCM2835FBState), diff --git a/hw/display/bochs-display.c b/hw/display/bochs-display.c index 9138e98c3b6..3b1d922b6ea 100644 --- a/hw/display/bochs-display.c +++ b/hw/display/bochs-display.c @@ -61,7 +61,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(BochsDisplayState, BOCHS_DISPLAY) static const VMStateDescription vmstate_bochs_display = { .name = "bochs-display", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pci, BochsDisplayState), VMSTATE_UINT16_ARRAY(vbe_regs, BochsDisplayState, VBE_DISPI_INDEX_NB), VMSTATE_BOOL(big_endian_fb, BochsDisplayState), diff --git a/hw/display/cg3.c b/hw/display/cg3.c index 2e9656ae1c3..b271faaa484 100644 --- a/hw/display/cg3.c +++ b/hw/display/cg3.c @@ -334,7 +334,7 @@ static const VMStateDescription vmstate_cg3 = { .version_id = 1, .minimum_version_id = 1, .post_load = vmstate_cg3_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(height, CG3State), VMSTATE_UINT16(width, CG3State), VMSTATE_UINT16(depth, CG3State), diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c index b80f98b6c4c..150883a9716 100644 --- a/hw/display/cirrus_vga.c +++ b/hw/display/cirrus_vga.c @@ -43,6 +43,7 @@ #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "ui/pixel_ops.h" +#include "vga_regs.h" #include "cirrus_vga_internal.h" #include "qom/object.h" #include "ui/console.h" @@ -798,9 +799,9 @@ static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s) if (blit_is_unsafe(s, false)) return 0; - return cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->vga.start_addr, - s->cirrus_blt_srcaddr - s->vga.start_addr, - s->cirrus_blt_width, s->cirrus_blt_height); + return cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->vga.params.start_addr, + s->cirrus_blt_srcaddr - s->vga.params.start_addr, + s->cirrus_blt_width, s->cirrus_blt_height); } /*************************************** @@ -1101,30 +1102,29 @@ static void cirrus_write_bitblt(CirrusVGAState * s, unsigned reg_value) * ***************************************/ -static void cirrus_get_offsets(VGACommonState *s1, - uint32_t *pline_offset, - uint32_t *pstart_addr, - uint32_t *pline_compare) +static void cirrus_get_params(VGACommonState *s1, + VGADisplayParams *params) { CirrusVGAState * s = container_of(s1, CirrusVGAState, vga); - uint32_t start_addr, line_offset, line_compare; + uint32_t line_offset; line_offset = s->vga.cr[0x13] | ((s->vga.cr[0x1b] & 0x10) << 4); line_offset <<= 3; - *pline_offset = line_offset; + params->line_offset = line_offset; - start_addr = (s->vga.cr[0x0c] << 8) + params->start_addr = (s->vga.cr[0x0c] << 8) | s->vga.cr[0x0d] | ((s->vga.cr[0x1b] & 0x01) << 16) | ((s->vga.cr[0x1b] & 0x0c) << 15) | ((s->vga.cr[0x1d] & 0x80) << 12); - *pstart_addr = start_addr; - line_compare = s->vga.cr[0x18] | + params->line_compare = s->vga.cr[0x18] | ((s->vga.cr[0x07] & 0x10) << 4) | ((s->vga.cr[0x09] & 0x40) << 3); - *pline_compare = line_compare; + + params->hpel = s->vga.ar[VGA_ATC_PEL]; + params->hpel_split = s->vga.ar[VGA_ATC_MODE] & 0x20; } static uint32_t cirrus_get_bpp16_depth(CirrusVGAState * s) @@ -2739,7 +2739,7 @@ const VMStateDescription vmstate_cirrus_vga = { .version_id = 2, .minimum_version_id = 1, .post_load = cirrus_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_UINT32(vga.latch, CirrusVGAState), VMSTATE_UINT8(vga.sr_index, CirrusVGAState), VMSTATE_BUFFER(vga.sr, CirrusVGAState), @@ -2777,7 +2777,7 @@ static const VMStateDescription vmstate_pci_cirrus_vga = { .name = "cirrus_vga", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCICirrusVGAState), VMSTATE_STRUCT(cirrus_vga, PCICirrusVGAState, 0, vmstate_cirrus_vga, CirrusVGAState), @@ -2925,7 +2925,7 @@ void cirrus_init_common(CirrusVGAState *s, Object *owner, s->linear_mmio_mask = s->real_vram_size - 256; s->vga.get_bpp = cirrus_get_bpp; - s->vga.get_offsets = cirrus_get_offsets; + s->vga.get_params = cirrus_get_params; s->vga.get_resolution = cirrus_get_resolution; s->vga.cursor_invalidate = cirrus_cursor_invalidate; s->vga.cursor_draw_line = cirrus_cursor_draw_line; diff --git a/hw/display/dpcd.c b/hw/display/dpcd.c index 64463654a1a..aab1b1a2d7f 100644 --- a/hw/display/dpcd.c +++ b/hw/display/dpcd.c @@ -135,7 +135,7 @@ static const VMStateDescription vmstate_dpcd = { .name = TYPE_DPCD, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY_V(dpcd_info, DPCDState, DPCD_READABLE_AREA, 0), VMSTATE_END_OF_LIST() } diff --git a/hw/display/exynos4210_fimd.c b/hw/display/exynos4210_fimd.c index 34a960a9765..5712558e13d 100644 --- a/hw/display/exynos4210_fimd.c +++ b/hw/display/exynos4210_fimd.c @@ -23,6 +23,7 @@ */ #include "qemu/osdep.h" +#include "hw/qdev-properties.h" #include "hw/hw.h" #include "hw/irq.h" #include "hw/sysbus.h" @@ -32,6 +33,7 @@ #include "qemu/bswap.h" #include "qemu/module.h" #include "qemu/log.h" +#include "qapi/error.h" #include "qom/object.h" /* Debug messages configuration */ @@ -302,6 +304,7 @@ struct Exynos4210fimdState { MemoryRegion iomem; QemuConsole *console; qemu_irq irq[3]; + MemoryRegion *fbmem; uint32_t vidcon[4]; /* Video main control registers 0-3 */ uint32_t vidtcon[4]; /* Video time control registers 0-3 */ @@ -1119,7 +1122,6 @@ static void exynos4210_fimd_invalidate(void *opaque) * VIDOSDA, VIDOSDB, VIDWADDx and SHADOWCON registers */ static void fimd_update_memory_section(Exynos4210fimdState *s, unsigned win) { - SysBusDevice *sbd = SYS_BUS_DEVICE(s); Exynos4210fimdWindow *w = &s->window[win]; hwaddr fb_start_addr, fb_mapped_len; @@ -1147,8 +1149,7 @@ static void fimd_update_memory_section(Exynos4210fimdState *s, unsigned win) memory_region_unref(w->mem_section.mr); } - w->mem_section = memory_region_find(sysbus_address_space(sbd), - fb_start_addr, w->fb_len); + w->mem_section = memory_region_find(s->fbmem, fb_start_addr, w->fb_len); assert(w->mem_section.mr); assert(w->mem_section.offset_within_address_space == fb_start_addr); DPRINT_TRACE("Window %u framebuffer changed: address=0x%08x, len=0x%x\n", @@ -1865,7 +1866,7 @@ static const VMStateDescription exynos4210_fimd_window_vmstate = { .name = "exynos4210.fimd_window", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(wincon, Exynos4210fimdWindow), VMSTATE_UINT32_ARRAY(buf_start, Exynos4210fimdWindow, 3), VMSTATE_UINT32_ARRAY(buf_end, Exynos4210fimdWindow, 3), @@ -1895,7 +1896,7 @@ static const VMStateDescription exynos4210_fimd_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = exynos4210_fimd_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(vidcon, Exynos4210fimdState, 4), VMSTATE_UINT32_ARRAY(vidtcon, 
Exynos4210fimdState, 4), VMSTATE_UINT32(shadowcon, Exynos4210fimdState), @@ -1924,6 +1925,12 @@ static const GraphicHwOps exynos4210_fimd_ops = { .gfx_update = exynos4210_fimd_update, }; +static Property exynos4210_fimd_properties[] = { + DEFINE_PROP_LINK("framebuffer-memory", Exynos4210fimdState, fbmem, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + static void exynos4210_fimd_init(Object *obj) { Exynos4210fimdState *s = EXYNOS4210_FIMD(obj); @@ -1944,6 +1951,11 @@ static void exynos4210_fimd_realize(DeviceState *dev, Error **errp) { Exynos4210fimdState *s = EXYNOS4210_FIMD(dev); + if (!s->fbmem) { + error_setg(errp, "'framebuffer-memory' property was not set"); + return; + } + s->console = graphic_console_init(dev, 0, &exynos4210_fimd_ops, s); } @@ -1954,6 +1966,7 @@ static void exynos4210_fimd_class_init(ObjectClass *klass, void *data) dc->vmsd = &exynos4210_fimd_vmstate; dc->reset = exynos4210_fimd_reset; dc->realize = exynos4210_fimd_realize; + device_class_set_props(dc, exynos4210_fimd_properties); } static const TypeInfo exynos4210_fimd_info = { diff --git a/hw/display/g364fb.c b/hw/display/g364fb.c index 2903cab82d8..e08ec3f8de4 100644 --- a/hw/display/g364fb.c +++ b/hw/display/g364fb.c @@ -455,7 +455,7 @@ static const VMStateDescription vmstate_g364fb = { .version_id = 2, .minimum_version_id = 2, .post_load = g364fb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER_UNSAFE(color_palette, G364State, 0, 256 * 3), VMSTATE_BUFFER_UNSAFE(cursor_palette, G364State, 0, 9), VMSTATE_UINT16_ARRAY(cursor, G364State, 512), @@ -521,7 +521,7 @@ static const VMStateDescription vmstate_g364fb_sysbus = { .name = "g364fb-sysbus", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(g364, G364SysBusState, 2, vmstate_g364fb, G364State), VMSTATE_END_OF_LIST() } diff --git a/hw/display/i2c-ddc.c b/hw/display/i2c-ddc.c index 146489518c7..3f9d1e1f6fe 100644 --- a/hw/display/i2c-ddc.c +++ b/hw/display/i2c-ddc.c @@ -88,7 +88,7 @@ static void i2c_ddc_init(Object *obj) static const VMStateDescription vmstate_i2c_ddc = { .name = TYPE_I2CDDC, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(firstbyte, I2CDDCState), VMSTATE_UINT8(reg, I2CDDCState), VMSTATE_END_OF_LIST() diff --git a/hw/display/jazz_led.c b/hw/display/jazz_led.c index dd5f4696c4f..534f15dcfd4 100644 --- a/hw/display/jazz_led.c +++ b/hw/display/jazz_led.c @@ -257,7 +257,7 @@ static const VMStateDescription vmstate_jazz_led = { .version_id = 0, .minimum_version_id = 0, .post_load = jazz_led_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(segments, LedState), VMSTATE_END_OF_LIST() } diff --git a/hw/display/macfb.c b/hw/display/macfb.c index d61541ccb5d..1ace341a0ff 100644 --- a/hw/display/macfb.c +++ b/hw/display/macfb.c @@ -627,7 +627,7 @@ static const VMStateDescription vmstate_macfb = { .version_id = 1, .minimum_version_id = 1, .post_load = macfb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(type, MacfbState), VMSTATE_UINT8_ARRAY(color_palette, MacfbState, 256 * 3), VMSTATE_UINT32(palette_current, MacfbState), @@ -714,6 +714,7 @@ static void macfb_nubus_set_irq(void *opaque, int n, int level) static void macfb_nubus_realize(DeviceState *dev, Error **errp) { + ERRP_GUARD(); NubusDevice *nd = NUBUS_DEVICE(dev); MacfbNubusState *s = NUBUS_MACFB(dev); 
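
Illustrative sketch, not part of this patch: the new "framebuffer-memory" link property added here for the Exynos FIMD (and further below for pl110) is expected to be wired up by the creating board before realize, roughly as follows. The helper board_add_fimd, its base/irq wiring, and the "exynos4210.fimd" type-name string are assumptions for illustration only.

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "qapi/error.h"

/* Hypothetical board glue: point a display device at its scan-out memory. */
static void board_add_fimd(MemoryRegion *sysmem, hwaddr base, qemu_irq irq)
{
    DeviceState *dev = qdev_new("exynos4210.fimd");

    /* The link must be set before realize; otherwise realize fails with
     * "'framebuffer-memory' property was not set". */
    object_property_set_link(OBJECT(dev), "framebuffer-memory",
                             OBJECT(sysmem), &error_abort);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
}
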
MacfbNubusDeviceClass *ndc = NUBUS_MACFB_GET_CLASS(dev); @@ -770,7 +771,7 @@ static const VMStateDescription vmstate_macfb_sysbus = { .name = "macfb-sysbus", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(macfb, MacfbSysBusState, 1, vmstate_macfb, MacfbState), VMSTATE_END_OF_LIST() } @@ -789,7 +790,7 @@ static const VMStateDescription vmstate_macfb_nubus = { .name = "macfb-nubus", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(macfb, MacfbNubusState, 1, vmstate_macfb, MacfbState), VMSTATE_END_OF_LIST() } diff --git a/hw/display/meson.build b/hw/display/meson.build index 344dfe3d8c2..f93a69f70f4 100644 --- a/hw/display/meson.build +++ b/hw/display/meson.build @@ -69,8 +69,11 @@ if config_all_devices.has_key('CONFIG_VIRTIO_GPU') virtio_gpu_ss = ss.source_set() virtio_gpu_ss.add(when: 'CONFIG_VIRTIO_GPU', if_true: [files('virtio-gpu-base.c', 'virtio-gpu.c'), pixman]) - virtio_gpu_ss.add(when: 'CONFIG_LINUX', if_true: files('virtio-gpu-udmabuf.c'), - if_false: files('virtio-gpu-udmabuf-stubs.c')) + if host_os == 'linux' + virtio_gpu_ss.add(files('virtio-gpu-udmabuf.c')) + else + virtio_gpu_ss.add(files('virtio-gpu-udmabuf-stubs.c')) + endif virtio_gpu_ss.add(when: 'CONFIG_VHOST_USER_GPU', if_true: files('vhost-user-gpu.c')) hw_display_modules += {'virtio-gpu': virtio_gpu_ss} @@ -140,5 +143,4 @@ endif system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_lcdc.c')) -system_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-vga-stub.c')) modules += { 'hw-display': hw_display_modules } diff --git a/hw/display/pl110.c b/hw/display/pl110.c index 4bf15c1da51..7f145bbdbab 100644 --- a/hw/display/pl110.c +++ b/hw/display/pl110.c @@ -10,6 +10,7 @@ #include "qemu/osdep.h" #include "hw/irq.h" #include "hw/sysbus.h" +#include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "ui/console.h" #include "framebuffer.h" @@ -17,6 +18,7 @@ #include "qemu/timer.h" #include "qemu/log.h" #include "qemu/module.h" +#include "qapi/error.h" #include "qom/object.h" #define PL110_CR_EN 0x001 @@ -74,6 +76,7 @@ struct PL110State { uint32_t palette[256]; uint32_t raw_palette[128]; qemu_irq irq; + MemoryRegion *fbmem; }; static int vmstate_pl110_post_load(void *opaque, int version_id); @@ -83,7 +86,7 @@ static const VMStateDescription vmstate_pl110 = { .version_id = 2, .minimum_version_id = 1, .post_load = vmstate_pl110_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(version, PL110State), VMSTATE_UINT32_ARRAY(timing, PL110State, 4), VMSTATE_UINT32(cr, PL110State), @@ -210,7 +213,6 @@ static int pl110_enabled(PL110State *s) static void pl110_update_display(void *opaque) { PL110State *s = (PL110State *)opaque; - SysBusDevice *sbd; DisplaySurface *surface = qemu_console_surface(s->con); drawfn fn; int src_width; @@ -222,8 +224,6 @@ static void pl110_update_display(void *opaque) return; } - sbd = SYS_BUS_DEVICE(s); - if (s->cr & PL110_CR_BGR) bpp_offset = 0; else @@ -290,7 +290,7 @@ static void pl110_update_display(void *opaque) first = 0; if (s->invalidate) { framebuffer_update_memory_section(&s->fbsection, - sysbus_address_space(sbd), + s->fbmem, s->upbase, s->rows, src_width); } @@ -535,11 +535,22 @@ static const GraphicHwOps pl110_gfx_ops = { .gfx_update = pl110_update_display, }; +static Property pl110_properties[] = { + DEFINE_PROP_LINK("framebuffer-memory", PL110State, fbmem, + TYPE_MEMORY_REGION, MemoryRegion *), 
+ DEFINE_PROP_END_OF_LIST(), +}; + static void pl110_realize(DeviceState *dev, Error **errp) { PL110State *s = PL110(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + if (!s->fbmem) { + error_setg(errp, "'framebuffer-memory' property was not set"); + return; + } + memory_region_init_io(&s->iomem, OBJECT(s), &pl110_ops, s, "pl110", 0x1000); sysbus_init_mmio(sbd, &s->iomem); sysbus_init_irq(sbd, &s->irq); @@ -577,6 +588,7 @@ static void pl110_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); dc->vmsd = &vmstate_pl110; dc->realize = pl110_realize; + device_class_set_props(dc, pl110_properties); } static const TypeInfo pl110_info = { diff --git a/hw/display/pxa2xx_lcd.c b/hw/display/pxa2xx_lcd.c index eb83d882222..a9d0d981a08 100644 --- a/hw/display/pxa2xx_lcd.c +++ b/hw/display/pxa2xx_lcd.c @@ -1371,7 +1371,7 @@ static const VMStateDescription vmstate_dma_channel = { .name = "dma_channel", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(branch, struct DMAChannel), VMSTATE_UINT8(up, struct DMAChannel), VMSTATE_BUFFER(pbuffer, struct DMAChannel), @@ -1398,7 +1398,7 @@ static const VMStateDescription vmstate_pxa2xx_lcdc = { .version_id = 0, .minimum_version_id = 0, .post_load = pxa2xx_lcdc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(irqlevel, PXA2xxLCDState), VMSTATE_INT32(transp, PXA2xxLCDState), VMSTATE_UINT32_ARRAY(control, PXA2xxLCDState, 6), diff --git a/hw/display/qxl.c b/hw/display/qxl.c index 7bb00d68f57..7178dec85d9 100644 --- a/hw/display/qxl.c +++ b/hw/display/qxl.c @@ -2388,7 +2388,7 @@ static const VMStateDescription qxl_memslot = { .name = "qxl-memslot", .version_id = QXL_SAVE_VERSION, .minimum_version_id = QXL_SAVE_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(slot.mem_start, struct guest_slots), VMSTATE_UINT64(slot.mem_end, struct guest_slots), VMSTATE_UINT32(active, struct guest_slots), @@ -2400,7 +2400,7 @@ static const VMStateDescription qxl_surface = { .name = "qxl-surface", .version_id = QXL_SAVE_VERSION, .minimum_version_id = QXL_SAVE_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(width, QXLSurfaceCreate), VMSTATE_UINT32(height, QXLSurfaceCreate), VMSTATE_INT32(stride, QXLSurfaceCreate), @@ -2419,7 +2419,7 @@ static const VMStateDescription qxl_vmstate_monitors_config = { .version_id = 1, .minimum_version_id = 1, .needed = qxl_monitors_config_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(guest_monitors_config, PCIQXLDevice), VMSTATE_END_OF_LIST() }, @@ -2432,7 +2432,7 @@ static const VMStateDescription qxl_vmstate = { .pre_save = qxl_pre_save, .pre_load = qxl_pre_load, .post_load = qxl_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pci, PCIQXLDevice), VMSTATE_STRUCT(vga, PCIQXLDevice, 0, vmstate_vga_common, VGACommonState), VMSTATE_UINT32(shadow_rom.mode, PCIQXLDevice), @@ -2452,7 +2452,7 @@ static const VMStateDescription qxl_vmstate = { VMSTATE_UINT64(guest_cursor, PCIQXLDevice), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &qxl_vmstate_monitors_config, NULL } diff --git a/hw/display/qxl.h b/hw/display/qxl.h index fdac14edade..e0a85a5ca49 100644 --- a/hw/display/qxl.h +++ b/hw/display/qxl.h @@ -159,7 +159,7 @@ 
OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL) * * Use with care; by the time this function returns, the returned pointer is * not protected by RCU anymore. If the caller is not within an RCU critical - * section and does not hold the iothread lock, it must have other means of + * section and does not hold the BQL, it must have other means of * protecting the pointer, such as a reference to the region that includes * the incoming ram_addr_t. * diff --git a/hw/display/ramfb-standalone.c b/hw/display/ramfb-standalone.c index a96e7ebcd9f..20eab34ff41 100644 --- a/hw/display/ramfb-standalone.c +++ b/hw/display/ramfb-standalone.c @@ -54,7 +54,7 @@ static const VMStateDescription ramfb_dev_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = migrate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER(state, RAMFBStandaloneState, ramfb_vmstate, RAMFBState), VMSTATE_END_OF_LIST() } diff --git a/hw/display/ramfb.c b/hw/display/ramfb.c index 477ef7272ab..6086baf7a98 100644 --- a/hw/display/ramfb.c +++ b/hw/display/ramfb.c @@ -129,7 +129,7 @@ const VMStateDescription ramfb_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = ramfb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER_UNSAFE(cfg, RAMFBState, 0, sizeof(RAMFBCfg)), VMSTATE_END_OF_LIST() } diff --git a/hw/display/sii9022.c b/hw/display/sii9022.c index 664fd4046d8..60c3f785498 100644 --- a/hw/display/sii9022.c +++ b/hw/display/sii9022.c @@ -51,7 +51,7 @@ static const VMStateDescription vmstate_sii9022 = { .name = "sii9022", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(parent_obj, sii9022_state), VMSTATE_UINT8(ptr, sii9022_state), VMSTATE_BOOL(addr_byte, sii9022_state), diff --git a/hw/display/sm501.c b/hw/display/sm501.c index 5b4e4509e19..26dc8170d89 100644 --- a/hw/display/sm501.c +++ b/hw/display/sm501.c @@ -1940,7 +1940,7 @@ static const VMStateDescription vmstate_sm501_state = { .name = "sm501-state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(local_mem_size_index, SM501State), VMSTATE_UINT32(system_control, SM501State), VMSTATE_UINT32(misc_control, SM501State), @@ -2071,7 +2071,7 @@ static const VMStateDescription vmstate_sm501_sysbus = { .name = TYPE_SYSBUS_SM501, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, SM501SysBusState, 1, vmstate_sm501_state, SM501State), VMSTATE_END_OF_LIST() @@ -2161,7 +2161,7 @@ static const VMStateDescription vmstate_sm501_pci = { .name = TYPE_PCI_SM501, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, SM501PCIState), VMSTATE_STRUCT(state, SM501PCIState, 1, vmstate_sm501_state, SM501State), diff --git a/hw/display/ssd0303.c b/hw/display/ssd0303.c index 32b32a3044e..e292cff44ea 100644 --- a/hw/display/ssd0303.c +++ b/hw/display/ssd0303.c @@ -281,7 +281,7 @@ static const VMStateDescription vmstate_ssd0303 = { .name = "ssd0303_oled", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(row, ssd0303_state), VMSTATE_INT32(col, ssd0303_state), VMSTATE_INT32(start_line, ssd0303_state), diff --git a/hw/display/ssd0323.c b/hw/display/ssd0323.c index 09b1bbed0a4..96cf0dc662b 100644 --- 
a/hw/display/ssd0323.c +++ b/hw/display/ssd0323.c @@ -324,7 +324,7 @@ static const VMStateDescription vmstate_ssd0323 = { .version_id = 2, .minimum_version_id = 2, .post_load = ssd0323_post_load, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(cmd_len, ssd0323_state), VMSTATE_INT32(cmd, ssd0323_state), VMSTATE_INT32_ARRAY(cmd_data, ssd0323_state, 8), diff --git a/hw/display/tcx.c b/hw/display/tcx.c index 1b27b64f6d1..99507e76388 100644 --- a/hw/display/tcx.c +++ b/hw/display/tcx.c @@ -344,7 +344,7 @@ static const VMStateDescription vmstate_tcx = { .version_id = 4, .minimum_version_id = 4, .post_load = vmstate_tcx_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(height, TCXState), VMSTATE_UINT16(width, TCXState), VMSTATE_UINT16(depth, TCXState), diff --git a/hw/display/vga-helpers.h b/hw/display/vga-helpers.h index 10e9cfd40a0..2029b61791b 100644 --- a/hw/display/vga-helpers.h +++ b/hw/display/vga-helpers.h @@ -98,17 +98,22 @@ static void vga_draw_glyph9(uint8_t *d, int linesize, /* * 4 color mode */ -static void vga_draw_line2(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line2(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { uint32_t plane_mask, *palette, data, v; int x; palette = vga->last_palette; plane_mask = mask16[vga->ar[VGA_ATC_PLANE_ENABLE] & 0xf]; + hpel &= 7; + if (hpel) { + width += 8; + d = vga->panning_buf; + } width >>= 3; for(x = 0; x < width; x++) { - data = vga_read_dword_le(vga, addr); + data = vga_read_dword_le(vga, addr & (VGA_VRAM_SIZE - 1)); data &= plane_mask; v = expand2[GET_PLANE(data, 0)]; v |= expand2[GET_PLANE(data, 2)] << 2; @@ -126,6 +131,7 @@ static void vga_draw_line2(VGACommonState *vga, uint8_t *d, d += 32; addr += 4; } + return hpel ? vga->panning_buf + 4 * hpel : NULL; } #define PUT_PIXEL2(d, n, v) \ @@ -134,17 +140,22 @@ static void vga_draw_line2(VGACommonState *vga, uint8_t *d, /* * 4 color mode, dup2 horizontal */ -static void vga_draw_line2d2(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line2d2(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { uint32_t plane_mask, *palette, data, v; int x; palette = vga->last_palette; plane_mask = mask16[vga->ar[VGA_ATC_PLANE_ENABLE] & 0xf]; + hpel &= 7; + if (hpel) { + width += 8; + d = vga->panning_buf; + } width >>= 3; for(x = 0; x < width; x++) { - data = vga_read_dword_le(vga, addr); + data = vga_read_dword_le(vga, addr & (VGA_VRAM_SIZE - 1)); data &= plane_mask; v = expand2[GET_PLANE(data, 0)]; v |= expand2[GET_PLANE(data, 2)] << 2; @@ -162,22 +173,28 @@ static void vga_draw_line2d2(VGACommonState *vga, uint8_t *d, d += 64; addr += 4; } + return hpel ? 
vga->panning_buf + 8 * hpel : NULL; } /* * 16 color mode */ -static void vga_draw_line4(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line4(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { uint32_t plane_mask, data, v, *palette; int x; palette = vga->last_palette; plane_mask = mask16[vga->ar[VGA_ATC_PLANE_ENABLE] & 0xf]; + hpel &= 7; + if (hpel) { + width += 8; + d = vga->panning_buf; + } width >>= 3; for(x = 0; x < width; x++) { - data = vga_read_dword_le(vga, addr); + data = vga_read_dword_le(vga, addr & (VGA_VRAM_SIZE - 1)); data &= plane_mask; v = expand4[GET_PLANE(data, 0)]; v |= expand4[GET_PLANE(data, 1)] << 1; @@ -194,22 +211,28 @@ static void vga_draw_line4(VGACommonState *vga, uint8_t *d, d += 32; addr += 4; } + return hpel ? vga->panning_buf + 4 * hpel : NULL; } /* * 16 color mode, dup2 horizontal */ -static void vga_draw_line4d2(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line4d2(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { uint32_t plane_mask, data, v, *palette; int x; palette = vga->last_palette; plane_mask = mask16[vga->ar[VGA_ATC_PLANE_ENABLE] & 0xf]; + hpel &= 7; + if (hpel) { + width += 8; + d = vga->panning_buf; + } width >>= 3; for(x = 0; x < width; x++) { - data = vga_read_dword_le(vga, addr); + data = vga_read_dword_le(vga, addr & (VGA_VRAM_SIZE - 1)); data &= plane_mask; v = expand4[GET_PLANE(data, 0)]; v |= expand4[GET_PLANE(data, 1)] << 1; @@ -226,6 +249,7 @@ static void vga_draw_line4d2(VGACommonState *vga, uint8_t *d, d += 64; addr += 4; } + return hpel ? vga->panning_buf + 8 * hpel : NULL; } /* @@ -233,15 +257,33 @@ static void vga_draw_line4d2(VGACommonState *vga, uint8_t *d, * * XXX: add plane_mask support (never used in standard VGA modes) */ -static void vga_draw_line8d2(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line8d2(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { uint32_t *palette; int x; palette = vga->last_palette; + hpel = (hpel >> 1) & 3; + + /* For 256 color modes, we can adjust the source address and write directly + * to the destination, even if horizontal pel panning is active. However, + * the loop below assumes that the address does not wrap in the middle of a + * plane. If that happens... + */ + if (addr + (width >> 3) * 4 < VGA_VRAM_SIZE) { + addr += hpel * 4; + hpel = 0; + } + + /* ... use the panning buffer as in planar modes. */ + if (hpel) { + width += 8; + d = vga->panning_buf; + } width >>= 3; for(x = 0; x < width; x++) { + addr &= VGA_VRAM_SIZE - 1; PUT_PIXEL2(d, 0, palette[vga_read_byte(vga, addr + 0)]); PUT_PIXEL2(d, 1, palette[vga_read_byte(vga, addr + 1)]); PUT_PIXEL2(d, 2, palette[vga_read_byte(vga, addr + 2)]); @@ -249,6 +291,7 @@ static void vga_draw_line8d2(VGACommonState *vga, uint8_t *d, d += 32; addr += 4; } + return hpel ? 
vga->panning_buf + 8 * hpel : NULL; } /* @@ -256,13 +299,18 @@ static void vga_draw_line8d2(VGACommonState *vga, uint8_t *d, * * XXX: add plane_mask support (never used in standard VGA modes) */ -static void vga_draw_line8(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line8(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { uint32_t *palette; int x; palette = vga->last_palette; + hpel = (hpel >> 1) & 3; + if (hpel) { + width += 8; + d = vga->panning_buf; + } width >>= 3; for(x = 0; x < width; x++) { ((uint32_t *)d)[0] = palette[vga_read_byte(vga, addr + 0)]; @@ -276,13 +324,14 @@ static void vga_draw_line8(VGACommonState *vga, uint8_t *d, d += 32; addr += 8; } + return hpel ? vga->panning_buf + 4 * hpel : NULL; } /* * 15 bit color */ -static void vga_draw_line15_le(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line15_le(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { int w; uint32_t v, r, g, b; @@ -297,10 +346,11 @@ static void vga_draw_line15_le(VGACommonState *vga, uint8_t *d, addr += 2; d += 4; } while (--w != 0); + return NULL; } -static void vga_draw_line15_be(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line15_be(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { int w; uint32_t v, r, g, b; @@ -315,13 +365,14 @@ static void vga_draw_line15_be(VGACommonState *vga, uint8_t *d, addr += 2; d += 4; } while (--w != 0); + return NULL; } /* * 16 bit color */ -static void vga_draw_line16_le(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line16_le(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { int w; uint32_t v, r, g, b; @@ -336,10 +387,11 @@ static void vga_draw_line16_le(VGACommonState *vga, uint8_t *d, addr += 2; d += 4; } while (--w != 0); + return NULL; } -static void vga_draw_line16_be(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line16_be(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { int w; uint32_t v, r, g, b; @@ -354,13 +406,14 @@ static void vga_draw_line16_be(VGACommonState *vga, uint8_t *d, addr += 2; d += 4; } while (--w != 0); + return NULL; } /* * 24 bit color */ -static void vga_draw_line24_le(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line24_le(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { int w; uint32_t r, g, b; @@ -374,10 +427,11 @@ static void vga_draw_line24_le(VGACommonState *vga, uint8_t *d, addr += 3; d += 4; } while (--w != 0); + return NULL; } -static void vga_draw_line24_be(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line24_be(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { int w; uint32_t r, g, b; @@ -391,13 +445,14 @@ static void vga_draw_line24_be(VGACommonState *vga, uint8_t *d, addr += 3; d += 4; } while (--w != 0); + return NULL; } /* * 32 bit color */ -static void vga_draw_line32_le(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void *vga_draw_line32_le(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { int w; uint32_t r, g, b; @@ -411,10 +466,11 @@ static void vga_draw_line32_le(VGACommonState *vga, uint8_t *d, addr += 4; d += 4; } while (--w != 0); + return NULL; } -static void vga_draw_line32_be(VGACommonState *vga, uint8_t *d, - uint32_t addr, int width) +static void 
*vga_draw_line32_be(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width, int hpel) { int w; uint32_t r, g, b; @@ -428,4 +484,5 @@ static void vga_draw_line32_be(VGACommonState *vga, uint8_t *d, addr += 4; d += 4; } while (--w != 0); + return NULL; } diff --git a/hw/display/vga-pci.c b/hw/display/vga-pci.c index e4f45b4476d..2d8adce5da6 100644 --- a/hw/display/vga-pci.c +++ b/hw/display/vga-pci.c @@ -61,7 +61,7 @@ static const VMStateDescription vmstate_vga_pci = { .name = "vga", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCIVGAState), VMSTATE_STRUCT(vga, PCIVGAState, 0, vmstate_vga_common, VGACommonState), VMSTATE_END_OF_LIST() diff --git a/hw/display/vga.c b/hw/display/vga.c index 37557c3442a..77f59e8c113 100644 --- a/hw/display/vga.c +++ b/hw/display/vga.c @@ -47,6 +47,16 @@ bool have_vga = true; /* 16 state changes per vertical frame @60 Hz */ #define VGA_TEXT_CURSOR_PERIOD_MS (1000 * 2 * 16 / 60) +/* Address mask for non-VESA modes. */ +#define VGA_VRAM_SIZE (256 * KiB) + +/* This value corresponds to a shift of zero pixels + * in 9-dot text mode. In other modes, bit 3 is undefined; + * we just ignore it, so that 8 corresponds to zero pixels + * in all modes. + */ +#define VGA_HPEL_NEUTRAL 8 + /* * Video Graphics Array (VGA) * @@ -90,58 +100,27 @@ const uint8_t gr_mask[16] = { 0x00, /* 0x0f */ }; -#define cbswap_32(__x) \ -((uint32_t)( \ - (((uint32_t)(__x) & (uint32_t)0x000000ffUL) << 24) | \ - (((uint32_t)(__x) & (uint32_t)0x0000ff00UL) << 8) | \ - (((uint32_t)(__x) & (uint32_t)0x00ff0000UL) >> 8) | \ - (((uint32_t)(__x) & (uint32_t)0xff000000UL) >> 24) )) - -#if HOST_BIG_ENDIAN -#define PAT(x) cbswap_32(x) -#else -#define PAT(x) (x) -#endif - -#if HOST_BIG_ENDIAN -#define BIG 1 -#else -#define BIG 0 -#endif - -#if HOST_BIG_ENDIAN -#define GET_PLANE(data, p) (((data) >> (24 - (p) * 8)) & 0xff) -#else -#define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff) -#endif +#define GET_PLANE(data, p) ((cpu_to_le32(data) >> ((p) * 8)) & 0xff) static const uint32_t mask16[16] = { - PAT(0x00000000), - PAT(0x000000ff), - PAT(0x0000ff00), - PAT(0x0000ffff), - PAT(0x00ff0000), - PAT(0x00ff00ff), - PAT(0x00ffff00), - PAT(0x00ffffff), - PAT(0xff000000), - PAT(0xff0000ff), - PAT(0xff00ff00), - PAT(0xff00ffff), - PAT(0xffff0000), - PAT(0xffff00ff), - PAT(0xffffff00), - PAT(0xffffffff), + const_le32(0x00000000), + const_le32(0x000000ff), + const_le32(0x0000ff00), + const_le32(0x0000ffff), + const_le32(0x00ff0000), + const_le32(0x00ff00ff), + const_le32(0x00ffff00), + const_le32(0x00ffffff), + const_le32(0xff000000), + const_le32(0xff0000ff), + const_le32(0xff00ff00), + const_le32(0xff00ffff), + const_le32(0xffff0000), + const_le32(0xffff00ff), + const_le32(0xffffff00), + const_le32(0xffffffff), }; -#undef PAT - -#if HOST_BIG_ENDIAN -#define PAT(x) (x) -#else -#define PAT(x) cbswap_32(x) -#endif - static uint32_t expand4[256]; static uint16_t expand2[256]; static uint8_t expand4to8[16]; @@ -836,45 +815,62 @@ uint32_t vga_mem_readb(VGACommonState *s, hwaddr addr) } if (sr(s, VGA_SEQ_MEMORY_MODE) & VGA_SR04_CHN_4M) { - /* chain 4 mode : simplest access */ - assert(addr < s->vram_size); - ret = s->vram_ptr[addr]; - } else if (s->gr[VGA_GFX_MODE] & 0x10) { + /* chain4 mode */ + plane = addr & 3; + addr &= ~3; + } else if (s->gr[VGA_GFX_MODE] & VGA_GR05_HOST_ODD_EVEN) { /* odd/even mode (aka text mode mapping) */ plane = (s->gr[VGA_GFX_PLANE_READ] & 2) | (addr & 1); - addr = ((addr & ~1) << 1) | plane; - if 
(addr >= s->vram_size) { - return 0xff; - } - ret = s->vram_ptr[addr]; } else { /* standard VGA latched access */ - if (addr * sizeof(uint32_t) >= s->vram_size) { - return 0xff; - } - s->latch = ((uint32_t *)s->vram_ptr)[addr]; + plane = s->gr[VGA_GFX_PLANE_READ]; + } - if (!(s->gr[VGA_GFX_MODE] & 0x08)) { - /* read mode 0 */ - plane = s->gr[VGA_GFX_PLANE_READ]; - ret = GET_PLANE(s->latch, plane); - } else { - /* read mode 1 */ - ret = (s->latch ^ mask16[s->gr[VGA_GFX_COMPARE_VALUE]]) & - mask16[s->gr[VGA_GFX_COMPARE_MASK]]; - ret |= ret >> 16; - ret |= ret >> 8; - ret = (~ret) & 0xff; - } + if (s->gr[VGA_GFX_MISC] & VGA_GR06_CHAIN_ODD_EVEN) { + addr &= ~1; + } + + /* Doubleword/word mode. See comment in vga_mem_writeb */ + if (s->cr[VGA_CRTC_UNDERLINE] & VGA_CR14_DW) { + addr >>= 2; + } else if ((s->gr[VGA_GFX_MODE] & VGA_GR05_HOST_ODD_EVEN) && + (s->cr[VGA_CRTC_MODE] & VGA_CR17_WORD_BYTE) == 0) { + addr >>= 1; } + + if (addr * sizeof(uint32_t) >= s->vram_size) { + return 0xff; + } + + if (s->sr[VGA_SEQ_MEMORY_MODE] & VGA_SR04_CHN_4M) { + /* chain 4 mode: simplified access (but it should use the same + * algorithms as below, see e.g. vga_mem_writeb's plane mask check). + */ + return s->vram_ptr[(addr << 2) | plane]; + } + + s->latch = ((uint32_t *)s->vram_ptr)[addr]; + if (!(s->gr[VGA_GFX_MODE] & 0x08)) { + /* read mode 0 */ + ret = GET_PLANE(s->latch, plane); + } else { + /* read mode 1 */ + ret = (s->latch ^ mask16[s->gr[VGA_GFX_COMPARE_VALUE]]) & + mask16[s->gr[VGA_GFX_COMPARE_MASK]]; + ret |= ret >> 16; + ret |= ret >> 8; + ret = (~ret) & 0xff; + } + return ret; } /* called for accesses between 0xa0000 and 0xc0000 */ void vga_mem_writeb(VGACommonState *s, hwaddr addr, uint32_t val) { - int memory_map_mode, plane, write_mode, b, func_select, mask; + int memory_map_mode, write_mode, b, func_select, mask; uint32_t write_mask, bit_mask, set_mask; + int plane = 0; #ifdef DEBUG_VGA_MEM printf("vga: [0x" HWADDR_FMT_plx "] = 0x%02x\n", addr, val); @@ -903,117 +899,136 @@ void vga_mem_writeb(VGACommonState *s, hwaddr addr, uint32_t val) break; } + mask = sr(s, VGA_SEQ_PLANE_WRITE); if (sr(s, VGA_SEQ_MEMORY_MODE) & VGA_SR04_CHN_4M) { /* chain 4 mode : simplest access */ plane = addr & 3; - mask = (1 << plane); - if (sr(s, VGA_SEQ_PLANE_WRITE) & mask) { - assert(addr < s->vram_size); - s->vram_ptr[addr] = val; -#ifdef DEBUG_VGA_MEM - printf("vga: chain4: [0x" HWADDR_FMT_plx "]\n", addr); -#endif - s->plane_updated |= mask; /* only used to detect font change */ - memory_region_set_dirty(&s->vram, addr, 1); + mask &= (1 << plane); + addr &= ~3; + } else { + if ((sr(s, VGA_SEQ_MEMORY_MODE) & VGA_SR04_SEQ_MODE) == 0) { + mask &= (addr & 1) ? 0x0a : 0x05; } - } else if (s->gr[VGA_GFX_MODE] & 0x10) { - /* odd/even mode (aka text mode mapping) */ - plane = (s->gr[VGA_GFX_PLANE_READ] & 2) | (addr & 1); - mask = (1 << plane); - if (sr(s, VGA_SEQ_PLANE_WRITE) & mask) { - addr = ((addr & ~1) << 1) | plane; - if (addr >= s->vram_size) { - return; - } - s->vram_ptr[addr] = val; + if (s->gr[VGA_GFX_MISC] & VGA_GR06_CHAIN_ODD_EVEN) { + addr &= ~1; + } + } + + /* Doubleword/word mode. These should be honored when displaying, + * not when reading/writing to memory! For example, chain4 modes + * use double-word mode and, on real hardware, would fetch bytes + * 0,1,2,3, 16,17,18,19, 32,33,34,35, etc. Text modes use word + * mode and, on real hardware, would fetch bytes 0,1, 8,9, etc. + * + * QEMU instead shifted addresses on memory accesses because it + * allows more optimizations (e.g. 
chain4_alias) and simplifies + * the draw_line handlers. Unfortunately, there is one case where + * the difference shows. When fetching font data, accesses are + * always in consecutive bytes, even if the text/attribute pairs + * are done in word mode. Hence, doing a right shift when operating + * on font data is wrong. So check the odd/even mode bits together with + * word mode bit. The odd/even read bit is 0 when reading font data, + * and the odd/even write bit is 1 when writing it. + */ + if (s->cr[VGA_CRTC_UNDERLINE] & VGA_CR14_DW) { + addr >>= 2; + } else if ((sr(s, VGA_SEQ_MEMORY_MODE) & VGA_SR04_SEQ_MODE) == 0 && + (s->cr[VGA_CRTC_MODE] & VGA_CR17_WORD_BYTE) == 0) { + addr >>= 1; + } + + if (addr * sizeof(uint32_t) >= s->vram_size) { + return; + } + + if (sr(s, VGA_SEQ_MEMORY_MODE) & VGA_SR04_CHN_4M) { + if (mask) { + s->vram_ptr[(addr << 2) | plane] = val; #ifdef DEBUG_VGA_MEM - printf("vga: odd/even: [0x" HWADDR_FMT_plx "]\n", addr); + printf("vga: chain4: [0x" HWADDR_FMT_plx "]\n", addr); #endif s->plane_updated |= mask; /* only used to detect font change */ memory_region_set_dirty(&s->vram, addr, 1); } - } else { - /* standard VGA latched access */ - write_mode = s->gr[VGA_GFX_MODE] & 3; - switch(write_mode) { - default: - case 0: - /* rotate */ - b = s->gr[VGA_GFX_DATA_ROTATE] & 7; - val = ((val >> b) | (val << (8 - b))) & 0xff; - val |= val << 8; - val |= val << 16; - - /* apply set/reset mask */ - set_mask = mask16[s->gr[VGA_GFX_SR_ENABLE]]; - val = (val & ~set_mask) | - (mask16[s->gr[VGA_GFX_SR_VALUE]] & set_mask); - bit_mask = s->gr[VGA_GFX_BIT_MASK]; - break; - case 1: - val = s->latch; - goto do_write; - case 2: - val = mask16[val & 0x0f]; - bit_mask = s->gr[VGA_GFX_BIT_MASK]; - break; - case 3: - /* rotate */ - b = s->gr[VGA_GFX_DATA_ROTATE] & 7; - val = (val >> b) | (val << (8 - b)); + return; + } - bit_mask = s->gr[VGA_GFX_BIT_MASK] & val; - val = mask16[s->gr[VGA_GFX_SR_VALUE]]; - break; - } + /* standard VGA latched access */ + write_mode = s->gr[VGA_GFX_MODE] & 3; + switch(write_mode) { + default: + case 0: + /* rotate */ + b = s->gr[VGA_GFX_DATA_ROTATE] & 7; + val = ((val >> b) | (val << (8 - b))) & 0xff; + val |= val << 8; + val |= val << 16; + + /* apply set/reset mask */ + set_mask = mask16[s->gr[VGA_GFX_SR_ENABLE]]; + val = (val & ~set_mask) | + (mask16[s->gr[VGA_GFX_SR_VALUE]] & set_mask); + bit_mask = s->gr[VGA_GFX_BIT_MASK]; + break; + case 1: + val = s->latch; + goto do_write; + case 2: + val = mask16[val & 0x0f]; + bit_mask = s->gr[VGA_GFX_BIT_MASK]; + break; + case 3: + /* rotate */ + b = s->gr[VGA_GFX_DATA_ROTATE] & 7; + val = (val >> b) | (val << (8 - b)); - /* apply logical operation */ - func_select = s->gr[VGA_GFX_DATA_ROTATE] >> 3; - switch(func_select) { - case 0: - default: - /* nothing to do */ - break; - case 1: - /* and */ - val &= s->latch; - break; - case 2: - /* or */ - val |= s->latch; - break; - case 3: - /* xor */ - val ^= s->latch; - break; - } + bit_mask = s->gr[VGA_GFX_BIT_MASK] & val; + val = mask16[s->gr[VGA_GFX_SR_VALUE]]; + break; + } - /* apply bit mask */ - bit_mask |= bit_mask << 8; - bit_mask |= bit_mask << 16; - val = (val & bit_mask) | (s->latch & ~bit_mask); - - do_write: - /* mask data according to sr[2] */ - mask = sr(s, VGA_SEQ_PLANE_WRITE); - s->plane_updated |= mask; /* only used to detect font change */ - write_mask = mask16[mask]; - if (addr * sizeof(uint32_t) >= s->vram_size) { - return; - } - ((uint32_t *)s->vram_ptr)[addr] = - (((uint32_t *)s->vram_ptr)[addr] & ~write_mask) | - (val & write_mask); + /* apply 
logical operation */ + func_select = s->gr[VGA_GFX_DATA_ROTATE] >> 3; + switch(func_select) { + case 0: + default: + /* nothing to do */ + break; + case 1: + /* and */ + val &= s->latch; + break; + case 2: + /* or */ + val |= s->latch; + break; + case 3: + /* xor */ + val ^= s->latch; + break; + } + + /* apply bit mask */ + bit_mask |= bit_mask << 8; + bit_mask |= bit_mask << 16; + val = (val & bit_mask) | (s->latch & ~bit_mask); + +do_write: + /* mask data according to sr[2] */ + s->plane_updated |= mask; /* only used to detect font change */ + write_mask = mask16[mask]; + ((uint32_t *)s->vram_ptr)[addr] = + (((uint32_t *)s->vram_ptr)[addr] & ~write_mask) | + (val & write_mask); #ifdef DEBUG_VGA_MEM - printf("vga: latch: [0x" HWADDR_FMT_plx "] mask=0x%08x val=0x%08x\n", - addr * 4, write_mask, val); + printf("vga: latch: [0x" HWADDR_FMT_plx "] mask=0x%08x val=0x%08x\n", + addr * 4, write_mask, val); #endif - memory_region_set_dirty(&s->vram, addr << 2, sizeof(uint32_t)); - } + memory_region_set_dirty(&s->vram, addr << 2, sizeof(uint32_t)); } -typedef void vga_draw_line_func(VGACommonState *s1, uint8_t *d, - uint32_t srcaddr, int width); +typedef void *vga_draw_line_func(VGACommonState *s1, uint8_t *d, + uint32_t srcaddr, int width, int hpel); #include "vga-access.h" #include "vga-helpers.h" @@ -1073,52 +1088,45 @@ static int update_palette256(VGACommonState *s) return full_update; } -static void vga_get_offsets(VGACommonState *s, - uint32_t *pline_offset, - uint32_t *pstart_addr, - uint32_t *pline_compare) +static void vga_get_params(VGACommonState *s, + VGADisplayParams *params) { - uint32_t start_addr, line_offset, line_compare; - if (vbe_enabled(s)) { - line_offset = s->vbe_line_offset; - start_addr = s->vbe_start_addr; - line_compare = 65535; + params->line_offset = s->vbe_line_offset; + params->start_addr = s->vbe_start_addr; + params->line_compare = 65535; + params->hpel = VGA_HPEL_NEUTRAL; + params->hpel_split = false; } else { /* compute line_offset in bytes */ - line_offset = s->cr[VGA_CRTC_OFFSET]; - line_offset <<= 3; + params->line_offset = s->cr[VGA_CRTC_OFFSET] << 3; /* starting address */ - start_addr = s->cr[VGA_CRTC_START_LO] | + params->start_addr = s->cr[VGA_CRTC_START_LO] | (s->cr[VGA_CRTC_START_HI] << 8); /* line compare */ - line_compare = s->cr[VGA_CRTC_LINE_COMPARE] | + params->line_compare = s->cr[VGA_CRTC_LINE_COMPARE] | ((s->cr[VGA_CRTC_OVERFLOW] & 0x10) << 4) | ((s->cr[VGA_CRTC_MAX_SCAN] & 0x40) << 3); + + params->hpel = s->ar[VGA_ATC_PEL]; + params->hpel_split = s->ar[VGA_ATC_MODE] & 0x20; } - *pline_offset = line_offset; - *pstart_addr = start_addr; - *pline_compare = line_compare; } /* update start_addr and line_offset. 
Return TRUE if modified */ static int update_basic_params(VGACommonState *s) { int full_update; - uint32_t start_addr, line_offset, line_compare; + VGADisplayParams current; full_update = 0; - s->get_offsets(s, &line_offset, &start_addr, &line_compare); + s->get_params(s, &current); - if (line_offset != s->line_offset || - start_addr != s->start_addr || - line_compare != s->line_compare) { - s->line_offset = line_offset; - s->start_addr = start_addr; - s->line_compare = line_compare; + if (memcmp(&current, &s->params, sizeof(current))) { + s->params = current; full_update = 1; } return full_update; @@ -1219,7 +1227,7 @@ static void vga_draw_text(VGACommonState *s, int full_update) } full_update |= update_basic_params(s); - line_offset = s->line_offset; + line_offset = s->params.line_offset; vga_get_text_resolution(s, &width, &height, &cw, &cheight); if ((height * width) <= 1) { @@ -1258,7 +1266,7 @@ static void vga_draw_text(VGACommonState *s, int full_update) } cursor_offset = ((s->cr[VGA_CRTC_CURSOR_HI] << 8) | - s->cr[VGA_CRTC_CURSOR_LO]) - s->start_addr; + s->cr[VGA_CRTC_CURSOR_LO]) - s->params.start_addr; if (cursor_offset != s->cursor_offset || s->cr[VGA_CRTC_CURSOR_START] != s->cursor_start || s->cr[VGA_CRTC_CURSOR_END] != s->cursor_end) { @@ -1272,7 +1280,7 @@ static void vga_draw_text(VGACommonState *s, int full_update) s->cursor_start = s->cr[VGA_CRTC_CURSOR_START]; s->cursor_end = s->cr[VGA_CRTC_CURSOR_END]; } - cursor_ptr = s->vram_ptr + (s->start_addr + cursor_offset) * 4; + cursor_ptr = s->vram_ptr + (s->params.start_addr + cursor_offset) * 4; if (now >= s->cursor_blink_time) { s->cursor_blink_time = now + VGA_TEXT_CURSOR_PERIOD_MS / 2; s->cursor_visible_phase = !s->cursor_visible_phase; @@ -1282,7 +1290,7 @@ static void vga_draw_text(VGACommonState *s, int full_update) linesize = surface_stride(surface); ch_attr_ptr = s->last_ch_attr; line = 0; - offset = s->start_addr * 4; + offset = s->params.start_addr * 4; for(cy = 0; cy < height; cy++) { d1 = dest; src = s->vram_ptr + offset; @@ -1362,7 +1370,7 @@ static void vga_draw_text(VGACommonState *s, int full_update) dest += linesize * cheight; line1 = line + cheight; offset += line_offset; - if (line < s->line_compare && line1 >= s->line_compare) { + if (line < s->params.line_compare && line1 >= s->params.line_compare) { offset = 0; } line = line1; @@ -1475,6 +1483,7 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) ram_addr_t page0, page1, region_start, region_end; DirtyBitmapSnapshot *snap = NULL; int disp_width, multi_scan, multi_run; + int hpel; uint8_t *d; uint32_t v, addr1, addr; vga_draw_line_func *vga_draw_line = NULL; @@ -1492,31 +1501,6 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) disp_width = width; depth = s->get_bpp(s); - region_start = (s->start_addr * 4); - region_end = region_start + (ram_addr_t)s->line_offset * height; - region_end += width * depth / 8; /* scanline length */ - region_end -= s->line_offset; - if (region_end > s->vbe_size || depth == 0 || depth == 15) { - /* - * We land here on: - * - wraps around (can happen with cirrus vbe modes) - * - depth == 0 (256 color palette video mode) - * - depth == 15 - * - * Take the safe and slow route: - * - create a dirty bitmap snapshot for all vga memory. - * - force shadowing (so all vga memory access goes - * through vga_read_*() helpers). - * - * Given this affects only vga features which are pretty much - * unused by modern guests there should be no performance - * impact. 
- */ - region_start = 0; - region_end = s->vbe_size; - force_shadow = true; - } - /* bits 5-6: 0 = 16-color mode, 1 = 4-color mode, 2 = 256-color mode. */ shift_control = (s->gr[VGA_GFX_MODE] >> 5) & 3; double_scan = (s->cr[VGA_CRTC_MAX_SCAN] >> 7); @@ -1537,15 +1521,88 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) } if (shift_control == 0) { + full_update |= update_palette16(s); if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { disp_width <<= 1; + v = VGA_DRAW_LINE4D2; + } else { + v = VGA_DRAW_LINE4; } + bits = 4; + } else if (shift_control == 1) { + full_update |= update_palette16(s); if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { disp_width <<= 1; + v = VGA_DRAW_LINE2D2; + } else { + v = VGA_DRAW_LINE2; + } + bits = 4; + + } else { + switch (depth) { + default: + case 0: + full_update |= update_palette256(s); + v = VGA_DRAW_LINE8D2; + bits = 4; + break; + case 8: + full_update |= update_palette256(s); + v = VGA_DRAW_LINE8; + bits = 8; + break; + case 15: + v = s->big_endian_fb ? VGA_DRAW_LINE15_BE : VGA_DRAW_LINE15_LE; + bits = 16; + break; + case 16: + v = s->big_endian_fb ? VGA_DRAW_LINE16_BE : VGA_DRAW_LINE16_LE; + bits = 16; + break; + case 24: + v = s->big_endian_fb ? VGA_DRAW_LINE24_BE : VGA_DRAW_LINE24_LE; + bits = 24; + break; + case 32: + v = s->big_endian_fb ? VGA_DRAW_LINE32_BE : VGA_DRAW_LINE32_LE; + bits = 32; + break; } } + /* Horizontal pel panning bit 3 is only used in text mode. */ + hpel = bits <= 8 ? s->params.hpel & 7 : 0; + + region_start = (s->params.start_addr * 4); + region_end = region_start + (ram_addr_t)s->params.line_offset * height; + region_end += width * depth / 8; /* scanline length */ + region_end -= s->params.line_offset; + if (hpel) { + region_end += 4; + } + if (region_end > s->vbe_size || depth == 0 || depth == 15) { + /* + * We land here on: + * - wraps around (can happen with cirrus vbe modes) + * - depth == 0 (256 color palette video mode) + * - depth == 15 + * + * Take the safe and slow route: + * - create a dirty bitmap snapshot for all vga memory. + * - force shadowing (so all vga memory access goes + * through vga_read_*() helpers). + * + * Given this affects only vga features which are pretty much + * unused by modern guests there should be no performance + * impact. + */ + region_start = 0; + region_end = s->vbe_size; + force_shadow = true; + } + /* * Check whether we can share the surface with the backend * or whether we need a shadow surface. We share native @@ -1560,7 +1617,7 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) share_surface = false; } - if (s->line_offset != s->last_line_offset || + if (s->params.line_offset != s->last_line_offset || disp_width != s->last_width || height != s->last_height || s->last_depth != depth || @@ -1571,12 +1628,15 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) s->last_scr_height = height; s->last_width = disp_width; s->last_height = height; - s->last_line_offset = s->line_offset; + s->last_line_offset = s->params.line_offset; s->last_depth = depth; s->last_byteswap = byteswap; + /* 16 extra pixels are needed for double-width planar modes. 
*/ + s->panning_buf = g_realloc(s->panning_buf, + (disp_width + 16) * sizeof(uint32_t)); full_update = 1; } - if (surface_data(surface) != s->vram_ptr + (s->start_addr * 4) + if (surface_data(surface) != s->vram_ptr + (s->params.start_addr * 4) && is_buffer_shared(surface)) { /* base address changed (page flip) -> shared display surfaces * must be updated with the new base address */ @@ -1586,8 +1646,8 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) if (full_update) { if (share_surface) { surface = qemu_create_displaysurface_from(disp_width, - height, format, s->line_offset, - s->vram_ptr + (s->start_addr * 4)); + height, format, s->params.line_offset, + s->vram_ptr + (s->params.start_addr * 4)); dpy_gfx_replace_surface(s->con, surface); } else { qemu_console_resize(s->con, disp_width, height); @@ -1595,53 +1655,6 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) } } - if (shift_control == 0) { - full_update |= update_palette16(s); - if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { - v = VGA_DRAW_LINE4D2; - } else { - v = VGA_DRAW_LINE4; - } - bits = 4; - } else if (shift_control == 1) { - full_update |= update_palette16(s); - if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { - v = VGA_DRAW_LINE2D2; - } else { - v = VGA_DRAW_LINE2; - } - bits = 4; - } else { - switch(s->get_bpp(s)) { - default: - case 0: - full_update |= update_palette256(s); - v = VGA_DRAW_LINE8D2; - bits = 4; - break; - case 8: - full_update |= update_palette256(s); - v = VGA_DRAW_LINE8; - bits = 8; - break; - case 15: - v = s->big_endian_fb ? VGA_DRAW_LINE15_BE : VGA_DRAW_LINE15_LE; - bits = 16; - break; - case 16: - v = s->big_endian_fb ? VGA_DRAW_LINE16_BE : VGA_DRAW_LINE16_LE; - bits = 16; - break; - case 24: - v = s->big_endian_fb ? VGA_DRAW_LINE24_BE : VGA_DRAW_LINE24_LE; - bits = 24; - break; - case 32: - v = s->big_endian_fb ? 
VGA_DRAW_LINE32_BE : VGA_DRAW_LINE32_LE; - bits = 32; - break; - } - } vga_draw_line = vga_draw_line_table[v]; if (!is_buffer_shared(surface) && s->cursor_invalidate) { @@ -1651,17 +1664,20 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) #if 0 printf("w=%d h=%d v=%d line_offset=%d cr[0x09]=0x%02x cr[0x17]=0x%02x linecmp=%d sr[0x01]=0x%02x\n", width, height, v, line_offset, s->cr[9], s->cr[VGA_CRTC_MODE], - s->line_compare, sr(s, VGA_SEQ_CLOCK_MODE)); + s->params.line_compare, sr(s, VGA_SEQ_CLOCK_MODE)); #endif - addr1 = (s->start_addr * 4); + addr1 = (s->params.start_addr * 4); bwidth = DIV_ROUND_UP(width * bits, 8); + if (hpel) { + bwidth += 4; + } y_start = -1; d = surface_data(surface); linesize = surface_stride(surface); y1 = 0; if (!full_update) { - if (s->line_compare < height) { + if (s->params.line_compare < height) { /* split screen mode */ region_start = 0; } @@ -1702,7 +1718,11 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) if (y_start < 0) y_start = y; if (!(is_buffer_shared(surface))) { - vga_draw_line(s, d, addr, width); + uint8_t *p; + p = vga_draw_line(s, d, addr, width, hpel); + if (p) { + memcpy(d, p, disp_width * sizeof(uint32_t)); + } if (s->cursor_draw_line) s->cursor_draw_line(s, d, y); } @@ -1717,15 +1737,19 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) if (!multi_run) { mask = (s->cr[VGA_CRTC_MODE] & 3) ^ 3; if ((y1 & mask) == mask) - addr1 += s->line_offset; + addr1 += s->params.line_offset; y1++; multi_run = multi_scan; } else { multi_run--; } /* line compare acts on the displayed lines */ - if (y == s->line_compare) + if (y == s->params.line_compare) { + if (s->params.hpel_split) { + hpel = VGA_HPEL_NEUTRAL; + } addr1 = 0; + } d += linesize; } if (y_start >= 0) { @@ -1841,9 +1865,7 @@ void vga_common_reset(VGACommonState *s) s->graphic_mode = -1; /* force full update */ s->shift_control = 0; s->double_scan = 0; - s->line_offset = 0; - s->line_compare = 0; - s->start_addr = 0; + memset(&s->params, '\0', sizeof(s->params)); s->plane_updated = 0; s->last_cw = 0; s->last_ch = 0; @@ -1965,7 +1987,7 @@ static void vga_update_text(void *opaque, console_ch_t *chardata) /* Update "hardware" cursor */ cursor_offset = ((s->cr[VGA_CRTC_CURSOR_HI] << 8) | - s->cr[VGA_CRTC_CURSOR_LO]) - s->start_addr; + s->cr[VGA_CRTC_CURSOR_LO]) - s->params.start_addr; if (cursor_offset != s->cursor_offset || s->cr[VGA_CRTC_CURSOR_START] != s->cursor_start || s->cr[VGA_CRTC_CURSOR_END] != s->cursor_end || full_update) { @@ -1981,7 +2003,7 @@ static void vga_update_text(void *opaque, console_ch_t *chardata) s->cursor_end = s->cr[VGA_CRTC_CURSOR_END]; } - src = (uint32_t *) s->vram_ptr + s->start_addr; + src = (uint32_t *) s->vram_ptr + s->params.start_addr; dst = chardata; if (full_update) { @@ -2106,7 +2128,7 @@ static const VMStateDescription vmstate_vga_endian = { .version_id = 1, .minimum_version_id = 1, .needed = vga_endian_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(big_endian_fb, VGACommonState), VMSTATE_END_OF_LIST() } @@ -2117,7 +2139,7 @@ const VMStateDescription vmstate_vga_common = { .version_id = 2, .minimum_version_id = 2, .post_load = vga_common_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(latch, VGACommonState), VMSTATE_UINT8(sr_index, VGACommonState), VMSTATE_PARTIAL_BUFFER(sr, VGACommonState, 8), @@ -2149,7 +2171,7 @@ const VMStateDescription vmstate_vga_common = { VMSTATE_UINT32(vbe_bank_mask, VGACommonState), 
VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_vga_endian, NULL } @@ -2226,7 +2248,7 @@ bool vga_common_init(VGACommonState *s, Object *obj, Error **errp) xen_register_framebuffer(&s->vram); s->vram_ptr = memory_region_get_ram_ptr(&s->vram); s->get_bpp = vga_get_bpp; - s->get_offsets = vga_get_offsets; + s->get_params = vga_get_params; s->get_resolution = vga_get_resolution; s->hw_ops = &vga_ops; switch (vga_retrace_method) { diff --git a/hw/display/vga_int.h b/hw/display/vga_int.h index 7cf0d11201a..876a1d3697b 100644 --- a/hw/display/vga_int.h +++ b/hw/display/vga_int.h @@ -56,6 +56,14 @@ struct VGACommonState; typedef uint8_t (* vga_retrace_fn)(struct VGACommonState *s); typedef void (* vga_update_retrace_info_fn)(struct VGACommonState *s); +typedef struct VGADisplayParams { + uint32_t line_offset; + uint32_t start_addr; + uint32_t line_compare; + uint8_t hpel; + bool hpel_split; +} VGADisplayParams; + typedef struct VGACommonState { MemoryRegion *legacy_address_space; uint8_t *vram_ptr; @@ -90,10 +98,7 @@ typedef struct VGACommonState { uint8_t palette[768]; int32_t bank_offset; int (*get_bpp)(struct VGACommonState *s); - void (*get_offsets)(struct VGACommonState *s, - uint32_t *pline_offset, - uint32_t *pstart_addr, - uint32_t *pline_compare); + void (*get_params)(struct VGACommonState *s, VGADisplayParams *params); void (*get_resolution)(struct VGACommonState *s, int *pwidth, int *pheight); @@ -108,12 +113,11 @@ typedef struct VGACommonState { /* display refresh support */ QemuConsole *con; uint32_t font_offsets[2]; + uint8_t *panning_buf; int graphic_mode; uint8_t shift_control; uint8_t double_scan; - uint32_t line_offset; - uint32_t line_compare; - uint32_t start_addr; + VGADisplayParams params; uint32_t plane_updated; uint32_t last_line_offset; uint8_t last_cw, last_ch; diff --git a/hw/display/vga_regs.h b/hw/display/vga_regs.h index 7fdba34b9b1..40e673f164d 100644 --- a/hw/display/vga_regs.h +++ b/hw/display/vga_regs.h @@ -100,7 +100,9 @@ /* VGA CRT controller bit masks */ #define VGA_CR11_LOCK_CR0_CR7 0x80 /* lock writes to CR0 - CR7 */ +#define VGA_CR14_DW 0x40 #define VGA_CR17_H_V_SIGNALS_ENABLED 0x80 +#define VGA_CR17_WORD_BYTE 0x40 /* VGA attribute controller register indices */ #define VGA_ATC_PALETTE0 0x00 @@ -154,6 +156,8 @@ #define VGA_GFX_BIT_MASK 0x08 /* VGA graphics controller bit masks */ +#define VGA_GR05_HOST_ODD_EVEN 0x10 #define VGA_GR06_GRAPHICS_MODE 0x01 +#define VGA_GR06_CHAIN_ODD_EVEN 0x02 #endif /* HW_VGA_REGS_H */ diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c index 37af2562198..4fc7ef8896c 100644 --- a/hw/display/virtio-gpu-base.c +++ b/hw/display/virtio-gpu-base.c @@ -251,7 +251,11 @@ void virtio_gpu_base_device_unrealize(DeviceState *qdev) { VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev); + VirtIODevice *vdev = VIRTIO_DEVICE(qdev); + virtio_del_queue(vdev, 0); + virtio_del_queue(vdev, 1); + virtio_cleanup(vdev); migrate_del_blocker(&g->migration_blocker); } diff --git a/hw/display/virtio-gpu-rutabaga.c b/hw/display/virtio-gpu-rutabaga.c index 9e67f9bd51b..17bf701a216 100644 --- a/hw/display/virtio-gpu-rutabaga.c +++ b/hw/display/virtio-gpu-rutabaga.c @@ -147,15 +147,39 @@ rutabaga_cmd_create_resource_3d(VirtIOGPU *g, QTAILQ_INSERT_HEAD(&g->reslist, res, next); } +static void +virtio_gpu_rutabaga_resource_unref(VirtIOGPU *g, + struct virtio_gpu_simple_resource *res, + Error **errp) +{ + int32_t result; + VirtIOGPURutabaga *vr = 
VIRTIO_GPU_RUTABAGA(g); + + result = rutabaga_resource_unref(vr->rutabaga, res->resource_id); + if (result) { + error_setg_errno(errp, + (int)result, + "%s: rutabaga_resource_unref returned %"PRIi32 + " for resource_id = %"PRIu32, __func__, result, + res->resource_id); + } + + if (res->image) { + pixman_image_unref(res->image); + } + + QTAILQ_REMOVE(&g->reslist, res, next); + g_free(res); +} + static void rutabaga_cmd_resource_unref(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { - int32_t result; + int32_t result = 0; struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_unref unref; - - VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g); + Error *local_err = NULL; VIRTIO_GPU_FILL_CMD(unref); @@ -164,15 +188,14 @@ rutabaga_cmd_resource_unref(VirtIOGPU *g, res = virtio_gpu_find_resource(g, unref.resource_id); CHECK(res, cmd); - result = rutabaga_resource_unref(vr->rutabaga, unref.resource_id); - CHECK(!result, cmd); - - if (res->image) { - pixman_image_unref(res->image); + virtio_gpu_rutabaga_resource_unref(g, res, &local_err); + if (local_err) { + error_report_err(local_err); + /* local_err was freed, do not reuse it. */ + local_err = NULL; + result = 1; } - - QTAILQ_REMOVE(&g->reslist, res, next); - g_free(res); + CHECK(!result, cmd); } static void @@ -1099,7 +1122,7 @@ static void virtio_gpu_rutabaga_class_init(ObjectClass *klass, void *data) vgc->handle_ctrl = virtio_gpu_rutabaga_handle_ctrl; vgc->process_cmd = virtio_gpu_rutabaga_process_cmd; vgc->update_cursor_data = virtio_gpu_rutabaga_update_cursor; - + vgc->resource_destroy = virtio_gpu_rutabaga_resource_unref; vdc->realize = virtio_gpu_rutabaga_realize; device_class_set_props(dc, virtio_gpu_rutabaga_properties); } diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index b016d3bac85..ae831b6b3e3 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -402,7 +402,8 @@ static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id) } static void virtio_gpu_resource_destroy(VirtIOGPU *g, - struct virtio_gpu_simple_resource *res) + struct virtio_gpu_simple_resource *res, + Error **errp) { int i; @@ -438,7 +439,11 @@ static void virtio_gpu_resource_unref(VirtIOGPU *g, cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } - virtio_gpu_resource_destroy(g, res); + /* + * virtio_gpu_resource_destroy does not set any errors, so pass a NULL errp + * to ignore them. 
+ */ + virtio_gpu_resource_destroy(g, res, NULL); } static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g, @@ -595,6 +600,7 @@ static void virtio_unref_resource(pixman_image_t *image, void *data) static void virtio_gpu_update_scanout(VirtIOGPU *g, uint32_t scanout_id, struct virtio_gpu_simple_resource *res, + struct virtio_gpu_framebuffer *fb, struct virtio_gpu_rect *r) { struct virtio_gpu_simple_resource *ores; @@ -612,9 +618,10 @@ static void virtio_gpu_update_scanout(VirtIOGPU *g, scanout->y = r->y; scanout->width = r->width; scanout->height = r->height; + scanout->fb = *fb; } -static void virtio_gpu_do_set_scanout(VirtIOGPU *g, +static bool virtio_gpu_do_set_scanout(VirtIOGPU *g, uint32_t scanout_id, struct virtio_gpu_framebuffer *fb, struct virtio_gpu_simple_resource *res, @@ -640,7 +647,7 @@ static void virtio_gpu_do_set_scanout(VirtIOGPU *g, r->x, r->y, r->width, r->height, fb->width, fb->height); *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; - return; + return false; } g->parent_obj.enable = 1; @@ -648,11 +655,12 @@ static void virtio_gpu_do_set_scanout(VirtIOGPU *g, if (res->blob) { if (console_has_gl(scanout->con)) { if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) { - virtio_gpu_update_scanout(g, scanout_id, res, r); + virtio_gpu_update_scanout(g, scanout_id, res, fb, r); } else { *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; + return false; } - return; + return true; } data = res->blob; @@ -679,10 +687,6 @@ static void virtio_gpu_do_set_scanout(VirtIOGPU *g, /* realloc the surface ptr */ scanout->ds = qemu_create_displaysurface_pixman(rect); - if (!scanout->ds) { - *error = VIRTIO_GPU_RESP_ERR_UNSPEC; - return; - } #ifdef WIN32 qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset); #endif @@ -692,7 +696,8 @@ static void virtio_gpu_do_set_scanout(VirtIOGPU *g, scanout->ds); } - virtio_gpu_update_scanout(g, scanout_id, res, r); + virtio_gpu_update_scanout(g, scanout_id, res, fb, r); + return true; } static void virtio_gpu_set_scanout(VirtIOGPU *g, @@ -1163,8 +1168,9 @@ static void virtio_gpu_cursor_bh(void *opaque) static const VMStateDescription vmstate_virtio_gpu_scanout = { .name = "virtio-gpu-one-scanout", - .version_id = 1, - .fields = (VMStateField[]) { + .version_id = 2, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout), VMSTATE_UINT32(width, struct virtio_gpu_scanout), VMSTATE_UINT32(height, struct virtio_gpu_scanout), @@ -1175,6 +1181,12 @@ static const VMStateDescription vmstate_virtio_gpu_scanout = { VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout), VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout), VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout), + VMSTATE_UINT32_V(fb.format, struct virtio_gpu_scanout, 2), + VMSTATE_UINT32_V(fb.bytes_pp, struct virtio_gpu_scanout, 2), + VMSTATE_UINT32_V(fb.width, struct virtio_gpu_scanout, 2), + VMSTATE_UINT32_V(fb.height, struct virtio_gpu_scanout, 2), + VMSTATE_UINT32_V(fb.stride, struct virtio_gpu_scanout, 2), + VMSTATE_UINT32_V(fb.offset, struct virtio_gpu_scanout, 2), VMSTATE_END_OF_LIST() }, }; @@ -1182,7 +1194,7 @@ static const VMStateDescription vmstate_virtio_gpu_scanout = { static const VMStateDescription vmstate_virtio_gpu_scanouts = { .name = "virtio-gpu-scanouts", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU), VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs, struct VirtIOGPU, NULL), @@ -1346,6 +1358,7 @@ static 
int virtio_gpu_blob_save(QEMUFile *f, void *opaque, size_t size, if (!res->blob_size) { continue; } + assert(!res->image); qemu_put_be32(f, res->resource_id); qemu_put_be32(f, res->blob_size); qemu_put_be32(f, res->iov_cnt); @@ -1408,24 +1421,40 @@ static int virtio_gpu_post_load(void *opaque, int version_id) int i; for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { - /* FIXME: should take scanout.r.{x,y} into account */ scanout = &g->parent_obj.scanout[i]; if (!scanout->resource_id) { continue; } + res = virtio_gpu_find_resource(g, scanout->resource_id); if (!res) { return -EINVAL; } - scanout->ds = qemu_create_displaysurface_pixman(res->image); - if (!scanout->ds) { - return -EINVAL; - } + + if (scanout->fb.format != 0) { + uint32_t error = 0; + struct virtio_gpu_rect r = { + .x = scanout->x, + .y = scanout->y, + .width = scanout->width, + .height = scanout->height + }; + + if (!virtio_gpu_do_set_scanout(g, i, &scanout->fb, res, &r, &error)) { + return -EINVAL; + } + } else { + /* legacy v1 migration support */ + if (!res->image) { + return -EINVAL; + } + scanout->ds = qemu_create_displaysurface_pixman(res->image); #ifdef WIN32 - qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0); + qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0); #endif + dpy_gfx_replace_surface(scanout->con, scanout->ds); + } - dpy_gfx_replace_surface(scanout->con, scanout->ds); dpy_gfx_update_full(scanout->con); if (scanout->cursor.resource_id) { update_cursor(g, &scanout->cursor); @@ -1463,10 +1492,8 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) g->ctrl_vq = virtio_get_queue(vdev, 0); g->cursor_vq = virtio_get_queue(vdev, 1); - g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g, - &qdev->mem_reentrancy_guard); - g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g, - &qdev->mem_reentrancy_guard); + g->ctrl_bh = virtio_bh_new_guarded(qdev, virtio_gpu_ctrl_bh, g); + g->cursor_bh = virtio_bh_new_guarded(qdev, virtio_gpu_cursor_bh, g); g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g); qemu_cond_init(&g->reset_cond); QTAILQ_INIT(&g->reslist); @@ -1488,11 +1515,24 @@ static void virtio_gpu_device_unrealize(DeviceState *qdev) static void virtio_gpu_reset_bh(void *opaque) { VirtIOGPU *g = VIRTIO_GPU(opaque); + VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g); struct virtio_gpu_simple_resource *res, *tmp; + uint32_t resource_id; + Error *local_err = NULL; int i = 0; QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) { - virtio_gpu_resource_destroy(g, res); + resource_id = res->resource_id; + vgc->resource_destroy(g, res, &local_err); + if (local_err) { + error_append_hint(&local_err, "%s: %s resource_destroy" + "for resource_id = %"PRIu32" failed.\n", + __func__, object_get_typename(OBJECT(g)), + resource_id); + /* error_report_err frees the error object for us */ + error_report_err(local_err); + local_err = NULL; + } } for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { @@ -1512,10 +1552,10 @@ void virtio_gpu_reset(VirtIODevice *vdev) g->reset_finished = false; qemu_bh_schedule(g->reset_bh); while (!g->reset_finished) { - qemu_cond_wait_iothread(&g->reset_cond); + qemu_cond_wait_bql(&g->reset_cond); } } else { - virtio_gpu_reset_bh(g); + aio_bh_call(g->reset_bh); } while (!QTAILQ_EMPTY(&g->cmdq)) { @@ -1592,7 +1632,7 @@ static const VMStateDescription vmstate_virtio_gpu = { .name = "virtio-gpu", .minimum_version_id = VIRTIO_GPU_VM_VERSION, .version_id = VIRTIO_GPU_VM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_VIRTIO_DEVICE /* core */, { .name = "virtio-gpu", @@ -1605,7 +1645,7 @@ static const VMStateDescription vmstate_virtio_gpu = { } /* device */, VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_gpu_blob_state, NULL }, @@ -1632,6 +1672,7 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data) vgc->handle_ctrl = virtio_gpu_handle_ctrl; vgc->process_cmd = virtio_gpu_simple_process_cmd; vgc->update_cursor_data = virtio_gpu_update_cursor_data; + vgc->resource_destroy = virtio_gpu_resource_destroy; vgbc->gl_flushed = virtio_gpu_handle_gl_flushed; vdc->realize = virtio_gpu_device_realize; diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c index c8552ff760f..94d3353f540 100644 --- a/hw/display/virtio-vga.c +++ b/hw/display/virtio-vga.c @@ -88,7 +88,7 @@ static const VMStateDescription vmstate_virtio_vga_base = { .name = "virtio-vga", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* no pci stuff here, saving the virtio device will handle that */ VMSTATE_STRUCT(vga, VirtIOVGABase, 0, vmstate_vga_common, VGACommonState), diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c index 3f26bea1904..1c0f9d9a991 100644 --- a/hw/display/vmware_vga.c +++ b/hw/display/vmware_vga.c @@ -1210,7 +1210,7 @@ static const VMStateDescription vmstate_vmware_vga_internal = { .version_id = 0, .minimum_version_id = 0, .post_load = vmsvga_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(new_depth, struct vmsvga_state_s, NULL), VMSTATE_INT32(enable, struct vmsvga_state_s), VMSTATE_INT32(config, struct vmsvga_state_s), @@ -1235,7 +1235,7 @@ static const VMStateDescription vmstate_vmware_vga = { .name = "vmware_vga", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, struct pci_vmsvga_state_s), VMSTATE_STRUCT(chip, struct pci_vmsvga_state_s, 0, vmstate_vmware_vga_internal, struct vmsvga_state_s), diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c index eee8f33a584..c42fc388dc7 100644 --- a/hw/display/xlnx_dp.c +++ b/hw/display/xlnx_dp.c @@ -262,7 +262,7 @@ typedef enum DPVideoFmt DPVideoFmt; static const VMStateDescription vmstate_dp = { .name = TYPE_XLNX_DP, .version_id = 2, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT32_ARRAY(core_registers, XlnxDPState, DP_CORE_REG_ARRAY_SIZE), VMSTATE_UINT32_ARRAY(avbufm_registers, XlnxDPState, diff --git a/hw/dma/bcm2835_dma.c b/hw/dma/bcm2835_dma.c index 5e9306110dc..9bda45072b6 100644 --- a/hw/dma/bcm2835_dma.c +++ b/hw/dma/bcm2835_dma.c @@ -311,7 +311,7 @@ static const VMStateDescription vmstate_bcm2835_dma_chan = { .name = TYPE_BCM2835_DMA "-chan", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cs, BCM2835DMAChan), VMSTATE_UINT32(conblk_ad, BCM2835DMAChan), VMSTATE_UINT32(ti, BCM2835DMAChan), @@ -329,7 +329,7 @@ static const VMStateDescription vmstate_bcm2835_dma = { .name = TYPE_BCM2835_DMA, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(chan, BCM2835DMAState, BCM2835_DMA_NCHANS, 1, vmstate_bcm2835_dma_chan, BCM2835DMAChan), VMSTATE_UINT32(int_status, BCM2835DMAState), diff --git a/hw/dma/i82374.c b/hw/dma/i82374.c index 63734c22c9d..e72aa2e1cef 
100644 --- a/hw/dma/i82374.c +++ b/hw/dma/i82374.c @@ -58,7 +58,7 @@ static const VMStateDescription vmstate_i82374 = { .name = "i82374", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(commands, I82374State, 8), VMSTATE_END_OF_LIST() }, @@ -129,7 +129,7 @@ static void i82374_realize(DeviceState *dev, Error **errp) error_setg(errp, "DMA already initialized on ISA bus"); return; } - i8257_dma_init(isa_bus, true); + i8257_dma_init(OBJECT(dev), isa_bus, true); portio_list_init(&s->port_list, OBJECT(s), i82374_portio_list, s, "i82374"); diff --git a/hw/dma/i8257.c b/hw/dma/i8257.c index de5f696919c..24a54ca272d 100644 --- a/hw/dma/i8257.c +++ b/hw/dma/i8257.c @@ -517,7 +517,7 @@ static const VMStateDescription vmstate_i8257_regs = { .name = "dma_regs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_ARRAY(now, I8257Regs, 2), VMSTATE_UINT16_ARRAY(base, I8257Regs, 2), VMSTATE_UINT8(mode, I8257Regs), @@ -542,7 +542,7 @@ static const VMStateDescription vmstate_i8257 = { .version_id = 1, .minimum_version_id = 1, .post_load = i8257_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(command, I8257State), VMSTATE_UINT8(mask, I8257State), VMSTATE_UINT8(flip_flop, I8257State), @@ -632,12 +632,13 @@ static void i8257_register_types(void) type_init(i8257_register_types) -void i8257_dma_init(ISABus *bus, bool high_page_enable) +void i8257_dma_init(Object *parent, ISABus *bus, bool high_page_enable) { ISADevice *isa1, *isa2; DeviceState *d; isa1 = isa_new(TYPE_I8257); + object_property_add_child(parent, "dma[*]", OBJECT(isa1)); d = DEVICE(isa1); qdev_prop_set_int32(d, "base", 0x00); qdev_prop_set_int32(d, "page-base", 0x80); @@ -646,6 +647,7 @@ void i8257_dma_init(ISABus *bus, bool high_page_enable) isa_realize_and_unref(isa1, bus, &error_fatal); isa2 = isa_new(TYPE_I8257); + object_property_add_child(parent, "dma[*]", OBJECT(isa2)); d = DEVICE(isa2); qdev_prop_set_int32(d, "base", 0xc0); qdev_prop_set_int32(d, "page-base", 0x88); diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c index 2627307cc85..1e49c22e933 100644 --- a/hw/dma/pl080.c +++ b/hw/dma/pl080.c @@ -39,7 +39,7 @@ static const VMStateDescription vmstate_pl080_channel = { .name = "pl080_channel", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(src, pl080_channel), VMSTATE_UINT32(dest, pl080_channel), VMSTATE_UINT32(lli, pl080_channel), @@ -53,7 +53,7 @@ static const VMStateDescription vmstate_pl080 = { .name = "pl080", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(tc_int, PL080State), VMSTATE_UINT8(tc_mask, PL080State), VMSTATE_UINT8(err_int, PL080State), diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c index e7e67dd8b6a..70a502d2452 100644 --- a/hw/dma/pl330.c +++ b/hw/dma/pl330.c @@ -139,7 +139,7 @@ static const VMStateDescription vmstate_pl330_chan = { .name = "pl330_chan", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(src, PL330Chan), VMSTATE_UINT32(dst, PL330Chan), VMSTATE_UINT32(pc, PL330Chan), @@ -170,7 +170,7 @@ static const VMStateDescription vmstate_pl330_fifo = { .name = "pl330_chan", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_VBUFFER_UINT32(buf, PL330Fifo, 1, NULL, buf_size), VMSTATE_VBUFFER_UINT32(tag, PL330Fifo, 1, NULL, buf_size), VMSTATE_UINT32(head, PL330Fifo), @@ -194,7 +194,7 @@ static const VMStateDescription vmstate_pl330_queue_entry = { .name = "pl330_queue_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(addr, PL330QueueEntry), VMSTATE_UINT32(len, PL330QueueEntry), VMSTATE_UINT8(n, PL330QueueEntry), @@ -216,7 +216,7 @@ static const VMStateDescription vmstate_pl330_queue = { .name = "pl330_queue", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_UINT32(queue, PL330Queue, queue_size, vmstate_pl330_queue_entry, PL330QueueEntry), @@ -280,7 +280,7 @@ static const VMStateDescription vmstate_pl330 = { .name = "pl330", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(manager, PL330State, 0, vmstate_pl330_chan, PL330Chan), VMSTATE_STRUCT_VARRAY_POINTER_UINT32(chan, PL330State, num_chnls, vmstate_pl330_chan, PL330Chan), diff --git a/hw/dma/pxa2xx_dma.c b/hw/dma/pxa2xx_dma.c index fa896f7edf7..9f62f0b633b 100644 --- a/hw/dma/pxa2xx_dma.c +++ b/hw/dma/pxa2xx_dma.c @@ -529,7 +529,7 @@ static const VMStateDescription vmstate_pxa2xx_dma_chan = { .name = "pxa2xx_dma_chan", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(descr, PXA2xxDMAChannel), VMSTATE_UINT32(src, PXA2xxDMAChannel), VMSTATE_UINT32(dest, PXA2xxDMAChannel), @@ -544,7 +544,7 @@ static const VMStateDescription vmstate_pxa2xx_dma = { .name = "pxa2xx_dma", .version_id = 1, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UNUSED_TEST(is_version_0, 4), VMSTATE_UINT32(stopintr, PXA2xxDMAState), VMSTATE_UINT32(eorintr, PXA2xxDMAState), diff --git a/hw/dma/rc4030.c b/hw/dma/rc4030.c index aa1d323a36b..915284194fe 100644 --- a/hw/dma/rc4030.c +++ b/hw/dma/rc4030.c @@ -568,7 +568,7 @@ static const VMStateDescription vmstate_rc4030 = { .name = "rc4030", .version_id = 3, .post_load = rc4030_post_load, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(config, rc4030State), VMSTATE_UINT32(invalid_address_register, rc4030State), VMSTATE_UINT32_2DARRAY(dma_regs, rc4030State, 8, 4), diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c index 0ef13c5e9a8..80196419427 100644 --- a/hw/dma/sparc32_dma.c +++ b/hw/dma/sparc32_dma.c @@ -249,7 +249,7 @@ static const VMStateDescription vmstate_sparc32_dma_device = { .name ="sparc32_dma", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(dmaregs, DMADeviceState, DMA_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c index 84c0083013e..670c9568669 100644 --- a/hw/dma/xlnx-zdma.c +++ b/hw/dma/xlnx-zdma.c @@ -801,7 +801,7 @@ static const VMStateDescription vmstate_zdma = { .name = TYPE_XLNX_ZDMA, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX), VMSTATE_UINT32(state, XlnxZDMA), VMSTATE_UINT32_ARRAY(dsc_src.words, XlnxZDMA, 4), diff --git a/hw/dma/xlnx-zynq-devcfg.c b/hw/dma/xlnx-zynq-devcfg.c index f5ad1a0d22c..e901f68ff34 100644 --- a/hw/dma/xlnx-zynq-devcfg.c +++ b/hw/dma/xlnx-zynq-devcfg.c 
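The change repeated across these DMA controllers (and the display devices earlier in the series) is purely mechanical: the compound literals behind .fields, and where present .subsections, gain const qualification, presumably so the descriptor tables can be placed in read-only memory. A minimal sketch of the resulting initializer shape, assuming QEMU's migration/vmstate.h macros; DemoDMAState and its registers are hypothetical and exist only to show the pattern:

#include "qemu/osdep.h"
#include "migration/vmstate.h"

/* Hypothetical device state, used only to illustrate the const-qualified
 * VMStateDescription initializers adopted throughout this series. */
typedef struct DemoDMAState {
    uint32_t control;
    uint32_t status;
} DemoDMAState;

static const VMStateDescription vmstate_demo_dma = {
    .name = "demo-dma",
    .version_id = 1,
    .minimum_version_id = 1,
    /* The field table is a const compound literal, as in the hunks above. */
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(control, DemoDMAState),
        VMSTATE_UINT32(status, DemoDMAState),
        VMSTATE_END_OF_LIST()
    },
    /* Subsection lists follow the same pattern when a device has any;
     * this empty, NULL-terminated list is just a placeholder. */
    .subsections = (const VMStateDescription * const []) {
        NULL
    }
};

Usage does not change: the description is still wired up through dc->vmsd or vmstate_register() exactly as before; only the qualification of the initializers differs.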
@@ -333,7 +333,7 @@ static const VMStateDescription vmstate_xlnx_zynq_devcfg_dma_cmd = { .name = "xlnx_zynq_devcfg_dma_cmd", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(src_addr, XlnxZynqDevcfgDMACmd), VMSTATE_UINT32(dest_addr, XlnxZynqDevcfgDMACmd), VMSTATE_UINT32(src_len, XlnxZynqDevcfgDMACmd), @@ -346,7 +346,7 @@ static const VMStateDescription vmstate_xlnx_zynq_devcfg = { .name = "xlnx_zynq_devcfg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(dma_cmd_fifo, XlnxZynqDevcfg, XLNX_ZYNQ_DEVCFG_DMA_CMD_FIFO_LEN, 0, vmstate_xlnx_zynq_devcfg_dma_cmd, diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c index bc1505aade7..ae307482f22 100644 --- a/hw/dma/xlnx_csu_dma.c +++ b/hw/dma/xlnx_csu_dma.c @@ -681,7 +681,7 @@ static const VMStateDescription vmstate_xlnx_csu_dma = { .name = TYPE_XLNX_CSU_DMA, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(src_timer, XlnxCSUDMA), VMSTATE_UINT16(width, XlnxCSUDMA), VMSTATE_BOOL(is_dst, XlnxCSUDMA), diff --git a/hw/dma/xlnx_dpdma.c b/hw/dma/xlnx_dpdma.c index dd66be5265d..1f5cd64ed10 100644 --- a/hw/dma/xlnx_dpdma.c +++ b/hw/dma/xlnx_dpdma.c @@ -277,7 +277,7 @@ static inline bool xlnx_dpdma_desc_ignore_done_bit(DPDMADescriptor *desc) static const VMStateDescription vmstate_xlnx_dpdma = { .name = TYPE_XLNX_DPDMA, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(registers, XlnxDPDMAState, XLNX_DPDMA_REG_ARRAY_SIZE), VMSTATE_BOOL_ARRAY(operation_finished, XlnxDPDMAState, 6), diff --git a/hw/fsi/Kconfig b/hw/fsi/Kconfig new file mode 100644 index 00000000000..9cee657a0f0 --- /dev/null +++ b/hw/fsi/Kconfig @@ -0,0 +1,7 @@ +config FSI_APB2OPB_ASPEED + bool + depends on ASPEED_SOC + select FSI + +config FSI + bool diff --git a/hw/fsi/aspeed_apb2opb.c b/hw/fsi/aspeed_apb2opb.c new file mode 100644 index 00000000000..ea50718b6a2 --- /dev/null +++ b/hw/fsi/aspeed_apb2opb.c @@ -0,0 +1,367 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 IBM Corp. 
+ * + * ASPEED APB-OPB FSI interface + * IBM On-chip Peripheral Bus + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qom/object.h" +#include "qapi/error.h" +#include "trace.h" + +#include "hw/fsi/aspeed_apb2opb.h" +#include "hw/qdev-core.h" + +#define TO_REG(x) (x >> 2) + +#define APB2OPB_VERSION TO_REG(0x00) +#define APB2OPB_TRIGGER TO_REG(0x04) + +#define APB2OPB_CONTROL TO_REG(0x08) +#define APB2OPB_CONTROL_OFF BE_GENMASK(31, 13) + +#define APB2OPB_OPB2FSI TO_REG(0x0c) +#define APB2OPB_OPB2FSI_OFF BE_GENMASK(31, 22) + +#define APB2OPB_OPB0_SEL TO_REG(0x10) +#define APB2OPB_OPB1_SEL TO_REG(0x28) +#define APB2OPB_OPB_SEL_EN BIT(0) + +#define APB2OPB_OPB0_MODE TO_REG(0x14) +#define APB2OPB_OPB1_MODE TO_REG(0x2c) +#define APB2OPB_OPB_MODE_RD BIT(0) + +#define APB2OPB_OPB0_XFER TO_REG(0x18) +#define APB2OPB_OPB1_XFER TO_REG(0x30) +#define APB2OPB_OPB_XFER_FULL BIT(1) +#define APB2OPB_OPB_XFER_HALF BIT(0) + +#define APB2OPB_OPB0_ADDR TO_REG(0x1c) +#define APB2OPB_OPB0_WRITE_DATA TO_REG(0x20) + +#define APB2OPB_OPB1_ADDR TO_REG(0x34) +#define APB2OPB_OPB1_WRITE_DATA TO_REG(0x38) + +#define APB2OPB_IRQ_STS TO_REG(0x48) +#define APB2OPB_IRQ_STS_OPB1_TX_ACK BIT(17) +#define APB2OPB_IRQ_STS_OPB0_TX_ACK BIT(16) + +#define APB2OPB_OPB0_WRITE_WORD_ENDIAN TO_REG(0x4c) +#define APB2OPB_OPB0_WRITE_WORD_ENDIAN_BE 0x0011101b +#define APB2OPB_OPB0_WRITE_BYTE_ENDIAN TO_REG(0x50) +#define APB2OPB_OPB0_WRITE_BYTE_ENDIAN_BE 0x0c330f3f +#define APB2OPB_OPB1_WRITE_WORD_ENDIAN TO_REG(0x54) +#define APB2OPB_OPB1_WRITE_BYTE_ENDIAN TO_REG(0x58) +#define APB2OPB_OPB0_READ_BYTE_ENDIAN TO_REG(0x5c) +#define APB2OPB_OPB1_READ_BYTE_ENDIAN TO_REG(0x60) +#define APB2OPB_OPB0_READ_WORD_ENDIAN_BE 0x00030b1b + +#define APB2OPB_OPB0_READ_DATA TO_REG(0x84) +#define APB2OPB_OPB1_READ_DATA TO_REG(0x90) + +/* + * The following magic values came from AST2600 data sheet + * The register values are defined under section "FSI controller" + * as initial values. 
+ */ +static const uint32_t aspeed_apb2opb_reset[ASPEED_APB2OPB_NR_REGS] = { + [APB2OPB_VERSION] = 0x000000a1, + [APB2OPB_OPB0_WRITE_WORD_ENDIAN] = 0x0044eee4, + [APB2OPB_OPB0_WRITE_BYTE_ENDIAN] = 0x0055aaff, + [APB2OPB_OPB1_WRITE_WORD_ENDIAN] = 0x00117717, + [APB2OPB_OPB1_WRITE_BYTE_ENDIAN] = 0xffaa5500, + [APB2OPB_OPB0_READ_BYTE_ENDIAN] = 0x0044eee4, + [APB2OPB_OPB1_READ_BYTE_ENDIAN] = 0x00117717 +}; + +static void fsi_opb_fsi_master_address(FSIMasterState *fsi, hwaddr addr) +{ + memory_region_transaction_begin(); + memory_region_set_address(&fsi->iomem, addr); + memory_region_transaction_commit(); +} + +static void fsi_opb_opb2fsi_address(FSIMasterState *fsi, hwaddr addr) +{ + memory_region_transaction_begin(); + memory_region_set_address(&fsi->opb2fsi, addr); + memory_region_transaction_commit(); +} + +static uint64_t fsi_aspeed_apb2opb_read(void *opaque, hwaddr addr, + unsigned size) +{ + AspeedAPB2OPBState *s = ASPEED_APB2OPB(opaque); + unsigned int reg = TO_REG(addr); + + trace_fsi_aspeed_apb2opb_read(addr, size); + + if (reg >= ASPEED_APB2OPB_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out of bounds read: 0x%"HWADDR_PRIx" for %u\n", + __func__, addr, size); + return 0; + } + + return s->regs[reg]; +} + +static MemTxResult fsi_aspeed_apb2opb_rw(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, uint32_t *data, + uint32_t size, bool is_write) +{ + MemTxResult res; + + if (is_write) { + switch (size) { + case 4: + address_space_stl_le(as, addr, *data, attrs, &res); + break; + case 2: + address_space_stw_le(as, addr, *data, attrs, &res); + break; + case 1: + address_space_stb(as, addr, *data, attrs, &res); + break; + default: + g_assert_not_reached(); + } + } else { + switch (size) { + case 4: + *data = address_space_ldl_le(as, addr, attrs, &res); + break; + case 2: + *data = address_space_lduw_le(as, addr, attrs, &res); + break; + case 1: + *data = address_space_ldub(as, addr, attrs, &res); + break; + default: + g_assert_not_reached(); + } + } + return res; +} + +static void fsi_aspeed_apb2opb_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + AspeedAPB2OPBState *s = ASPEED_APB2OPB(opaque); + unsigned int reg = TO_REG(addr); + + trace_fsi_aspeed_apb2opb_write(addr, size, data); + + if (reg >= ASPEED_APB2OPB_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out of bounds write: %"HWADDR_PRIx" for %u\n", + __func__, addr, size); + return; + } + + switch (reg) { + case APB2OPB_CONTROL: + fsi_opb_fsi_master_address(&s->fsi[0], + data & APB2OPB_CONTROL_OFF); + break; + case APB2OPB_OPB2FSI: + fsi_opb_opb2fsi_address(&s->fsi[0], + data & APB2OPB_OPB2FSI_OFF); + break; + case APB2OPB_OPB0_WRITE_WORD_ENDIAN: + if (data != APB2OPB_OPB0_WRITE_WORD_ENDIAN_BE) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bridge needs to be driven as BE (0x%x)\n", + __func__, APB2OPB_OPB0_WRITE_WORD_ENDIAN_BE); + } + break; + case APB2OPB_OPB0_WRITE_BYTE_ENDIAN: + if (data != APB2OPB_OPB0_WRITE_BYTE_ENDIAN_BE) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bridge needs to be driven as BE (0x%x)\n", + __func__, APB2OPB_OPB0_WRITE_BYTE_ENDIAN_BE); + } + break; + case APB2OPB_OPB0_READ_BYTE_ENDIAN: + if (data != APB2OPB_OPB0_READ_WORD_ENDIAN_BE) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bridge needs to be driven as BE (0x%x)\n", + __func__, APB2OPB_OPB0_READ_WORD_ENDIAN_BE); + } + break; + case APB2OPB_TRIGGER: + { + uint32_t opb, op_mode, op_size, op_addr, op_data; + MemTxResult result; + bool is_write; + int index; + AddressSpace *as; + + assert((s->regs[APB2OPB_OPB0_SEL] & APB2OPB_OPB_SEL_EN) ^ 
+ (s->regs[APB2OPB_OPB1_SEL] & APB2OPB_OPB_SEL_EN)); + + if (s->regs[APB2OPB_OPB0_SEL] & APB2OPB_OPB_SEL_EN) { + opb = 0; + op_mode = s->regs[APB2OPB_OPB0_MODE]; + op_size = s->regs[APB2OPB_OPB0_XFER]; + op_addr = s->regs[APB2OPB_OPB0_ADDR]; + op_data = s->regs[APB2OPB_OPB0_WRITE_DATA]; + } else if (s->regs[APB2OPB_OPB1_SEL] & APB2OPB_OPB_SEL_EN) { + opb = 1; + op_mode = s->regs[APB2OPB_OPB1_MODE]; + op_size = s->regs[APB2OPB_OPB1_XFER]; + op_addr = s->regs[APB2OPB_OPB1_ADDR]; + op_data = s->regs[APB2OPB_OPB1_WRITE_DATA]; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid operation: 0x%"HWADDR_PRIx" for %u\n", + __func__, addr, size); + return; + } + + if (op_size & ~(APB2OPB_OPB_XFER_HALF | APB2OPB_OPB_XFER_FULL)) { + qemu_log_mask(LOG_GUEST_ERROR, + "OPB transaction failed: Unrecognized access width: %d\n", + op_size); + return; + } + + op_size += 1; + is_write = !(op_mode & APB2OPB_OPB_MODE_RD); + index = opb ? APB2OPB_OPB1_READ_DATA : APB2OPB_OPB0_READ_DATA; + as = &s->opb[opb].as; + + result = fsi_aspeed_apb2opb_rw(as, op_addr, MEMTXATTRS_UNSPECIFIED, + &op_data, op_size, is_write); + if (result != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: OPB %s failed @%08x\n", + __func__, is_write ? "write" : "read", op_addr); + return; + } + + if (!is_write) { + s->regs[index] = op_data; + } + + s->regs[APB2OPB_IRQ_STS] |= opb ? APB2OPB_IRQ_STS_OPB1_TX_ACK + : APB2OPB_IRQ_STS_OPB0_TX_ACK; + break; + } + } + + s->regs[reg] = data; +} + +static const struct MemoryRegionOps aspeed_apb2opb_ops = { + .read = fsi_aspeed_apb2opb_read, + .write = fsi_aspeed_apb2opb_write, + .valid.max_access_size = 4, + .valid.min_access_size = 4, + .impl.max_access_size = 4, + .impl.min_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void fsi_aspeed_apb2opb_init(Object *o) +{ + AspeedAPB2OPBState *s = ASPEED_APB2OPB(o); + int i; + + for (i = 0; i < ASPEED_FSI_NUM; i++) { + object_initialize_child(o, "fsi-master[*]", &s->fsi[i], + TYPE_FSI_MASTER); + } +} + +static void fsi_aspeed_apb2opb_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedAPB2OPBState *s = ASPEED_APB2OPB(dev); + int i; + + /* + * TODO: The OPBus model initializes the OPB address space in + * the .instance_init handler and this is problematic for test + * device-introspect-test. To avoid a memory corruption and a QEMU + * crash, qbus_init() should be called from realize(). Something to + * improve. Possibly, OPBus could also be removed. 
+ */ + for (i = 0; i < ASPEED_FSI_NUM; i++) { + qbus_init(&s->opb[i], sizeof(s->opb[i]), TYPE_OP_BUS, DEVICE(s), + NULL); + } + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_apb2opb_ops, s, + TYPE_ASPEED_APB2OPB, 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + + for (i = 0; i < ASPEED_FSI_NUM; i++) { + if (!qdev_realize(DEVICE(&s->fsi[i]), BUS(&s->opb[i]), errp)) { + return; + } + + memory_region_add_subregion(&s->opb[i].mr, 0x80000000, + &s->fsi[i].iomem); + + memory_region_add_subregion(&s->opb[i].mr, 0xa0000000, + &s->fsi[i].opb2fsi); + } +} + +static void fsi_aspeed_apb2opb_reset(DeviceState *dev) +{ + AspeedAPB2OPBState *s = ASPEED_APB2OPB(dev); + + memcpy(s->regs, aspeed_apb2opb_reset, ASPEED_APB2OPB_NR_REGS); +} + +static void fsi_aspeed_apb2opb_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "ASPEED APB2OPB Bridge"; + dc->realize = fsi_aspeed_apb2opb_realize; + dc->reset = fsi_aspeed_apb2opb_reset; +} + +static const TypeInfo aspeed_apb2opb_info = { + .name = TYPE_ASPEED_APB2OPB, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = fsi_aspeed_apb2opb_init, + .instance_size = sizeof(AspeedAPB2OPBState), + .class_init = fsi_aspeed_apb2opb_class_init, +}; + +static void aspeed_apb2opb_register_types(void) +{ + type_register_static(&aspeed_apb2opb_info); +} + +type_init(aspeed_apb2opb_register_types); + +static void fsi_opb_init(Object *o) +{ + OPBus *opb = OP_BUS(o); + + memory_region_init(&opb->mr, 0, TYPE_FSI_OPB, UINT32_MAX); + address_space_init(&opb->as, &opb->mr, TYPE_FSI_OPB); +} + +static const TypeInfo opb_info = { + .name = TYPE_OP_BUS, + .parent = TYPE_BUS, + .instance_init = fsi_opb_init, + .instance_size = sizeof(OPBus), +}; + +static void fsi_opb_register_types(void) +{ + type_register_static(&opb_info); +} + +type_init(fsi_opb_register_types); diff --git a/hw/fsi/cfam.c b/hw/fsi/cfam.c new file mode 100644 index 00000000000..c62f0f78dee --- /dev/null +++ b/hw/fsi/cfam.c @@ -0,0 +1,168 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 IBM Corp. + * + * IBM Common FRU Access Macro + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" + +#include "qapi/error.h" +#include "trace.h" + +#include "hw/fsi/cfam.h" +#include "hw/fsi/fsi.h" + +#include "hw/qdev-properties.h" + +#define ENGINE_CONFIG_NEXT BIT(31) +#define ENGINE_CONFIG_TYPE_PEEK (0x02 << 4) +#define ENGINE_CONFIG_TYPE_FSI (0x03 << 4) +#define ENGINE_CONFIG_TYPE_SCRATCHPAD (0x06 << 4) + +/* Valid, slots, version, type, crc */ +#define CFAM_CONFIG_REG(__VER, __TYPE, __CRC) \ + (ENGINE_CONFIG_NEXT | \ + 0x00010000 | \ + (__VER) | \ + (__TYPE) | \ + (__CRC)) + +#define TO_REG(x) ((x) >> 2) + +#define CFAM_CONFIG_CHIP_ID TO_REG(0x00) +#define CFAM_CONFIG_PEEK_STATUS TO_REG(0x04) +#define CFAM_CONFIG_CHIP_ID_P9 0xc0022d15 +#define CFAM_CONFIG_CHIP_ID_BREAK 0xc0de0000 + +static uint64_t fsi_cfam_config_read(void *opaque, hwaddr addr, unsigned size) +{ + trace_fsi_cfam_config_read(addr, size); + + switch (addr) { + case 0x00: + return CFAM_CONFIG_CHIP_ID_P9; + case 0x04: + return CFAM_CONFIG_REG(0x1000, ENGINE_CONFIG_TYPE_PEEK, 0xc); + case 0x08: + return CFAM_CONFIG_REG(0x5000, ENGINE_CONFIG_TYPE_FSI, 0xa); + case 0xc: + return CFAM_CONFIG_REG(0x1000, ENGINE_CONFIG_TYPE_SCRATCHPAD, 0x7); + default: + /* + * The config table contains different engines from 0xc onwards. + * The scratch pad is already added at address 0xc. We need to add + * future engines from address 0x10 onwards. 
Returning 0 as engine + * is not implemented. + */ + return 0; + } +} + +static void fsi_cfam_config_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + FSICFAMState *cfam = FSI_CFAM(opaque); + + trace_fsi_cfam_config_write(addr, size, data); + + switch (TO_REG(addr)) { + case CFAM_CONFIG_CHIP_ID: + case CFAM_CONFIG_PEEK_STATUS: + if (data == CFAM_CONFIG_CHIP_ID_BREAK) { + bus_cold_reset(BUS(&cfam->lbus)); + } + break; + default: + trace_fsi_cfam_config_write_noaddr(addr, size, data); + } +} + +static const struct MemoryRegionOps cfam_config_ops = { + .read = fsi_cfam_config_read, + .write = fsi_cfam_config_write, + .valid.max_access_size = 4, + .valid.min_access_size = 4, + .impl.max_access_size = 4, + .impl.min_access_size = 4, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static uint64_t fsi_cfam_unimplemented_read(void *opaque, hwaddr addr, + unsigned size) +{ + trace_fsi_cfam_unimplemented_read(addr, size); + + return 0; +} + +static void fsi_cfam_unimplemented_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + trace_fsi_cfam_unimplemented_write(addr, size, data); +} + +static const struct MemoryRegionOps fsi_cfam_unimplemented_ops = { + .read = fsi_cfam_unimplemented_read, + .write = fsi_cfam_unimplemented_write, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void fsi_cfam_instance_init(Object *obj) +{ + FSICFAMState *s = FSI_CFAM(obj); + + object_initialize_child(obj, "scratchpad", &s->scratchpad, + TYPE_FSI_SCRATCHPAD); +} + +static void fsi_cfam_realize(DeviceState *dev, Error **errp) +{ + FSICFAMState *cfam = FSI_CFAM(dev); + FSISlaveState *slave = FSI_SLAVE(dev); + + /* Each slave has a 2MiB address space */ + memory_region_init_io(&cfam->mr, OBJECT(cfam), &fsi_cfam_unimplemented_ops, + cfam, TYPE_FSI_CFAM, 2 * MiB); + + qbus_init(&cfam->lbus, sizeof(cfam->lbus), TYPE_FSI_LBUS, DEVICE(cfam), + NULL); + + memory_region_init_io(&cfam->config_iomem, OBJECT(cfam), &cfam_config_ops, + cfam, TYPE_FSI_CFAM ".config", 0x400); + + memory_region_add_subregion(&cfam->mr, 0, &cfam->config_iomem); + memory_region_add_subregion(&cfam->mr, 0x800, &slave->iomem); + memory_region_add_subregion(&cfam->mr, 0xc00, &cfam->lbus.mr); + + /* Add scratchpad engine */ + if (!qdev_realize(DEVICE(&cfam->scratchpad), BUS(&cfam->lbus), errp)) { + return; + } + + FSILBusDevice *fsi_dev = FSI_LBUS_DEVICE(&cfam->scratchpad); + memory_region_add_subregion(&cfam->lbus.mr, 0, &fsi_dev->iomem); +} + +static void fsi_cfam_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->bus_type = TYPE_FSI_BUS; + dc->realize = fsi_cfam_realize; +} + +static const TypeInfo fsi_cfam_info = { + .name = TYPE_FSI_CFAM, + .parent = TYPE_FSI_SLAVE, + .instance_init = fsi_cfam_instance_init, + .instance_size = sizeof(FSICFAMState), + .class_init = fsi_cfam_class_init, +}; + +static void fsi_cfam_register_types(void) +{ + type_register_static(&fsi_cfam_info); +} + +type_init(fsi_cfam_register_types); diff --git a/hw/fsi/fsi-master.c b/hw/fsi/fsi-master.c new file mode 100644 index 00000000000..a5f0598c98e --- /dev/null +++ b/hw/fsi/fsi-master.c @@ -0,0 +1,170 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 IBM Corp. 
+ * + * IBM Flexible Service Interface master + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "trace.h" + +#include "hw/fsi/fsi-master.h" + +#define TYPE_OP_BUS "opb" + +#define TO_REG(x) ((x) >> 2) + +#define FSI_MENP0 TO_REG(0x010) +#define FSI_MENP32 TO_REG(0x014) +#define FSI_MSENP0 TO_REG(0x018) +#define FSI_MLEVP0 TO_REG(0x018) +#define FSI_MSENP32 TO_REG(0x01c) +#define FSI_MLEVP32 TO_REG(0x01c) +#define FSI_MCENP0 TO_REG(0x020) +#define FSI_MREFP0 TO_REG(0x020) +#define FSI_MCENP32 TO_REG(0x024) +#define FSI_MREFP32 TO_REG(0x024) + +#define FSI_MVER TO_REG(0x074) +#define FSI_MRESP0 TO_REG(0x0d0) + +#define FSI_MRESB0 TO_REG(0x1d0) +#define FSI_MRESB0_RESET_GENERAL BIT(31) +#define FSI_MRESB0_RESET_ERROR BIT(30) + +static uint64_t fsi_master_read(void *opaque, hwaddr addr, unsigned size) +{ + FSIMasterState *s = FSI_MASTER(opaque); + int reg = TO_REG(addr); + + trace_fsi_master_read(addr, size); + + if (reg >= FSI_MASTER_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out of bounds read: 0x%"HWADDR_PRIx" for %u\n", + __func__, addr, size); + return 0; + } + + return s->regs[reg]; +} + +static void fsi_master_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + FSIMasterState *s = FSI_MASTER(opaque); + int reg = TO_REG(addr); + + trace_fsi_master_write(addr, size, data); + + if (reg >= FSI_MASTER_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out of bounds write: %"HWADDR_PRIx" for %u\n", + __func__, addr, size); + return; + } + + switch (reg) { + case FSI_MENP0: + s->regs[FSI_MENP0] = data; + break; + case FSI_MENP32: + s->regs[FSI_MENP32] = data; + break; + case FSI_MSENP0: + s->regs[FSI_MENP0] |= data; + break; + case FSI_MSENP32: + s->regs[FSI_MENP32] |= data; + break; + case FSI_MCENP0: + s->regs[FSI_MENP0] &= ~data; + break; + case FSI_MCENP32: + s->regs[FSI_MENP32] &= ~data; + break; + case FSI_MRESP0: + /* Perform necessary resets leave register 0 to indicate no errors */ + break; + case FSI_MRESB0: + if (data & FSI_MRESB0_RESET_GENERAL) { + device_cold_reset(DEVICE(opaque)); + } + if (data & FSI_MRESB0_RESET_ERROR) { + /* FIXME: this seems dubious */ + device_cold_reset(DEVICE(opaque)); + } + break; + default: + s->regs[reg] = data; + } +} + +static const struct MemoryRegionOps fsi_master_ops = { + .read = fsi_master_read, + .write = fsi_master_write, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void fsi_master_init(Object *o) +{ + FSIMasterState *s = FSI_MASTER(o); + + object_initialize_child(o, "cfam", &s->cfam, TYPE_FSI_CFAM); + + qbus_init(&s->bus, sizeof(s->bus), TYPE_FSI_BUS, DEVICE(s), NULL); + + memory_region_init_io(&s->iomem, OBJECT(s), &fsi_master_ops, s, + TYPE_FSI_MASTER, 0x10000000); + memory_region_init(&s->opb2fsi, OBJECT(s), "fsi.opb2fsi", 0x10000000); +} + +static void fsi_master_realize(DeviceState *dev, Error **errp) +{ + FSIMasterState *s = FSI_MASTER(dev); + + if (!qdev_realize(DEVICE(&s->cfam), BUS(&s->bus), errp)) { + return; + } + + /* address ? 
*/ + memory_region_add_subregion(&s->opb2fsi, 0, &s->cfam.mr); +} + +static void fsi_master_reset(DeviceState *dev) +{ + FSIMasterState *s = FSI_MASTER(dev); + + /* Initialize registers */ + memset(s->regs, 0, sizeof(s->regs)); + + /* ASPEED default */ + s->regs[FSI_MVER] = 0xe0050101; +} + +static void fsi_master_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->bus_type = TYPE_OP_BUS; + dc->desc = "FSI Master"; + dc->realize = fsi_master_realize; + dc->reset = fsi_master_reset; +} + +static const TypeInfo fsi_master_info = { + .name = TYPE_FSI_MASTER, + .parent = TYPE_DEVICE, + .instance_init = fsi_master_init, + .instance_size = sizeof(FSIMasterState), + .class_init = fsi_master_class_init, +}; + +static void fsi_register_types(void) +{ + type_register_static(&fsi_master_info); +} + +type_init(fsi_register_types); diff --git a/hw/fsi/fsi.c b/hw/fsi/fsi.c new file mode 100644 index 00000000000..9a5f4e616f1 --- /dev/null +++ b/hw/fsi/fsi.c @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 IBM Corp. + * + * IBM Flexible Service Interface + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "trace.h" + +#include "hw/fsi/fsi.h" + +#define TO_REG(x) ((x) >> 2) + +static const TypeInfo fsi_bus_info = { + .name = TYPE_FSI_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(FSIBus), +}; + +static uint64_t fsi_slave_read(void *opaque, hwaddr addr, unsigned size) +{ + FSISlaveState *s = FSI_SLAVE(opaque); + int reg = TO_REG(addr); + + trace_fsi_slave_read(addr, size); + + if (reg >= FSI_SLAVE_CONTROL_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out of bounds read: 0x%"HWADDR_PRIx" for %u\n", + __func__, addr, size); + return 0; + } + + return s->regs[reg]; +} + +static void fsi_slave_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + FSISlaveState *s = FSI_SLAVE(opaque); + int reg = TO_REG(addr); + + trace_fsi_slave_write(addr, size, data); + + if (reg >= FSI_SLAVE_CONTROL_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out of bounds write: 0x%"HWADDR_PRIx" for %u\n", + __func__, addr, size); + return; + } + + s->regs[reg] = data; +} + +static const struct MemoryRegionOps fsi_slave_ops = { + .read = fsi_slave_read, + .write = fsi_slave_write, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void fsi_slave_reset(DeviceState *dev) +{ + FSISlaveState *s = FSI_SLAVE(dev); + + /* Initialize registers */ + memset(s->regs, 0, sizeof(s->regs)); +} + +static void fsi_slave_init(Object *o) +{ + FSISlaveState *s = FSI_SLAVE(o); + + memory_region_init_io(&s->iomem, OBJECT(s), &fsi_slave_ops, + s, TYPE_FSI_SLAVE, 0x400); +} + +static void fsi_slave_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->bus_type = TYPE_FSI_BUS; + dc->desc = "FSI Slave"; + dc->reset = fsi_slave_reset; +} + +static const TypeInfo fsi_slave_info = { + .name = TYPE_FSI_SLAVE, + .parent = TYPE_DEVICE, + .instance_init = fsi_slave_init, + .instance_size = sizeof(FSISlaveState), + .class_init = fsi_slave_class_init, +}; + +static void fsi_register_types(void) +{ + type_register_static(&fsi_bus_info); + type_register_static(&fsi_slave_info); +} + +type_init(fsi_register_types); diff --git a/hw/fsi/lbus.c b/hw/fsi/lbus.c new file mode 100644 index 00000000000..20495f42fd9 --- /dev/null +++ b/hw/fsi/lbus.c @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 IBM Corp. 
+ * + * IBM Local bus where FSI slaves are connected + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/fsi/lbus.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "trace.h" + +#define TO_REG(offset) ((offset) >> 2) + +static void fsi_lbus_init(Object *o) +{ + FSILBus *lbus = FSI_LBUS(o); + + memory_region_init(&lbus->mr, OBJECT(lbus), TYPE_FSI_LBUS, 1 * MiB); +} + +static const TypeInfo fsi_lbus_info = { + .name = TYPE_FSI_LBUS, + .parent = TYPE_BUS, + .instance_init = fsi_lbus_init, + .instance_size = sizeof(FSILBus), +}; + +static const TypeInfo fsi_lbus_device_type_info = { + .name = TYPE_FSI_LBUS_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(FSILBusDevice), + .abstract = true, +}; + +static uint64_t fsi_scratchpad_read(void *opaque, hwaddr addr, unsigned size) +{ + FSIScratchPad *s = SCRATCHPAD(opaque); + int reg = TO_REG(addr); + + trace_fsi_scratchpad_read(addr, size); + + if (reg >= FSI_SCRATCHPAD_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", + __func__, addr); + return 0; + } + + return s->regs[reg]; +} + +static void fsi_scratchpad_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + FSIScratchPad *s = SCRATCHPAD(opaque); + + trace_fsi_scratchpad_write(addr, size, data); + int reg = TO_REG(addr); + + if (reg >= FSI_SCRATCHPAD_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, addr); + return; + } + + s->regs[reg] = data; +} + +static const struct MemoryRegionOps scratchpad_ops = { + .read = fsi_scratchpad_read, + .write = fsi_scratchpad_write, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void fsi_scratchpad_realize(DeviceState *dev, Error **errp) +{ + FSILBusDevice *ldev = FSI_LBUS_DEVICE(dev); + + memory_region_init_io(&ldev->iomem, OBJECT(ldev), &scratchpad_ops, + ldev, TYPE_FSI_SCRATCHPAD, 0x400); +} + +static void fsi_scratchpad_reset(DeviceState *dev) +{ + FSIScratchPad *s = SCRATCHPAD(dev); + + memset(s->regs, 0, sizeof(s->regs)); +} + +static void fsi_scratchpad_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->bus_type = TYPE_FSI_LBUS; + dc->realize = fsi_scratchpad_realize; + dc->reset = fsi_scratchpad_reset; +} + +static const TypeInfo fsi_scratchpad_info = { + .name = TYPE_FSI_SCRATCHPAD, + .parent = TYPE_FSI_LBUS_DEVICE, + .instance_size = sizeof(FSIScratchPad), + .class_init = fsi_scratchpad_class_init, +}; + +static void fsi_lbus_register_types(void) +{ + type_register_static(&fsi_lbus_info); + type_register_static(&fsi_lbus_device_type_info); + type_register_static(&fsi_scratchpad_info); +} + +type_init(fsi_lbus_register_types); diff --git a/hw/fsi/meson.build b/hw/fsi/meson.build new file mode 100644 index 00000000000..a18a076552f --- /dev/null +++ b/hw/fsi/meson.build @@ -0,0 +1,2 @@ +system_ss.add(when: 'CONFIG_FSI', if_true: files('lbus.c','fsi.c','cfam.c','fsi-master.c')) +system_ss.add(when: 'CONFIG_FSI_APB2OPB_ASPEED', if_true: files('aspeed_apb2opb.c')) diff --git a/hw/fsi/trace-events b/hw/fsi/trace-events new file mode 100644 index 00000000000..9e286d08d3c --- /dev/null +++ b/hw/fsi/trace-events @@ -0,0 +1,13 @@ +fsi_scratchpad_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size=%d" +fsi_scratchpad_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size=%d value=0x%"PRIx64 +fsi_slave_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size=%d" +fsi_slave_write(uint64_t addr, uint32_t size, uint64_t 
data) "@0x%" PRIx64 " size=%d value=0x%"PRIx64 +fsi_cfam_config_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size=%d" +fsi_cfam_config_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size=%d value=0x%"PRIx64 +fsi_cfam_unimplemented_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size=%d" +fsi_cfam_unimplemented_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size=%d value=0x%"PRIx64 +fsi_cfam_config_write_noaddr(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size=%d value=0x%"PRIx64 +fsi_master_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size=%d" +fsi_master_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size=%d value=0x%"PRIx64 +fsi_aspeed_apb2opb_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size=%d" +fsi_aspeed_apb2opb_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size=%d value=0x%"PRIx64 diff --git a/hw/fsi/trace.h b/hw/fsi/trace.h new file mode 100644 index 00000000000..ee67c7fb04d --- /dev/null +++ b/hw/fsi/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_fsi.h" diff --git a/hw/gpio/Kconfig b/hw/gpio/Kconfig index d2cf3accc88..19c97cc823f 100644 --- a/hw/gpio/Kconfig +++ b/hw/gpio/Kconfig @@ -16,3 +16,10 @@ config GPIO_PWR config SIFIVE_GPIO bool + +config STM32L4X5_GPIO + bool + +config PCF8574 + bool + depends on I2C diff --git a/hw/gpio/aspeed_gpio.c b/hw/gpio/aspeed_gpio.c index 1e267dd4820..c1781e2ba36 100644 --- a/hw/gpio/aspeed_gpio.c +++ b/hw/gpio/aspeed_gpio.c @@ -1067,7 +1067,7 @@ static const VMStateDescription vmstate_gpio_regs = { .name = TYPE_ASPEED_GPIO"/regs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(data_value, GPIOSets), VMSTATE_UINT32(data_read, GPIOSets), VMSTATE_UINT32(direction, GPIOSets), @@ -1090,7 +1090,7 @@ static const VMStateDescription vmstate_aspeed_gpio = { .name = TYPE_ASPEED_GPIO, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(sets, AspeedGPIOState, ASPEED_GPIO_MAX_NR_SETS, 1, vmstate_gpio_regs, GPIOSets), VMSTATE_UINT32_ARRAY(debounce_regs, AspeedGPIOState, diff --git a/hw/gpio/bcm2835_gpio.c b/hw/gpio/bcm2835_gpio.c index c995bba1d9f..6bd50bb0b69 100644 --- a/hw/gpio/bcm2835_gpio.c +++ b/hw/gpio/bcm2835_gpio.c @@ -284,7 +284,7 @@ static const VMStateDescription vmstate_bcm2835_gpio = { .name = "bcm2835_gpio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(fsel, BCM2835GpioState, 54), VMSTATE_UINT32(lev0, BCM2835GpioState), VMSTATE_UINT32(lev1, BCM2835GpioState), diff --git a/hw/gpio/bcm2838_gpio.c b/hw/gpio/bcm2838_gpio.c new file mode 100644 index 00000000000..2ddf62f6959 --- /dev/null +++ b/hw/gpio/bcm2838_gpio.c @@ -0,0 +1,390 @@ +/* + * Raspberry Pi (BCM2838) GPIO Controller + * This implementation is based on bcm2835_gpio (hw/gpio/bcm2835_gpio.c) + * + * Copyright (c) 2022 Auriga LLC + * + * Authors: + * Lotosh, Aleksey + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/sd/sd.h" +#include "hw/gpio/bcm2838_gpio.h" +#include "hw/irq.h" + +#define GPFSEL0 0x00 +#define GPFSEL1 0x04 +#define GPFSEL2 0x08 +#define GPFSEL3 0x0C +#define GPFSEL4 0x10 +#define GPFSEL5 0x14 +#define GPSET0 0x1C 
+#define GPSET1 0x20 +#define GPCLR0 0x28 +#define GPCLR1 0x2C +#define GPLEV0 0x34 +#define GPLEV1 0x38 +#define GPEDS0 0x40 +#define GPEDS1 0x44 +#define GPREN0 0x4C +#define GPREN1 0x50 +#define GPFEN0 0x58 +#define GPFEN1 0x5C +#define GPHEN0 0x64 +#define GPHEN1 0x68 +#define GPLEN0 0x70 +#define GPLEN1 0x74 +#define GPAREN0 0x7C +#define GPAREN1 0x80 +#define GPAFEN0 0x88 +#define GPAFEN1 0x8C + +#define GPIO_PUP_PDN_CNTRL_REG0 0xE4 +#define GPIO_PUP_PDN_CNTRL_REG1 0xE8 +#define GPIO_PUP_PDN_CNTRL_REG2 0xEC +#define GPIO_PUP_PDN_CNTRL_REG3 0xF0 + +#define RESET_VAL_CNTRL_REG0 0xAAA95555 +#define RESET_VAL_CNTRL_REG1 0xA0AAAAAA +#define RESET_VAL_CNTRL_REG2 0x50AAA95A +#define RESET_VAL_CNTRL_REG3 0x00055555 + +#define NUM_FSELN_IN_GPFSELN 10 +#define NUM_BITS_FSELN 3 +#define MASK_FSELN 0x7 + +#define BYTES_IN_WORD 4 + +/* bcm,function property */ +#define BCM2838_FSEL_GPIO_IN 0 +#define BCM2838_FSEL_GPIO_OUT 1 +#define BCM2838_FSEL_ALT5 2 +#define BCM2838_FSEL_ALT4 3 +#define BCM2838_FSEL_ALT0 4 +#define BCM2838_FSEL_ALT1 5 +#define BCM2838_FSEL_ALT2 6 +#define BCM2838_FSEL_ALT3 7 + +static uint32_t gpfsel_get(BCM2838GpioState *s, uint8_t reg) +{ + int i; + uint32_t value = 0; + for (i = 0; i < NUM_FSELN_IN_GPFSELN; i++) { + uint32_t index = NUM_FSELN_IN_GPFSELN * reg + i; + if (index < sizeof(s->fsel)) { + value |= (s->fsel[index] & MASK_FSELN) << (NUM_BITS_FSELN * i); + } + } + return value; +} + +static void gpfsel_set(BCM2838GpioState *s, uint8_t reg, uint32_t value) +{ + int i; + for (i = 0; i < NUM_FSELN_IN_GPFSELN; i++) { + uint32_t index = NUM_FSELN_IN_GPFSELN * reg + i; + if (index < sizeof(s->fsel)) { + int fsel = (value >> (NUM_BITS_FSELN * i)) & MASK_FSELN; + s->fsel[index] = fsel; + } + } + + /* SD controller selection (48-53) */ + if (s->sd_fsel != BCM2838_FSEL_GPIO_IN + && (s->fsel[48] == BCM2838_FSEL_GPIO_IN) + && (s->fsel[49] == BCM2838_FSEL_GPIO_IN) + && (s->fsel[50] == BCM2838_FSEL_GPIO_IN) + && (s->fsel[51] == BCM2838_FSEL_GPIO_IN) + && (s->fsel[52] == BCM2838_FSEL_GPIO_IN) + && (s->fsel[53] == BCM2838_FSEL_GPIO_IN) + ) { + /* SDHCI controller selected */ + sdbus_reparent_card(s->sdbus_sdhost, s->sdbus_sdhci); + s->sd_fsel = BCM2838_FSEL_GPIO_IN; + } else if (s->sd_fsel != BCM2838_FSEL_ALT0 + && (s->fsel[48] == BCM2838_FSEL_ALT0) /* SD_CLK_R */ + && (s->fsel[49] == BCM2838_FSEL_ALT0) /* SD_CMD_R */ + && (s->fsel[50] == BCM2838_FSEL_ALT0) /* SD_DATA0_R */ + && (s->fsel[51] == BCM2838_FSEL_ALT0) /* SD_DATA1_R */ + && (s->fsel[52] == BCM2838_FSEL_ALT0) /* SD_DATA2_R */ + && (s->fsel[53] == BCM2838_FSEL_ALT0) /* SD_DATA3_R */ + ) { + /* SDHost controller selected */ + sdbus_reparent_card(s->sdbus_sdhci, s->sdbus_sdhost); + s->sd_fsel = BCM2838_FSEL_ALT0; + } +} + +static int gpfsel_is_out(BCM2838GpioState *s, int index) +{ + if (index >= 0 && index < BCM2838_GPIO_NUM) { + return s->fsel[index] == 1; + } + return 0; +} + +static void gpset(BCM2838GpioState *s, uint32_t val, uint8_t start, + uint8_t count, uint32_t *lev) +{ + uint32_t changes = val & ~*lev; + uint32_t cur = 1; + + int i; + for (i = 0; i < count; i++) { + if ((changes & cur) && (gpfsel_is_out(s, start + i))) { + qemu_set_irq(s->out[start + i], 1); + } + cur <<= 1; + } + + *lev |= val; +} + +static void gpclr(BCM2838GpioState *s, uint32_t val, uint8_t start, + uint8_t count, uint32_t *lev) +{ + uint32_t changes = val & *lev; + uint32_t cur = 1; + + int i; + for (i = 0; i < count; i++) { + if ((changes & cur) && (gpfsel_is_out(s, start + i))) { + qemu_set_irq(s->out[start + i], 0); + } + cur <<= 1; + } + 
+ *lev &= ~val; +} + +static uint64_t bcm2838_gpio_read(void *opaque, hwaddr offset, unsigned size) +{ + BCM2838GpioState *s = (BCM2838GpioState *)opaque; + uint64_t value = 0; + + switch (offset) { + case GPFSEL0: + case GPFSEL1: + case GPFSEL2: + case GPFSEL3: + case GPFSEL4: + case GPFSEL5: + value = gpfsel_get(s, offset / BYTES_IN_WORD); + break; + case GPSET0: + case GPSET1: + case GPCLR0: + case GPCLR1: + /* Write Only */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: %s: Attempt reading from write only" + " register. 0x%"PRIx64" will be returned." + " Address 0x%"HWADDR_PRIx", size %u\n", + TYPE_BCM2838_GPIO, __func__, value, offset, size); + break; + case GPLEV0: + value = s->lev0; + break; + case GPLEV1: + value = s->lev1; + break; + case GPEDS0: + case GPEDS1: + case GPREN0: + case GPREN1: + case GPFEN0: + case GPFEN1: + case GPHEN0: + case GPHEN1: + case GPLEN0: + case GPLEN1: + case GPAREN0: + case GPAREN1: + case GPAFEN0: + case GPAFEN1: + /* Not implemented */ + qemu_log_mask(LOG_UNIMP, "%s: %s: not implemented for %"HWADDR_PRIx"\n", + TYPE_BCM2838_GPIO, __func__, offset); + break; + case GPIO_PUP_PDN_CNTRL_REG0: + case GPIO_PUP_PDN_CNTRL_REG1: + case GPIO_PUP_PDN_CNTRL_REG2: + case GPIO_PUP_PDN_CNTRL_REG3: + value = s->pup_cntrl_reg[(offset - GPIO_PUP_PDN_CNTRL_REG0) + / sizeof(s->pup_cntrl_reg[0])]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: %s: bad offset %"HWADDR_PRIx"\n", + TYPE_BCM2838_GPIO, __func__, offset); + break; + } + + return value; +} + +static void bcm2838_gpio_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + BCM2838GpioState *s = (BCM2838GpioState *)opaque; + + switch (offset) { + case GPFSEL0: + case GPFSEL1: + case GPFSEL2: + case GPFSEL3: + case GPFSEL4: + case GPFSEL5: + gpfsel_set(s, offset / BYTES_IN_WORD, value); + break; + case GPSET0: + gpset(s, value, 0, 32, &s->lev0); + break; + case GPSET1: + gpset(s, value, 32, 22, &s->lev1); + break; + case GPCLR0: + gpclr(s, value, 0, 32, &s->lev0); + break; + case GPCLR1: + gpclr(s, value, 32, 22, &s->lev1); + break; + case GPLEV0: + case GPLEV1: + /* Read Only */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: %s: Attempt writing 0x%"PRIx64"" + " to read only register. Ignored." 
+ " Address 0x%"HWADDR_PRIx", size %u\n", + TYPE_BCM2838_GPIO, __func__, value, offset, size); + break; + case GPEDS0: + case GPEDS1: + case GPREN0: + case GPREN1: + case GPFEN0: + case GPFEN1: + case GPHEN0: + case GPHEN1: + case GPLEN0: + case GPLEN1: + case GPAREN0: + case GPAREN1: + case GPAFEN0: + case GPAFEN1: + /* Not implemented */ + qemu_log_mask(LOG_UNIMP, "%s: %s: not implemented for %"HWADDR_PRIx"\n", + TYPE_BCM2838_GPIO, __func__, offset); + break; + case GPIO_PUP_PDN_CNTRL_REG0: + case GPIO_PUP_PDN_CNTRL_REG1: + case GPIO_PUP_PDN_CNTRL_REG2: + case GPIO_PUP_PDN_CNTRL_REG3: + s->pup_cntrl_reg[(offset - GPIO_PUP_PDN_CNTRL_REG0) + / sizeof(s->pup_cntrl_reg[0])] = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: %s: bad offset %"HWADDR_PRIx"\n", + TYPE_BCM2838_GPIO, __func__, offset); + } + return; +} + +static void bcm2838_gpio_reset(DeviceState *dev) +{ + BCM2838GpioState *s = BCM2838_GPIO(dev); + + memset(s->fsel, 0, sizeof(s->fsel)); + + s->sd_fsel = 0; + + /* SDHCI is selected by default */ + sdbus_reparent_card(&s->sdbus, s->sdbus_sdhci); + + s->lev0 = 0; + s->lev1 = 0; + + memset(s->fsel, 0, sizeof(s->fsel)); + + s->pup_cntrl_reg[0] = RESET_VAL_CNTRL_REG0; + s->pup_cntrl_reg[1] = RESET_VAL_CNTRL_REG1; + s->pup_cntrl_reg[2] = RESET_VAL_CNTRL_REG2; + s->pup_cntrl_reg[3] = RESET_VAL_CNTRL_REG3; +} + +static const MemoryRegionOps bcm2838_gpio_ops = { + .read = bcm2838_gpio_read, + .write = bcm2838_gpio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_bcm2838_gpio = { + .name = "bcm2838_gpio", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(fsel, BCM2838GpioState, BCM2838_GPIO_NUM), + VMSTATE_UINT32(lev0, BCM2838GpioState), + VMSTATE_UINT32(lev1, BCM2838GpioState), + VMSTATE_UINT8(sd_fsel, BCM2838GpioState), + VMSTATE_UINT32_ARRAY(pup_cntrl_reg, BCM2838GpioState, + GPIO_PUP_PDN_CNTRL_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2838_gpio_init(Object *obj) +{ + BCM2838GpioState *s = BCM2838_GPIO(obj); + DeviceState *dev = DEVICE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, DEVICE(s), "sd-bus"); + + memory_region_init_io(&s->iomem, obj, &bcm2838_gpio_ops, s, + "bcm2838_gpio", BCM2838_GPIO_REGS_SIZE); + sysbus_init_mmio(sbd, &s->iomem); + qdev_init_gpio_out(dev, s->out, BCM2838_GPIO_NUM); +} + +static void bcm2838_gpio_realize(DeviceState *dev, Error **errp) +{ + BCM2838GpioState *s = BCM2838_GPIO(dev); + Object *obj; + + obj = object_property_get_link(OBJECT(dev), "sdbus-sdhci", &error_abort); + s->sdbus_sdhci = SD_BUS(obj); + + obj = object_property_get_link(OBJECT(dev), "sdbus-sdhost", &error_abort); + s->sdbus_sdhost = SD_BUS(obj); +} + +static void bcm2838_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_bcm2838_gpio; + dc->realize = &bcm2838_gpio_realize; + dc->reset = &bcm2838_gpio_reset; +} + +static const TypeInfo bcm2838_gpio_info = { + .name = TYPE_BCM2838_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2838GpioState), + .instance_init = bcm2838_gpio_init, + .class_init = bcm2838_gpio_class_init, +}; + +static void bcm2838_gpio_register_types(void) +{ + type_register_static(&bcm2838_gpio_info); +} + +type_init(bcm2838_gpio_register_types) diff --git a/hw/gpio/gpio_key.c b/hw/gpio/gpio_key.c index 74f61383562..61bb5870589 100644 --- a/hw/gpio/gpio_key.c +++ b/hw/gpio/gpio_key.c @@ -45,7 +45,7 @@ static const 
VMStateDescription vmstate_gpio_key = { .name = "gpio-key", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, GPIOKEYState), VMSTATE_END_OF_LIST() } diff --git a/hw/gpio/imx_gpio.c b/hw/gpio/imx_gpio.c index c7f98b7bb15..e53b00d951d 100644 --- a/hw/gpio/imx_gpio.c +++ b/hw/gpio/imx_gpio.c @@ -277,7 +277,7 @@ static const VMStateDescription vmstate_imx_gpio = { .name = TYPE_IMX_GPIO, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(dr, IMXGPIOState), VMSTATE_UINT32(gdir, IMXGPIOState), VMSTATE_UINT32(psr, IMXGPIOState), diff --git a/hw/gpio/max7310.c b/hw/gpio/max7310.c index 4470cfe9856..86315714fbd 100644 --- a/hw/gpio/max7310.c +++ b/hw/gpio/max7310.c @@ -155,7 +155,7 @@ static const VMStateDescription vmstate_max7310 = { .name = "max7310", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(i2c_command_byte, MAX7310State), VMSTATE_INT32(len, MAX7310State), VMSTATE_UINT8(level, MAX7310State), diff --git a/hw/gpio/meson.build b/hw/gpio/meson.build index 066ea96480f..a7495d196ae 100644 --- a/hw/gpio/meson.build +++ b/hw/gpio/meson.build @@ -2,6 +2,8 @@ system_ss.add(when: 'CONFIG_GPIO_KEY', if_true: files('gpio_key.c')) system_ss.add(when: 'CONFIG_GPIO_MPC8XXX', if_true: files('mpc8xxx.c')) system_ss.add(when: 'CONFIG_GPIO_PWR', if_true: files('gpio_pwr.c')) system_ss.add(when: 'CONFIG_MAX7310', if_true: files('max7310.c')) +system_ss.add(when: 'CONFIG_PCA9552', if_true: files('pca9552.c')) +system_ss.add(when: 'CONFIG_PCA9554', if_true: files('pca9554.c')) system_ss.add(when: 'CONFIG_PL061', if_true: files('pl061.c')) system_ss.add(when: 'CONFIG_ZAURUS', if_true: files('zaurus.c')) @@ -9,6 +11,11 @@ system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpio.c')) system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_gpio.c')) system_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_gpio.c')) system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_gpio.c')) -system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_gpio.c')) +system_ss.add(when: 'CONFIG_RASPI', if_true: files( + 'bcm2835_gpio.c', + 'bcm2838_gpio.c' +)) +system_ss.add(when: 'CONFIG_STM32L4X5_SOC', if_true: files('stm32l4x5_gpio.c')) system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_gpio.c')) system_ss.add(when: 'CONFIG_SIFIVE_GPIO', if_true: files('sifive_gpio.c')) +system_ss.add(when: 'CONFIG_PCF8574', if_true: files('pcf8574.c')) diff --git a/hw/gpio/mpc8xxx.c b/hw/gpio/mpc8xxx.c index cb42acb6da8..0b3f9e516da 100644 --- a/hw/gpio/mpc8xxx.c +++ b/hw/gpio/mpc8xxx.c @@ -48,7 +48,7 @@ static const VMStateDescription vmstate_mpc8xxx_gpio = { .name = "mpc8xxx_gpio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(dir, MPC8XXXGPIOState), VMSTATE_UINT32(odr, MPC8XXXGPIOState), VMSTATE_UINT32(dat, MPC8XXXGPIOState), diff --git a/hw/gpio/npcm7xx_gpio.c b/hw/gpio/npcm7xx_gpio.c index 3376901ab13..6e70ac1f24b 100644 --- a/hw/gpio/npcm7xx_gpio.c +++ b/hw/gpio/npcm7xx_gpio.c @@ -377,7 +377,7 @@ static const VMStateDescription vmstate_npcm7xx_gpio = { .name = "npcm7xx-gpio", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(pin_level, NPCM7xxGPIOState), VMSTATE_UINT32(ext_level, NPCM7xxGPIOState), VMSTATE_UINT32(ext_driven, 
NPCM7xxGPIOState), diff --git a/hw/gpio/nrf51_gpio.c b/hw/gpio/nrf51_gpio.c index 08396c69a4b..ffc7dff7964 100644 --- a/hw/gpio/nrf51_gpio.c +++ b/hw/gpio/nrf51_gpio.c @@ -280,7 +280,7 @@ static const VMStateDescription vmstate_nrf51_gpio = { .name = TYPE_NRF51_GPIO, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(out, NRF51GPIOState), VMSTATE_UINT32(in, NRF51GPIOState), VMSTATE_UINT32(in_mask, NRF51GPIOState), diff --git a/hw/misc/pca9552.c b/hw/gpio/pca9552.c similarity index 85% rename from hw/misc/pca9552.c rename to hw/gpio/pca9552.c index fff19e369a3..27d4db06809 100644 --- a/hw/misc/pca9552.c +++ b/hw/gpio/pca9552.c @@ -15,8 +15,8 @@ #include "qemu/module.h" #include "qemu/bitops.h" #include "hw/qdev-properties.h" -#include "hw/misc/pca9552.h" -#include "hw/misc/pca9552_regs.h" +#include "hw/gpio/pca9552.h" +#include "hw/gpio/pca9552_regs.h" #include "hw/irq.h" #include "migration/vmstate.h" #include "qapi/error.h" @@ -36,11 +36,16 @@ typedef struct PCA955xClass PCA955xClass; DECLARE_CLASS_CHECKERS(PCA955xClass, PCA955X, TYPE_PCA955X) - +/* + * Note: The LED_ON and LED_OFF configuration values for the PCA955X + * chips are the reverse of the PCA953X family of chips. + */ #define PCA9552_LED_ON 0x0 #define PCA9552_LED_OFF 0x1 #define PCA9552_LED_PWM0 0x2 #define PCA9552_LED_PWM1 0x3 +#define PCA9552_PIN_LOW 0x0 +#define PCA9552_PIN_HIZ 0x1 static const char *led_state[] = {"on", "off", "pwm0", "pwm1"}; @@ -107,17 +112,27 @@ static void pca955x_update_pin_input(PCA955xState *s) for (i = 0; i < k->pin_count; i++) { uint8_t input_reg = PCA9552_INPUT0 + (i / 8); - uint8_t input_shift = (i % 8); + uint8_t bit_mask = 1 << (i % 8); uint8_t config = pca955x_pin_get_config(s, i); + uint8_t old_value = s->regs[input_reg] & bit_mask; + uint8_t new_value; switch (config) { case PCA9552_LED_ON: - qemu_set_irq(s->gpio[i], 1); - s->regs[input_reg] |= 1 << input_shift; + /* Pin is set to 0V to turn on LED */ + s->regs[input_reg] &= ~bit_mask; break; case PCA9552_LED_OFF: - qemu_set_irq(s->gpio[i], 0); - s->regs[input_reg] &= ~(1 << input_shift); + /* + * Pin is set to Hi-Z to turn off LED and + * pullup sets it to a logical 1 unless + * external device drives it low. 
+ */ + if (s->ext_state[i] == PCA9552_PIN_LOW) { + s->regs[input_reg] &= ~bit_mask; + } else { + s->regs[input_reg] |= bit_mask; + } break; case PCA9552_LED_PWM0: case PCA9552_LED_PWM1: @@ -125,6 +140,12 @@ static void pca955x_update_pin_input(PCA955xState *s) default: break; } + + /* update irq state only if pin state changed */ + new_value = s->regs[input_reg] & bit_mask; + if (new_value != old_value) { + qemu_set_irq(s->gpio_out[i], !!new_value); + } } } @@ -328,10 +349,11 @@ static const VMStateDescription pca9552_vmstate = { .name = "PCA9552", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(len, PCA955xState), VMSTATE_UINT8(pointer, PCA955xState), VMSTATE_UINT8_ARRAY(regs, PCA955xState, PCA955X_NR_REGS), + VMSTATE_UINT8_ARRAY(ext_state, PCA955xState, PCA955X_PIN_COUNT_MAX), VMSTATE_I2C_SLAVE(i2c, PCA955xState), VMSTATE_END_OF_LIST() } @@ -350,6 +372,7 @@ static void pca9552_reset(DeviceState *dev) s->regs[PCA9552_LS2] = 0x55; s->regs[PCA9552_LS3] = 0x55; + memset(s->ext_state, PCA9552_PIN_HIZ, PCA955X_PIN_COUNT_MAX); pca955x_update_pin_input(s); s->pointer = 0xFF; @@ -372,6 +395,26 @@ static void pca955x_initfn(Object *obj) } } +static void pca955x_set_ext_state(PCA955xState *s, int pin, int level) +{ + if (s->ext_state[pin] != level) { + uint16_t pins_status = pca955x_pins_get_status(s); + s->ext_state[pin] = level; + pca955x_update_pin_input(s); + pca955x_display_pins_status(s, pins_status); + } +} + +static void pca955x_gpio_in_handler(void *opaque, int pin, int level) +{ + + PCA955xState *s = PCA955X(opaque); + PCA955xClass *k = PCA955X_GET_CLASS(s); + + assert((pin >= 0) && (pin < k->pin_count)); + pca955x_set_ext_state(s, pin, level); +} + static void pca955x_realize(DeviceState *dev, Error **errp) { PCA955xClass *k = PCA955X_GET_CLASS(dev); @@ -381,7 +424,8 @@ static void pca955x_realize(DeviceState *dev, Error **errp) s->description = g_strdup("pca-unspecified"); } - qdev_init_gpio_out(dev, s->gpio, k->pin_count); + qdev_init_gpio_out(dev, s->gpio_out, k->pin_count); + qdev_init_gpio_in(dev, pca955x_gpio_in_handler, k->pin_count); } static Property pca955x_properties[] = { diff --git a/hw/gpio/pca9554.c b/hw/gpio/pca9554.c new file mode 100644 index 00000000000..7d10a64ba7c --- /dev/null +++ b/hw/gpio/pca9554.c @@ -0,0 +1,328 @@ +/* + * PCA9554 I/O port + * + * Copyright (c) 2023, IBM Corporation. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/bitops.h" +#include "hw/qdev-properties.h" +#include "hw/gpio/pca9554.h" +#include "hw/gpio/pca9554_regs.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "trace.h" +#include "qom/object.h" + +struct PCA9554Class { + /*< private >*/ + I2CSlaveClass parent_class; + /*< public >*/ +}; +typedef struct PCA9554Class PCA9554Class; + +DECLARE_CLASS_CHECKERS(PCA9554Class, PCA9554, + TYPE_PCA9554) + +#define PCA9554_PIN_LOW 0x0 +#define PCA9554_PIN_HIZ 0x1 + +static const char *pin_state[] = {"low", "high"}; + +static void pca9554_update_pin_input(PCA9554State *s) +{ + int i; + uint8_t config = s->regs[PCA9554_CONFIG]; + uint8_t output = s->regs[PCA9554_OUTPUT]; + uint8_t internal_state = config | output; + + for (i = 0; i < PCA9554_PIN_COUNT; i++) { + uint8_t bit_mask = 1 << i; + uint8_t internal_pin_state = (internal_state >> i) & 0x1; + uint8_t old_value = s->regs[PCA9554_INPUT] & bit_mask; + uint8_t new_value; + + switch (internal_pin_state) { + case PCA9554_PIN_LOW: + s->regs[PCA9554_INPUT] &= ~bit_mask; + break; + case PCA9554_PIN_HIZ: + /* + * pullup sets it to a logical 1 unless + * external device drives it low. + */ + if (s->ext_state[i] == PCA9554_PIN_LOW) { + s->regs[PCA9554_INPUT] &= ~bit_mask; + } else { + s->regs[PCA9554_INPUT] |= bit_mask; + } + break; + default: + break; + } + + /* update irq state only if pin state changed */ + new_value = s->regs[PCA9554_INPUT] & bit_mask; + if (new_value != old_value) { + if (new_value) { + /* changed from 0 to 1 */ + qemu_set_irq(s->gpio_out[i], 1); + } else { + /* changed from 1 to 0 */ + qemu_set_irq(s->gpio_out[i], 0); + } + } + } +} + +static uint8_t pca9554_read(PCA9554State *s, uint8_t reg) +{ + switch (reg) { + case PCA9554_INPUT: + return s->regs[PCA9554_INPUT] ^ s->regs[PCA9554_POLARITY]; + case PCA9554_OUTPUT: + case PCA9554_POLARITY: + case PCA9554_CONFIG: + return s->regs[reg]; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: unexpected read to register %d\n", + __func__, reg); + return 0xFF; + } +} + +static void pca9554_write(PCA9554State *s, uint8_t reg, uint8_t data) +{ + switch (reg) { + case PCA9554_OUTPUT: + case PCA9554_CONFIG: + s->regs[reg] = data; + pca9554_update_pin_input(s); + break; + case PCA9554_POLARITY: + s->regs[reg] = data; + break; + case PCA9554_INPUT: + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: unexpected write to register %d\n", + __func__, reg); + } +} + +static uint8_t pca9554_recv(I2CSlave *i2c) +{ + PCA9554State *s = PCA9554(i2c); + uint8_t ret; + + ret = pca9554_read(s, s->pointer & 0x3); + + return ret; +} + +static int pca9554_send(I2CSlave *i2c, uint8_t data) +{ + PCA9554State *s = PCA9554(i2c); + + /* First byte sent by is the register address */ + if (s->len == 0) { + s->pointer = data; + s->len++; + } else { + pca9554_write(s, s->pointer & 0x3, data); + } + + return 0; +} + +static int pca9554_event(I2CSlave *i2c, enum i2c_event event) +{ + PCA9554State *s = PCA9554(i2c); + + s->len = 0; + return 0; +} + +static void pca9554_get_pin(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + PCA9554State *s = PCA9554(obj); + int pin, rc; + uint8_t state; + + rc = sscanf(name, "pin%2d", &pin); + if (rc != 1) { + error_setg(errp, "%s: error reading %s", __func__, name); + return; + } + if (pin < 0 || pin >= PCA9554_PIN_COUNT) { + error_setg(errp, "%s invalid pin 
%s", __func__, name); + return; + } + + state = pca9554_read(s, PCA9554_CONFIG); + state |= pca9554_read(s, PCA9554_OUTPUT); + state = (state >> pin) & 0x1; + visit_type_str(v, name, (char **)&pin_state[state], errp); +} + +static void pca9554_set_pin(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + PCA9554State *s = PCA9554(obj); + int pin, rc, val; + uint8_t state, mask; + char *state_str; + + if (!visit_type_str(v, name, &state_str, errp)) { + return; + } + rc = sscanf(name, "pin%2d", &pin); + if (rc != 1) { + error_setg(errp, "%s: error reading %s", __func__, name); + return; + } + if (pin < 0 || pin >= PCA9554_PIN_COUNT) { + error_setg(errp, "%s invalid pin %s", __func__, name); + return; + } + + for (state = 0; state < ARRAY_SIZE(pin_state); state++) { + if (!strcmp(state_str, pin_state[state])) { + break; + } + } + if (state >= ARRAY_SIZE(pin_state)) { + error_setg(errp, "%s invalid pin state %s", __func__, state_str); + return; + } + + /* First, modify the output register bit */ + val = pca9554_read(s, PCA9554_OUTPUT); + mask = 0x1 << pin; + if (state == PCA9554_PIN_LOW) { + val &= ~(mask); + } else { + val |= mask; + } + pca9554_write(s, PCA9554_OUTPUT, val); + + /* Then, clear the config register bit for output mode */ + val = pca9554_read(s, PCA9554_CONFIG); + val &= ~mask; + pca9554_write(s, PCA9554_CONFIG, val); +} + +static const VMStateDescription pca9554_vmstate = { + .name = "PCA9554", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(len, PCA9554State), + VMSTATE_UINT8(pointer, PCA9554State), + VMSTATE_UINT8_ARRAY(regs, PCA9554State, PCA9554_NR_REGS), + VMSTATE_UINT8_ARRAY(ext_state, PCA9554State, PCA9554_PIN_COUNT), + VMSTATE_I2C_SLAVE(i2c, PCA9554State), + VMSTATE_END_OF_LIST() + } +}; + +static void pca9554_reset(DeviceState *dev) +{ + PCA9554State *s = PCA9554(dev); + + s->regs[PCA9554_INPUT] = 0xFF; + s->regs[PCA9554_OUTPUT] = 0xFF; + s->regs[PCA9554_POLARITY] = 0x0; /* No pins are inverted */ + s->regs[PCA9554_CONFIG] = 0xFF; /* All pins are inputs */ + + memset(s->ext_state, PCA9554_PIN_HIZ, PCA9554_PIN_COUNT); + pca9554_update_pin_input(s); + + s->pointer = 0x0; + s->len = 0; +} + +static void pca9554_initfn(Object *obj) +{ + int pin; + + for (pin = 0; pin < PCA9554_PIN_COUNT; pin++) { + char *name; + + name = g_strdup_printf("pin%d", pin); + object_property_add(obj, name, "bool", pca9554_get_pin, pca9554_set_pin, + NULL, NULL); + g_free(name); + } +} + +static void pca9554_set_ext_state(PCA9554State *s, int pin, int level) +{ + if (s->ext_state[pin] != level) { + s->ext_state[pin] = level; + pca9554_update_pin_input(s); + } +} + +static void pca9554_gpio_in_handler(void *opaque, int pin, int level) +{ + + PCA9554State *s = PCA9554(opaque); + + assert((pin >= 0) && (pin < PCA9554_PIN_COUNT)); + pca9554_set_ext_state(s, pin, level); +} + +static void pca9554_realize(DeviceState *dev, Error **errp) +{ + PCA9554State *s = PCA9554(dev); + + if (!s->description) { + s->description = g_strdup("pca9554"); + } + + qdev_init_gpio_out(dev, s->gpio_out, PCA9554_PIN_COUNT); + qdev_init_gpio_in(dev, pca9554_gpio_in_handler, PCA9554_PIN_COUNT); +} + +static Property pca9554_properties[] = { + DEFINE_PROP_STRING("description", PCA9554State, description), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pca9554_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + k->event = pca9554_event; + k->recv = pca9554_recv; + k->send = 
pca9554_send; + dc->realize = pca9554_realize; + dc->reset = pca9554_reset; + dc->vmsd = &pca9554_vmstate; + device_class_set_props(dc, pca9554_properties); +} + +static const TypeInfo pca9554_info = { + .name = TYPE_PCA9554, + .parent = TYPE_I2C_SLAVE, + .instance_init = pca9554_initfn, + .instance_size = sizeof(PCA9554State), + .class_init = pca9554_class_init, + .class_size = sizeof(PCA9554Class), + .abstract = false, +}; + +static void pca9554_register_types(void) +{ + type_register_static(&pca9554_info); +} + +type_init(pca9554_register_types) diff --git a/hw/gpio/pcf8574.c b/hw/gpio/pcf8574.c new file mode 100644 index 00000000000..d37909e2ada --- /dev/null +++ b/hw/gpio/pcf8574.c @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * NXP PCF8574 8-port I2C GPIO expansion chip. + * Copyright (c) 2024 KNS Group (YADRO). + * Written by Dmitrii Sharikhin + */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "hw/gpio/pcf8574.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +/* + * The PCF8574 and compatible chips implement quasi-bidirectional + * IO. Electrically this means that the device sustains a pull-up on a line + * unless the IO port is configured as an output _and_ driven low. + * + * IO access is implemented as a simple I2C single-byte read + * or write operation. So, to configure a line as an input the user writes 1 + * to the corresponding bit. To configure a line as an output and drive it low + * the user writes 0 to the corresponding bit. + * + * In essence, quasi-bidirectional IO can be thought of as an + * open-drain line, except for the built-in rising-edge acceleration + * embedded in the PCF8574 IC. + * + * The PCF8574 has an interrupt request line, which is pulled down when + * the port line state differs from the last read. A port read operation + * clears this state and the INT line returns to the high state via the + * pullup. + */ + +OBJECT_DECLARE_SIMPLE_TYPE(PCF8574State, PCF8574) + +#define PORTS_COUNT (8) + +struct PCF8574State { + I2CSlave parent_obj; + uint8_t lastrq; /* Last requested state.
If it changed, assert the irq */ + uint8_t input; /* external electrical line state */ + uint8_t output; /* Pull-up (1) or drive low (0) on bit */ + qemu_irq handler[PORTS_COUNT]; + qemu_irq intrq; /* External irq request */ +}; + +static void pcf8574_reset(DeviceState *dev) +{ + PCF8574State *s = PCF8574(dev); + s->lastrq = MAKE_64BIT_MASK(0, PORTS_COUNT); + s->input = MAKE_64BIT_MASK(0, PORTS_COUNT); + s->output = MAKE_64BIT_MASK(0, PORTS_COUNT); +} + +static inline uint8_t pcf8574_line_state(PCF8574State *s) +{ + /* either we drive the line low or the external circuit does */ + return s->input & s->output; +} + +static uint8_t pcf8574_rx(I2CSlave *i2c) +{ + PCF8574State *s = PCF8574(i2c); + uint8_t linestate = pcf8574_line_state(s); + if (s->lastrq != linestate) { + s->lastrq = linestate; + if (s->intrq) { + qemu_set_irq(s->intrq, 1); + } + } + return linestate; +} + +static int pcf8574_tx(I2CSlave *i2c, uint8_t data) +{ + PCF8574State *s = PCF8574(i2c); + uint8_t prev; + uint8_t diff; + uint8_t actual; + int line = 0; + + prev = pcf8574_line_state(s); + s->output = data; + actual = pcf8574_line_state(s); + + for (diff = (actual ^ prev); diff; diff &= ~(1 << line)) { + line = ctz32(diff); + if (s->handler[line]) { + qemu_set_irq(s->handler[line], (actual >> line) & 1); + } + } + + if (s->intrq) { + qemu_set_irq(s->intrq, actual == s->lastrq); + } + + return 0; +} + +static const VMStateDescription vmstate_pcf8574 = { + .name = "pcf8574", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_I2C_SLAVE(parent_obj, PCF8574State), + VMSTATE_UINT8(lastrq, PCF8574State), + VMSTATE_UINT8(input, PCF8574State), + VMSTATE_UINT8(output, PCF8574State), + VMSTATE_END_OF_LIST() + } +}; + +static void pcf8574_gpio_set(void *opaque, int line, int level) +{ + PCF8574State *s = (PCF8574State *) opaque; + assert(line >= 0 && line < ARRAY_SIZE(s->handler)); + + if (level) { + s->input |= (1 << line); + } else { + s->input &= ~(1 << line); + } + + if (pcf8574_line_state(s) != s->lastrq && s->intrq) { + qemu_set_irq(s->intrq, 0); + } +} + +static void pcf8574_realize(DeviceState *dev, Error **errp) +{ + PCF8574State *s = PCF8574(dev); + + qdev_init_gpio_in(dev, pcf8574_gpio_set, ARRAY_SIZE(s->handler)); + qdev_init_gpio_out(dev, s->handler, ARRAY_SIZE(s->handler)); + qdev_init_gpio_out_named(dev, &s->intrq, "nINT", 1); +} + +static void pcf8574_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + k->recv = pcf8574_rx; + k->send = pcf8574_tx; + dc->realize = pcf8574_realize; + dc->reset = pcf8574_reset; + dc->vmsd = &vmstate_pcf8574; +} + +static const TypeInfo pcf8574_infos[] = { + { + .name = TYPE_PCF8574, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(PCF8574State), + .class_init = pcf8574_class_init, + } +}; + +DEFINE_TYPES(pcf8574_infos); diff --git a/hw/gpio/pl061.c b/hw/gpio/pl061.c index 899be861cc5..86f23836553 100644 --- a/hw/gpio/pl061.c +++ b/hw/gpio/pl061.c @@ -87,7 +87,7 @@ static const VMStateDescription vmstate_pl061 = { .name = "pl061", .version_id = 4, .minimum_version_id = 4, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(locked, PL061State), VMSTATE_UINT32(data, PL061State), VMSTATE_UINT32(old_out_data, PL061State), diff --git a/hw/gpio/sifive_gpio.c b/hw/gpio/sifive_gpio.c index 78bf29e996f..995a43c7958 100644 --- a/hw/gpio/sifive_gpio.c +++ b/hw/gpio/sifive_gpio.c @@ -326,7 +326,7 @@ static const VMStateDescription vmstate_sifive_gpio = { .name
= TYPE_SIFIVE_GPIO, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(value, SIFIVEGPIOState), VMSTATE_UINT32(input_en, SIFIVEGPIOState), VMSTATE_UINT32(output_en, SIFIVEGPIOState), diff --git a/hw/gpio/stm32l4x5_gpio.c b/hw/gpio/stm32l4x5_gpio.c new file mode 100644 index 00000000000..63b8763e9d3 --- /dev/null +++ b/hw/gpio/stm32l4x5_gpio.c @@ -0,0 +1,477 @@ +/* + * STM32L4x5 GPIO (General Purpose Input/Output) + * + * Copyright (c) 2024 Arnaud Minier + * Copyright (c) 2024 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +/* + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. + * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/gpio/stm32l4x5_gpio.h" +#include "hw/irq.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "qapi/visitor.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "trace.h" + +#define GPIO_MODER 0x00 +#define GPIO_OTYPER 0x04 +#define GPIO_OSPEEDR 0x08 +#define GPIO_PUPDR 0x0C +#define GPIO_IDR 0x10 +#define GPIO_ODR 0x14 +#define GPIO_BSRR 0x18 +#define GPIO_LCKR 0x1C +#define GPIO_AFRL 0x20 +#define GPIO_AFRH 0x24 +#define GPIO_BRR 0x28 +#define GPIO_ASCR 0x2C + +/* 0b11111111_11111111_00000000_00000000 */ +#define RESERVED_BITS_MASK 0xFFFF0000 + +static void update_gpio_idr(Stm32l4x5GpioState *s); + +static bool is_pull_up(Stm32l4x5GpioState *s, unsigned pin) +{ + return extract32(s->pupdr, 2 * pin, 2) == 1; +} + +static bool is_pull_down(Stm32l4x5GpioState *s, unsigned pin) +{ + return extract32(s->pupdr, 2 * pin, 2) == 2; +} + +static bool is_output(Stm32l4x5GpioState *s, unsigned pin) +{ + return extract32(s->moder, 2 * pin, 2) == 1; +} + +static bool is_open_drain(Stm32l4x5GpioState *s, unsigned pin) +{ + return extract32(s->otyper, pin, 1) == 1; +} + +static bool is_push_pull(Stm32l4x5GpioState *s, unsigned pin) +{ + return extract32(s->otyper, pin, 1) == 0; +} + +static void stm32l4x5_gpio_reset_hold(Object *obj) +{ + Stm32l4x5GpioState *s = STM32L4X5_GPIO(obj); + + s->moder = s->moder_reset; + s->otyper = 0x00000000; + s->ospeedr = s->ospeedr_reset; + s->pupdr = s->pupdr_reset; + s->idr = 0x00000000; + s->odr = 0x00000000; + s->lckr = 0x00000000; + s->afrl = 0x00000000; + s->afrh = 0x00000000; + s->ascr = 0x00000000; + + s->disconnected_pins = 0xFFFF; + s->pins_connected_high = 0x0000; + update_gpio_idr(s); +} + +static void stm32l4x5_gpio_set(void *opaque, int line, int level) +{ + Stm32l4x5GpioState *s = opaque; + /* + * The pin isn't set if the line is configured in output mode, + * except if level is 0 and the output is open-drain. + * This way there will be no short-circuit-prone situations.
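+ * A board model would typically drive this handler through the qdev GPIO API, e.g. (illustrative wiring, "gpio_port" standing for whichever device owns the port): qemu_set_irq(qdev_get_gpio_in(DEVICE(gpio_port), 5), 1); lands here with line = 5 and level = 1 and, provided pin 5 isn't configured as an output, records the pin as connected high.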
+ */ + if (is_output(s, line) && !(is_open_drain(s, line) && (level == 0))) { + qemu_log_mask(LOG_GUEST_ERROR, "Line %d can't be driven externally\n", + line); + return; + } + + s->disconnected_pins &= ~(1 << line); + if (level) { + s->pins_connected_high |= (1 << line); + } else { + s->pins_connected_high &= ~(1 << line); + } + trace_stm32l4x5_gpio_pins(s->name, s->disconnected_pins, + s->pins_connected_high); + update_gpio_idr(s); +} + + +static void update_gpio_idr(Stm32l4x5GpioState *s) +{ + uint32_t new_idr_mask = 0; + uint32_t new_idr = s->odr; + uint32_t old_idr = s->idr; + int new_pin_state, old_pin_state; + + for (int i = 0; i < GPIO_NUM_PINS; i++) { + if (is_output(s, i)) { + if (is_push_pull(s, i)) { + new_idr_mask |= (1 << i); + } else if (!(s->odr & (1 << i))) { + /* open-drain ODR 0 */ + new_idr_mask |= (1 << i); + /* open-drain ODR 1 */ + } else if (!(s->disconnected_pins & (1 << i)) && + !(s->pins_connected_high & (1 << i))) { + /* open-drain ODR 1 with pin connected low */ + new_idr_mask |= (1 << i); + new_idr &= ~(1 << i); + /* open-drain ODR 1 with inactive pin */ + } else if (is_pull_up(s, i)) { + new_idr_mask |= (1 << i); + } else if (is_pull_down(s, i)) { + new_idr_mask |= (1 << i); + new_idr &= ~(1 << i); + } + /* + * The only case left is for open-drain ODR 1 + * with an inactive pin without pull-up or pull-down: + * the value is floating. + */ + /* input or analog mode with connected pin */ + } else if (!(s->disconnected_pins & (1 << i))) { + if (s->pins_connected_high & (1 << i)) { + /* pin high */ + new_idr_mask |= (1 << i); + new_idr |= (1 << i); + } else { + /* pin low */ + new_idr_mask |= (1 << i); + new_idr &= ~(1 << i); + } + /* input or analog mode with disconnected pin */ + } else { + if (is_pull_up(s, i)) { + /* pull-up */ + new_idr_mask |= (1 << i); + new_idr |= (1 << i); + } else if (is_pull_down(s, i)) { + /* pull-down */ + new_idr_mask |= (1 << i); + new_idr &= ~(1 << i); + } + /* + * The only case left is for a disconnected pin + * without pull-up or pull-down: + * the value is floating. + */ + } + } + + s->idr = (old_idr & ~new_idr_mask) | (new_idr & new_idr_mask); + trace_stm32l4x5_gpio_update_idr(s->name, old_idr, s->idr); + + for (int i = 0; i < GPIO_NUM_PINS; i++) { + if (new_idr_mask & (1 << i)) { + new_pin_state = (new_idr & (1 << i)) > 0; + old_pin_state = (old_idr & (1 << i)) > 0; + if (new_pin_state > old_pin_state) { + qemu_irq_raise(s->pin[i]); + } else if (new_pin_state < old_pin_state) { + qemu_irq_lower(s->pin[i]); + } + } + } +} + +/* + * Return mask of pins that are both configured in output + * mode and externally driven (except pins in open-drain + * mode externally set to 0).
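+ * For instance (illustrative values): if pin 1 was previously connected through stm32l4x5_gpio_set(), MODER bits [3:2] are 01 (output) and OTYPER bit 1 is 0 (push-pull), then the returned mask has bit 1 set and the caller disconnects that pin again.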
+ */ +static uint32_t get_gpio_pinmask_to_disconnect(Stm32l4x5GpioState *s) +{ + uint32_t pins_to_disconnect = 0; + for (int i = 0; i < GPIO_NUM_PINS; i++) { + /* for each connected pin in output mode */ + if (!(s->disconnected_pins & (1 << i)) && is_output(s, i)) { + /* if either push-pull or high level */ + if (is_push_pull(s, i) || s->pins_connected_high & (1 << i)) { + pins_to_disconnect |= (1 << i); + qemu_log_mask(LOG_GUEST_ERROR, + "Line %d can't be driven externally\n", + i); + } + } + } + return pins_to_disconnect; +} + +/* + * Set field `disconnected_pins` and call `update_gpio_idr()` + */ +static void disconnect_gpio_pins(Stm32l4x5GpioState *s, uint16_t lines) +{ + s->disconnected_pins |= lines; + trace_stm32l4x5_gpio_pins(s->name, s->disconnected_pins, + s->pins_connected_high); + update_gpio_idr(s); +} + +static void disconnected_pins_set(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + Stm32l4x5GpioState *s = STM32L4X5_GPIO(obj); + uint16_t value; + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + disconnect_gpio_pins(s, value); +} + +static void disconnected_pins_get(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + visit_type_uint16(v, name, (uint16_t *)opaque, errp); +} + +static void clock_freq_get(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + Stm32l4x5GpioState *s = STM32L4X5_GPIO(obj); + uint32_t clock_freq_hz = clock_get_hz(s->clk); + visit_type_uint32(v, name, &clock_freq_hz, errp); +} + +static void stm32l4x5_gpio_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + Stm32l4x5GpioState *s = opaque; + + uint32_t value = val64; + trace_stm32l4x5_gpio_write(s->name, addr, val64); + + switch (addr) { + case GPIO_MODER: + s->moder = value; + disconnect_gpio_pins(s, get_gpio_pinmask_to_disconnect(s)); + qemu_log_mask(LOG_UNIMP, + "%s: Analog and AF modes aren't supported\n\ + Analog and AF mode behave like input mode\n", + __func__); + return; + case GPIO_OTYPER: + s->otyper = value & ~RESERVED_BITS_MASK; + disconnect_gpio_pins(s, get_gpio_pinmask_to_disconnect(s)); + return; + case GPIO_OSPEEDR: + qemu_log_mask(LOG_UNIMP, + "%s: Changing I/O output speed isn't supported\n\ + I/O speed is already maximal\n", + __func__); + s->ospeedr = value; + return; + case GPIO_PUPDR: + s->pupdr = value; + update_gpio_idr(s); + return; + case GPIO_IDR: + qemu_log_mask(LOG_UNIMP, + "%s: GPIO->IDR is read-only\n", + __func__); + return; + case GPIO_ODR: + s->odr = value & ~RESERVED_BITS_MASK; + update_gpio_idr(s); + return; + case GPIO_BSRR: { + uint32_t bits_to_reset = (value & RESERVED_BITS_MASK) >> GPIO_NUM_PINS; + uint32_t bits_to_set = value & ~RESERVED_BITS_MASK; + /* If both BSx and BRx are set, BSx has priority.*/ + s->odr &= ~bits_to_reset; + s->odr |= bits_to_set; + update_gpio_idr(s); + return; + } + case GPIO_LCKR: + qemu_log_mask(LOG_UNIMP, + "%s: Locking port bits configuration isn't supported\n", + __func__); + s->lckr = value & ~RESERVED_BITS_MASK; + return; + case GPIO_AFRL: + qemu_log_mask(LOG_UNIMP, + "%s: Alternate functions aren't supported\n", + __func__); + s->afrl = value; + return; + case GPIO_AFRH: + qemu_log_mask(LOG_UNIMP, + "%s: Alternate functions aren't supported\n", + __func__); + s->afrh = value; + return; + case GPIO_BRR: { + uint32_t bits_to_reset = value & ~RESERVED_BITS_MASK; + s->odr &= ~bits_to_reset; + update_gpio_idr(s); + return; + } + case GPIO_ASCR: + qemu_log_mask(LOG_UNIMP, + "%s: ADC function isn't supported\n", + 
__func__); + s->ascr = value & ~RESERVED_BITS_MASK; + return; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr); + } +} + +static uint64_t stm32l4x5_gpio_read(void *opaque, hwaddr addr, + unsigned int size) +{ + Stm32l4x5GpioState *s = opaque; + + trace_stm32l4x5_gpio_read(s->name, addr); + + switch (addr) { + case GPIO_MODER: + return s->moder; + case GPIO_OTYPER: + return s->otyper; + case GPIO_OSPEEDR: + return s->ospeedr; + case GPIO_PUPDR: + return s->pupdr; + case GPIO_IDR: + return s->idr; + case GPIO_ODR: + return s->odr; + case GPIO_BSRR: + return 0; + case GPIO_LCKR: + return s->lckr; + case GPIO_AFRL: + return s->afrl; + case GPIO_AFRH: + return s->afrh; + case GPIO_BRR: + return 0; + case GPIO_ASCR: + return s->ascr; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr); + return 0; + } +} + +static const MemoryRegionOps stm32l4x5_gpio_ops = { + .read = stm32l4x5_gpio_read, + .write = stm32l4x5_gpio_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void stm32l4x5_gpio_init(Object *obj) +{ + Stm32l4x5GpioState *s = STM32L4X5_GPIO(obj); + + memory_region_init_io(&s->mmio, obj, &stm32l4x5_gpio_ops, s, + TYPE_STM32L4X5_GPIO, 0x400); + + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + + qdev_init_gpio_out(DEVICE(obj), s->pin, GPIO_NUM_PINS); + qdev_init_gpio_in(DEVICE(obj), stm32l4x5_gpio_set, GPIO_NUM_PINS); + + s->clk = qdev_init_clock_in(DEVICE(s), "clk", NULL, s, 0); + + object_property_add(obj, "disconnected-pins", "uint16", + disconnected_pins_get, disconnected_pins_set, + NULL, &s->disconnected_pins); + object_property_add(obj, "clock-freq-hz", "uint32", + clock_freq_get, NULL, NULL, NULL); +} + +static void stm32l4x5_gpio_realize(DeviceState *dev, Error **errp) +{ + Stm32l4x5GpioState *s = STM32L4X5_GPIO(dev); + if (!clock_has_source(s->clk)) { + error_setg(errp, "GPIO: clk input must be connected"); + return; + } +} + +static const VMStateDescription vmstate_stm32l4x5_gpio = { + .name = TYPE_STM32L4X5_GPIO, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]){ + VMSTATE_UINT32(moder, Stm32l4x5GpioState), + VMSTATE_UINT32(otyper, Stm32l4x5GpioState), + VMSTATE_UINT32(ospeedr, Stm32l4x5GpioState), + VMSTATE_UINT32(pupdr, Stm32l4x5GpioState), + VMSTATE_UINT32(idr, Stm32l4x5GpioState), + VMSTATE_UINT32(odr, Stm32l4x5GpioState), + VMSTATE_UINT32(lckr, Stm32l4x5GpioState), + VMSTATE_UINT32(afrl, Stm32l4x5GpioState), + VMSTATE_UINT32(afrh, Stm32l4x5GpioState), + VMSTATE_UINT32(ascr, Stm32l4x5GpioState), + VMSTATE_UINT16(disconnected_pins, Stm32l4x5GpioState), + VMSTATE_UINT16(pins_connected_high, Stm32l4x5GpioState), + VMSTATE_END_OF_LIST() + } +}; + +static Property stm32l4x5_gpio_properties[] = { + DEFINE_PROP_STRING("name", Stm32l4x5GpioState, name), + DEFINE_PROP_UINT32("mode-reset", Stm32l4x5GpioState, moder_reset, 0), + DEFINE_PROP_UINT32("ospeed-reset", Stm32l4x5GpioState, ospeedr_reset, 0), + DEFINE_PROP_UINT32("pupd-reset", Stm32l4x5GpioState, pupdr_reset, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void stm32l4x5_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + device_class_set_props(dc, stm32l4x5_gpio_properties); + dc->vmsd = &vmstate_stm32l4x5_gpio; + dc->realize = 
stm32l4x5_gpio_realize; + rc->phases.hold = stm32l4x5_gpio_reset_hold; +} + +static const TypeInfo stm32l4x5_gpio_types[] = { + { + .name = TYPE_STM32L4X5_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Stm32l4x5GpioState), + .instance_init = stm32l4x5_gpio_init, + .class_init = stm32l4x5_gpio_class_init, + }, +}; + +DEFINE_TYPES(stm32l4x5_gpio_types) diff --git a/hw/gpio/trace-events b/hw/gpio/trace-events index 9736b362ac1..b91cc7e9a45 100644 --- a/hw/gpio/trace-events +++ b/hw/gpio/trace-events @@ -13,6 +13,10 @@ nrf51_gpio_write(uint64_t offset, uint64_t value) "offset 0x%" PRIx64 " value 0x nrf51_gpio_set(int64_t line, int64_t value) "line %" PRIi64 " value %" PRIi64 nrf51_gpio_update_output_irq(int64_t line, int64_t value) "line %" PRIi64 " value %" PRIi64 +# pca9552.c +pca955x_gpio_status(const char *description, const char *buf) "%s GPIOs 0-15 [%s]" +pca955x_gpio_change(const char *description, unsigned id, unsigned prev_state, unsigned current_state) "%s GPIO id:%u status: %u -> %u" + # pl061.c pl061_update(const char *id, uint32_t dir, uint32_t data, uint32_t pullups, uint32_t floating) "%s GPIODIR 0x%x GPIODATA 0x%x pullups 0x%x floating 0x%x" pl061_set_output(const char *id, int gpio, int level) "%s setting output %d to %d" @@ -31,3 +35,9 @@ sifive_gpio_update_output_irq(int64_t line, int64_t value) "line %" PRIi64 " val # aspeed_gpio.c aspeed_gpio_read(uint64_t offset, uint64_t value) "offset: 0x%" PRIx64 " value 0x%" PRIx64 aspeed_gpio_write(uint64_t offset, uint64_t value) "offset: 0x%" PRIx64 " value 0x%" PRIx64 + +# stm32l4x5_gpio.c +stm32l4x5_gpio_read(char *gpio, uint64_t addr) "GPIO%s addr: 0x%" PRIx64 " " +stm32l4x5_gpio_write(char *gpio, uint64_t addr, uint64_t data) "GPIO%s addr: 0x%" PRIx64 " val: 0x%" PRIx64 "" +stm32l4x5_gpio_update_idr(char *gpio, uint32_t old_idr, uint32_t new_idr) "GPIO%s from: 0x%x to: 0x%x" +stm32l4x5_gpio_pins(char *gpio, uint16_t disconnected, uint16_t high) "GPIO%s disconnected pins: 0x%x levels: 0x%x" diff --git a/hw/gpio/zaurus.c b/hw/gpio/zaurus.c index 7cf52a50412..5884804c589 100644 --- a/hw/gpio/zaurus.c +++ b/hw/gpio/zaurus.c @@ -222,7 +222,7 @@ static const VMStateDescription vmstate_scoop_regs = { .version_id = 1, .minimum_version_id = 0, .post_load = scoop_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(status, ScoopInfo), VMSTATE_UINT16(power, ScoopInfo), VMSTATE_UINT32(gpio_level, ScoopInfo), diff --git a/hw/hppa/Kconfig b/hw/hppa/Kconfig index dff5df7f725..c9c3e951759 100644 --- a/hw/hppa/Kconfig +++ b/hw/hppa/Kconfig @@ -2,6 +2,7 @@ config HPPA_B160L bool imply PCI_DEVICES imply E1000_PCI + imply USB_OHCI_PCI imply VIRTIO_VGA select ASTRO select DINO diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index 9e611620cc7..37ee6387e02 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -13,6 +13,7 @@ #include "qemu/error-report.h" #include "sysemu/reset.h" #include "sysemu/sysemu.h" +#include "sysemu/qtest.h" #include "sysemu/runstate.h" #include "hw/rtc/mc146818rtc.h" #include "hw/timer/i8254.h" @@ -333,6 +334,7 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; + const char *firmware = machine->firmware; MachineClass *mc = MACHINE_GET_CLASS(machine); DeviceState *dev; PCIDevice *pci_dev; @@ -342,7 +344,6 @@ static void machine_HP_common_init_tail(MachineState 
*machine, PCIBus *pci_bus, uint64_t kernel_entry = 0, kernel_low, kernel_high; MemoryRegion *addr_space = get_system_memory(); MemoryRegion *rom_region; - long i; unsigned int smp_cpus = machine->smp.cpus; SysBusDevice *s; @@ -363,16 +364,13 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, } /* Network setup. */ - if (nd_table[0].used && enable_lasi_lan()) { + if (lasi_dev) { lasi_82596_init(addr_space, translate(NULL, LASI_LAN_HPA), - qdev_get_gpio_in(lasi_dev, LASI_IRQ_LAN_HPA)); + qdev_get_gpio_in(lasi_dev, LASI_IRQ_LAN_HPA), + enable_lasi_lan()); } - for (i = 0; i < nb_nics; i++) { - if (!enable_lasi_lan()) { - pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); - } - } + pci_init_nic_devices(pci_bus, mc->default_nic); /* BMC board: HP Powerbar SP2 Diva (with console only) */ pci_dev = pci_new(-1, "pci-serial"); @@ -398,10 +396,14 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, } /* create USB OHCI controller for USB keyboard & mouse on Astro machines */ - if (!lasi_dev && machine->enable_graphics) { + if (!lasi_dev && machine->enable_graphics && defaults_enabled()) { + USBBus *usb_bus; + pci_create_simple(pci_bus, -1, "pci-ohci"); - usb_create_simple(usb_bus_find(-1), "usb-kbd"); - usb_create_simple(usb_bus_find(-1), "usb-mouse"); + usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS, + &error_abort)); + usb_create_simple(usb_bus, "usb-kbd"); + usb_create_simple(usb_bus, "usb-mouse"); } /* register power switch emulation */ @@ -412,31 +414,37 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, /* Load firmware. Given that this is not "real" firmware, but one explicitly written for the emulation, we might as - well load it directly from an ELF image. */ - firmware_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, - machine->firmware ?: "hppa-firmware.img"); - if (firmware_filename == NULL) { - error_report("no firmware provided"); - exit(1); - } + well load it directly from an ELF image. Load the 64-bit + firmware on 64-bit machines by default if not specified + on command line. */ + if (!qtest_enabled()) { + if (!firmware) { + firmware = lasi_dev ? 
"hppa-firmware.img" : "hppa-firmware64.img"; + } + firmware_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, firmware); + if (firmware_filename == NULL) { + error_report("no firmware provided"); + exit(1); + } - size = load_elf(firmware_filename, NULL, translate, NULL, - &firmware_entry, &firmware_low, &firmware_high, NULL, - true, EM_PARISC, 0, 0); + size = load_elf(firmware_filename, NULL, translate, NULL, + &firmware_entry, &firmware_low, &firmware_high, NULL, + true, EM_PARISC, 0, 0); - if (size < 0) { - error_report("could not load firmware '%s'", firmware_filename); - exit(1); - } - qemu_log_mask(CPU_LOG_PAGE, "Firmware loaded at 0x%08" PRIx64 - "-0x%08" PRIx64 ", entry at 0x%08" PRIx64 ".\n", - firmware_low, firmware_high, firmware_entry); - if (firmware_low < translate(NULL, FIRMWARE_START) || - firmware_high >= translate(NULL, FIRMWARE_END)) { - error_report("Firmware overlaps with memory or IO space"); - exit(1); + if (size < 0) { + error_report("could not load firmware '%s'", firmware_filename); + exit(1); + } + qemu_log_mask(CPU_LOG_PAGE, "Firmware loaded at 0x%08" PRIx64 + "-0x%08" PRIx64 ", entry at 0x%08" PRIx64 ".\n", + firmware_low, firmware_high, firmware_entry); + if (firmware_low < translate(NULL, FIRMWARE_START) || + firmware_high >= translate(NULL, FIRMWARE_END)) { + error_report("Firmware overlaps with memory or IO space"); + exit(1); + } + g_free(firmware_filename); } - g_free(firmware_filename); rom_region = g_new(MemoryRegion, 1); memory_region_init_ram(rom_region, NULL, "firmware", diff --git a/hw/hyperv/hv-balloon-internal.h b/hw/hyperv/hv-balloon-internal.h index 164c2e58253..ee53a28a266 100644 --- a/hw/hyperv/hv-balloon-internal.h +++ b/hw/hyperv/hv-balloon-internal.h @@ -10,7 +10,6 @@ #ifndef HW_HYPERV_HV_BALLOON_INTERNAL_H #define HW_HYPERV_HV_BALLOON_INTERNAL_H -#include "qemu/osdep.h" #define HV_BALLOON_PFN_SHIFT 12 #define HV_BALLOON_PAGE_SIZE (1 << HV_BALLOON_PFN_SHIFT) diff --git a/hw/hyperv/hv-balloon-our_range_memslots.c b/hw/hyperv/hv-balloon-our_range_memslots.c index 99bae870f37..1505a395cf7 100644 --- a/hw/hyperv/hv-balloon-our_range_memslots.c +++ b/hw/hyperv/hv-balloon-our_range_memslots.c @@ -7,6 +7,7 @@ * See the COPYING file in the top-level directory. */ +#include "qemu/osdep.h" #include "hv-balloon-internal.h" #include "hv-balloon-our_range_memslots.h" #include "trace.h" diff --git a/hw/hyperv/hv-balloon-our_range_memslots.h b/hw/hyperv/hv-balloon-our_range_memslots.h index b6f592d34b0..df3b686bc7c 100644 --- a/hw/hyperv/hv-balloon-our_range_memslots.h +++ b/hw/hyperv/hv-balloon-our_range_memslots.h @@ -10,7 +10,6 @@ #ifndef HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H #define HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H -#include "qemu/osdep.h" #include "exec/memory.h" #include "qom/object.h" diff --git a/hw/hyperv/hv-balloon-page_range_tree.c b/hw/hyperv/hv-balloon-page_range_tree.c index e178d8b413c..dfb14852f42 100644 --- a/hw/hyperv/hv-balloon-page_range_tree.c +++ b/hw/hyperv/hv-balloon-page_range_tree.c @@ -7,6 +7,7 @@ * See the COPYING file in the top-level directory. 
*/ +#include "qemu/osdep.h" #include "hv-balloon-internal.h" #include "hv-balloon-page_range_tree.h" diff --git a/hw/hyperv/hv-balloon-page_range_tree.h b/hw/hyperv/hv-balloon-page_range_tree.h index 07a9ae0da61..333772b86d2 100644 --- a/hw/hyperv/hv-balloon-page_range_tree.h +++ b/hw/hyperv/hv-balloon-page_range_tree.h @@ -10,7 +10,6 @@ #ifndef HW_HYPERV_HV_BALLOON_PAGE_RANGE_TREE_H #define HW_HYPERV_HV_BALLOON_PAGE_RANGE_TREE_H -#include "qemu/osdep.h" /* PageRange */ typedef struct PageRange { diff --git a/hw/hyperv/hv-balloon.c b/hw/hyperv/hv-balloon.c index 66f297c1d7e..3a9ef076910 100644 --- a/hw/hyperv/hv-balloon.c +++ b/hw/hyperv/hv-balloon.c @@ -7,6 +7,7 @@ * See the COPYING file in the top-level directory. */ +#include "qemu/osdep.h" #include "hv-balloon-internal.h" #include "exec/address-spaces.h" @@ -365,7 +366,7 @@ static void hv_balloon_unballoon_posting(HvBalloon *balloon, StateDesc *stdesc) PageRangeTree dtree; uint64_t *dctr; bool our_range; - struct dm_unballoon_request *ur; + g_autofree struct dm_unballoon_request *ur = NULL; size_t ur_size = sizeof(*ur) + sizeof(ur->range_array[0]); PageRange range; bool bret; @@ -387,8 +388,7 @@ static void hv_balloon_unballoon_posting(HvBalloon *balloon, StateDesc *stdesc) assert(dtree.t); assert(dctr); - ur = alloca(ur_size); - memset(ur, 0, ur_size); + ur = g_malloc0(ur_size); ur->hdr.type = DM_UNBALLOON_REQUEST; ur->hdr.size = ur_size; ur->hdr.trans_id = balloon->trans_id; @@ -513,8 +513,8 @@ static void hv_balloon_hot_add_setup(HvBalloon *balloon, StateDesc *stdesc) static void hv_balloon_hot_add_rb_wait(HvBalloon *balloon, StateDesc *stdesc) { VMBusChannel *chan = hv_balloon_get_channel(balloon); - struct dm_hot_add *ha; - size_t ha_size = sizeof(*ha) + sizeof(ha->range); + struct dm_hot_add_with_region *ha; + size_t ha_size = sizeof(*ha); assert(balloon->state == S_HOT_ADD_RB_WAIT); @@ -530,8 +530,8 @@ static void hv_balloon_hot_add_posting(HvBalloon *balloon, StateDesc *stdesc) PageRange *hot_add_range = &balloon->hot_add_range; uint64_t *current_count = &balloon->ha_current_count; VMBusChannel *chan = hv_balloon_get_channel(balloon); - struct dm_hot_add *ha; - size_t ha_size = sizeof(*ha) + sizeof(ha->range); + g_autofree struct dm_hot_add_with_region *ha = NULL; + size_t ha_size = sizeof(*ha); union dm_mem_page_range *ha_region; uint64_t align, chunk_max_size; ssize_t ret; @@ -559,9 +559,8 @@ static void hv_balloon_hot_add_posting(HvBalloon *balloon, StateDesc *stdesc) */ *current_count = MIN(hot_add_range->count, chunk_max_size); - ha = alloca(ha_size); - ha_region = &(&ha->range)[1]; - memset(ha, 0, ha_size); + ha = g_malloc0(ha_size); + ha_region = &ha->region; ha->hdr.type = DM_MEM_HOT_ADD_REQUEST; ha->hdr.size = ha_size; ha->hdr.trans_id = balloon->trans_id; @@ -1476,22 +1475,7 @@ static void hv_balloon_ensure_mr(HvBalloon *balloon) balloon->mr = g_new0(MemoryRegion, 1); memory_region_init(balloon->mr, OBJECT(balloon), TYPE_HV_BALLOON, memory_region_size(hostmem_mr)); - - /* - * The VM can indicate an alignment up to 32 GiB. Memory device core can - * usually only handle/guarantee 1 GiB alignment. The user will have to - * specify a larger maxmem eventually. - * - * The memory device core will warn the user in case maxmem might have to be - * increased and will fail plugging the device if there is not sufficient - * space after alignment. - * - * TODO: we could do the alignment ourselves in a slightly bigger region. - * But this feels better, although the warning might be annoying. 
Maybe - * we can optimize that in the future (e.g., with such a device on the - * cmdline place/size the device memory region differently. - */ - balloon->mr->align = MAX(32 * GiB, memory_region_get_alignment(hostmem_mr)); + balloon->mr->align = memory_region_get_alignment(hostmem_mr); } static void hv_balloon_free_mr(HvBalloon *balloon) @@ -1653,6 +1637,25 @@ static MemoryRegion *hv_balloon_md_get_memory_region(MemoryDeviceState *md, return balloon->mr; } +static uint64_t hv_balloon_md_get_min_alignment(const MemoryDeviceState *md) +{ + /* + * The VM can indicate an alignment up to 32 GiB. Memory device core can + * usually only handle/guarantee 1 GiB alignment. The user will have to + * specify a larger maxmem eventually. + * + * The memory device core will warn the user in case maxmem might have to be + * increased and will fail plugging the device if there is not sufficient + * space after alignment. + * + * TODO: we could do the alignment ourselves in a slightly bigger region. + * But this feels better, although the warning might be annoying. Maybe + * we can optimize that in the future (e.g., with such a device on the + * cmdline place/size the device memory region differently. + */ + return 32 * GiB; +} + static void hv_balloon_md_fill_device_info(const MemoryDeviceState *md, MemoryDeviceInfo *info) { @@ -1765,5 +1768,6 @@ static void hv_balloon_class_init(ObjectClass *klass, void *data) mdc->get_memory_region = hv_balloon_md_get_memory_region; mdc->decide_memslots = hv_balloon_decide_memslots; mdc->get_memslots = hv_balloon_get_memslots; + mdc->get_min_alignment = hv_balloon_md_get_min_alignment; mdc->fill_device_info = hv_balloon_md_fill_device_info; } diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c index 57b402b9561..3ea54ba818b 100644 --- a/hw/hyperv/hyperv.c +++ b/hw/hyperv/hyperv.c @@ -12,6 +12,7 @@ #include "qemu/module.h" #include "qapi/error.h" #include "exec/address-spaces.h" +#include "exec/memory.h" #include "sysemu/kvm.h" #include "qemu/bitops.h" #include "qemu/error-report.h" @@ -21,6 +22,9 @@ #include "qemu/rcu_queue.h" #include "hw/hyperv/hyperv.h" #include "qom/object.h" +#include "target/i386/kvm/hyperv-proto.h" +#include "target/i386/cpu.h" +#include "exec/cpu-all.h" struct SynICState { DeviceState parent_obj; @@ -947,3 +951,15 @@ uint64_t hyperv_syndbg_query_options(void) return msg.u.query_options.options; } + +static bool vmbus_recommended_features_enabled; + +bool hyperv_are_vmbus_recommended_features_enabled(void) +{ + return vmbus_recommended_features_enabled; +} + +void hyperv_set_vmbus_recommended_features_enabled(void) +{ + vmbus_recommended_features_enabled = true; +} diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c index c64eaa5a46a..f33afeeea27 100644 --- a/hw/hyperv/vmbus.c +++ b/hw/hyperv/vmbus.c @@ -526,7 +526,7 @@ static const VMStateDescription vmstate_gpadl = { .name = "vmbus/gpadl", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, VMBusGpadl), VMSTATE_UINT32(child_relid, VMBusGpadl), VMSTATE_UINT32(num_gfns, VMBusGpadl), @@ -1489,7 +1489,7 @@ static const VMStateDescription vmstate_channel = { .version_id = 0, .minimum_version_id = 0, .post_load = channel_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, VMBusChannel), VMSTATE_UINT16(subchan_idx, VMBusChannel), VMSTATE_UINT32(open_id, VMBusChannel), @@ -2380,7 +2380,7 @@ const VMStateDescription vmstate_vmbus_dev = { .name = TYPE_VMBUS_DEVICE, .version_id = 0, 
.minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(instanceid.data, VMBusDevice, 16), VMSTATE_UINT16(num_channels, VMBusDevice), VMSTATE_STRUCT_VARRAY_POINTER_UINT16(channels, VMBusDevice, @@ -2453,9 +2453,9 @@ static void vmbus_unrealize(BusState *bus) qemu_mutex_destroy(&vmbus->rx_queue_lock); } -static void vmbus_reset(BusState *bus) +static void vmbus_reset_hold(Object *obj) { - vmbus_deinit(VMBUS(bus)); + vmbus_deinit(VMBUS(obj)); } static char *vmbus_get_dev_path(DeviceState *dev) @@ -2476,12 +2476,13 @@ static char *vmbus_get_fw_dev_path(DeviceState *dev) static void vmbus_class_init(ObjectClass *klass, void *data) { BusClass *k = BUS_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); k->get_dev_path = vmbus_get_dev_path; k->get_fw_dev_path = vmbus_get_fw_dev_path; k->realize = vmbus_realize; k->unrealize = vmbus_unrealize; - k->reset = vmbus_reset; + rc->phases.hold = vmbus_reset_hold; } static int vmbus_pre_load(void *opaque) @@ -2549,7 +2550,7 @@ static const VMStateDescription vmstate_post_message_input = { .name = "vmbus/hyperv_post_message_input", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* * skip connection_id and message_type as they are validated before * queueing and ignored on dequeueing @@ -2572,7 +2573,7 @@ static const VMStateDescription vmstate_rx_queue = { .version_id = 0, .minimum_version_id = 0, .needed = vmbus_rx_queue_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(rx_queue_head, VMBus), VMSTATE_UINT8(rx_queue_size, VMBus), VMSTATE_STRUCT_ARRAY(rx_queue, VMBus, @@ -2589,7 +2590,7 @@ static const VMStateDescription vmstate_vmbus = { .minimum_version_id = 0, .pre_load = vmbus_pre_load, .post_load = vmbus_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(state, VMBus), VMSTATE_UINT32(version, VMBus), VMSTATE_UINT32(target_vp, VMBus), @@ -2598,7 +2599,7 @@ static const VMStateDescription vmstate_vmbus = { vmstate_gpadl, VMBusGpadl, link), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_rx_queue, NULL } @@ -2630,6 +2631,12 @@ static void vmbus_bridge_realize(DeviceState *dev, Error **errp) return; } + if (!hyperv_are_vmbus_recommended_features_enabled()) { + warn_report("VMBus enabled without the recommended set of Hyper-V features: " + "hv-stimer, hv-vapic and hv-runtime. 
" + "Some Windows versions might not boot or enable the VMBus device"); + } + bridge->bus = VMBUS(qbus_new(TYPE_VMBUS, dev, "vmbus")); } @@ -2643,7 +2650,7 @@ static const VMStateDescription vmstate_vmbus_bridge = { .name = TYPE_VMBUS_BRIDGE, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER(bus, VMBusBridge, vmstate_vmbus, VMBus), VMSTATE_END_OF_LIST() }, diff --git a/hw/i2c/Kconfig b/hw/i2c/Kconfig index 14886b35dac..596a7a3165a 100644 --- a/hw/i2c/Kconfig +++ b/hw/i2c/Kconfig @@ -45,3 +45,7 @@ config PCA954X config PMBUS bool select SMBUS + +config BCM2835_I2C + bool + select I2C diff --git a/hw/i2c/allwinner-i2c.c b/hw/i2c/allwinner-i2c.c index 9e8efa1d63f..8abcc39a5c2 100644 --- a/hw/i2c/allwinner-i2c.c +++ b/hw/i2c/allwinner-i2c.c @@ -415,7 +415,7 @@ static const VMStateDescription allwinner_i2c_vmstate = { .name = TYPE_AW_I2C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(addr, AWI2CState), VMSTATE_UINT8(xaddr, AWI2CState), VMSTATE_UINT8(data, AWI2CState), diff --git a/hw/i2c/aspeed_i2c.c b/hw/i2c/aspeed_i2c.c index 1037c22b2f7..b43afd250de 100644 --- a/hw/i2c/aspeed_i2c.c +++ b/hw/i2c/aspeed_i2c.c @@ -945,7 +945,7 @@ static const VMStateDescription aspeed_i2c_bus_vmstate = { .name = TYPE_ASPEED_I2C, .version_id = 5, .minimum_version_id = 5, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedI2CBus, ASPEED_I2C_NEW_NUM_REG), VMSTATE_END_OF_LIST() } @@ -955,7 +955,7 @@ static const VMStateDescription aspeed_i2c_vmstate = { .name = TYPE_ASPEED_I2C, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(intr_status, AspeedI2CState), VMSTATE_STRUCT_ARRAY(busses, AspeedI2CState, ASPEED_I2C_NR_BUSSES, 1, aspeed_i2c_bus_vmstate, diff --git a/hw/i2c/bcm2835_i2c.c b/hw/i2c/bcm2835_i2c.c new file mode 100644 index 00000000000..20ec46eeabc --- /dev/null +++ b/hw/i2c/bcm2835_i2c.c @@ -0,0 +1,282 @@ +/* + * Broadcom Serial Controller (BSC) + * + * Copyright (c) 2024 Rayhan Faizel + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/i2c/bcm2835_i2c.h" +#include "hw/irq.h" +#include "migration/vmstate.h" + +static void bcm2835_i2c_update_interrupt(BCM2835I2CState *s) +{ + int do_interrupt = 0; + /* Interrupt on RXR (Needs reading) */ + if (s->c & BCM2835_I2C_C_INTR && s->s & BCM2835_I2C_S_RXR) { + do_interrupt = 1; + } + + /* Interrupt on TXW (Needs writing) */ + if (s->c & BCM2835_I2C_C_INTT && s->s & BCM2835_I2C_S_TXW) { + do_interrupt = 1; + } + + /* Interrupt on DONE (Transfer complete) */ + if (s->c & BCM2835_I2C_C_INTD && s->s & BCM2835_I2C_S_DONE) { + do_interrupt = 1; + } + qemu_set_irq(s->irq, do_interrupt); +} + +static void bcm2835_i2c_begin_transfer(BCM2835I2CState *s) +{ + int direction = s->c & BCM2835_I2C_C_READ; + if (i2c_start_transfer(s->bus, s->a, direction)) { + s->s |= BCM2835_I2C_S_ERR; + } + s->s |= BCM2835_I2C_S_TA; + + if (direction) { + s->s |= BCM2835_I2C_S_RXR | BCM2835_I2C_S_RXD; + } else { + s->s |= BCM2835_I2C_S_TXW; + } +} + +static void bcm2835_i2c_finish_transfer(BCM2835I2CState *s) +{ + /* + * STOP is sent when DLEN counts down to zero. + * + * https://github.com/torvalds/linux/blob/v6.7/drivers/i2c/busses/i2c-bcm2835.c#L223-L261 + * It is possible to initiate repeated starts on real hardware. + * However, this requires sending another ST request before the bytes in + * TX FIFO are shifted out. + * + * This is not emulated currently. + */ + i2c_end_transfer(s->bus); + s->s |= BCM2835_I2C_S_DONE; + + /* Ensure RXD is cleared, otherwise the driver registers an error */ + s->s &= ~(BCM2835_I2C_S_TA | BCM2835_I2C_S_RXR | + BCM2835_I2C_S_TXW | BCM2835_I2C_S_RXD); +} + +static uint64_t bcm2835_i2c_read(void *opaque, hwaddr addr, unsigned size) +{ + BCM2835I2CState *s = opaque; + uint32_t readval = 0; + + switch (addr) { + case BCM2835_I2C_C: + readval = s->c; + break; + case BCM2835_I2C_S: + readval = s->s; + break; + case BCM2835_I2C_DLEN: + readval = s->dlen; + break; + case BCM2835_I2C_A: + readval = s->a; + break; + case BCM2835_I2C_FIFO: + /* We receive I2C messages directly instead of using FIFOs */ + if (s->s & BCM2835_I2C_S_TA) { + readval = i2c_recv(s->bus); + s->dlen -= 1; + + if (s->dlen == 0) { + bcm2835_i2c_finish_transfer(s); + } + } + bcm2835_i2c_update_interrupt(s); + break; + case BCM2835_I2C_DIV: + readval = s->div; + break; + case BCM2835_I2C_DEL: + readval = s->del; + break; + case BCM2835_I2C_CLKT: + readval = s->clkt; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr); + } + + return readval; +} + +static void bcm2835_i2c_write(void *opaque, hwaddr addr, + uint64_t value, unsigned int size) +{ + BCM2835I2CState *s = opaque; + uint32_t writeval = value; + + switch (addr) { + case BCM2835_I2C_C: + /* ST is a one-shot operation; it must read back as 0 */ + s->c = writeval & ~BCM2835_I2C_C_ST; + + /* Start transfer */ + if (writeval & (BCM2835_I2C_C_ST | BCM2835_I2C_C_I2CEN)) { + bcm2835_i2c_begin_transfer(s); + /* + * Handle special case where transfer starts with zero data length. + * Required for zero length i2c quick messages to work. + */ + if (s->dlen == 0) { + bcm2835_i2c_finish_transfer(s); + } + } + + bcm2835_i2c_update_interrupt(s); + break; + case BCM2835_I2C_S: + if (writeval & BCM2835_I2C_S_DONE && s->s & BCM2835_I2C_S_DONE) { + /* When DONE is cleared, DLEN should read last written value. 
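+ * A guest sequence consistent with this model (sketch, using the register offsets defined above): write the slave address to A and the byte count to DLEN, write I2CEN | ST to C to start, move the data bytes through FIFO, poll S until DONE is set, then write 1 to the DONE bit of S; DLEN then reads back the originally programmed length.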
*/ + s->dlen = s->last_dlen; + } + + /* Clear DONE, CLKT and ERR by writing 1 */ + s->s &= ~(writeval & (BCM2835_I2C_S_DONE | + BCM2835_I2C_S_ERR | BCM2835_I2C_S_CLKT)); + break; + case BCM2835_I2C_DLEN: + s->dlen = writeval; + s->last_dlen = writeval; + break; + case BCM2835_I2C_A: + s->a = writeval; + break; + case BCM2835_I2C_FIFO: + /* We send I2C messages directly instead of using FIFOs */ + if (s->s & BCM2835_I2C_S_TA) { + if (s->s & BCM2835_I2C_S_TXD) { + if (!i2c_send(s->bus, writeval & 0xff)) { + s->dlen -= 1; + } else { + s->s |= BCM2835_I2C_S_ERR; + } + } + + if (s->dlen == 0) { + bcm2835_i2c_finish_transfer(s); + } + } + bcm2835_i2c_update_interrupt(s); + break; + case BCM2835_I2C_DIV: + s->div = writeval; + break; + case BCM2835_I2C_DEL: + s->del = writeval; + break; + case BCM2835_I2C_CLKT: + s->clkt = writeval; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr); + } +} + +static const MemoryRegionOps bcm2835_i2c_ops = { + .read = bcm2835_i2c_read, + .write = bcm2835_i2c_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void bcm2835_i2c_realize(DeviceState *dev, Error **errp) +{ + BCM2835I2CState *s = BCM2835_I2C(dev); + s->bus = i2c_init_bus(dev, NULL); + + memory_region_init_io(&s->iomem, OBJECT(dev), &bcm2835_i2c_ops, s, + TYPE_BCM2835_I2C, 0x24); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); +} + +static void bcm2835_i2c_reset(DeviceState *dev) +{ + BCM2835I2CState *s = BCM2835_I2C(dev); + + /* Reset values according to BCM2835 Peripheral Documentation */ + s->c = 0x0; + s->s = BCM2835_I2C_S_TXD | BCM2835_I2C_S_TXE; + s->dlen = 0x0; + s->a = 0x0; + s->div = 0x5dc; + s->del = 0x00300030; + s->clkt = 0x40; +} + +static const VMStateDescription vmstate_bcm2835_i2c = { + .name = TYPE_BCM2835_I2C, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT32(c, BCM2835I2CState), + VMSTATE_UINT32(s, BCM2835I2CState), + VMSTATE_UINT32(dlen, BCM2835I2CState), + VMSTATE_UINT32(a, BCM2835I2CState), + VMSTATE_UINT32(div, BCM2835I2CState), + VMSTATE_UINT32(del, BCM2835I2CState), + VMSTATE_UINT32(clkt, BCM2835I2CState), + VMSTATE_UINT32(last_dlen, BCM2835I2CState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bcm2835_i2c_reset; + dc->realize = bcm2835_i2c_realize; + dc->vmsd = &vmstate_bcm2835_i2c; +} + +static const TypeInfo bcm2835_i2c_info = { + .name = TYPE_BCM2835_I2C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835I2CState), + .class_init = bcm2835_i2c_class_init, +}; + +static void bcm2835_i2c_register_types(void) +{ + type_register_static(&bcm2835_i2c_info); +} + +type_init(bcm2835_i2c_register_types) diff --git a/hw/i2c/core.c b/hw/i2c/core.c index 879a1d45cb1..4cf30b2c863 100644 --- a/hw/i2c/core.c +++ b/hw/i2c/core.c @@ -50,7 +50,7 @@ static const VMStateDescription vmstate_i2c_bus = { .version_id = 1, .minimum_version_id = 1, .pre_save = i2c_bus_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(saved_address, I2CBus), VMSTATE_END_OF_LIST() } @@ -359,7 +359,7 @@ const VMStateDescription vmstate_i2c_slave = { .version_id = 1, .minimum_version_id = 1, .post_load = i2c_slave_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(address, 
I2CSlave), VMSTATE_END_OF_LIST() } diff --git a/hw/i2c/exynos4210_i2c.c b/hw/i2c/exynos4210_i2c.c index b65a7d0222e..9445424d5fd 100644 --- a/hw/i2c/exynos4210_i2c.c +++ b/hw/i2c/exynos4210_i2c.c @@ -273,7 +273,7 @@ static const VMStateDescription exynos4210_i2c_vmstate = { .name = "exynos4210.i2c", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(i2ccon, Exynos4210I2CState), VMSTATE_UINT8(i2cstat, Exynos4210I2CState), VMSTATE_UINT8(i2cds, Exynos4210I2CState), diff --git a/hw/i2c/imx_i2c.c b/hw/i2c/imx_i2c.c index 9792583fea7..a25676f0254 100644 --- a/hw/i2c/imx_i2c.c +++ b/hw/i2c/imx_i2c.c @@ -285,7 +285,7 @@ static const VMStateDescription imx_i2c_vmstate = { .name = TYPE_IMX_I2C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(address, IMXI2CState), VMSTATE_UINT16(iadr, IMXI2CState), VMSTATE_UINT16(ifdr, IMXI2CState), diff --git a/hw/i2c/meson.build b/hw/i2c/meson.build index b58bc167dbc..c459adcb596 100644 --- a/hw/i2c/meson.build +++ b/hw/i2c/meson.build @@ -17,4 +17,5 @@ i2c_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_i2c.c')) i2c_ss.add(when: 'CONFIG_PPC4XX', if_true: files('ppc4xx_i2c.c')) i2c_ss.add(when: 'CONFIG_PCA954X', if_true: files('i2c_mux_pca954x.c')) i2c_ss.add(when: 'CONFIG_PMBUS', if_true: files('pmbus_device.c')) +i2c_ss.add(when: 'CONFIG_BCM2835_I2C', if_true: files('bcm2835_i2c.c')) system_ss.add_all(when: 'CONFIG_I2C', if_true: i2c_ss) diff --git a/hw/i2c/microbit_i2c.c b/hw/i2c/microbit_i2c.c index e92f9f84ea8..24d36d15b09 100644 --- a/hw/i2c/microbit_i2c.c +++ b/hw/i2c/microbit_i2c.c @@ -80,7 +80,7 @@ static const VMStateDescription microbit_i2c_vmstate = { .name = TYPE_MICROBIT_I2C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, MicrobitI2CState, MICROBIT_I2C_NREGS), VMSTATE_UINT32(read_idx, MicrobitI2CState), VMSTATE_END_OF_LIST() diff --git a/hw/i2c/mpc_i2c.c b/hw/i2c/mpc_i2c.c index 219c5484028..cb051a520f7 100644 --- a/hw/i2c/mpc_i2c.c +++ b/hw/i2c/mpc_i2c.c @@ -312,7 +312,7 @@ static const VMStateDescription mpc_i2c_vmstate = { .name = TYPE_MPC_I2C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(address, MPCI2CState), VMSTATE_UINT8(adr, MPCI2CState), VMSTATE_UINT8(fdr, MPCI2CState), diff --git a/hw/i2c/npcm7xx_smbus.c b/hw/i2c/npcm7xx_smbus.c index e7e0ba66fe7..0ea3083bb6e 100644 --- a/hw/i2c/npcm7xx_smbus.c +++ b/hw/i2c/npcm7xx_smbus.c @@ -1046,7 +1046,7 @@ static const VMStateDescription vmstate_npcm7xx_smbus = { .name = "npcm7xx-smbus", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(sda, NPCM7xxSMBusState), VMSTATE_UINT8(st, NPCM7xxSMBusState), VMSTATE_UINT8(cst, NPCM7xxSMBusState), diff --git a/hw/i2c/pm_smbus.c b/hw/i2c/pm_smbus.c index 78e7c229a85..3eed8110b95 100644 --- a/hw/i2c/pm_smbus.c +++ b/hw/i2c/pm_smbus.c @@ -455,7 +455,7 @@ const VMStateDescription pmsmb_vmstate = { .name = "pmsmb", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(smb_stat, PMSMBus), VMSTATE_UINT8(smb_ctl, PMSMBus), VMSTATE_UINT8(smb_cmd, PMSMBus), diff --git a/hw/i2c/pmbus_device.c b/hw/i2c/pmbus_device.c index 1b978e588f1..ba1d2fd7160 100644 --- a/hw/i2c/pmbus_device.c +++ b/hw/i2c/pmbus_device.c @@ -1886,7 
+1886,7 @@ const VMStateDescription vmstate_pmbus_device = { .name = TYPE_PMBUS_DEVICE, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SMBUS_DEVICE(smb, PMBusDevice), VMSTATE_UINT8(num_pages, PMBusDevice), VMSTATE_UINT8(code, PMBusDevice), diff --git a/hw/i2c/smbus_eeprom.c b/hw/i2c/smbus_eeprom.c index 12c5741f388..c42236bb139 100644 --- a/hw/i2c/smbus_eeprom.c +++ b/hw/i2c/smbus_eeprom.c @@ -100,7 +100,7 @@ static const VMStateDescription vmstate_smbus_eeprom = { .version_id = 1, .minimum_version_id = 1, .needed = smbus_eeprom_vmstate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SMBUS_DEVICE(smbusdev, SMBusEEPROMDevice), VMSTATE_UINT8_ARRAY(data, SMBusEEPROMDevice, SMBUS_EEPROM_SIZE), VMSTATE_UINT8(offset, SMBusEEPROMDevice), diff --git a/hw/i2c/smbus_ich9.c b/hw/i2c/smbus_ich9.c index 18d40e93c10..208f263ac5b 100644 --- a/hw/i2c/smbus_ich9.c +++ b/hw/i2c/smbus_ich9.c @@ -50,7 +50,7 @@ static const VMStateDescription vmstate_ich9_smbus = { .name = "ich9_smb", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, ICH9SMBState), VMSTATE_BOOL_TEST(irq_enabled, ICH9SMBState, ich9_vmstate_need_smbus), VMSTATE_STRUCT_TEST(smb, ICH9SMBState, ich9_vmstate_need_smbus, 1, diff --git a/hw/i2c/smbus_slave.c b/hw/i2c/smbus_slave.c index 2ef2c7c5f69..9f9afc25a40 100644 --- a/hw/i2c/smbus_slave.c +++ b/hw/i2c/smbus_slave.c @@ -25,11 +25,15 @@ #define DPRINTF(fmt, ...) \ do { printf("smbus(%02x): " fmt , dev->i2c.address, ## __VA_ARGS__); } while (0) #define BADF(fmt, ...) \ -do { fprintf(stderr, "smbus: error: " fmt , ## __VA_ARGS__); exit(1);} while (0) +do { g_autofree char *qom_path = object_get_canonical_path(OBJECT(dev)); \ + fprintf(stderr, "%s: smbus: error: " fmt , qom_path, ## __VA_ARGS__); \ + exit(1); } while (0) #else #define DPRINTF(fmt, ...) do {} while(0) #define BADF(fmt, ...) 
\ -do { fprintf(stderr, "smbus: error: " fmt , ## __VA_ARGS__);} while (0) +do { g_autofree char *qom_path = object_get_canonical_path(OBJECT(dev)); \ + fprintf(stderr, "%s: smbus: error: " fmt , qom_path, ## __VA_ARGS__); \ + } while (0) #endif enum { @@ -215,7 +219,7 @@ const VMStateDescription vmstate_smbus_device = { .name = TYPE_SMBUS_DEVICE, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(i2c, SMBusDevice), VMSTATE_INT32(mode, SMBusDevice), VMSTATE_INT32(data_len, SMBusDevice), diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index 55850791df4..a6ee052f9a1 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -76,6 +76,7 @@ config I440FX select PIIX select DIMM select SMBIOS + select SMBIOS_LEGACY select FW_CFG_DMA config ISAPC @@ -95,6 +96,7 @@ config Q35 imply E1000E_PCI_EXPRESS imply VMPORT imply VMMOUSE + imply IOMMUFD select PC_PCI select PC_ACPI select PCI_EXPRESS_Q35 diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 1e178341de2..53f804ac16b 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -68,6 +68,7 @@ #include "hw/acpi/utils.h" #include "hw/acpi/pci.h" #include "hw/acpi/cxl.h" +#include "hw/acpi/acpi_generic_initiator.h" #include "qom/qom-qobject.h" #include "hw/i386/amd_iommu.h" @@ -192,21 +193,10 @@ static void init_common_fadt_data(MachineState *ms, Object *o, *data = fadt; } -static Object *object_resolve_type_unambiguous(const char *typename) -{ - bool ambig; - Object *o = object_resolve_path_type("", typename, &ambig); - - if (ambig || !o) { - return NULL; - } - return o; -} - static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm) { - Object *piix = object_resolve_type_unambiguous(TYPE_PIIX4_PM); - Object *lpc = object_resolve_type_unambiguous(TYPE_ICH9_LPC_DEVICE); + Object *piix = object_resolve_type_unambiguous(TYPE_PIIX4_PM, NULL); + Object *lpc = object_resolve_type_unambiguous(TYPE_ICH9_LPC_DEVICE, NULL); Object *obj = piix ? 
piix : lpc; QObject *o; pm->cpu_hp_io_base = 0; @@ -1428,8 +1418,9 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, AcpiPmInfo *pm, AcpiMiscInfo *misc, Range *pci_hole, Range *pci_hole64, MachineState *machine) { - Object *i440fx = object_resolve_type_unambiguous(TYPE_I440FX_PCI_HOST_BRIDGE); - Object *q35 = object_resolve_type_unambiguous(TYPE_Q35_HOST_DEVICE); + Object *i440fx = object_resolve_type_unambiguous(TYPE_I440FX_PCI_HOST_BRIDGE, + NULL); + Object *q35 = object_resolve_type_unambiguous(TYPE_Q35_HOST_DEVICE, NULL); CrsRangeEntry *entry; Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs; CrsRangeSet crs_range_set; @@ -1556,7 +1547,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, } crs_range_set_init(&crs_range_set); - bus = PC_MACHINE(machine)->bus; + bus = PC_MACHINE(machine)->pcibus; if (bus) { QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); @@ -2056,6 +2047,8 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) build_srat_memory(table_data, 0, 0, 0, MEM_AFFINITY_NOFLAGS); } + build_srat_generic_pci_initiator(table_data); + /* * Entry is required for Windows to enable memory hotplug in OS * and for Linux to enable SWIOTLB when booted with less than @@ -2333,30 +2326,23 @@ static void build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { - int ivhd_table_len = 24; AMDVIState *s = AMD_IOMMU_DEVICE(x86_iommu_get_default()); GArray *ivhd_blob = g_array_new(false, true, 1); AcpiTable table = { .sig = "IVRS", .rev = 1, .oem_id = oem_id, .oem_table_id = oem_table_id }; + uint64_t feature_report; acpi_table_begin(&table, table_data); /* IVinfo - IO virtualization information common to all * IOMMU units in a system */ - build_append_int_noprefix(table_data, 40UL << 8/* PASize */, 4); + build_append_int_noprefix(table_data, + (1UL << 0) | /* EFRSup */ + (40UL << 8), /* PASize */ + 4); /* reserved */ build_append_int_noprefix(table_data, 0, 8); - /* IVHD definition - type 10h */ - build_append_int_noprefix(table_data, 0x10, 1); - /* virtualization flags */ - build_append_int_noprefix(table_data, - (1UL << 0) | /* HtTunEn */ - (1UL << 4) | /* iotblSup */ - (1UL << 6) | /* PrefSup */ - (1UL << 7), /* PPRSup */ - 1); - /* * A PCI bus walk, for each PCI host bridge, is necessary to create a * complete set of IVHD entries. Do this into a separate blob so that we @@ -2376,18 +2362,34 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, build_append_int_noprefix(ivhd_blob, 0x0000001, 4); } - ivhd_table_len += ivhd_blob->len; - /* * When interrupt remapping is supported, we add a special IVHD device - * for type IO-APIC. + * for type IO-APIC + * Refer to spec - Table 95: IVHD device entry type codes + * + * Linux IOMMU driver checks for the special IVHD device (type IO-APIC). 
+ * See Linux kernel commit 'c2ff5cf5294bcbd7fa50f7d860e90a66db7e5059' */ if (x86_iommu_ir_supported(x86_iommu_get_default())) { - ivhd_table_len += 8; + build_append_int_noprefix(ivhd_blob, + (0x1ull << 56) | /* type IOAPIC */ + (IOAPIC_SB_DEVID << 40) | /* IOAPIC devid */ + 0x48, /* special device */ + 8); } + /* IVHD definition - type 10h */ + build_append_int_noprefix(table_data, 0x10, 1); + /* virtualization flags */ + build_append_int_noprefix(table_data, + (1UL << 0) | /* HtTunEn */ + (1UL << 4) | /* iotblSup */ + (1UL << 6) | /* PrefSup */ + (1UL << 7), /* PPRSup */ + 1); + /* IVHD length */ - build_append_int_noprefix(table_data, ivhd_table_len, 2); + build_append_int_noprefix(table_data, ivhd_blob->len + 24, 2); /* DeviceID */ build_append_int_noprefix(table_data, object_property_get_int(OBJECT(&s->pci), "addr", @@ -2401,31 +2403,53 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, /* IOMMU info */ build_append_int_noprefix(table_data, 0, 2); /* IOMMU Feature Reporting */ + feature_report = (48UL << 30) | /* HATS */ + (48UL << 28) | /* GATS */ + (1UL << 2) | /* GTSup */ + (1UL << 6); /* GASup */ + if (s->xtsup) { + feature_report |= (1UL << 0); /* XTSup */ + } + build_append_int_noprefix(table_data, feature_report, 4); + + /* IVHD entries as found above */ + g_array_append_vals(table_data, ivhd_blob->data, ivhd_blob->len); + + /* IVHD definition - type 11h */ + build_append_int_noprefix(table_data, 0x11, 1); + /* virtualization flags */ build_append_int_noprefix(table_data, - (48UL << 30) | /* HATS */ - (48UL << 28) | /* GATS */ - (1UL << 2) | /* GTSup */ - (1UL << 6), /* GASup */ - 4); + (1UL << 0) | /* HtTunEn */ + (1UL << 4), /* iotblSup */ + 1); + + /* IVHD length */ + build_append_int_noprefix(table_data, ivhd_blob->len + 40, 2); + /* DeviceID */ + build_append_int_noprefix(table_data, + object_property_get_int(OBJECT(&s->pci), "addr", + &error_abort), 2); + /* Capability offset */ + build_append_int_noprefix(table_data, s->pci.capab_offset, 2); + /* IOMMU base address */ + build_append_int_noprefix(table_data, s->mmio.addr, 8); + /* PCI Segment Group */ + build_append_int_noprefix(table_data, 0, 2); + /* IOMMU info */ + build_append_int_noprefix(table_data, 0, 2); + /* IOMMU Attributes */ + build_append_int_noprefix(table_data, 0, 4); + /* EFR Register Image */ + build_append_int_noprefix(table_data, + amdvi_extended_feature_register(s), + 8); + /* EFR Register Image 2 */ + build_append_int_noprefix(table_data, 0, 8); /* IVHD entries as found above */ g_array_append_vals(table_data, ivhd_blob->data, ivhd_blob->len); - g_array_free(ivhd_blob, TRUE); - /* - * Add a special IVHD device type. - * Refer to spec - Table 95: IVHD device entry type codes - * - * Linux IOMMU driver checks for the special IVHD device (type IO-APIC). 
- * See Linux kernel commit 'c2ff5cf5294bcbd7fa50f7d860e90a66db7e5059' - */ - if (x86_iommu_ir_supported(x86_iommu_get_default())) { - build_append_int_noprefix(table_data, - (0x1ull << 56) | /* type IOAPIC */ - (IOAPIC_SB_DEVID << 40) | /* IOAPIC devid */ - 0x48, /* special device */ - 8); - } + g_array_free(ivhd_blob, TRUE); acpi_table_end(linker, &table); } @@ -2697,7 +2721,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) " migration may not work", tables_blob->len, legacy_table_size); error_printf("Try removing CPUs, NUMA nodes, memory slots" - " or PCI bridges."); + " or PCI bridges.\n"); } g_array_set_size(tables_blob, legacy_table_size); } else { @@ -2709,7 +2733,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) " migration may not work", tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2); error_printf("Try removing CPUs, NUMA nodes, memory slots" - " or PCI bridges."); + " or PCI bridges.\n"); } acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE); } @@ -2770,7 +2794,7 @@ static const VMStateDescription vmstate_acpi_build = { .name = "acpi_build", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(patched, AcpiBuildState), VMSTATE_END_OF_LIST() }, diff --git a/hw/i386/acpi-common.c b/hw/i386/acpi-common.c index 43dc23f7e06..20f19269da4 100644 --- a/hw/i386/acpi-common.c +++ b/hw/i386/acpi-common.c @@ -27,7 +27,6 @@ #include "hw/acpi/acpi.h" #include "hw/acpi/aml-build.h" #include "hw/acpi/utils.h" -#include "hw/i386/pc.h" #include "target/i386/cpu.h" #include "acpi-build.h" @@ -100,6 +99,7 @@ void acpi_build_madt(GArray *table_data, BIOSLinker *linker, int i; bool x2apic_mode = false; MachineClass *mc = MACHINE_GET_CLASS(x86ms); + X86MachineClass *x86mc = X86_MACHINE_GET_CLASS(x86ms); const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(x86ms)); AcpiTable table = { .sig = "APIC", .rev = 3, .oem_id = oem_id, .oem_table_id = oem_table_id }; @@ -122,7 +122,7 @@ void acpi_build_madt(GArray *table_data, BIOSLinker *linker, IO_APIC_SECONDARY_ADDRESS, IO_APIC_SECONDARY_IRQBASE); } - if (x86ms->apic_xrupt_override) { + if (x86mc->apic_xrupt_override) { build_xrupt_override(table_data, 0, 2, 0 /* Flags: Conforms to the specifications of the bus */); } diff --git a/hw/i386/acpi-common.h b/hw/i386/acpi-common.h index b3c56ee0145..e305aaac158 100644 --- a/hw/i386/acpi-common.h +++ b/hw/i386/acpi-common.h @@ -1,12 +1,15 @@ #ifndef HW_I386_ACPI_COMMON_H #define HW_I386_ACPI_COMMON_H +#include "hw/boards.h" #include "hw/acpi/bios-linker-loader.h" #include "hw/i386/x86.h" /* Default IOAPIC ID */ #define ACPI_BUILD_IOAPIC_ID 0x0 +void pc_madt_cpu_entry(int uid, const CPUArchIdList *apic_ids, + GArray *entry, bool force_enabled); void acpi_build_madt(GArray *table_data, BIOSLinker *linker, X86MachineState *x86ms, const char *oem_id, const char *oem_table_id); diff --git a/hw/i386/acpi-microvm.c b/hw/i386/acpi-microvm.c index 2909a739338..279da6b4aa2 100644 --- a/hw/i386/acpi-microvm.c +++ b/hw/i386/acpi-microvm.c @@ -37,6 +37,7 @@ #include "hw/pci/pci.h" #include "hw/pci/pcie_host.h" #include "hw/usb/xhci.h" +#include "hw/virtio/virtio-acpi.h" #include "hw/virtio/virtio-mmio.h" #include "hw/input/i8042.h" @@ -77,19 +78,7 @@ static void acpi_dsdt_add_virtio(Aml *scope, uint32_t irq = mms->virtio_irq_base + index; hwaddr base = VIRTIO_MMIO_BASE + index * 512; hwaddr size = 512; - - Aml *dev = aml_device("VR%02u", (unsigned)index); - aml_append(dev, aml_name_decl("_HID", 
aml_string("LNRO0005"))); - aml_append(dev, aml_name_decl("_UID", aml_int(index))); - aml_append(dev, aml_name_decl("_CCA", aml_int(1))); - - Aml *crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE)); - aml_append(crs, - aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, - AML_EXCLUSIVE, &irq, 1)); - aml_append(dev, aml_name_decl("_CRS", crs)); - aml_append(scope, dev); + virtio_acpi_dsdt_add(scope, base, size, irq, index, 1); } } } diff --git a/hw/i386/amd_iommu-stub.c b/hw/i386/amd_iommu-stub.c new file mode 100644 index 00000000000..d62a3732e60 --- /dev/null +++ b/hw/i386/amd_iommu-stub.c @@ -0,0 +1,26 @@ +/* + * Stubs for AMD IOMMU emulation + * + * Copyright (C) 2023 Bui Quang Minh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "amd_iommu.h" + +uint64_t amdvi_extended_feature_register(AMDVIState *s) +{ + return AMDVI_DEFAULT_EXT_FEATURES; +} diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index 4203144da98..6d4fde72f9b 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -31,6 +31,7 @@ #include "hw/i386/apic_internal.h" #include "trace.h" #include "hw/i386/apic-msidef.h" +#include "hw/qdev-properties.h" /* used AMD-Vi MMIO registers */ const char *amdvi_mmio_low[] = { @@ -74,6 +75,16 @@ typedef struct AMDVIIOTLBEntry { uint64_t page_mask; /* physical page size */ } AMDVIIOTLBEntry; +uint64_t amdvi_extended_feature_register(AMDVIState *s) +{ + uint64_t feature = AMDVI_DEFAULT_EXT_FEATURES; + if (s->xtsup) { + feature |= AMDVI_FEATURE_XT; + } + + return feature; +} + /* configure MMIO registers at startup/reset */ static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val, uint64_t romask, uint64_t w1cmask) @@ -1155,7 +1166,12 @@ static int amdvi_int_remap_ga(AMDVIState *iommu, irq->vector = irte.hi.fields.vector; irq->dest_mode = irte.lo.fields_remap.dm; irq->redir_hint = irte.lo.fields_remap.rq_eoi; - irq->dest = irte.lo.fields_remap.destination; + if (iommu->xtsup) { + irq->dest = irte.lo.fields_remap.destination | + (irte.hi.fields.destination_hi << 24); + } else { + irq->dest = irte.lo.fields_remap.destination & 0xff; + } return 0; } @@ -1505,8 +1521,9 @@ static void amdvi_init(AMDVIState *s) /* reset MMIO */ memset(s->mmior, 0, AMDVI_MMIO_SIZE); - amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES, - 0xffffffffffffffef, 0); + amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, + amdvi_extended_feature_register(s), + 0xffffffffffffffef, 0); amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67); } @@ -1567,7 +1584,7 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) MachineState *ms = MACHINE(qdev_get_machine()); PCMachineState *pcms = PC_MACHINE(ms); X86MachineState *x86ms = X86_MACHINE(ms); - PCIBus *bus = pcms->bus; + PCIBus *bus = pcms->pcibus; s->iotlb = g_hash_table_new_full(amdvi_uint64_hash, amdvi_uint64_equal, g_free, g_free); @@ -1589,6 +1606,11 @@ static void 
amdvi_sysbus_realize(DeviceState *dev, Error **errp) amdvi_init(s); } +static Property amdvi_properties[] = { + DEFINE_PROP_BOOL("xtsup", AMDVIState, xtsup, false), + DEFINE_PROP_END_OF_LIST(), +}; + static const VMStateDescription vmstate_amdvi_sysbus = { .name = "amd-iommu", .unmigratable = 1 @@ -1615,6 +1637,7 @@ static void amdvi_sysbus_class_init(ObjectClass *klass, void *data) dc->user_creatable = true; set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device"; + device_class_set_props(dc, amdvi_properties); } static const TypeInfo amdvi_sysbus = { diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h index c5065a3e277..73619fe9eaa 100644 --- a/hw/i386/amd_iommu.h +++ b/hw/i386/amd_iommu.h @@ -154,6 +154,7 @@ #define AMDVI_FEATURE_PREFETCH (1ULL << 0) /* page prefetch */ #define AMDVI_FEATURE_PPR (1ULL << 1) /* PPR Support */ +#define AMDVI_FEATURE_XT (1ULL << 2) /* x2APIC Support */ #define AMDVI_FEATURE_GT (1ULL << 4) /* Guest Translation */ #define AMDVI_FEATURE_IA (1ULL << 6) /* inval all support */ #define AMDVI_FEATURE_GA (1ULL << 7) /* guest VAPIC support */ @@ -173,8 +174,9 @@ #define AMDVI_IOTLB_MAX_SIZE 1024 #define AMDVI_DEVID_SHIFT 36 -/* extended feature support */ -#define AMDVI_EXT_FEATURES (AMDVI_FEATURE_PREFETCH | AMDVI_FEATURE_PPR | \ +/* default extended feature */ +#define AMDVI_DEFAULT_EXT_FEATURES \ + (AMDVI_FEATURE_PREFETCH | AMDVI_FEATURE_PPR | \ AMDVI_FEATURE_IA | AMDVI_FEATURE_GT | AMDVI_FEATURE_HE | \ AMDVI_GATS_MODE | AMDVI_HATS_MODE | AMDVI_FEATURE_GA) @@ -276,8 +278,8 @@ union irte_ga_lo { dm:1, /* ------ */ guest_mode:1, - destination:8, - rsvd_1:48; + destination:24, + rsvd_1:32; } fields_remap; }; @@ -285,7 +287,8 @@ union irte_ga_hi { uint64_t val; struct { uint64_t vector:8, - rsvd_2:56; + rsvd_2:48, + destination_hi:8; } fields; }; @@ -364,6 +367,9 @@ struct AMDVIState { /* Interrupt remapping */ bool ga_enabled; + bool xtsup; }; +uint64_t amdvi_extended_feature_register(AMDVIState *s); + #endif diff --git a/hw/i386/fw_cfg.c b/hw/i386/fw_cfg.c index 7362daa45a6..d802d2787f0 100644 --- a/hw/i386/fw_cfg.c +++ b/hw/i386/fw_cfg.c @@ -48,22 +48,34 @@ const char *fw_cfg_arch_key_name(uint16_t key) return NULL; } -void fw_cfg_build_smbios(MachineState *ms, FWCfgState *fw_cfg) +void fw_cfg_build_smbios(PCMachineState *pcms, FWCfgState *fw_cfg, + SmbiosEntryPointType ep_type) { #ifdef CONFIG_SMBIOS uint8_t *smbios_tables, *smbios_anchor; size_t smbios_tables_len, smbios_anchor_len; struct smbios_phys_mem_area *mem_array; unsigned i, array_count; + MachineState *ms = MACHINE(pcms); + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + MachineClass *mc = MACHINE_GET_CLASS(pcms); X86CPU *cpu = X86_CPU(ms->possible_cpus->cpus[0].cpu); + if (pcmc->smbios_defaults) { + /* These values are guest ABI, do not change */ + smbios_set_defaults("QEMU", mc->desc, mc->name, + pcmc->smbios_uuid_encoded); + } + /* tell smbios about cpuid version and features */ smbios_set_cpuid(cpu->env.cpuid_version, cpu->env.features[FEAT_1_EDX]); - smbios_tables = smbios_get_table_legacy(ms, &smbios_tables_len); - if (smbios_tables) { + if (pcmc->smbios_legacy_mode) { + smbios_tables = smbios_get_table_legacy(&smbios_tables_len, + &error_fatal); fw_cfg_add_bytes(fw_cfg, FW_CFG_SMBIOS_ENTRIES, smbios_tables, smbios_tables_len); + return; } /* build the array of physical mem area from e820 table */ @@ -77,7 +89,7 @@ void fw_cfg_build_smbios(MachineState *ms, FWCfgState *fw_cfg) array_count++; } } - smbios_get_tables(ms, mem_array, array_count, + 
smbios_get_tables(ms, ep_type, mem_array, array_count, &smbios_tables, &smbios_tables_len, &smbios_anchor, &smbios_anchor_len, &error_fatal); diff --git a/hw/i386/fw_cfg.h b/hw/i386/fw_cfg.h index 86ca7c1c0cb..92e310f5fdc 100644 --- a/hw/i386/fw_cfg.h +++ b/hw/i386/fw_cfg.h @@ -10,6 +10,7 @@ #define HW_I386_FW_CFG_H #include "hw/boards.h" +#include "hw/i386/pc.h" #include "hw/nvram/fw_cfg.h" #define FW_CFG_IO_BASE 0x510 @@ -22,7 +23,8 @@ FWCfgState *fw_cfg_arch_create(MachineState *ms, uint16_t boot_cpus, uint16_t apic_id_limit); -void fw_cfg_build_smbios(MachineState *ms, FWCfgState *fw_cfg); +void fw_cfg_build_smbios(PCMachineState *pcms, FWCfgState *fw_cfg, + SmbiosEntryPointType ep_type); void fw_cfg_build_feature_control(MachineState *ms, FWCfgState *fw_cfg); void fw_cfg_add_acpi_dsdt(Aml *scope, FWCfgState *fw_cfg); diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 5085a6fee3f..cc8e59674eb 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -1665,7 +1665,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) { bool use_iommu, pt; /* Whether we need to take the BQL on our own */ - bool take_bql = !qemu_mutex_iothread_locked(); + bool take_bql = !bql_locked(); assert(as); @@ -1683,7 +1683,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) * it. We'd better make sure we have had it already, or, take it. */ if (take_bql) { - qemu_mutex_lock_iothread(); + bql_lock(); } /* Turn off first then on the other */ @@ -1738,7 +1738,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) } if (take_bql) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } return use_iommu; @@ -3289,7 +3289,7 @@ static const VMStateDescription vtd_vmstate = { .minimum_version_id = 1, .priority = MIG_PRI_IOMMU, .post_load = vtd_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(root, IntelIOMMUState), VMSTATE_UINT64(intr_root, IntelIOMMUState), VMSTATE_UINT64(iq, IntelIOMMUState), @@ -4124,11 +4124,7 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; } if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) { - if (!kvm_irqchip_is_split()) { - error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split"); - return false; - } - if (kvm_enabled() && !kvm_enable_x2apic()) { + if (kvm_irqchip_is_split() && !kvm_enable_x2apic()) { error_setg(errp, "eim=on requires support on the KVM side" "(X2APIC_API, first shipped in v4.7)"); return false; @@ -4187,7 +4183,7 @@ static void vtd_realize(DeviceState *dev, Error **errp) MachineState *ms = MACHINE(qdev_get_machine()); PCMachineState *pcms = PC_MACHINE(ms); X86MachineState *x86ms = X86_MACHINE(ms); - PCIBus *bus = pcms->bus; + PCIBus *bus = pcms->pcibus; IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c index 1e89ca0899c..a72c28e8a7d 100644 --- a/hw/i386/kvm/apic.c +++ b/hw/i386/kvm/apic.c @@ -95,9 +95,10 @@ void kvm_get_apic_state(DeviceState *dev, struct kvm_lapic_state *kapic) apic_next_timer(s, s->initial_count_load_time); } -static void kvm_apic_set_base(APICCommonState *s, uint64_t val) +static int kvm_apic_set_base(APICCommonState *s, uint64_t val) { s->apicbase = val; + return 0; } static void kvm_apic_set_tpr(APICCommonState *s, uint8_t val) diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c index e756b0aa43f..40aa9a32c32 100644 --- a/hw/i386/kvm/clock.c +++ b/hw/i386/kvm/clock.c @@ -245,7 +245,7 @@ static const 
VMStateDescription kvmclock_reliable_get_clock = { .version_id = 1, .minimum_version_id = 1, .needed = kvmclock_clock_is_reliable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(clock_is_reliable, KVMClockState), VMSTATE_END_OF_LIST() } @@ -295,11 +295,11 @@ static const VMStateDescription kvmclock_vmsd = { .minimum_version_id = 1, .pre_load = kvmclock_pre_load, .pre_save = kvmclock_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(clock, KVMClockState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &kvmclock_reliable_get_clock, NULL } diff --git a/hw/i386/kvm/ioapic.c b/hw/i386/kvm/ioapic.c index 409d0c8c766..b96fe84eed3 100644 --- a/hw/i386/kvm/ioapic.c +++ b/hw/i386/kvm/ioapic.c @@ -35,7 +35,7 @@ void kvm_pc_setup_irq_routing(bool pci_enabled) kvm_irqchip_add_irq_route(s, i, KVM_IRQCHIP_PIC_SLAVE, i - 8); } if (pci_enabled) { - for (i = 0; i < 24; ++i) { + for (i = 0; i < KVM_IOAPIC_NUM_PINS; ++i) { if (i == 0) { kvm_irqchip_add_irq_route(s, i, KVM_IRQCHIP_IOAPIC, 2); } else if (i != 2) { diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c index 02b8cbf8dff..07bd0c9ab80 100644 --- a/hw/i386/kvm/xen_evtchn.c +++ b/hw/i386/kvm/xen_evtchn.c @@ -240,7 +240,7 @@ static const VMStateDescription xen_evtchn_port_vmstate = { .name = "xen_evtchn_port", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(vcpu, XenEvtchnPort), VMSTATE_UINT16(type, XenEvtchnPort), VMSTATE_UINT16(u.val, XenEvtchnPort), @@ -255,7 +255,7 @@ static const VMStateDescription xen_evtchn_vmstate = { .needed = xen_evtchn_is_needed, .pre_load = xen_evtchn_pre_load, .post_load = xen_evtchn_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(callback_param, XenEvtchnState), VMSTATE_UINT32(nr_ports, XenEvtchnState), VMSTATE_STRUCT_VARRAY_UINT32(port_table, XenEvtchnState, nr_ports, 1, @@ -371,7 +371,7 @@ static int set_callback_pci_intx(XenEvtchnState *s, uint64_t param) return 0; } - pdev = pci_find_device(pcms->bus, bus, devfn); + pdev = pci_find_device(pcms->pcibus, bus, devfn); if (!pdev) { return 0; } @@ -425,7 +425,7 @@ void xen_evtchn_set_callback_level(int level) * effect immediately. That just leaves interdomain loopback as the case * which uses the BH. */ - if (!qemu_mutex_iothread_locked()) { + if (!bql_locked()) { qemu_bh_schedule(s->gsi_bh); return; } @@ -459,7 +459,7 @@ int xen_evtchn_set_callback_param(uint64_t param) * We need the BQL because set_callback_pci_intx() may call into PCI code, * and because we may need to manipulate the old and new GSI levels. 
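Several conversions above replace qemu_mutex_iothread_locked()/qemu_mutex_lock_iothread() with the renamed bql_locked()/bql_lock() helpers while keeping the "take the lock only if the caller does not already hold it" shape used by vtd_switch_address_space(). A self-contained sketch of that pattern, with a pthread mutex and a thread-local flag standing in for the real Big QEMU Lock.

/* build: gcc -pthread bql.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t bql = PTHREAD_MUTEX_INITIALIZER;
static __thread bool bql_held;

static bool bql_locked(void) { return bql_held; }
static void bql_lock(void)   { pthread_mutex_lock(&bql); bql_held = true; }
static void bql_unlock(void) { bql_held = false; pthread_mutex_unlock(&bql); }

/* Callable with or without the lock held by the caller. */
static void switch_address_space(void)
{
    bool take_bql = !bql_locked();

    if (take_bql) {
        bql_lock();
    }
    printf("updating the memory topology under the lock\n");
    if (take_bql) {
        bql_unlock();
    }
}

int main(void)
{
    switch_address_space();   /* caller does not hold the lock */

    bql_lock();
    switch_address_space();   /* caller already holds it */
    bql_unlock();
    return 0;
}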
*/ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); qemu_mutex_lock(&s->port_lock); switch (type) { @@ -1037,7 +1037,7 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port, XenEvtchnPort *p = &s->port_table[port]; /* Because it *might* be a PIRQ port */ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); switch (p->type) { case EVTCHNSTAT_closed: @@ -1097,14 +1097,14 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port, int xen_evtchn_soft_reset(void) { XenEvtchnState *s = xen_evtchn_singleton; - bool flush_kvm_routes; + bool flush_kvm_routes = false; int i; if (!s) { return -ENOTSUP; } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); qemu_mutex_lock(&s->port_lock); @@ -1127,7 +1127,7 @@ int xen_evtchn_reset_op(struct evtchn_reset *reset) return -ESRCH; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); return xen_evtchn_soft_reset(); } @@ -1145,7 +1145,7 @@ int xen_evtchn_close_op(struct evtchn_close *close) return -EINVAL; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); qemu_mutex_lock(&s->port_lock); ret = close_port(s, close->port, &flush_kvm_routes); @@ -1272,7 +1272,7 @@ int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq) return -EINVAL; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); if (s->pirq[pirq->pirq].port) { return -EBUSY; @@ -1601,7 +1601,7 @@ bool xen_evtchn_set_gsi(int gsi, int level) XenEvtchnState *s = xen_evtchn_singleton; int pirq; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) { return false; @@ -1712,7 +1712,7 @@ void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector, return; } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); pirq = msi_pirq_target(addr, data); @@ -1749,7 +1749,7 @@ int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route, return 1; /* Not a PIRQ */ } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); pirq = msi_pirq_target(address, data); if (!pirq || pirq >= s->nr_pirqs) { @@ -1796,7 +1796,7 @@ bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data) return false; } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); pirq = msi_pirq_target(address, data); if (!pirq || pirq >= s->nr_pirqs) { @@ -1824,7 +1824,7 @@ int xen_physdev_map_pirq(struct physdev_map_pirq *map) return -ENOTSUP; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); QEMU_LOCK_GUARD(&s->port_lock); if (map->domid != DOMID_SELF && map->domid != xen_domid) { @@ -1884,7 +1884,7 @@ int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap) return -EINVAL; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); qemu_mutex_lock(&s->port_lock); if (!pirq_inuse(s, pirq)) { @@ -1924,7 +1924,7 @@ int xen_physdev_eoi_pirq(struct physdev_eoi *eoi) return -ENOTSUP; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); QEMU_LOCK_GUARD(&s->port_lock); if (!pirq_inuse(s, pirq)) { @@ -1956,7 +1956,7 @@ int xen_physdev_query_pirq(struct physdev_irq_status_query *query) return -ENOTSUP; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); QEMU_LOCK_GUARD(&s->port_lock); if (!pirq_inuse(s, pirq)) { diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c index 0a24f53f20e..245e4b15db7 100644 --- a/hw/i386/kvm/xen_gnttab.c +++ b/hw/i386/kvm/xen_gnttab.c @@ -127,7 +127,7 @@ static const VMStateDescription xen_gnttab_vmstate = { .minimum_version_id = 1, .needed = xen_gnttab_is_needed, .post_load = xen_gnttab_post_load, - .fields = (VMStateField[]) { + .fields = (const 
VMStateField[]) { VMSTATE_UINT32(nr_frames, XenGnttabState), VMSTATE_VARRAY_UINT32(gnt_frame_gpas, XenGnttabState, nr_frames, 0, vmstate_info_uint64, uint64_t), @@ -176,7 +176,7 @@ int xen_gnttab_map_page(uint64_t idx, uint64_t gfn) return -EINVAL; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); QEMU_LOCK_GUARD(&s->gnt_lock); xen_overlay_do_map_page(&s->gnt_aliases[idx], gpa); diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c index 39fda1b72c3..c68e78ac5ce 100644 --- a/hw/i386/kvm/xen_overlay.c +++ b/hw/i386/kvm/xen_overlay.c @@ -139,7 +139,7 @@ static const VMStateDescription xen_overlay_vmstate = { .needed = xen_overlay_is_needed, .pre_save = xen_overlay_pre_save, .post_load = xen_overlay_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(shinfo_gpa, XenOverlayState), VMSTATE_BOOL(long_mode, XenOverlayState), VMSTATE_END_OF_LIST() @@ -194,7 +194,7 @@ int xen_overlay_map_shinfo_page(uint64_t gpa) return -ENOENT; } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (s->shinfo_gpa) { /* If removing shinfo page, turn the kernel magic off first */ diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c index 6e651960b3a..1a9bc342b88 100644 --- a/hw/i386/kvm/xen_xenstore.c +++ b/hw/i386/kvm/xen_xenstore.c @@ -243,7 +243,7 @@ static const VMStateDescription xen_xenstore_vmstate = { .needed = xen_xenstore_is_needed, .pre_save = xen_xenstore_pre_save, .post_load = xen_xenstore_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState, sizeof_field(XenXenstoreState, req_data)), VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState, @@ -1341,7 +1341,7 @@ static void fire_watch_cb(void *opaque, const char *path, const char *token) { XenXenstoreState *s = opaque; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); /* * If there's a response pending, we obviously can't scribble over diff --git a/hw/i386/meson.build b/hw/i386/meson.build index 369c6bf823b..d8b70ef3e9c 100644 --- a/hw/i386/meson.build +++ b/hw/i386/meson.build @@ -1,7 +1,7 @@ i386_ss = ss.source_set() i386_ss.add(files( 'fw_cfg.c', - 'kvmvapic.c', + 'vapic.c', 'e820_memory_layout.c', 'multiboot.c', 'x86.c', @@ -9,7 +9,8 @@ i386_ss.add(files( i386_ss.add(when: 'CONFIG_X86_IOMMU', if_true: files('x86-iommu.c'), if_false: files('x86-iommu-stub.c')) -i386_ss.add(when: 'CONFIG_AMD_IOMMU', if_true: files('amd_iommu.c')) +i386_ss.add(when: 'CONFIG_AMD_IOMMU', if_true: files('amd_iommu.c'), + if_false: files('amd_iommu-stub.c')) i386_ss.add(when: 'CONFIG_I440FX', if_true: files('pc_piix.c')) i386_ss.add(when: 'CONFIG_MICROVM', if_true: files('microvm.c', 'acpi-microvm.c', 'microvm-dt.c')) i386_ss.add(when: 'CONFIG_Q35', if_true: files('pc_q35.c')) diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c index ca55aecc3b4..61a772dfe6e 100644 --- a/hw/i386/microvm.c +++ b/hw/i386/microvm.c @@ -175,7 +175,7 @@ static void microvm_devices_init(MicrovmMachineState *mms) &error_abort); isa_bus_register_input_irqs(isa_bus, x86ms->gsi); - ioapic_init_gsi(gsi_state, "machine"); + ioapic_init_gsi(gsi_state, OBJECT(mms)); if (ioapics > 1) { x86ms->ioapic2 = ioapic_init_secondary(gsi_state); } diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 29b9964733e..5c21b0c4dbf 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -31,7 +31,7 @@ #include "hw/i386/fw_cfg.h" #include "hw/i386/vmport.h" #include "sysemu/cpus.h" -#include "hw/ide/internal.h" +#include "hw/ide/ide-bus.h" #include "hw/timer/hpet.h" #include 
"hw/loader.h" #include "hw/rtc/mc146818rtc.h" @@ -78,6 +78,9 @@ { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\ { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, }, +GlobalProperty pc_compat_8_2[] = {}; +const size_t pc_compat_8_2_len = G_N_ELEMENTS(pc_compat_8_2); + GlobalProperty pc_compat_8_1[] = {}; const size_t pc_compat_8_1_len = G_N_ELEMENTS(pc_compat_8_1); @@ -396,8 +399,8 @@ static int boot_device2nibble(char boot_device) return 0; } -static void set_boot_dev(MC146818RtcState *s, const char *boot_device, - Error **errp) +static void set_boot_dev(PCMachineState *pcms, MC146818RtcState *s, + const char *boot_device, Error **errp) { #define PC_MAX_BOOT_DEVICES 3 int nbds, bds[3] = { 0, }; @@ -417,12 +420,15 @@ static void set_boot_dev(MC146818RtcState *s, const char *boot_device, } } mc146818rtc_set_cmos_data(s, 0x3d, (bds[1] << 4) | bds[0]); - mc146818rtc_set_cmos_data(s, 0x38, (bds[2] << 4) | (fd_bootchk ? 0x0 : 0x1)); + mc146818rtc_set_cmos_data(s, 0x38, (bds[2] << 4) | !pcms->fd_bootchk); } static void pc_boot_set(void *opaque, const char *boot_device, Error **errp) { - set_boot_dev(opaque, boot_device, errp); + PCMachineState *pcms = opaque; + X86MachineState *x86ms = X86_MACHINE(pcms); + + set_boot_dev(pcms, MC146818_RTC(x86ms->rtc), boot_device, errp); } static void pc_cmos_init_floppy(MC146818RtcState *rtc_state, ISADevice *floppy) @@ -462,11 +468,6 @@ static void pc_cmos_init_floppy(MC146818RtcState *rtc_state, ISADevice *floppy) mc146818rtc_set_cmos_data(rtc_state, REG_EQUIPMENT_BYTE, val); } -typedef struct pc_cmos_init_late_arg { - MC146818RtcState *rtc_state; - BusState *idebus[2]; -} pc_cmos_init_late_arg; - typedef struct check_fdc_state { ISADevice *floppy; bool multiple; @@ -527,23 +528,25 @@ static ISADevice *pc_find_fdc0(void) return state.floppy; } -static void pc_cmos_init_late(void *opaque) +static void pc_cmos_init_late(PCMachineState *pcms) { - pc_cmos_init_late_arg *arg = opaque; - MC146818RtcState *s = arg->rtc_state; + X86MachineState *x86ms = X86_MACHINE(pcms); + MC146818RtcState *s = MC146818_RTC(x86ms->rtc); int16_t cylinders; int8_t heads, sectors; int val; int i, trans; val = 0; - if (arg->idebus[0] && ide_get_geometry(arg->idebus[0], 0, - &cylinders, &heads, §ors) >= 0) { + if (pcms->idebus[0] && + ide_get_geometry(pcms->idebus[0], 0, + &cylinders, &heads, §ors) >= 0) { cmos_init_hd(s, 0x19, 0x1b, cylinders, heads, sectors); val |= 0xf0; } - if (arg->idebus[0] && ide_get_geometry(arg->idebus[0], 1, - &cylinders, &heads, §ors) >= 0) { + if (pcms->idebus[0] && + ide_get_geometry(pcms->idebus[0], 1, + &cylinders, &heads, §ors) >= 0) { cmos_init_hd(s, 0x1a, 0x24, cylinders, heads, sectors); val |= 0x0f; } @@ -555,10 +558,11 @@ static void pc_cmos_init_late(void *opaque) geometry. It is always such that: 1 <= sects <= 63, 1 <= heads <= 16, 1 <= cylinders <= 16383. The BIOS geometry can be different if a translation is done. 
*/ - if (arg->idebus[i / 2] && - ide_get_geometry(arg->idebus[i / 2], i % 2, + BusState *idebus = pcms->idebus[i / 2]; + if (idebus && + ide_get_geometry(idebus, i % 2, &cylinders, &heads, §ors) >= 0) { - trans = ide_get_bios_chs_trans(arg->idebus[i / 2], i % 2) - 1; + trans = ide_get_bios_chs_trans(idebus, i % 2) - 1; assert((trans & ~3) == 0); val |= trans << (i * 2); } @@ -567,18 +571,6 @@ static void pc_cmos_init_late(void *opaque) pc_cmos_init_floppy(s, pc_find_fdc0()); - qemu_unregister_reset(pc_cmos_init_late, opaque); -} - -void pc_cmos_init(PCMachineState *pcms, - BusState *idebus0, BusState *idebus1, - ISADevice *rtc) -{ - int val; - static pc_cmos_init_late_arg arg; - X86MachineState *x86ms = X86_MACHINE(pcms); - MC146818RtcState *s = MC146818_RTC(rtc); - /* various important CMOS locations needed by PC/Bochs bios */ /* memory size */ @@ -614,26 +606,10 @@ void pc_cmos_init(PCMachineState *pcms, mc146818rtc_set_cmos_data(s, 0x5c, val >> 8); mc146818rtc_set_cmos_data(s, 0x5d, val >> 16); - object_property_add_link(OBJECT(pcms), "rtc_state", - TYPE_ISA_DEVICE, - (Object **)&x86ms->rtc, - object_property_allow_set_link, - OBJ_PROP_LINK_STRONG); - object_property_set_link(OBJECT(pcms), "rtc_state", OBJECT(s), - &error_abort); - - set_boot_dev(s, MACHINE(pcms)->boot_config.order, &error_fatal); - val = 0; val |= 0x02; /* FPU is there */ val |= 0x04; /* PS/2 mouse installed */ mc146818rtc_set_cmos_data(s, REG_EQUIPMENT_BYTE, val); - - /* hard drives and FDC */ - arg.rtc_state = s; - arg.idebus[0] = idebus0; - arg.idebus[1] = idebus1; - qemu_register_reset(pc_cmos_init_late, &arg); } static void handle_a20_line_change(void *opaque, int irq, int level) @@ -651,15 +627,19 @@ static const int ne2000_io[NE2000_NB_MAX] = { 0x300, 0x320, 0x340, 0x360, 0x280, 0x380 }; static const int ne2000_irq[NE2000_NB_MAX] = { 9, 10, 11, 3, 4, 5 }; -static void pc_init_ne2k_isa(ISABus *bus, NICInfo *nd) +static gboolean pc_init_ne2k_isa(ISABus *bus, NICInfo *nd, Error **errp) { static int nb_ne2k = 0; - if (nb_ne2k == NE2000_NB_MAX) - return; + if (nb_ne2k == NE2000_NB_MAX) { + error_setg(errp, + "maximum number of ISA NE2000 devices exceeded"); + return false; + } isa_ne2000_init(bus, ne2000_io[nb_ne2k], ne2000_irq[nb_ne2k], nd); nb_ne2k++; + return true; } void pc_acpi_smi_interrupt(void *opaque, int irq, int level) @@ -678,7 +658,7 @@ void pc_machine_done(Notifier *notifier, void *data) PCMachineState, machine_done); X86MachineState *x86ms = X86_MACHINE(pcms); - cxl_hook_up_pxb_registers(pcms->bus, &pcms->cxl_devices_state, + cxl_hook_up_pxb_registers(pcms->pcibus, &pcms->cxl_devices_state, &error_fatal); if (pcms->cxl_devices_state.is_enabled) { @@ -688,24 +668,17 @@ void pc_machine_done(Notifier *notifier, void *data) /* set the number of CPUs */ x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus); - fw_cfg_add_extra_pci_roots(pcms->bus, x86ms->fw_cfg); + fw_cfg_add_extra_pci_roots(pcms->pcibus, x86ms->fw_cfg); acpi_setup(); if (x86ms->fw_cfg) { - fw_cfg_build_smbios(MACHINE(pcms), x86ms->fw_cfg); + fw_cfg_build_smbios(pcms, x86ms->fw_cfg, pcms->smbios_entry_point_type); fw_cfg_build_feature_control(MACHINE(pcms), x86ms->fw_cfg); /* update FW_CFG_NB_CPUS to account for -device added CPUs */ fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); } -} - -void pc_guest_info_init(PCMachineState *pcms) -{ - X86MachineState *x86ms = X86_MACHINE(pcms); - x86ms->apic_xrupt_override = true; - pcms->machine_done.notify = pc_machine_done; - qemu_add_machine_init_done_notifier(&pcms->machine_done); 
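pc_init_ne2k_isa() now reports hitting the NE2000_NB_MAX limit through an Error ** out-parameter and a boolean return instead of returning silently. A minimal sketch of that convention; the Error type and error_set() here are stand-ins, not QEMU's qapi/error.h API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct Error {
    char msg[128];
} Error;

static void error_set(Error **errp, const char *msg)
{
    if (errp) {
        *errp = calloc(1, sizeof(**errp));
        strncpy((*errp)->msg, msg, sizeof((*errp)->msg) - 1);
    }
}

#define MAX_NE2K 6
static int nb_ne2k;

/* Returns false and sets *errp on failure instead of silently ignoring it. */
static bool init_ne2k_isa(Error **errp)
{
    if (nb_ne2k == MAX_NE2K) {
        error_set(errp, "maximum number of ISA NE2000 devices exceeded");
        return false;
    }
    nb_ne2k++;                /* the real code creates the device here */
    return true;
}

int main(void)
{
    Error *err = NULL;

    for (int i = 0; i < MAX_NE2K + 1; i++) {
        if (!init_ne2k_isa(&err)) {
            fprintf(stderr, "init failed: %s\n", err->msg);
            free(err);
            return 1;
        }
    }
    return 0;
}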
+ pc_cmos_init_late(pcms); } /* setup pci memory address space mapping into system address space */ @@ -726,7 +699,8 @@ void xen_load_linux(PCMachineState *pcms) assert(MACHINE(pcms)->kernel_filename != NULL); - fw_cfg = fw_cfg_init_io(FW_CFG_IO_BASE); + fw_cfg = fw_cfg_init_io_dma(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4, + &address_space_memory); fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); rom_set_fw(fw_cfg); @@ -1188,7 +1162,8 @@ static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, port92 = isa_create_simple(isa_bus, TYPE_PORT92); a20_line = qemu_allocate_irqs(handle_a20_line_change, first_cpu, 2); - i8042_setup_a20_line(i8042, a20_line[0]); + qdev_connect_gpio_out_named(DEVICE(i8042), + I8042_A20_LINE, 0, a20_line[0]); qdev_connect_gpio_out_named(DEVICE(port92), PORT92_A20_LINE, 0, a20_line[1]); g_free(a20_line); @@ -1259,15 +1234,17 @@ void pc_basic_device_init(struct PCMachineState *pcms, xen_evtchn_create(IOAPIC_NUM_PINS, gsi); xen_gnttab_create(); xen_xenstore_create(); - if (pcms->bus) { - pci_create_simple(pcms->bus, -1, "xen-platform"); + if (pcms->pcibus) { + pci_create_simple(pcms->pcibus, -1, "xen-platform"); } - pcms->xenbus = xen_bus_init(); + xen_bus_init(); xen_be_init(); } #endif - qemu_register_boot_set(pc_boot_set, rtc_state); + qemu_register_boot_set(pc_boot_set, pcms); + set_boot_dev(pcms, MC146818_RTC(rtc_state), + MACHINE(pcms)->boot_config.order, &error_fatal); if (!xen_enabled() && (x86ms->pit == ON_OFF_AUTO_AUTO || x86ms->pit == ON_OFF_AUTO_ON)) { @@ -1290,27 +1267,21 @@ void pc_basic_device_init(struct PCMachineState *pcms, pcms->vmport != ON_OFF_AUTO_ON); } -void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus, - BusState *xen_bus) +void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus) { MachineClass *mc = MACHINE_CLASS(pcmc); - int i; + bool default_is_ne2k = g_str_equal(mc->default_nic, TYPE_ISA_NE2000); + NICInfo *nd; rom_set_order_override(FW_CFG_ORDER_OVERRIDE_NIC); - for (i = 0; i < nb_nics; i++) { - NICInfo *nd = &nd_table[i]; - const char *model = nd->model ? nd->model : mc->default_nic; - - if (xen_bus && (!nd->model || g_str_equal(model, "xen-net-device"))) { - DeviceState *dev = qdev_new("xen-net-device"); - qdev_set_nic_properties(dev, nd); - qdev_realize_and_unref(dev, xen_bus, &error_fatal); - } else if (g_str_equal(model, "ne2k_isa")) { - pc_init_ne2k_isa(isa_bus, nd); - } else { - pci_nic_init_nofail(nd, pci_bus, model, NULL); - } + + while ((nd = qemu_find_nic_info(TYPE_ISA_NE2000, default_is_ne2k, NULL))) { + pc_init_ne2k_isa(isa_bus, nd, &error_fatal); } + + /* Anything remaining should be a PCI NIC */ + pci_init_nic_devices(pci_bus, mc->default_nic); + rom_reset_order_override(); } @@ -1345,7 +1316,7 @@ static void pc_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error *local_err = NULL; /* - * When -no-acpi is used with Q35 machine type, no ACPI is built, + * When "acpi=off" is used with the Q35 machine type, no ACPI is built, * but pcms->acpi_dev is still created. Check !acpi_enabled in * addition to cover this case. */ @@ -1393,7 +1364,7 @@ static void pc_memory_unplug_request(HotplugHandler *hotplug_dev, X86MachineState *x86ms = X86_MACHINE(hotplug_dev); /* - * When -no-acpi is used with Q35 machine type, no ACPI is built, + * When "acpi=off" is used with the Q35 machine type, no ACPI is built, * but pcms->acpi_dev is still created. Check !acpi_enabled in * addition to cover this case. 
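xen_load_linux() now creates the DMA-capable fw_cfg variant at the same I/O base. For reference, a guest-side sketch of the classic fw_cfg port interface that lives at FW_CFG_IO_BASE: a 16-bit selector at 0x510 and a byte-wide data port at 0x511. This is Linux/x86 specific, needs I/O privileges, and is only meant to illustrate the register layout, not QEMU internals.

#include <stdio.h>
#include <sys/io.h>

#define FW_CFG_CTL        0x510   /* 16-bit selector port */
#define FW_CFG_DATA       0x511   /* 8-bit data port */
#define FW_CFG_SIGNATURE  0x0000  /* selector for the "QEMU" signature */

int main(void)
{
    char sig[5] = { 0 };

    if (iopl(3) < 0) {
        perror("iopl");
        return 1;
    }
    outw(FW_CFG_SIGNATURE, FW_CFG_CTL);
    for (int i = 0; i < 4; i++) {
        sig[i] = inb(FW_CFG_DATA);
    }
    printf("fw_cfg signature: %s\n", sig);   /* "QEMU" inside a QEMU guest */
    return 0;
}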
*/ @@ -1558,6 +1529,20 @@ static void pc_machine_set_vmport(Object *obj, Visitor *v, const char *name, visit_type_OnOffAuto(v, name, &pcms->vmport, errp); } +static bool pc_machine_get_fd_bootchk(Object *obj, Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(obj); + + return pcms->fd_bootchk; +} + +static void pc_machine_set_fd_bootchk(Object *obj, bool value, Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(obj); + + pcms->fd_bootchk = value; +} + static bool pc_machine_get_smbus(Object *obj, Error **errp) { PCMachineState *pcms = PC_MACHINE(obj); @@ -1746,18 +1731,19 @@ static void pc_machine_initfn(Object *obj) #ifdef CONFIG_HPET pcms->hpet_enabled = true; #endif + pcms->fd_bootchk = true; pcms->default_bus_bypass_iommu = false; pc_system_flash_create(pcms); pcms->pcspk = isa_new(TYPE_PC_SPEAKER); object_property_add_alias(OBJECT(pcms), "pcspk-audiodev", OBJECT(pcms->pcspk), "audiodev"); - cxl_machine_init(obj, &pcms->cxl_devices_state); -} + if (pcmc->pci_enabled) { + cxl_machine_init(obj, &pcms->cxl_devices_state); + } -int pc_machine_kvm_type(MachineState *machine, const char *kvm_type) -{ - return 0; + pcms->machine_done.notify = pc_machine_done; + qemu_add_machine_init_done_notifier(&pcms->machine_done); } static void pc_machine_reset(MachineState *machine, ShutdownCause reason) @@ -1806,6 +1792,7 @@ static bool pc_hotplug_allowed(MachineState *ms, DeviceState *dev, Error **errp) static void pc_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); + X86MachineClass *x86mc = X86_MACHINE_CLASS(oc); PCMachineClass *pcmc = PC_MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); @@ -1816,7 +1803,6 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) pcmc->smbios_uuid_encoded = true; pcmc->gigabyte_align = true; pcmc->has_reserved_memory = true; - pcmc->kvmclock_enabled = true; pcmc->enforce_aligned_dimm = true; pcmc->enforce_amd_1tb_hole = true; /* BIOS ACPI tables: 128K. 
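pc_machine_initfn() now registers the machine_done notifier itself, so pc_machine_done() runs once every device, including those added with -device, exists. A self-contained sketch of that notifier-list idiom; the struct and function names are stand-ins for QEMU's Notifier API.

#include <stdio.h>

struct notifier {
    void (*notify)(struct notifier *n, void *data);
    struct notifier *next;
};

static struct notifier *machine_done_list;

static void add_machine_done_notifier(struct notifier *n)
{
    n->next = machine_done_list;
    machine_done_list = n;
}

static void machine_init_done(void)
{
    for (struct notifier *n = machine_done_list; n; n = n->next) {
        n->notify(n, NULL);
    }
}

static void pc_machine_done_cb(struct notifier *n, void *data)
{
    (void)n;
    (void)data;
    printf("all devices exist: finish CMOS, fw_cfg and ACPI setup\n");
}

int main(void)
{
    struct notifier pc_done = { .notify = pc_machine_done_cb };

    add_machine_done_notifier(&pc_done);
    /* ... board and -device creation would happen here ... */
    machine_init_done();
    return 0;
}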
Other BIOS datastructures: less than 4K reported @@ -1825,6 +1811,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) pcmc->pvh_enabled = true; pcmc->kvmclock_create_always = true; pcmc->resizable_acpi_blob = true; + x86mc->apic_xrupt_override = true; assert(!mc->get_hotplug_handler); mc->get_hotplug_handler = pc_get_hotplug_handler; mc->hotplug_allowed = pc_hotplug_allowed; @@ -1847,7 +1834,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) mc->nvdimm_supported = true; mc->smp_props.dies_supported = true; mc->default_ram_id = "pc.ram"; - pcmc->default_smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_64; + pcmc->default_smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_AUTO; object_class_property_add(oc, PC_MACHINE_MAX_RAM_BELOW_4G, "size", pc_machine_get_max_ram_below_4g, pc_machine_set_max_ram_below_4g, @@ -1894,6 +1881,10 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) NULL, NULL); object_class_property_set_description(oc, PC_MACHINE_SMBIOS_EP, "SMBIOS Entry Point type [32, 64]"); + + object_class_property_add_bool(oc, "fd-bootchk", + pc_machine_get_fd_bootchk, + pc_machine_set_fd_bootchk); } static const TypeInfo pc_machine_info = { diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index eace8543358..18ba0766092 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -36,7 +36,6 @@ #include "hw/rtc/mc146818rtc.h" #include "hw/southbridge/piix.h" #include "hw/display/ramfb.h" -#include "hw/firmware/smbios.h" #include "hw/pci/pci.h" #include "hw/pci/pci_ids.h" #include "hw/usb.h" @@ -56,11 +55,13 @@ #ifdef CONFIG_XEN #include #include "hw/xen/xen_pt.h" +#include "hw/xen/xen_igd.h" #endif #include "hw/xen/xen-x86.h" #include "hw/xen/xen.h" #include "migration/global_state.h" #include "migration/misc.h" +#include "sysemu/runstate.h" #include "sysemu/numa.h" #include "hw/hyperv/vmbus-bridge.h" #include "hw/mem/nvdimm.h" @@ -68,7 +69,6 @@ #include "kvm/kvm-cpu.h" #include "target/i386/cpu.h" -#define MAX_IDE_BUS 2 #define XEN_IOAPIC_NUM_PIRQS 128ULL #ifdef CONFIG_IDE_ISA @@ -92,33 +92,27 @@ static void piix_intx_routing_notifier_xen(PCIDevice *dev) { int i; - /* Scan for updates to PCI link routes (0x60-0x63). */ + /* Scan for updates to PCI link routes. */ for (i = 0; i < PIIX_NUM_PIRQS; i++) { - uint8_t v = dev->config_read(dev, PIIX_PIRQCA + i, 1); - if (v & 0x80) { - v = 0; - } - v &= 0xf; + const PCIINTxRoute route = pci_device_route_intx_to_irq(dev, i); + const uint8_t v = route.mode == PCI_INTX_ENABLED ? 
route.irq : 0; xen_set_pci_link_route(i, v); } } /* PC hardware initialisation */ -static void pc_init1(MachineState *machine, - const char *host_type, const char *pci_type) +static void pc_init1(MachineState *machine, const char *pci_type) { PCMachineState *pcms = PC_MACHINE(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); X86MachineState *x86ms = X86_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); MemoryRegion *system_io = get_system_io(); - PCIBus *pci_bus = NULL; + Object *phb = NULL; ISABus *isa_bus; Object *piix4_pm = NULL; qemu_irq smi_irq; GSIState *gsi_state; - BusState *idebus[MAX_IDE_BUS]; - ISADevice *rtc_state; MemoryRegion *ram_memory; MemoryRegion *pci_memory = NULL; MemoryRegion *rom_memory = system_memory; @@ -190,18 +184,16 @@ static void pc_init1(MachineState *machine, pc_machine_init_sgx_epc(pcms); x86_cpus_init(x86ms, pcmc->default_cpu_version); - if (kvm_enabled() && pcmc->kvmclock_enabled) { + if (kvm_enabled()) { kvmclock_create(pcmc->kvmclock_create_always); } if (pcmc->pci_enabled) { - Object *phb; - pci_memory = g_new(MemoryRegion, 1); memory_region_init(pci_memory, NULL, "pci", UINT64_MAX); rom_memory = pci_memory; - phb = OBJECT(qdev_new(host_type)); + phb = OBJECT(qdev_new(TYPE_I440FX_PCI_HOST_BRIDGE)); object_property_add_child(OBJECT(machine), "i440fx", phb); object_property_set_link(phb, PCI_HOST_PROP_RAM_MEM, OBJECT(ram_memory), &error_fatal); @@ -219,28 +211,16 @@ static void pc_init1(MachineState *machine, &error_fatal); sysbus_realize_and_unref(SYS_BUS_DEVICE(phb), &error_fatal); - pci_bus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pci.0")); - pci_bus_map_irqs(pci_bus, + pcms->pcibus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pci.0")); + pci_bus_map_irqs(pcms->pcibus, xen_enabled() ? xen_pci_slot_get_pirq : pc_pci_slot_get_pirq); - pcms->bus = pci_bus; hole64_size = object_property_get_uint(phb, PCI_HOST_PROP_PCI_HOLE64_SIZE, &error_abort); } - pc_guest_info_init(pcms); - - if (pcmc->smbios_defaults) { - MachineClass *mc = MACHINE_GET_CLASS(machine); - /* These values are guest ABI, do not change */ - smbios_set_defaults("QEMU", mc->desc, - mc->name, pcmc->smbios_legacy_mode, - pcmc->smbios_uuid_encoded, - pcms->smbios_entry_point_type); - } - /* allocate ram and load rom/bios */ if (!xen_enabled()) { pc_memory_init(pcms, system_memory, rom_memory, hole64_size); @@ -280,7 +260,7 @@ static void pc_init1(MachineState *machine, for (i = 0; i < ISA_NUM_IRQS; i++) { qdev_connect_gpio_out_named(dev, "isa-irqs", i, x86ms->gsi[i]); } - pci_realize_and_unref(pci_dev, pci_bus, &error_fatal); + pci_realize_and_unref(pci_dev, pcms->pcibus, &error_fatal); if (xen_enabled()) { pci_device_set_intx_routing_notifier( @@ -292,46 +272,44 @@ static void pc_init1(MachineState *machine, * connected to the IOAPIC directly. * These additional routes can be discovered through ACPI. 
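The old Xen notifier decoded the PIIX PIRQ route control registers (config space 0x60-0x63) by hand: bit 7 means "routing disabled" and the low nibble is the target ISA IRQ. The new code asks pci_device_route_intx_to_irq() for the same answer. A standalone sketch of the register decoding that was removed; the sample register values are invented.

#include <stdint.h>
#include <stdio.h>

static uint8_t pirq_route_to_irq(uint8_t pirqrc)
{
    if (pirqrc & 0x80) {
        return 0;             /* routing disabled: report no IRQ */
    }
    return pirqrc & 0x0f;     /* low nibble: target ISA IRQ */
}

int main(void)
{
    const uint8_t regs[4] = { 0x0a, 0x0b, 0x80, 0x0b };   /* example values */

    for (int i = 0; i < 4; i++) {
        printf("PIRQ%c -> IRQ %u\n", 'A' + i, pirq_route_to_irq(regs[i]));
    }
    return 0;
}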
*/ - pci_bus_irqs(pci_bus, xen_intx_set_irq, pci_dev, + pci_bus_irqs(pcms->pcibus, xen_intx_set_irq, pci_dev, XEN_IOAPIC_NUM_PIRQS); } isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(pci_dev), "isa.0")); - rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(pci_dev), - "rtc")); + x86ms->rtc = ISA_DEVICE(object_resolve_path_component(OBJECT(pci_dev), + "rtc")); piix4_pm = object_resolve_path_component(OBJECT(pci_dev), "pm"); dev = DEVICE(object_resolve_path_component(OBJECT(pci_dev), "ide")); pci_ide_create_devs(PCI_DEVICE(dev)); - idebus[0] = qdev_get_child_bus(dev, "ide.0"); - idebus[1] = qdev_get_child_bus(dev, "ide.1"); + pcms->idebus[0] = qdev_get_child_bus(dev, "ide.0"); + pcms->idebus[1] = qdev_get_child_bus(dev, "ide.1"); } else { isa_bus = isa_bus_new(NULL, system_memory, system_io, &error_abort); isa_bus_register_input_irqs(isa_bus, x86ms->gsi); - rtc_state = isa_new(TYPE_MC146818_RTC); - qdev_prop_set_int32(DEVICE(rtc_state), "base_year", 2000); - isa_realize_and_unref(rtc_state, isa_bus, &error_fatal); + x86ms->rtc = isa_new(TYPE_MC146818_RTC); + qdev_prop_set_int32(DEVICE(x86ms->rtc), "base_year", 2000); + isa_realize_and_unref(x86ms->rtc, isa_bus, &error_fatal); - i8257_dma_init(isa_bus, 0); + i8257_dma_init(OBJECT(machine), isa_bus, 0); pcms->hpet_enabled = false; - idebus[0] = NULL; - idebus[1] = NULL; } if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) { pc_i8259_create(isa_bus, gsi_state->i8259_irq); } - if (pcmc->pci_enabled) { - ioapic_init_gsi(gsi_state, "i440fx"); + if (phb) { + ioapic_init_gsi(gsi_state, phb); } if (tcg_enabled()) { x86_register_ferr_irq(x86ms->gsi[13]); } - pc_vga_init(isa_bus, pcmc->pci_enabled ? pci_bus : NULL); + pc_vga_init(isa_bus, pcmc->pci_enabled ? pcms->pcibus : NULL); assert(pcms->vmport != ON_OFF_AUTO__MAX); if (pcms->vmport == ON_OFF_AUTO_AUTO) { @@ -339,16 +317,13 @@ static void pc_init1(MachineState *machine, } /* init basic PC hardware */ - pc_basic_device_init(pcms, isa_bus, x86ms->gsi, rtc_state, true, + pc_basic_device_init(pcms, isa_bus, x86ms->gsi, x86ms->rtc, true, 0x4); - pc_nic_init(pcmc, isa_bus, pci_bus, pcms->xenbus); + pc_nic_init(pcmc, isa_bus, pcms->pcibus); - if (pcmc->pci_enabled) { - pc_cmos_init(pcms, idebus[0], idebus[1], rtc_state); - } #ifdef CONFIG_IDE_ISA - else { + if (!pcmc->pci_enabled) { DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; int i; @@ -364,9 +339,8 @@ static void pc_init1(MachineState *machine, * second one. */ busname[4] = '0' + i; - idebus[i] = qdev_get_child_bus(DEVICE(dev), busname); + pcms->idebus[i] = qdev_get_child_bus(DEVICE(dev), busname); } - pc_cmos_init(pcms, idebus[0], idebus[1], rtc_state); } #endif @@ -408,9 +382,6 @@ static const QEnumLookup PCSouthBridgeOption_lookup = { .size = PC_SOUTH_BRIDGE_OPTION_MAX }; -#define NotifyVmexitOption_str(val) \ - qapi_enum_lookup(&NotifyVmexitOption_lookup, (val)) - static int pc_get_south_bridge(Object *obj, Error **errp) { PCMachineState *pcms = PC_MACHINE(obj); @@ -478,7 +449,7 @@ static void pc_compat_2_0_fn(MachineState *machine) #ifdef CONFIG_ISAPC static void pc_init_isa(MachineState *machine) { - pc_init1(machine, TYPE_I440FX_PCI_HOST_BRIDGE, TYPE_I440FX_PCI_DEVICE); + pc_init1(machine, NULL); } #endif @@ -488,9 +459,7 @@ static void pc_xen_hvm_init_pci(MachineState *machine) const char *pci_type = xen_igd_gfx_pt_enabled() ? 
TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE : TYPE_I440FX_PCI_DEVICE; - pc_init1(machine, - TYPE_I440FX_PCI_HOST_BRIDGE, - pci_type); + pc_init1(machine, pci_type); } static void pc_xen_hvm_init(MachineState *machine) @@ -503,8 +472,8 @@ static void pc_xen_hvm_init(MachineState *machine) } pc_xen_hvm_init_pci(machine); - xen_igd_reserve_slot(pcms->bus); - pci_create_simple(pcms->bus, -1, "xen-platform"); + xen_igd_reserve_slot(pcms->pcibus); + pci_create_simple(pcms->pcibus, -1, "xen-platform"); } #endif @@ -515,8 +484,7 @@ static void pc_xen_hvm_init(MachineState *machine) if (compat) { \ compat(machine); \ } \ - pc_init1(machine, TYPE_I440FX_PCI_HOST_BRIDGE, \ - TYPE_I440FX_PCI_DEVICE); \ + pc_init1(machine, TYPE_I440FX_PCI_DEVICE); \ } \ DEFINE_PC_MACHINE(suffix, name, pc_init_##suffix, optionfn) @@ -545,13 +513,30 @@ static void pc_i440fx_machine_options(MachineClass *m) "Use a different south bridge than PIIX3"); } -static void pc_i440fx_8_2_machine_options(MachineClass *m) +static void pc_i440fx_9_0_machine_options(MachineClass *m) { pc_i440fx_machine_options(m); m->alias = "pc"; m->is_default = true; } +DEFINE_I440FX_MACHINE(v9_0, "pc-i440fx-9.0", NULL, + pc_i440fx_9_0_machine_options); + +static void pc_i440fx_8_2_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_i440fx_9_0_machine_options(m); + m->alias = NULL; + m->is_default = false; + + compat_props_add(m->compat_props, hw_compat_8_2, hw_compat_8_2_len); + compat_props_add(m->compat_props, pc_compat_8_2, pc_compat_8_2_len); + /* For pc-i44fx-8.2 and 8.1, use SMBIOS 3.X by default */ + pcmc->default_smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_64; +} + DEFINE_I440FX_MACHINE(v8_2, "pc-i440fx-8.2", NULL, pc_i440fx_8_2_machine_options); @@ -560,8 +545,6 @@ static void pc_i440fx_8_1_machine_options(MachineClass *m) PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_i440fx_8_2_machine_options(m); - m->alias = NULL; - m->is_default = false; pcmc->broken_32bit_mem_addr_check = true; compat_props_add(m->compat_props, hw_compat_8_1, hw_compat_8_1_len); diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 4f3e5412f6b..c7bc8a2041f 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -30,6 +30,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "hw/acpi/acpi.h" #include "hw/char/parallel-isa.h" #include "hw/loader.h" #include "hw/i2c/smbus_eeprom.h" @@ -44,10 +45,10 @@ #include "hw/i386/pc.h" #include "hw/i386/amd_iommu.h" #include "hw/i386/intel_iommu.h" +#include "hw/virtio/virtio-iommu.h" #include "hw/display/ramfb.h" -#include "hw/firmware/smbios.h" #include "hw/ide/pci.h" -#include "hw/ide/ahci.h" +#include "hw/ide/ahci-pci.h" #include "hw/intc/ioapic.h" #include "hw/southbridge/ich9.h" #include "hw/usb.h" @@ -63,6 +64,12 @@ /* ICH9 AHCI has 6 ports */ #define MAX_SATA_PORTS 6 +static GlobalProperty pc_q35_compat_defaults[] = { + { TYPE_VIRTIO_IOMMU_PCI, "aw-bits", "39" }, +}; +static const size_t pc_q35_compat_defaults_len = + G_N_ELEMENTS(pc_q35_compat_defaults); + struct ehci_companions { const char *name; int func; @@ -123,19 +130,14 @@ static void pc_q35_init(MachineState *machine) PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); X86MachineState *x86ms = X86_MACHINE(machine); Object *phb; - PCIBus *host_bus; PCIDevice *lpc; DeviceState *lpc_dev; - BusState *idebus[MAX_SATA_PORTS]; - ISADevice *rtc_state; MemoryRegion *system_memory = get_system_memory(); MemoryRegion *system_io = get_system_io(); - MemoryRegion *pci_memory; - MemoryRegion *rom_memory; + MemoryRegion *pci_memory = 
g_new(MemoryRegion, 1); GSIState *gsi_state; ISABus *isa_bus; int i; - PCIDevice *ahci; ram_addr_t lowmem; DriveInfo *hd[MAX_SATA_PORTS]; MachineClass *mc = MACHINE_GET_CLASS(machine); @@ -143,6 +145,8 @@ static void pc_q35_init(MachineState *machine) bool keep_pci_slot_hpc; uint64_t pci_hole64_size = 0; + assert(pcmc->pci_enabled); + /* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory * and 256 Mbytes for PCI Express Enhanced Configuration Access Mapping * also known as MMCFG). @@ -189,37 +193,16 @@ static void pc_q35_init(MachineState *machine) kvmclock_create(pcmc->kvmclock_create_always); } - /* pci enabled */ - if (pcmc->pci_enabled) { - pci_memory = g_new(MemoryRegion, 1); - memory_region_init(pci_memory, NULL, "pci", UINT64_MAX); - rom_memory = pci_memory; - } else { - pci_memory = NULL; - rom_memory = system_memory; - } - - pc_guest_info_init(pcms); - - if (pcmc->smbios_defaults) { - /* These values are guest ABI, do not change */ - smbios_set_defaults("QEMU", mc->desc, - mc->name, pcmc->smbios_legacy_mode, - pcmc->smbios_uuid_encoded, - pcms->smbios_entry_point_type); - } - /* create pci host bus */ phb = OBJECT(qdev_new(TYPE_Q35_HOST_DEVICE)); - if (pcmc->pci_enabled) { - pci_hole64_size = object_property_get_uint(phb, - PCI_HOST_PROP_PCI_HOLE64_SIZE, - &error_abort); - } + pci_hole64_size = object_property_get_uint(phb, + PCI_HOST_PROP_PCI_HOLE64_SIZE, + &error_abort); /* allocate ram and load rom/bios */ - pc_memory_init(pcms, system_memory, rom_memory, pci_hole64_size); + memory_region_init(pci_memory, NULL, "pci", UINT64_MAX); + pc_memory_init(pcms, system_memory, pci_memory, pci_hole64_size); object_property_add_child(OBJECT(machine), "q35", phb); object_property_set_link(phb, PCI_HOST_PROP_RAM_MEM, @@ -239,24 +222,23 @@ static void pc_q35_init(MachineState *machine) sysbus_realize_and_unref(SYS_BUS_DEVICE(phb), &error_fatal); /* pci */ - host_bus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pcie.0")); - pcms->bus = host_bus; + pcms->pcibus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pcie.0")); /* irq lines */ - gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled); + gsi_state = pc_gsi_create(&x86ms->gsi, true); /* create ISA bus */ lpc = pci_new_multifunction(PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC), TYPE_ICH9_LPC_DEVICE); - qdev_prop_set_bit(DEVICE(lpc), "smm-enabled", - x86_machine_is_smm_enabled(x86ms)); lpc_dev = DEVICE(lpc); + qdev_prop_set_bit(lpc_dev, "smm-enabled", + x86_machine_is_smm_enabled(x86ms)); for (i = 0; i < IOAPIC_NUM_PINS; i++) { qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, x86ms->gsi[i]); } - pci_realize_and_unref(lpc, host_bus, &error_fatal); + pci_realize_and_unref(lpc, pcms->pcibus, &error_fatal); - rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(lpc), "rtc")); + x86ms->rtc = ISA_DEVICE(object_resolve_path_component(OBJECT(lpc), "rtc")); object_property_add_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP, TYPE_HOTPLUG_HANDLER, @@ -286,9 +268,7 @@ static void pc_q35_init(MachineState *machine) pc_i8259_create(isa_bus, gsi_state->i8259_irq); } - if (pcmc->pci_enabled) { - ioapic_init_gsi(gsi_state, "q35"); - } + ioapic_init_gsi(gsi_state, OBJECT(phb)); if (tcg_enabled()) { x86_register_ferr_irq(x86ms->gsi[13]); @@ -300,34 +280,36 @@ static void pc_q35_init(MachineState *machine) } /* init basic PC hardware */ - pc_basic_device_init(pcms, isa_bus, x86ms->gsi, rtc_state, !mc->no_floppy, + pc_basic_device_init(pcms, isa_bus, x86ms->gsi, x86ms->rtc, !mc->no_floppy, 0xff0104); if (pcms->sata_enabled) { + PCIDevice 
*pdev; + AHCIPCIState *ich9; + /* ahci and SATA device, for q35 1 ahci controller is built-in */ - ahci = pci_create_simple_multifunction(host_bus, + pdev = pci_create_simple_multifunction(pcms->pcibus, PCI_DEVFN(ICH9_SATA1_DEV, ICH9_SATA1_FUNC), "ich9-ahci"); - idebus[0] = qdev_get_child_bus(&ahci->qdev, "ide.0"); - idebus[1] = qdev_get_child_bus(&ahci->qdev, "ide.1"); - g_assert(MAX_SATA_PORTS == ahci_get_num_ports(ahci)); - ide_drive_get(hd, ahci_get_num_ports(ahci)); - ahci_ide_create_devs(ahci, hd); - } else { - idebus[0] = idebus[1] = NULL; + ich9 = ICH9_AHCI(pdev); + pcms->idebus[0] = qdev_get_child_bus(DEVICE(pdev), "ide.0"); + pcms->idebus[1] = qdev_get_child_bus(DEVICE(pdev), "ide.1"); + g_assert(MAX_SATA_PORTS == ich9->ahci.ports); + ide_drive_get(hd, ich9->ahci.ports); + ahci_ide_create_devs(&ich9->ahci, hd); } if (machine_usb(machine)) { /* Should we create 6 UHCI according to ich9 spec? */ - ehci_create_ich9_with_companions(host_bus, 0x1d); + ehci_create_ich9_with_companions(pcms->pcibus, 0x1d); } if (pcms->smbus_enabled) { PCIDevice *smb; /* TODO: Populate SPD eeprom data. */ - smb = pci_create_simple_multifunction(host_bus, + smb = pci_create_simple_multifunction(pcms->pcibus, PCI_DEVFN(ICH9_SMB_DEV, ICH9_SMB_FUNC), TYPE_ICH9_SMB_DEVICE); @@ -336,11 +318,9 @@ static void pc_q35_init(MachineState *machine) smbus_eeprom_init(pcms->smbus, 8, NULL, 0); } - pc_cmos_init(pcms, idebus[0], idebus[1], rtc_state); - /* the rest devices to which pci devfn is automatically assigned */ - pc_vga_init(isa_bus, host_bus); - pc_nic_init(pcmc, isa_bus, host_bus, pcms->xenbus); + pc_vga_init(isa_bus, pcms->pcibus); + pc_nic_init(pcmc, isa_bus, pcms->pcibus); if (machine->nvdimms_state->is_enabled) { nvdimm_init_acpi_state(machine->nvdimms_state, system_io, @@ -375,20 +355,37 @@ static void pc_q35_machine_options(MachineClass *m) m->default_nic = "e1000e"; m->default_kernel_irqchip_split = false; m->no_floppy = 1; - m->max_cpus = 1024; + m->max_cpus = 4096; m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); machine_class_allow_dynamic_sysbus_dev(m, TYPE_AMD_IOMMU_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_INTEL_IOMMU_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE); + compat_props_add(m->compat_props, + pc_q35_compat_defaults, pc_q35_compat_defaults_len); } -static void pc_q35_8_2_machine_options(MachineClass *m) +static void pc_q35_9_0_machine_options(MachineClass *m) { pc_q35_machine_options(m); m->alias = "q35"; } +DEFINE_Q35_MACHINE(v9_0, "pc-q35-9.0", NULL, + pc_q35_9_0_machine_options); + +static void pc_q35_8_2_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + pc_q35_9_0_machine_options(m); + m->alias = NULL; + m->max_cpus = 1024; + compat_props_add(m->compat_props, hw_compat_8_2, hw_compat_8_2_len); + compat_props_add(m->compat_props, pc_compat_8_2, pc_compat_8_2_len); + /* For pc-q35-8.2 and 8.1, use SMBIOS 3.X by default */ + pcmc->default_smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_64; +} + DEFINE_Q35_MACHINE(v8_2, "pc-q35-8.2", NULL, pc_q35_8_2_machine_options); @@ -396,7 +393,6 @@ static void pc_q35_8_1_machine_options(MachineClass *m) { PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_q35_8_2_machine_options(m); - m->alias = NULL; pcmc->broken_32bit_mem_addr_check = true; compat_props_add(m->compat_props, hw_compat_8_1, hw_compat_8_1_len); compat_props_add(m->compat_props, pc_compat_8_1, pc_compat_8_1_len); diff --git a/hw/i386/pc_sysfw.c 
b/hw/i386/pc_sysfw.c index c8d9e71b889..3efabbbab21 100644 --- a/hw/i386/pc_sysfw.c +++ b/hw/i386/pc_sysfw.c @@ -107,17 +107,15 @@ void pc_system_flash_cleanup_unused(PCMachineState *pcms) { char *prop_name; int i; - Object *dev_obj; assert(PC_MACHINE_GET_CLASS(pcms)->pci_enabled); for (i = 0; i < ARRAY_SIZE(pcms->flash); i++) { - dev_obj = OBJECT(pcms->flash[i]); - if (!object_property_get_bool(dev_obj, "realized", &error_abort)) { + if (!qdev_is_realized(DEVICE(pcms->flash[i]))) { prop_name = g_strdup_printf("pflash%d", i); object_property_del(OBJECT(pcms), prop_name); g_free(prop_name); - object_unparent(dev_obj); + object_unparent(OBJECT(pcms->flash[i])); pcms->flash[i] = NULL; } } diff --git a/hw/i386/port92.c b/hw/i386/port92.c index e1379a4f980..b25157f6e4b 100644 --- a/hw/i386/port92.c +++ b/hw/i386/port92.c @@ -10,6 +10,7 @@ #include "sysemu/runstate.h" #include "migration/vmstate.h" #include "hw/irq.h" +#include "hw/isa/isa.h" #include "hw/i386/pc.h" #include "trace.h" #include "qom/object.h" @@ -54,7 +55,7 @@ static const VMStateDescription vmstate_port92_isa = { .name = "port92", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(outport, Port92State), VMSTATE_END_OF_LIST() } diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c index 70305547d4a..de76397bcfb 100644 --- a/hw/i386/sgx.c +++ b/hw/i386/sgx.c @@ -286,7 +286,6 @@ void pc_machine_init_sgx_epc(PCMachineState *pcms) SGXEPCState *sgx_epc = &pcms->sgx_epc; X86MachineState *x86ms = X86_MACHINE(pcms); SgxEPCList *list = NULL; - Object *obj; memset(sgx_epc, 0, sizeof(SGXEPCState)); if (!x86ms->sgx_epc_list) { @@ -300,16 +299,15 @@ void pc_machine_init_sgx_epc(PCMachineState *pcms) &sgx_epc->mr); for (list = x86ms->sgx_epc_list; list; list = list->next) { - obj = object_new("sgx-epc"); + DeviceState *dev = qdev_new(TYPE_SGX_EPC); /* set the memdev link with memory backend */ - object_property_parse(obj, SGX_EPC_MEMDEV_PROP, list->value->memdev, - &error_fatal); + object_property_parse(OBJECT(dev), SGX_EPC_MEMDEV_PROP, + list->value->memdev, &error_fatal); /* set the numa node property for sgx epc object */ - object_property_set_uint(obj, SGX_EPC_NUMA_NODE_PROP, list->value->node, - &error_fatal); - object_property_set_bool(obj, "realized", true, &error_fatal); - object_unref(obj); + object_property_set_uint(OBJECT(dev), SGX_EPC_NUMA_NODE_PROP, + list->value->node, &error_fatal); + qdev_realize_and_unref(dev, NULL, &error_fatal); } if ((sgx_epc->base + sgx_epc->size) < sgx_epc->base) { diff --git a/hw/i386/kvmvapic.c b/hw/i386/vapic.c similarity index 97% rename from hw/i386/kvmvapic.c rename to hw/i386/vapic.c index 43f8a8f679e..f5b1db7e5fc 100644 --- a/hw/i386/kvmvapic.c +++ b/hw/i386/vapic.c @@ -16,6 +16,7 @@ #include "sysemu/hw_accel.h" #include "sysemu/kvm.h" #include "sysemu/runstate.h" +#include "exec/address-spaces.h" #include "hw/i386/apic_internal.h" #include "hw/sysbus.h" #include "hw/boards.h" @@ -57,6 +58,7 @@ typedef struct GuestROMState { struct VAPICROMState { SysBusDevice busdev; + MemoryRegion io; MemoryRegion rom; uint32_t state; @@ -580,19 +582,17 @@ static int vapic_map_rom_writable(VAPICROMState *s) { hwaddr rom_paddr = s->rom_state_paddr & ROM_BLOCK_MASK; MemoryRegionSection section; - MemoryRegion *as; + MemoryRegion *mr = get_system_memory(); size_t rom_size; uint8_t *ram; - as = sysbus_address_space(&s->busdev); - if (s->rom_mapped_writable) { - memory_region_del_subregion(as, &s->rom); + memory_region_del_subregion(mr, &s->rom); 
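The recurring ".fields = (const VMStateField[]) { ... }" change adds a const qualifier to the compound literal so the field tables can live in read-only data. A stand-in sketch of the idiom with simplified types; Field and Description here are not the real VMState structures.

#include <stdio.h>

typedef struct Field {
    const char *name;
    unsigned size;
} Field;

typedef struct Description {
    const char *name;
    const Field *fields;              /* now points at const data */
} Description;

static const Description vmstate_port92_like = {
    .name = "port92",
    .fields = (const Field[]) {       /* const compound literal */
        { "outport", 1 },
        { NULL, 0 },                  /* end-of-list sentinel */
    },
};

int main(void)
{
    for (const Field *f = vmstate_port92_like.fields; f->name; f++) {
        printf("%s: %u byte(s)\n", f->name, f->size);
    }
    return 0;
}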
object_unparent(OBJECT(&s->rom)); } /* grab RAM memory region (region @rom_paddr may still be pc.rom) */ - section = memory_region_find(as, 0, 1); + section = memory_region_find(mr, 0, 1); /* read ROM size from RAM region */ if (rom_paddr + 2 >= memory_region_size(section.mr)) { @@ -613,7 +613,7 @@ static int vapic_map_rom_writable(VAPICROMState *s) memory_region_init_alias(&s->rom, OBJECT(s), "kvmvapic-rom", section.mr, rom_paddr, rom_size); - memory_region_add_subregion_overlap(as, rom_paddr, &s->rom, 1000); + memory_region_add_subregion_overlap(mr, rom_paddr, &s->rom, 1000); s->rom_mapped_writable = true; memory_region_unref(section.mr); @@ -727,7 +727,7 @@ static void vapic_realize(DeviceState *dev, Error **errp) VAPICROMState *s = VAPIC(dev); memory_region_init_io(&s->io, OBJECT(s), &vapic_ops, s, "kvmvapic", 2); - sysbus_add_io(sbd, VAPIC_IO_PORT, &s->io); + memory_region_add_subregion(get_system_io(), VAPIC_IO_PORT, &s->io); sysbus_init_ioports(sbd, VAPIC_IO_PORT, 2); option_rom[nb_option_roms].name = "kvmvapic.bin"; @@ -747,8 +747,7 @@ static void do_vapic_enable(CPUState *cs, run_on_cpu_data data) s->state = VAPIC_ACTIVE; } -static void kvmvapic_vm_state_change(void *opaque, bool running, - RunState state) +static void vapic_vm_state_change(void *opaque, bool running, RunState state) { MachineState *ms = MACHINE(qdev_get_machine()); VAPICROMState *s = opaque; @@ -793,7 +792,7 @@ static int vapic_post_load(void *opaque, int version_id) if (!s->vmsentry) { s->vmsentry = - qemu_add_vm_change_state_handler(kvmvapic_vm_state_change, s); + qemu_add_vm_change_state_handler(vapic_vm_state_change, s); } return 0; } @@ -802,7 +801,7 @@ static const VMStateDescription vmstate_handlers = { .name = "kvmvapic-handlers", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(set_tpr, VAPICHandlers), VMSTATE_UINT32(set_tpr_eax, VAPICHandlers), VMSTATE_UINT32_ARRAY(get_tpr, VAPICHandlers, 8), @@ -815,7 +814,7 @@ static const VMStateDescription vmstate_guest_rom = { .name = "kvmvapic-guest-rom", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UNUSED(8), /* signature */ VMSTATE_UINT32(vaddr, GuestROMState), VMSTATE_UINT32(fixup_start, GuestROMState), @@ -835,7 +834,7 @@ static const VMStateDescription vmstate_vapic = { .version_id = 1, .minimum_version_id = 1, .post_load = vapic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(rom_state, VAPICROMState, 0, vmstate_guest_rom, GuestROMState), VMSTATE_UINT32(state, VAPICROMState), diff --git a/hw/i386/vmmouse.c b/hw/i386/vmmouse.c index 91320afa2f6..a8d014d09a8 100644 --- a/hw/i386/vmmouse.c +++ b/hw/i386/vmmouse.c @@ -277,7 +277,7 @@ static const VMStateDescription vmstate_vmmouse = { .version_id = 0, .minimum_version_id = 0, .post_load = vmmouse_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(queue_size, VMMouseState, NULL), VMSTATE_UINT32_ARRAY(queue, VMMouseState, VMMOUSE_QUEUE_SIZE), VMSTATE_UINT16(nb_queue, VMMouseState), diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c index 726e9e1d16e..60af8962253 100644 --- a/hw/i386/x86-iommu.c +++ b/hw/i386/x86-iommu.c @@ -101,7 +101,7 @@ static void x86_iommu_realize(DeviceState *dev, Error **errp) QLIST_INIT(&x86_iommu->iec_notifiers); bool irq_all_kernel = kvm_irqchip_in_kernel() && !kvm_irqchip_is_split(); - if (!pcms || !pcms->bus) { + if (!pcms || !pcms->pcibus) { 
error_setg(errp, "Machine-type '%s' not supported by IOMMU", mc->name); return; diff --git a/hw/i386/x86.c b/hw/i386/x86.c index 2b6291ad8d5..ffbda48917f 100644 --- a/hw/i386/x86.c +++ b/hw/i386/x86.c @@ -137,7 +137,7 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version) * a literal `0` in configurations where kvm_* aren't defined) */ if (kvm_enabled() && x86ms->apic_id_limit > 255 && - (!kvm_irqchip_in_kernel() || !kvm_enable_x2apic())) { + kvm_irqchip_in_kernel() && !kvm_enable_x2apic()) { error_report("current -smp configuration requires kernel " "irqchip and X2APIC API support."); exit(EXIT_FAILURE); @@ -147,6 +147,10 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version) kvm_set_max_apic_id(x86ms->apic_id_limit); } + if (!kvm_irqchip_in_kernel()) { + apic_set_max_apic_id(x86ms->apic_id_limit); + } + possible_cpus = mc->possible_cpu_arch_ids(ms); for (i = 0; i < ms->smp.cpus; i++) { x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal); @@ -221,7 +225,7 @@ void x86_cpu_plug(HotplugHandler *hotplug_dev, } found_cpu = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, NULL); - found_cpu->cpu = OBJECT(dev); + found_cpu->cpu = CPU(dev); out: error_propagate(errp, local_err); } @@ -516,10 +520,10 @@ static void x86_nmi(NMIState *n, int cpu_index, Error **errp) CPU_FOREACH(cs) { X86CPU *cpu = X86_CPU(cs); - if (!cpu->apic_state) { - cpu_interrupt(cs, CPU_INTERRUPT_NMI); - } else { + if (cpu_is_apic_enabled(cpu->apic_state)) { apic_deliver_nmi(cpu->apic_state); + } else { + cpu_interrupt(cs, CPU_INTERRUPT_NMI); } } } @@ -551,7 +555,7 @@ static void pic_irq_request(void *opaque, int irq, int level) X86CPU *cpu = X86_CPU(cs); trace_x86_pic_interrupt(irq, level); - if (cpu->apic_state && !kvm_irqchip_in_kernel() && + if (cpu_is_apic_enabled(cpu->apic_state) && !kvm_irqchip_in_kernel() && !whpx_apic_in_platform()) { CPU_FOREACH(cs) { cpu = X86_CPU(cs); @@ -636,20 +640,19 @@ void gsi_handler(void *opaque, int n, int level) } } -void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name) +void ioapic_init_gsi(GSIState *gsi_state, Object *parent) { DeviceState *dev; SysBusDevice *d; unsigned int i; - assert(parent_name); + assert(parent); if (kvm_ioapic_in_kernel()) { dev = qdev_new(TYPE_KVM_IOAPIC); } else { dev = qdev_new(TYPE_IOAPIC); } - object_property_add_child(object_resolve_path(parent_name, NULL), - "ioapic", OBJECT(dev)); + object_property_add_child(parent, "ioapic", OBJECT(dev)); d = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(d, &error_fatal); sysbus_mmio_map(d, 0, IO_APIC_DEFAULT_ADDRESS); diff --git a/hw/i386/xen/meson.build b/hw/i386/xen/meson.build index 3dc4c4f106e..3f0df8bc075 100644 --- a/hw/i386/xen/meson.build +++ b/hw/i386/xen/meson.build @@ -1,8 +1,10 @@ i386_ss.add(when: 'CONFIG_XEN', if_true: files( - 'xen-hvm.c', 'xen_apic.c', 'xen_pvdevice.c', )) +i386_ss.add(when: ['CONFIG_XEN', xen], if_true: files( + 'xen-hvm.c', +)) i386_ss.add(when: 'CONFIG_XEN_BUS', if_true: files( 'xen_platform.c', diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c index f42621e6742..7745cb39631 100644 --- a/hw/i386/xen/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c @@ -23,6 +23,7 @@ #include "hw/xen/xen-hvm-common.h" #include "hw/xen/arch_hvm.h" #include +#include "exec/target_page.h" static MemoryRegion ram_640k, ram_lo, ram_hi; static MemoryRegion *framebuffer; @@ -149,12 +150,12 @@ static void xen_ram_init(PCMachineState *pcms, */ block_len = (4 * GiB) + x86ms->above_4g_mem_size; } - memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len, 
+ memory_region_init_ram(&xen_memory, NULL, "xen.ram", block_len, &error_fatal); - *ram_memory_p = &ram_memory; + *ram_memory_p = &xen_memory; memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k", - &ram_memory, 0, 0xa0000); + &xen_memory, 0, 0xa0000); memory_region_add_subregion(sysmem, 0, &ram_640k); /* Skip of the VGA IO memory space, it will be registered later by the VGA * emulated device. @@ -163,22 +164,23 @@ static void xen_ram_init(PCMachineState *pcms, * the Options ROM, so it is registered here as RAM. */ memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", - &ram_memory, 0xc0000, + &xen_memory, 0xc0000, x86ms->below_4g_mem_size - 0xc0000); memory_region_add_subregion(sysmem, 0xc0000, &ram_lo); if (x86ms->above_4g_mem_size > 0) { memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", - &ram_memory, 0x100000000ULL, + &xen_memory, 0x100000000ULL, x86ms->above_4g_mem_size); memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi); } } -static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size) +static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size, + int page_mask) { XenPhysmap *physmap = NULL; - start_addr &= TARGET_PAGE_MASK; + start_addr &= page_mask; QLIST_FOREACH(physmap, &xen_physmap, list) { if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) { @@ -188,9 +190,10 @@ static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size) return NULL; } -static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size) +static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size, + int page_mask) { - hwaddr addr = phys_offset & TARGET_PAGE_MASK; + hwaddr addr = phys_offset & page_mask; XenPhysmap *physmap = NULL; QLIST_FOREACH(physmap, &xen_physmap, list) { @@ -245,6 +248,9 @@ static int xen_add_to_physmap(XenIOState *state, MemoryRegion *mr, hwaddr offset_within_region) { + unsigned target_page_bits = qemu_target_page_bits(); + int page_size = qemu_target_page_size(); + int page_mask = -page_size; unsigned long nr_pages; int rc = 0; XenPhysmap *physmap = NULL; @@ -252,7 +258,7 @@ static int xen_add_to_physmap(XenIOState *state, hwaddr phys_offset = memory_region_get_ram_addr(mr); const char *mr_name; - if (get_physmapping(start_addr, size)) { + if (get_physmapping(start_addr, size, page_mask)) { return 0; } if (size <= 0) { @@ -292,9 +298,9 @@ static int xen_add_to_physmap(XenIOState *state, return 0; } - pfn = phys_offset >> TARGET_PAGE_BITS; - start_gpfn = start_addr >> TARGET_PAGE_BITS; - nr_pages = size >> TARGET_PAGE_BITS; + pfn = phys_offset >> target_page_bits; + start_gpfn = start_addr >> target_page_bits; + nr_pages = size >> target_page_bits; rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn, start_gpfn); if (rc) { @@ -308,8 +314,8 @@ static int xen_add_to_physmap(XenIOState *state, } rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid, - start_addr >> TARGET_PAGE_BITS, - (start_addr + size - 1) >> TARGET_PAGE_BITS, + start_addr >> target_page_bits, + (start_addr + size - 1) >> target_page_bits, XEN_DOMCTL_MEM_CACHEATTR_WB); if (rc) { error_report("pin_memory_cacheattr failed: %s", strerror(errno)); @@ -321,11 +327,14 @@ static int xen_remove_from_physmap(XenIOState *state, hwaddr start_addr, ram_addr_t size) { + unsigned target_page_bits = qemu_target_page_bits(); + int page_size = qemu_target_page_size(); + int page_mask = -page_size; int rc = 0; XenPhysmap *physmap = NULL; hwaddr phys_offset = 0; - physmap = get_physmapping(start_addr, size); + physmap = 
get_physmapping(start_addr, size, page_mask); if (physmap == NULL) { return -1; } @@ -336,9 +345,9 @@ static int xen_remove_from_physmap(XenIOState *state, DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at " "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset); - size >>= TARGET_PAGE_BITS; - start_addr >>= TARGET_PAGE_BITS; - phys_offset >>= TARGET_PAGE_BITS; + size >>= target_page_bits; + start_addr >>= target_page_bits; + phys_offset >>= target_page_bits; rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr, phys_offset); if (rc) { @@ -367,13 +376,16 @@ static void xen_sync_dirty_bitmap(XenIOState *state, hwaddr start_addr, ram_addr_t size) { - hwaddr npages = size >> TARGET_PAGE_BITS; + unsigned target_page_bits = qemu_target_page_bits(); + int page_size = qemu_target_page_size(); + int page_mask = -page_size; + hwaddr npages = size >> target_page_bits; const int width = sizeof(unsigned long) * 8; size_t bitmap_size = DIV_ROUND_UP(npages, width); int rc, i, j; const XenPhysmap *physmap = NULL; - physmap = get_physmapping(start_addr, size); + physmap = get_physmapping(start_addr, size, page_mask); if (physmap == NULL) { /* not handled */ return; @@ -387,7 +399,7 @@ static void xen_sync_dirty_bitmap(XenIOState *state, return; } - rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS, + rc = xen_track_dirty_vram(xen_domid, start_addr >> target_page_bits, npages, dirty_bitmap); if (rc < 0) { #ifndef ENODATA @@ -408,8 +420,7 @@ static void xen_sync_dirty_bitmap(XenIOState *state, j = ctzl(map); map &= ~(1ul << j); memory_region_set_dirty(framebuffer, - (i * width + j) * TARGET_PAGE_SIZE, - TARGET_PAGE_SIZE); + (i * width + j) * page_size, page_size); }; } } @@ -629,17 +640,21 @@ void xen_register_framebuffer(MemoryRegion *mr) void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) { + unsigned target_page_bits = qemu_target_page_bits(); + int page_size = qemu_target_page_size(); + int page_mask = -page_size; + if (unlikely(xen_in_migration)) { int rc; ram_addr_t start_pfn, nb_pages; - start = xen_phys_offset_to_gaddr(start, length); + start = xen_phys_offset_to_gaddr(start, length, page_mask); if (length == 0) { - length = TARGET_PAGE_SIZE; + length = page_size; } - start_pfn = start >> TARGET_PAGE_BITS; - nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS) + start_pfn = start >> target_page_bits; + nb_pages = ((start + length + page_size - 1) >> target_page_bits) - start_pfn; rc = xen_modified_memory(xen_domid, start_pfn, nb_pages); if (rc) { @@ -662,6 +677,9 @@ void qmp_xen_set_global_dirty_log(bool enable, Error **errp) void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section, bool add) { + unsigned target_page_bits = qemu_target_page_bits(); + int page_size = qemu_target_page_size(); + int page_mask = -page_size; hwaddr start_addr = section->offset_within_address_space; ram_addr_t size = int128_get64(section->size); bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA); @@ -677,8 +695,8 @@ void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section, trace_xen_client_set_memory(start_addr, size, log_dirty); - start_addr &= TARGET_PAGE_MASK; - size = TARGET_PAGE_ALIGN(size); + start_addr &= page_mask; + size = ROUND_UP(size, page_size); if (add) { if (!memory_region_is_rom(section->mr)) { @@ -687,8 +705,8 @@ void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section, } else { mem_type = HVMMEM_ram_ro; if (xen_set_mem_type(xen_domid, 
mem_type, - start_addr >> TARGET_PAGE_BITS, - size >> TARGET_PAGE_BITS)) { + start_addr >> target_page_bits, + size >> target_page_bits)) { DPRINTF("xen_set_mem_type error, addr: "HWADDR_FMT_plx"\n", start_addr); } diff --git a/hw/i386/xen/xen_apic.c b/hw/i386/xen/xen_apic.c index 7c7a60b166e..101e16a7662 100644 --- a/hw/i386/xen/xen_apic.c +++ b/hw/i386/xen/xen_apic.c @@ -49,8 +49,9 @@ static void xen_apic_realize(DeviceState *dev, Error **errp) msi_nonbroken = true; } -static void xen_apic_set_base(APICCommonState *s, uint64_t val) +static int xen_apic_set_base(APICCommonState *s, uint64_t val) { + return 0; } static void xen_apic_set_tpr(APICCommonState *s, uint8_t val) diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c index ef7d3fc05f0..708488af32d 100644 --- a/hw/i386/xen/xen_platform.c +++ b/hw/i386/xen/xen_platform.c @@ -537,7 +537,7 @@ static const VMStateDescription vmstate_xen_platform = { .version_id = 4, .minimum_version_id = 4, .post_load = xen_platform_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIXenPlatformState), VMSTATE_UINT8(flags, PCIXenPlatformState), VMSTATE_END_OF_LIST() diff --git a/hw/i386/xen/xen_pvdevice.c b/hw/i386/xen/xen_pvdevice.c index e62e06622b0..ed621531d81 100644 --- a/hw/i386/xen/xen_pvdevice.c +++ b/hw/i386/xen/xen_pvdevice.c @@ -77,7 +77,7 @@ static const VMStateDescription vmstate_xen_pvdevice = { .name = "xen-pvdevice", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, XenPVDevice), VMSTATE_END_OF_LIST() } diff --git a/hw/ide/Kconfig b/hw/ide/Kconfig index dd85fa3619f..6dfc5a21292 100644 --- a/hw/ide/Kconfig +++ b/hw/ide/Kconfig @@ -1,51 +1,58 @@ config IDE_CORE bool -config IDE_QDEV +config IDE_BUS bool select IDE_CORE +config IDE_DEV + bool + depends on IDE_BUS + config IDE_PCI bool depends on PCI - select IDE_QDEV + select IDE_BUS + select IDE_DEV config IDE_ISA bool depends on ISA_BUS - select IDE_QDEV + select IDE_BUS + select IDE_DEV config IDE_PIIX bool select IDE_PCI - select IDE_QDEV config IDE_CMD646 bool select IDE_PCI - select IDE_QDEV config IDE_MACIO bool - select IDE_QDEV + select IDE_BUS + select IDE_DEV config IDE_MMIO bool - select IDE_QDEV + select IDE_BUS + select IDE_DEV config IDE_VIA bool select IDE_PCI - select IDE_QDEV config MICRODRIVE bool - select IDE_QDEV + select IDE_BUS + select IDE_DEV depends on PCMCIA config AHCI bool - select IDE_QDEV + select IDE_BUS + select IDE_DEV config AHCI_ICH9 bool @@ -56,4 +63,7 @@ config AHCI_ICH9 config IDE_SII3112 bool select IDE_PCI - select IDE_QDEV + +config IDE_CF + bool + default y if IDE_BUS diff --git a/hw/ide/ahci-allwinner.c b/hw/ide/ahci-allwinner.c index 227e747ba72..9620de8ce84 100644 --- a/hw/ide/ahci-allwinner.c +++ b/hw/ide/ahci-allwinner.c @@ -19,9 +19,8 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "sysemu/dma.h" -#include "hw/ide/internal.h" #include "migration/vmstate.h" -#include "ahci_internal.h" +#include "hw/ide/ahci-sysbus.h" #include "trace.h" @@ -97,7 +96,7 @@ static const VMStateDescription vmstate_allwinner_ahci = { .name = "allwinner-ahci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AllwinnerAHCIState, ALLWINNER_AHCI_MMIO_SIZE / 4), VMSTATE_END_OF_LIST() diff --git a/hw/ide/ahci_internal.h b/hw/ide/ahci-internal.h similarity index 98% rename from hw/ide/ahci_internal.h 
rename to hw/ide/ahci-internal.h index c244bbd8be3..7e63ea23102 100644 --- a/hw/ide/ahci_internal.h +++ b/hw/ide/ahci-internal.h @@ -25,8 +25,8 @@ #define HW_IDE_AHCI_INTERNAL_H #include "hw/ide/ahci.h" -#include "hw/ide/internal.h" #include "hw/pci/pci_device.h" +#include "ide-internal.h" #define AHCI_MEM_BAR_SIZE 0x1000 #define AHCI_MAX_PORTS 32 @@ -324,14 +324,6 @@ struct AHCIDevice { MemReentrancyGuard mem_reentrancy_guard; }; -struct AHCIPCIState { - /*< private >*/ - PCIDevice parent_obj; - /*< public >*/ - - AHCIState ahci; -}; - extern const VMStateDescription vmstate_ahci; #define VMSTATE_AHCI(_field, _state) { \ @@ -385,7 +377,7 @@ typedef struct SDBFIS { uint32_t payload; } QEMU_PACKED SDBFIS; -void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports); +void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as); void ahci_init(AHCIState *s, DeviceState *qdev); void ahci_uninit(AHCIState *s); diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index afdc44b8e05..bfefad2965d 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -34,9 +34,11 @@ #include "qemu/module.h" #include "sysemu/block-backend.h" #include "sysemu/dma.h" -#include "hw/ide/internal.h" #include "hw/ide/pci.h" -#include "ahci_internal.h" +#include "hw/ide/ahci-pci.h" +#include "hw/ide/ahci-sysbus.h" +#include "ahci-internal.h" +#include "ide-internal.h" #include "trace.h" @@ -1613,14 +1615,14 @@ void ahci_init(AHCIState *s, DeviceState *qdev) "ahci-idp", 32); } -void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports) +void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as) { qemu_irq *irqs; int i; s->as = as; - s->ports = ports; - s->dev = g_new0(AHCIDevice, ports); + assert(s->ports > 0); + s->dev = g_new0(AHCIDevice, s->ports); ahci_reg_init(s); irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports); for (i = 0; i < s->ports; i++) { @@ -1685,7 +1687,7 @@ void ahci_reset(AHCIState *s) static const VMStateDescription vmstate_ncq_tfs = { .name = "ncq state", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(sector_count, NCQTransferState), VMSTATE_UINT64(lba, NCQTransferState), VMSTATE_UINT8(tag, NCQTransferState), @@ -1700,7 +1702,7 @@ static const VMStateDescription vmstate_ncq_tfs = { static const VMStateDescription vmstate_ahci_device = { .name = "ahci port", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_IDE_BUS(port, AHCIDevice), VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice), VMSTATE_UINT32(port_state, AHCIDevice), @@ -1817,8 +1819,8 @@ const VMStateDescription vmstate_ahci = { .name = "ahci", .version_id = 1, .post_load = ahci_state_post_load, - .fields = (VMStateField[]) { - VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports, + .fields = (const VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(dev, AHCIState, ports, vmstate_ahci_device, AHCIDevice), VMSTATE_UINT32(control_regs.cap, AHCIState), VMSTATE_UINT32(control_regs.ghc, AHCIState), @@ -1826,14 +1828,14 @@ const VMStateDescription vmstate_ahci = { VMSTATE_UINT32(control_regs.impl, AHCIState), VMSTATE_UINT32(control_regs.version, AHCIState), VMSTATE_UINT32(idp_index, AHCIState), - VMSTATE_INT32_EQUAL(ports, AHCIState, NULL), + VMSTATE_UINT32_EQUAL(ports, AHCIState, NULL), VMSTATE_END_OF_LIST() }, }; static const VMStateDescription vmstate_sysbus_ahci = { .name = "sysbus-ahci", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_AHCI(ahci, SysbusAHCIState), 
VMSTATE_END_OF_LIST() }, @@ -1861,11 +1863,11 @@ static void sysbus_ahci_realize(DeviceState *dev, Error **errp) { SysbusAHCIState *s = SYSBUS_AHCI(dev); - ahci_realize(&s->ahci, dev, &address_space_memory, s->num_ports); + ahci_realize(&s->ahci, dev, &address_space_memory); } static Property sysbus_ahci_properties[] = { - DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1), + DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, ahci.ports, 1), DEFINE_PROP_END_OF_LIST(), }; @@ -1895,18 +1897,8 @@ static void sysbus_ahci_register_types(void) type_init(sysbus_ahci_register_types) -int32_t ahci_get_num_ports(PCIDevice *dev) +void ahci_ide_create_devs(AHCIState *ahci, DriveInfo **hd) { - AHCIPCIState *d = ICH9_AHCI(dev); - AHCIState *ahci = &d->ahci; - - return ahci->ports; -} - -void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd) -{ - AHCIPCIState *d = ICH9_AHCI(dev); - AHCIState *ahci = &d->ahci; int i; for (i = 0; i < ahci->ports; i++) { @@ -1915,5 +1907,4 @@ void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd) } ide_bus_create_drive(&ahci->dev[i].port, 0, hd[i]); } - } diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c index dcc39df9a44..73ec3731844 100644 --- a/hw/ide/atapi.c +++ b/hw/ide/atapi.c @@ -24,10 +24,10 @@ */ #include "qemu/osdep.h" -#include "hw/ide/internal.h" #include "hw/scsi/scsi.h" #include "sysemu/block-backend.h" #include "scsi/constants.h" +#include "ide-internal.h" #include "trace.h" #define ATAPI_SECTOR_BITS (2 + BDRV_SECTOR_BITS) diff --git a/hw/ide/cf.c b/hw/ide/cf.c new file mode 100644 index 00000000000..2a425cb0f23 --- /dev/null +++ b/hw/ide/cf.c @@ -0,0 +1,58 @@ +/* + * ide CompactFlash support + * + * This code is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/ide/ide-dev.h" +#include "qapi/qapi-types-block.h" + +static void ide_cf_realize(IDEDevice *dev, Error **errp) +{ + ide_dev_initfn(dev, IDE_CFATA, errp); +} + +static Property ide_cf_properties[] = { + DEFINE_IDE_DEV_PROPERTIES(), + DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf), + DEFINE_PROP_BIOS_CHS_TRANS("bios-chs-trans", + IDEDrive, dev.chs_trans, BIOS_ATA_TRANSLATION_AUTO), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ide_cf_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IDEDeviceClass *k = IDE_DEVICE_CLASS(klass); + + k->realize = ide_cf_realize; + dc->fw_name = "drive"; + dc->desc = "virtual CompactFlash card"; + device_class_set_props(dc, ide_cf_properties); +} + +static const TypeInfo ide_cf_info = { + .name = "ide-cf", + .parent = TYPE_IDE_DEVICE, + .instance_size = sizeof(IDEDrive), + .class_init = ide_cf_class_init, +}; + +static void ide_cf_register_type(void) +{ + type_register_static(&ide_cf_info); +} + +type_init(ide_cf_register_type) diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c index c0bcfa44143..8cebd1b63d3 100644 --- a/hw/ide/cmd646.c +++ b/hw/ide/cmd646.c @@ -33,6 +33,7 @@ #include "sysemu/reset.h" #include "hw/ide/pci.h" +#include "ide-internal.h" #include "trace.h" /* CMD646 specific */ diff --git a/hw/ide/core.c b/hw/ide/core.c index 8a0579bff41..e8cb2dac929 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -41,7 +41,7 @@ #include "qemu/cutils.h" #include "sysemu/replay.h" #include "sysemu/runstate.h" -#include "hw/ide/internal.h" +#include "ide-internal.h" #include "trace.h" /* These values were based on a Seagate ST3500418AS but have been modified @@ -1059,7 +1059,7 @@ static void ide_sector_write_cb(void *opaque, int ret) ide_sector_write); } - if (win2k_install_hack && ((++s->irq_count % 16) == 0)) { + if (s->win2k_install_hack && ((++s->irq_count % 16) == 0)) { /* It seems there is a bug in the Windows 2000 installer HDD IDE driver which fills the disk with empty logs when the IDE write IRQ comes too early. This hack tries to correct @@ -2589,24 +2589,21 @@ static const BlockDevOps ide_hd_block_ops = { .resize_cb = ide_resize_cb, }; -int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind, - const char *version, const char *serial, const char *model, - uint64_t wwn, - uint32_t cylinders, uint32_t heads, uint32_t secs, - int chs_trans, Error **errp) +int ide_init_drive(IDEState *s, IDEDevice *dev, IDEDriveKind kind, Error **errp) { uint64_t nb_sectors; - s->blk = blk; + s->blk = dev->conf.blk; s->drive_kind = kind; - blk_get_geometry(blk, &nb_sectors); - s->cylinders = cylinders; - s->heads = s->drive_heads = heads; - s->sectors = s->drive_sectors = secs; - s->chs_trans = chs_trans; + blk_get_geometry(s->blk, &nb_sectors); + s->win2k_install_hack = dev->win2k_install_hack; + s->cylinders = dev->conf.cyls; + s->heads = s->drive_heads = dev->conf.heads; + s->sectors = s->drive_sectors = dev->conf.secs; + s->chs_trans = dev->chs_trans; s->nb_sectors = nb_sectors; - s->wwn = wwn; + s->wwn = dev->wwn; /* The SMART values should be preserved across power cycles but they aren't. 
*/ s->smart_enabled = 1; @@ -2614,26 +2611,26 @@ int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind, s->smart_errors = 0; s->smart_selftest_count = 0; if (kind == IDE_CD) { - blk_set_dev_ops(blk, &ide_cd_block_ops, s); + blk_set_dev_ops(s->blk, &ide_cd_block_ops, s); } else { if (!blk_is_inserted(s->blk)) { error_setg(errp, "Device needs media, but drive is empty"); return -1; } - if (!blk_is_writable(blk)) { + if (!blk_is_writable(s->blk)) { error_setg(errp, "Can't use a read-only drive"); return -1; } - blk_set_dev_ops(blk, &ide_hd_block_ops, s); + blk_set_dev_ops(s->blk, &ide_hd_block_ops, s); } - if (serial) { - pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial); + if (dev->serial) { + pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), dev->serial); } else { snprintf(s->drive_serial_str, sizeof(s->drive_serial_str), "QM%05d", s->drive_serial); } - if (model) { - pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model); + if (dev->model) { + pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), dev->model); } else { switch (kind) { case IDE_CD: @@ -2648,14 +2645,14 @@ int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind, } } - if (version) { - pstrcpy(s->version, sizeof(s->version), version); + if (dev->version) { + pstrcpy(s->version, sizeof(s->version), dev->version); } else { pstrcpy(s->version, sizeof(s->version), qemu_hw_version()); } ide_reset(s); - blk_iostatus_enable(blk); + blk_iostatus_enable(s->blk); return 0; } @@ -2918,7 +2915,7 @@ static const VMStateDescription vmstate_ide_atapi_gesn_state = { .version_id = 1, .minimum_version_id = 1, .needed = ide_atapi_gesn_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(events.new_media, IDEState), VMSTATE_BOOL(events.eject_request, IDEState), VMSTATE_END_OF_LIST() @@ -2930,7 +2927,7 @@ static const VMStateDescription vmstate_ide_tray_state = { .version_id = 1, .minimum_version_id = 1, .needed = ide_tray_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(tray_open, IDEState), VMSTATE_BOOL(tray_locked, IDEState), VMSTATE_END_OF_LIST() @@ -2944,7 +2941,7 @@ static const VMStateDescription vmstate_ide_drive_pio_state = { .pre_save = ide_drive_pio_pre_save, .post_load = ide_drive_pio_post_load, .needed = ide_drive_pio_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(req_nb_sectors, IDEState), VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1, vmstate_info_uint8, uint8_t), @@ -2962,7 +2959,7 @@ const VMStateDescription vmstate_ide_drive = { .version_id = 3, .minimum_version_id = 0, .post_load = ide_drive_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(mult_sectors, IDEState), VMSTATE_INT32(identify_set, IDEState), VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set), @@ -2985,7 +2982,7 @@ const VMStateDescription vmstate_ide_drive = { VMSTATE_UINT8_V(cdrom_changed, IDEState, 3), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_ide_drive_pio_state, &vmstate_ide_tray_state, &vmstate_ide_atapi_gesn_state, @@ -2998,7 +2995,7 @@ static const VMStateDescription vmstate_ide_error_status = { .version_id = 2, .minimum_version_id = 1, .needed = ide_error_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(error_status, IDEBus), VMSTATE_INT64_V(retry_sector_num, 
IDEBus, 2), VMSTATE_UINT32_V(retry_nsector, IDEBus, 2), @@ -3011,12 +3008,12 @@ const VMStateDescription vmstate_ide_bus = { .name = "ide_bus", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(cmd, IDEBus), VMSTATE_UINT8(unit, IDEBus), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_ide_error_status, NULL } diff --git a/hw/ide/ich.c b/hw/ide/ich.c index d61faab5323..9b909c87f33 100644 --- a/hw/ide/ich.c +++ b/hw/ide/ich.c @@ -69,7 +69,8 @@ #include "hw/isa/isa.h" #include "sysemu/dma.h" #include "hw/ide/pci.h" -#include "ahci_internal.h" +#include "hw/ide/ahci-pci.h" +#include "ahci-internal.h" #define ICH9_MSI_CAP_OFFSET 0x80 #define ICH9_SATA_CAP_OFFSET 0xA8 @@ -83,7 +84,7 @@ static const VMStateDescription vmstate_ich9_ahci = { .name = "ich9_ahci", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, AHCIPCIState), VMSTATE_AHCI(ahci, AHCIPCIState), VMSTATE_END_OF_LIST() @@ -99,20 +100,21 @@ static void pci_ich9_reset(DeviceState *dev) static void pci_ich9_ahci_init(Object *obj) { - struct AHCIPCIState *d = ICH9_AHCI(obj); + AHCIPCIState *d = ICH9_AHCI(obj); ahci_init(&d->ahci, DEVICE(obj)); } static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp) { - struct AHCIPCIState *d; + AHCIPCIState *d; int sata_cap_offset; uint8_t *sata_cap; d = ICH9_AHCI(dev); int ret; - ahci_realize(&d->ahci, DEVICE(dev), pci_get_address_space(dev), 6); + d->ahci.ports = 6; + ahci_realize(&d->ahci, DEVICE(dev), pci_get_address_space(dev)); pci_config_set_prog_interface(dev->config, AHCI_PROGMODE_MAJOR_REV_1); @@ -154,7 +156,7 @@ static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp) static void pci_ich9_uninit(PCIDevice *dev) { - struct AHCIPCIState *d; + AHCIPCIState *d; d = ICH9_AHCI(dev); msi_uninit(dev); diff --git a/hw/ide/ide-bus.c b/hw/ide/ide-bus.c new file mode 100644 index 00000000000..37d003dd9ad --- /dev/null +++ b/hw/ide/ide-bus.c @@ -0,0 +1,111 @@ +/* + * ide bus support for qdev. + * + * Copyright (c) 2009 Gerd Hoffmann + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "sysemu/block-backend.h" +#include "sysemu/blockdev.h" +#include "sysemu/runstate.h" +#include "ide-internal.h" + +static char *idebus_get_fw_dev_path(DeviceState *dev); +static void idebus_unrealize(BusState *qdev); + +static void ide_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->get_fw_dev_path = idebus_get_fw_dev_path; + k->unrealize = idebus_unrealize; +} + +static void idebus_unrealize(BusState *bus) +{ + IDEBus *ibus = IDE_BUS(bus); + + if (ibus->vmstate) { + qemu_del_vm_change_state_handler(ibus->vmstate); + } +} + +static const TypeInfo ide_bus_info = { + .name = TYPE_IDE_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(IDEBus), + .class_init = ide_bus_class_init, +}; + +void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev, + int bus_id, int max_units) +{ + qbus_init(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL); + idebus->bus_id = bus_id; + idebus->max_units = max_units; +} + +static char *idebus_get_fw_dev_path(DeviceState *dev) +{ + char path[30]; + + snprintf(path, sizeof(path), "%s@%x", qdev_fw_name(dev), + ((IDEBus *)dev->parent_bus)->bus_id); + + return g_strdup(path); +} + +IDEDevice *ide_bus_create_drive(IDEBus *bus, int unit, DriveInfo *drive) +{ + DeviceState *dev; + + dev = qdev_new(drive->media_cd ? "ide-cd" : "ide-hd"); + qdev_prop_set_uint32(dev, "unit", unit); + qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(drive), + &error_fatal); + qdev_realize_and_unref(dev, &bus->qbus, &error_fatal); + return DO_UPCAST(IDEDevice, qdev, dev); +} + +int ide_get_geometry(BusState *bus, int unit, + int16_t *cyls, int8_t *heads, int8_t *secs) +{ + IDEState *s = &DO_UPCAST(IDEBus, qbus, bus)->ifs[unit]; + + if (s->drive_kind != IDE_HD || !s->blk) { + return -1; + } + + *cyls = s->cylinders; + *heads = s->heads; + *secs = s->sectors; + return 0; +} + +int ide_get_bios_chs_trans(BusState *bus, int unit) +{ + return DO_UPCAST(IDEBus, qbus, bus)->ifs[unit].chs_trans; +} + +static void ide_bus_register_type(void) +{ + type_register_static(&ide_bus_info); +} + +type_init(ide_bus_register_type) diff --git a/hw/ide/qdev.c b/hw/ide/ide-dev.c similarity index 65% rename from hw/ide/qdev.c rename to hw/ide/ide-dev.c index 1b3b4da01df..03f79677988 100644 --- a/hw/ide/qdev.c +++ b/hw/ide/ide-dev.c @@ -1,5 +1,5 @@ /* - * ide bus support for qdev. 
+ * IDE device functions * * Copyright (c) 2009 Gerd Hoffmann * @@ -18,74 +18,23 @@ */ #include "qemu/osdep.h" -#include "sysemu/dma.h" #include "qapi/error.h" #include "qapi/qapi-types-block.h" #include "qemu/error-report.h" -#include "qemu/main-loop.h" #include "qemu/module.h" -#include "hw/ide/internal.h" -#include "hw/qdev-properties.h" -#include "hw/qdev-properties-system.h" +#include "hw/ide/ide-dev.h" #include "sysemu/block-backend.h" #include "sysemu/blockdev.h" -#include "hw/block/block.h" #include "sysemu/sysemu.h" -#include "sysemu/runstate.h" #include "qapi/visitor.h" - -/* --------------------------------- */ - -static char *idebus_get_fw_dev_path(DeviceState *dev); -static void idebus_unrealize(BusState *qdev); +#include "ide-internal.h" static Property ide_props[] = { DEFINE_PROP_UINT32("unit", IDEDevice, unit, -1), + DEFINE_PROP_BOOL("win2k-install-hack", IDEDevice, win2k_install_hack, false), DEFINE_PROP_END_OF_LIST(), }; -static void ide_bus_class_init(ObjectClass *klass, void *data) -{ - BusClass *k = BUS_CLASS(klass); - - k->get_fw_dev_path = idebus_get_fw_dev_path; - k->unrealize = idebus_unrealize; -} - -static void idebus_unrealize(BusState *bus) -{ - IDEBus *ibus = IDE_BUS(bus); - - if (ibus->vmstate) { - qemu_del_vm_change_state_handler(ibus->vmstate); - } -} - -static const TypeInfo ide_bus_info = { - .name = TYPE_IDE_BUS, - .parent = TYPE_BUS, - .instance_size = sizeof(IDEBus), - .class_init = ide_bus_class_init, -}; - -void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev, - int bus_id, int max_units) -{ - qbus_init(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL); - idebus->bus_id = bus_id; - idebus->max_units = max_units; -} - -static char *idebus_get_fw_dev_path(DeviceState *dev) -{ - char path[30]; - - snprintf(path, sizeof(path), "%s@%x", qdev_fw_name(dev), - ((IDEBus*)dev->parent_bus)->bus_id); - - return g_strdup(path); -} - static void ide_qdev_realize(DeviceState *qdev, Error **errp) { IDEDevice *dev = IDE_DEVICE(qdev); @@ -124,45 +73,7 @@ static void ide_qdev_realize(DeviceState *qdev, Error **errp) dc->realize(dev, errp); } -IDEDevice *ide_bus_create_drive(IDEBus *bus, int unit, DriveInfo *drive) -{ - DeviceState *dev; - - dev = qdev_new(drive->media_cd ? 
"ide-cd" : "ide-hd"); - qdev_prop_set_uint32(dev, "unit", unit); - qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(drive), - &error_fatal); - qdev_realize_and_unref(dev, &bus->qbus, &error_fatal); - return DO_UPCAST(IDEDevice, qdev, dev); -} - -int ide_get_geometry(BusState *bus, int unit, - int16_t *cyls, int8_t *heads, int8_t *secs) -{ - IDEState *s = &DO_UPCAST(IDEBus, qbus, bus)->ifs[unit]; - - if (s->drive_kind != IDE_HD || !s->blk) { - return -1; - } - - *cyls = s->cylinders; - *heads = s->heads; - *secs = s->sectors; - return 0; -} - -int ide_get_bios_chs_trans(BusState *bus, int unit) -{ - return DO_UPCAST(IDEBus, qbus, bus)->ifs[unit].chs_trans; -} - -/* --------------------------------- */ - -typedef struct IDEDrive { - IDEDevice dev; -} IDEDrive; - -static void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp) +void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp) { IDEBus *bus = DO_UPCAST(IDEBus, qbus, dev->qdev.parent_bus); IDEState *s = bus->ifs + dev->unit; @@ -208,10 +119,7 @@ static void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp) return; } - if (ide_init_drive(s, dev->conf.blk, kind, - dev->version, dev->serial, dev->model, dev->wwn, - dev->conf.cyls, dev->conf.heads, dev->conf.secs, - dev->chs_trans, errp) < 0) { + if (ide_init_drive(s, dev, kind, errp) < 0) { return; } @@ -283,19 +191,6 @@ static void ide_cd_realize(IDEDevice *dev, Error **errp) ide_dev_initfn(dev, IDE_CD, errp); } -static void ide_cf_realize(IDEDevice *dev, Error **errp) -{ - ide_dev_initfn(dev, IDE_CFATA, errp); -} - -#define DEFINE_IDE_DEV_PROPERTIES() \ - DEFINE_BLOCK_PROPERTIES(IDEDrive, dev.conf), \ - DEFINE_BLOCK_ERROR_PROPERTIES(IDEDrive, dev.conf), \ - DEFINE_PROP_STRING("ver", IDEDrive, dev.version), \ - DEFINE_PROP_UINT64("wwn", IDEDrive, dev.wwn, 0), \ - DEFINE_PROP_STRING("serial", IDEDrive, dev.serial),\ - DEFINE_PROP_STRING("model", IDEDrive, dev.model) - static Property ide_hd_properties[] = { DEFINE_IDE_DEV_PROPERTIES(), DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf), @@ -346,32 +241,6 @@ static const TypeInfo ide_cd_info = { .class_init = ide_cd_class_init, }; -static Property ide_cf_properties[] = { - DEFINE_IDE_DEV_PROPERTIES(), - DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf), - DEFINE_PROP_BIOS_CHS_TRANS("bios-chs-trans", - IDEDrive, dev.chs_trans, BIOS_ATA_TRANSLATION_AUTO), - DEFINE_PROP_END_OF_LIST(), -}; - -static void ide_cf_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - IDEDeviceClass *k = IDE_DEVICE_CLASS(klass); - - k->realize = ide_cf_realize; - dc->fw_name = "drive"; - dc->desc = "virtual CompactFlash card"; - device_class_set_props(dc, ide_cf_properties); -} - -static const TypeInfo ide_cf_info = { - .name = "ide-cf", - .parent = TYPE_IDE_DEVICE, - .instance_size = sizeof(IDEDrive), - .class_init = ide_cf_class_init, -}; - static void ide_device_class_init(ObjectClass *klass, void *data) { DeviceClass *k = DEVICE_CLASS(klass); @@ -393,10 +262,8 @@ static const TypeInfo ide_device_type_info = { static void ide_register_types(void) { - type_register_static(&ide_bus_info); type_register_static(&ide_hd_info); type_register_static(&ide_cd_info); - type_register_static(&ide_cf_info); type_register_static(&ide_device_type_info); } diff --git a/include/hw/ide/internal.h b/hw/ide/ide-internal.h similarity index 77% rename from include/hw/ide/internal.h rename to hw/ide/ide-internal.h index 3bdcc75597d..0d64805da20 100644 --- a/include/hw/ide/internal.h +++ 
b/hw/ide/ide-internal.h @@ -4,27 +4,13 @@ /* * QEMU IDE Emulation -- internal header file * only files in hw/ide/ are supposed to include this file. - * non-internal declarations are in hw/ide.h + * non-internal declarations are in hw/include/ide-*.h */ -#include "hw/ide.h" -#include "sysemu/dma.h" -#include "hw/block/block.h" -#include "exec/ioport.h" +#include "hw/ide/ide-bus.h" /* debug IDE devices */ #define USE_DMA_CDROM -#include "qom/object.h" - -typedef struct IDEDevice IDEDevice; -typedef struct IDEState IDEState; -typedef struct IDEDMA IDEDMA; -typedef struct IDEDMAOps IDEDMAOps; - -#define TYPE_IDE_BUS "IDE" -OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) - -#define MAX_IDE_DEVS 2 /* Device/Head ("select") Register */ #define ATA_DEV_SELECT 0x10 @@ -328,30 +314,6 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) #define SMART_DISABLE 0xd9 #define SMART_STATUS 0xda -typedef enum { IDE_HD, IDE_CD, IDE_CFATA } IDEDriveKind; - -typedef void EndTransferFunc(IDEState *); - -typedef void DMAStartFunc(const IDEDMA *, IDEState *, BlockCompletionFunc *); -typedef void DMAVoidFunc(const IDEDMA *); -typedef int DMAIntFunc(const IDEDMA *, bool); -typedef int32_t DMAInt32Func(const IDEDMA *, int32_t len); -typedef void DMAu32Func(const IDEDMA *, uint32_t); -typedef void DMAStopFunc(const IDEDMA *, bool); - -struct unreported_events { - bool eject_request; - bool new_media; -}; - -enum ide_dma_cmd { - IDE_DMA_READ = 0, - IDE_DMA_WRITE, - IDE_DMA_TRIM, - IDE_DMA_ATAPI, - IDE_DMA__COUNT -}; - extern const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT]; extern const MemoryRegionPortio ide_portio_list[]; @@ -369,166 +331,6 @@ typedef struct IDEBufferedRequest { bool orphaned; } IDEBufferedRequest; -/* NOTE: IDEState represents in fact one drive */ -struct IDEState { - IDEBus *bus; - uint8_t unit; - /* ide config */ - IDEDriveKind drive_kind; - int drive_heads, drive_sectors; - int cylinders, heads, sectors, chs_trans; - int64_t nb_sectors; - int mult_sectors; - int identify_set; - uint8_t identify_data[512]; - int drive_serial; - char drive_serial_str[21]; - char drive_model_str[41]; - uint64_t wwn; - /* ide regs */ - uint8_t feature; - uint8_t error; - uint32_t nsector; - uint8_t sector; - uint8_t lcyl; - uint8_t hcyl; - /* other part of tf for lba48 support */ - uint8_t hob_feature; - uint8_t hob_nsector; - uint8_t hob_sector; - uint8_t hob_lcyl; - uint8_t hob_hcyl; - - uint8_t select; - uint8_t status; - - bool io8; - bool reset_reverts; - - /* set for lba48 access */ - uint8_t lba48; - BlockBackend *blk; - char version[9]; - /* ATAPI specific */ - struct unreported_events events; - uint8_t sense_key; - uint8_t asc; - bool tray_open; - bool tray_locked; - uint8_t cdrom_changed; - int packet_transfer_size; - int elementary_transfer_size; - int32_t io_buffer_index; - int lba; - int cd_sector_size; - int atapi_dma; /* true if dma is requested for the packet cmd */ - BlockAcctCookie acct; - BlockAIOCB *pio_aiocb; - QEMUIOVector qiov; - QLIST_HEAD(, IDEBufferedRequest) buffered_requests; - /* ATA DMA state */ - uint64_t io_buffer_offset; - int32_t io_buffer_size; - QEMUSGList sg; - /* PIO transfer handling */ - int req_nb_sectors; /* number of sectors per interrupt */ - EndTransferFunc *end_transfer_func; - uint8_t *data_ptr; - uint8_t *data_end; - uint8_t *io_buffer; - /* PIO save/restore */ - int32_t io_buffer_total_len; - int32_t cur_io_buffer_offset; - int32_t cur_io_buffer_len; - uint8_t end_transfer_fn_idx; - QEMUTimer *sector_write_timer; /* only used for win2k install hack */ - uint32_t irq_count; /* 
counts IRQs when using win2k install hack */ - /* CF-ATA extended error */ - uint8_t ext_error; - /* CF-ATA metadata storage */ - uint32_t mdata_size; - uint8_t *mdata_storage; - int media_changed; - enum ide_dma_cmd dma_cmd; - /* SMART */ - uint8_t smart_enabled; - uint8_t smart_autosave; - int smart_errors; - uint8_t smart_selftest_count; - uint8_t *smart_selftest_data; - /* AHCI */ - int ncq_queues; -}; - -struct IDEDMAOps { - DMAStartFunc *start_dma; - DMAVoidFunc *pio_transfer; - DMAInt32Func *prepare_buf; - DMAu32Func *commit_buf; - DMAIntFunc *rw_buf; - DMAVoidFunc *restart; - DMAVoidFunc *restart_dma; - DMAStopFunc *set_inactive; - DMAVoidFunc *cmd_done; - DMAVoidFunc *reset; -}; - -struct IDEDMA { - const struct IDEDMAOps *ops; - QEMUIOVector qiov; - BlockAIOCB *aiocb; -}; - -struct IDEBus { - BusState qbus; - IDEDevice *master; - IDEDevice *slave; - IDEState ifs[2]; - QEMUBH *bh; - - int bus_id; - int max_units; - IDEDMA *dma; - uint8_t unit; - uint8_t cmd; - qemu_irq irq; /* bus output */ - - int error_status; - uint8_t retry_unit; - int64_t retry_sector_num; - uint32_t retry_nsector; - PortioList portio_list; - PortioList portio2_list; - VMChangeStateEntry *vmstate; -}; - -#define TYPE_IDE_DEVICE "ide-device" -OBJECT_DECLARE_TYPE(IDEDevice, IDEDeviceClass, IDE_DEVICE) - -struct IDEDeviceClass { - DeviceClass parent_class; - void (*realize)(IDEDevice *dev, Error **errp); -}; - -struct IDEDevice { - DeviceState qdev; - uint32_t unit; - BlockConf conf; - int chs_trans; - char *version; - char *serial; - char *model; - uint64_t wwn; - /* - * 0x0000 - rotation rate not reported - * 0x0001 - non-rotating medium (SSD) - * 0x0002-0x0400 - reserved - * 0x0401-0xffe - rotations per minute - * 0xffff - reserved - */ - uint16_t rotation_rate; -}; - /* These are used for the error_status field of IDEBus */ #define IDE_RETRY_MASK 0xf8 #define IDE_RETRY_DMA 0x08 @@ -614,11 +416,7 @@ uint32_t ide_data_readw(void *opaque, uint32_t addr); void ide_data_writel(void *opaque, uint32_t addr, uint32_t val); uint32_t ide_data_readl(void *opaque, uint32_t addr); -int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind, - const char *version, const char *serial, const char *model, - uint64_t wwn, - uint32_t cylinders, uint32_t heads, uint32_t secs, - int chs_trans, Error **errp); +int ide_init_drive(IDEState *s, IDEDevice *dev, IDEDriveKind kind, Error **errp); void ide_exit(IDEState *s); void ide_bus_init_output_irq(IDEBus *bus, qemu_irq irq_out); int ide_init_ioport(IDEBus *bus, ISADevice *isa, int iobase, int iobase2); @@ -645,15 +443,6 @@ void ide_cancel_dma_sync(IDEState *s); void ide_atapi_cmd(IDEState *s); void ide_atapi_cmd_reply_end(IDEState *s); -/* hw/ide/qdev.c */ -void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev, - int bus_id, int max_units); -IDEDevice *ide_bus_create_drive(IDEBus *bus, int unit, DriveInfo *drive); - -int ide_get_geometry(BusState *bus, int unit, - int16_t *cyls, int8_t *heads, int8_t *secs); -int ide_get_bios_chs_trans(BusState *bus, int unit); - int ide_handle_rw_error(IDEState *s, int error, int op); #endif /* HW_IDE_INTERNAL_H */ diff --git a/hw/ide/ioport.c b/hw/ide/ioport.c index 0b283ac7830..a2f457f0bd0 100644 --- a/hw/ide/ioport.c +++ b/hw/ide/ioport.c @@ -25,7 +25,7 @@ #include "qemu/osdep.h" #include "hw/isa/isa.h" -#include "hw/ide/internal.h" +#include "ide-internal.h" #include "trace.h" int ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2) diff --git a/hw/ide/isa.c b/hw/ide/isa.c index 
ea60c081160..934c45887cc 100644 --- a/hw/ide/isa.c +++ b/hw/ide/isa.c @@ -32,8 +32,8 @@ #include "sysemu/dma.h" #include "hw/ide/isa.h" -#include "hw/ide/internal.h" #include "qom/object.h" +#include "ide-internal.h" /***********************************************************/ /* ISA IDE definitions */ @@ -58,7 +58,7 @@ static const VMStateDescription vmstate_ide_isa = { .name = "isa-ide", .version_id = 3, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_IDE_BUS(bus, ISAIDEState), VMSTATE_IDE_DRIVES(bus.ifs, ISAIDEState), VMSTATE_END_OF_LIST() diff --git a/hw/ide/macio.c b/hw/ide/macio.c index dca1cc9efc1..aca90d04f0e 100644 --- a/hw/ide/macio.c +++ b/hw/ide/macio.c @@ -33,7 +33,7 @@ #include "sysemu/block-backend.h" #include "sysemu/dma.h" -#include "hw/ide/internal.h" +#include "ide-internal.h" /* debug MACIO */ // #define DEBUG_MACIO @@ -361,7 +361,7 @@ static const VMStateDescription vmstate_pmac = { .name = "ide", .version_id = 5, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_IDE_BUS(bus, MACIOIDEState), VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState), VMSTATE_BOOL(dma_active, MACIOIDEState), diff --git a/hw/ide/meson.build b/hw/ide/meson.build index e050eef9423..d09705cac03 100644 --- a/hw/ide/meson.build +++ b/hw/ide/meson.build @@ -1,14 +1,16 @@ system_ss.add(when: 'CONFIG_AHCI', if_true: files('ahci.c')) system_ss.add(when: 'CONFIG_AHCI_ICH9', if_true: files('ich.c')) system_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('ahci-allwinner.c')) +system_ss.add(when: 'CONFIG_IDE_BUS', if_true: files('ide-bus.c')) +system_ss.add(when: 'CONFIG_IDE_CF', if_true: files('cf.c')) system_ss.add(when: 'CONFIG_IDE_CMD646', if_true: files('cmd646.c')) system_ss.add(when: 'CONFIG_IDE_CORE', if_true: files('core.c', 'atapi.c')) +system_ss.add(when: 'CONFIG_IDE_DEV', if_true: files('ide-dev.c')) system_ss.add(when: 'CONFIG_IDE_ISA', if_true: files('isa.c', 'ioport.c')) system_ss.add(when: 'CONFIG_IDE_MACIO', if_true: files('macio.c')) system_ss.add(when: 'CONFIG_IDE_MMIO', if_true: files('mmio.c')) system_ss.add(when: 'CONFIG_IDE_PCI', if_true: files('pci.c')) system_ss.add(when: 'CONFIG_IDE_PIIX', if_true: files('piix.c', 'ioport.c')) -system_ss.add(when: 'CONFIG_IDE_QDEV', if_true: files('qdev.c')) system_ss.add(when: 'CONFIG_IDE_SII3112', if_true: files('sii3112.c')) system_ss.add(when: 'CONFIG_IDE_VIA', if_true: files('via.c')) system_ss.add(when: 'CONFIG_MICRODRIVE', if_true: files('microdrive.c')) diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c index 981cfbd97fd..3bb152b5d33 100644 --- a/hw/ide/microdrive.c +++ b/hw/ide/microdrive.c @@ -31,8 +31,8 @@ #include "sysemu/dma.h" #include "hw/irq.h" -#include "hw/ide/internal.h" #include "qom/object.h" +#include "ide-internal.h" #define TYPE_MICRODRIVE "microdrive" OBJECT_DECLARE_SIMPLE_TYPE(MicroDriveState, MICRODRIVE) @@ -336,7 +336,7 @@ static const VMStateDescription vmstate_microdrive = { .name = "microdrive", .version_id = 3, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(opt, MicroDriveState), VMSTATE_UINT8(stat, MicroDriveState), VMSTATE_UINT8(pins, MicroDriveState), diff --git a/hw/ide/mmio.c b/hw/ide/mmio.c index 3aeacab3bb2..87362813056 100644 --- a/hw/ide/mmio.c +++ b/hw/ide/mmio.c @@ -30,8 +30,8 @@ #include "sysemu/dma.h" #include "hw/ide/mmio.h" -#include "hw/ide/internal.h" #include "hw/qdev-properties.h" +#include "ide-internal.h" 
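
The include changes above all follow one layout rule: sources inside hw/ide/ switch from "hw/ide/internal.h" to the directory-local "ide-internal.h", while everything else is expected to go through the public headers this series references (hw/ide/ide-bus.h and hw/ide/ide-dev.h). A rough sketch of a board outside hw/ide/ under that split; the board file name and the exact contents of the public headers are assumptions here, only the ide_bus_init()/ide_bus_create_drive() signatures are taken from ide-bus.c above:

/* hw/arm/hypothetical-board.c -- outside hw/ide/, so public headers only */
#include "qemu/osdep.h"
#include "sysemu/blockdev.h"      /* DriveInfo */
#include "hw/ide/ide-bus.h"       /* assumed to declare IDEBus, ide_bus_init(), ide_bus_create_drive() */
#include "hw/ide/ide-dev.h"       /* assumed to declare IDEDevice */

static void board_ide_init(DeviceState *dev, IDEBus *bus, DriveInfo *di)
{
    /* bus 0 with the usual two units (master/slave) */
    ide_bus_init(bus, sizeof(*bus), dev, 0, 2);
    if (di) {
        /* plugs an ide-hd or ide-cd depending on the drive's media */
        ide_bus_create_drive(bus, 0, di);
    }
}

This mirrors the Kconfig rework earlier in the series, where IDE_BUS and IDE_DEV are now separate symbols that can be selected independently.
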
/***********************************************************/ /* MMIO based ide port @@ -110,7 +110,7 @@ static const VMStateDescription vmstate_ide_mmio = { .name = "mmio-ide", .version_id = 3, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_IDE_BUS(bus, MMIOIDEState), VMSTATE_IDE_DRIVES(bus.ifs, MMIOIDEState), VMSTATE_END_OF_LIST() diff --git a/hw/ide/pci.c b/hw/ide/pci.c index 810c6b6d980..4675d079a17 100644 --- a/hw/ide/pci.c +++ b/hw/ide/pci.c @@ -31,6 +31,7 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "hw/ide/pci.h" +#include "ide-internal.h" #include "trace.h" #define BMDMA_PAGE_SIZE 4096 @@ -501,7 +502,7 @@ static const VMStateDescription vmstate_bmdma_current = { .version_id = 1, .minimum_version_id = 1, .needed = ide_bmdma_current_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cur_addr, BMDMAState), VMSTATE_UINT32(cur_prd_last, BMDMAState), VMSTATE_UINT32(cur_prd_addr, BMDMAState), @@ -515,7 +516,7 @@ static const VMStateDescription vmstate_bmdma_status = { .version_id = 1, .minimum_version_id = 1, .needed = ide_bmdma_status_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(status, BMDMAState), VMSTATE_END_OF_LIST() } @@ -526,7 +527,7 @@ static const VMStateDescription vmstate_bmdma = { .version_id = 3, .minimum_version_id = 0, .pre_save = ide_bmdma_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(cmd, BMDMAState), VMSTATE_UINT8(migration_compat_status, BMDMAState), VMSTATE_UINT32(addr, BMDMAState), @@ -535,7 +536,7 @@ static const VMStateDescription vmstate_bmdma = { VMSTATE_UINT8(migration_retry_unit, BMDMAState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_bmdma_current, &vmstate_bmdma_status, NULL @@ -562,7 +563,7 @@ const VMStateDescription vmstate_ide_pci = { .version_id = 3, .minimum_version_id = 0, .post_load = ide_pci_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState), VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0, vmstate_bmdma, BMDMAState), diff --git a/hw/ide/piix.c b/hw/ide/piix.c index 4e5e12935f5..80efc633d3c 100644 --- a/hw/ide/piix.c +++ b/hw/ide/piix.c @@ -32,6 +32,7 @@ #include "hw/pci/pci.h" #include "hw/ide/piix.h" #include "hw/ide/pci.h" +#include "ide-internal.h" #include "trace.h" static uint64_t bmdma_read(void *opaque, hwaddr addr, unsigned size) diff --git a/hw/ide/sii3112.c b/hw/ide/sii3112.c index 63dc4a0494f..af17384ff20 100644 --- a/hw/ide/sii3112.c +++ b/hw/ide/sii3112.c @@ -17,6 +17,7 @@ #include "qemu/module.h" #include "trace.h" #include "qom/object.h" +#include "ide-internal.h" #define TYPE_SII3112_PCI "sii3112" OBJECT_DECLARE_SIMPLE_TYPE(SiI3112PCIState, SII3112_PCI) diff --git a/hw/ide/via.c b/hw/ide/via.c index 3f3c484253d..a32f56b0e79 100644 --- a/hw/ide/via.c +++ b/hw/ide/via.c @@ -33,6 +33,7 @@ #include "hw/isa/vt82c686.h" #include "hw/ide/pci.h" #include "hw/irq.h" +#include "ide-internal.h" #include "trace.h" static uint64_t bmdma_read(void *opaque, hwaddr addr, diff --git a/hw/input/adb-kbd.c b/hw/input/adb-kbd.c index e21edf9acd4..758fa6d2676 100644 --- a/hw/input/adb-kbd.c +++ b/hw/input/adb-kbd.c @@ -332,7 +332,7 @@ static const VMStateDescription vmstate_adb_kbd = { .name = "adb_kbd", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = 
(const VMStateField[]) { VMSTATE_STRUCT(parent_obj, KBDState, 0, vmstate_adb_device, ADBDevice), VMSTATE_BUFFER(data, KBDState), VMSTATE_INT32(rptr, KBDState), diff --git a/hw/input/adb-mouse.c b/hw/input/adb-mouse.c index e6b341f0280..144a0ccce71 100644 --- a/hw/input/adb-mouse.c +++ b/hw/input/adb-mouse.c @@ -217,7 +217,7 @@ static const VMStateDescription vmstate_adb_mouse = { .name = "adb_mouse", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, MouseState, 0, vmstate_adb_device, ADBDevice), VMSTATE_INT32(buttons_state, MouseState), diff --git a/hw/input/adb.c b/hw/input/adb.c index 8aed0da2cd5..98f39b4281a 100644 --- a/hw/input/adb.c +++ b/hw/input/adb.c @@ -221,7 +221,7 @@ static const VMStateDescription vmstate_adb_bus = { .name = "adb_bus", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(autopoll_timer, ADBBusState), VMSTATE_BOOL(autopoll_enabled, ADBBusState), VMSTATE_UINT8(autopoll_rate_ms, ADBBusState), @@ -231,9 +231,9 @@ static const VMStateDescription vmstate_adb_bus = { } }; -static void adb_bus_reset(BusState *qbus) +static void adb_bus_reset_hold(Object *obj) { - ADBBusState *adb_bus = ADB_BUS(qbus); + ADBBusState *adb_bus = ADB_BUS(obj); adb_bus->autopoll_enabled = false; adb_bus->autopoll_mask = 0xffff; @@ -262,10 +262,11 @@ static void adb_bus_unrealize(BusState *qbus) static void adb_bus_class_init(ObjectClass *klass, void *data) { BusClass *k = BUS_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); k->realize = adb_bus_realize; k->unrealize = adb_bus_unrealize; - k->reset = adb_bus_reset; + rc->phases.hold = adb_bus_reset_hold; } static const TypeInfo adb_bus_type_info = { @@ -279,7 +280,7 @@ const VMStateDescription vmstate_adb_device = { .name = "adb_device", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(devaddr, ADBDevice), VMSTATE_INT32(handler, ADBDevice), VMSTATE_END_OF_LIST() diff --git a/hw/input/ads7846.c b/hw/input/ads7846.c index 91116c6bdbd..cde38922165 100644 --- a/hw/input/ads7846.c +++ b/hw/input/ads7846.c @@ -130,7 +130,7 @@ static const VMStateDescription vmstate_ads7846 = { .version_id = 1, .minimum_version_id = 1, .post_load = ads7856_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SSI_PERIPHERAL(ssidev, ADS7846State), VMSTATE_INT32_ARRAY(input, ADS7846State, 8), VMSTATE_INT32(noise, ADS7846State), diff --git a/hw/input/hid.c b/hw/input/hid.c index b8e85374cab..76bedc18443 100644 --- a/hw/input/hid.c +++ b/hw/input/hid.c @@ -581,7 +581,7 @@ static const VMStateDescription vmstate_hid_ptr_queue = { .name = "HIDPointerEventQueue", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(xdx, HIDPointerEvent), VMSTATE_INT32(ydy, HIDPointerEvent), VMSTATE_INT32(dz, HIDPointerEvent), @@ -595,7 +595,7 @@ const VMStateDescription vmstate_hid_ptr_device = { .version_id = 1, .minimum_version_id = 1, .post_load = hid_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(ptr.queue, HIDState, QUEUE_LENGTH, 0, vmstate_hid_ptr_queue, HIDPointerEvent), VMSTATE_UINT32(head, HIDState), @@ -611,7 +611,7 @@ const VMStateDescription vmstate_hid_keyboard_device = { .version_id = 1, .minimum_version_id = 1, .post_load = hid_post_load, - .fields = (VMStateField[]) { + 
.fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(kbd.keycodes, HIDState, QUEUE_LENGTH), VMSTATE_UINT32(head, HIDState), VMSTATE_UINT32(n, HIDState), diff --git a/hw/input/lasips2.c b/hw/input/lasips2.c index 6075121b72c..d9f8c36778d 100644 --- a/hw/input/lasips2.c +++ b/hw/input/lasips2.c @@ -39,7 +39,7 @@ static const VMStateDescription vmstate_lasips2_port = { .name = "lasips2-port", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(control, LASIPS2Port), VMSTATE_UINT8(buf, LASIPS2Port), VMSTATE_BOOL(loopback_rbne, LASIPS2Port), @@ -51,7 +51,7 @@ static const VMStateDescription vmstate_lasips2 = { .name = "lasips2", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(int_status, LASIPS2State), VMSTATE_STRUCT(kbd_port.parent_obj, LASIPS2State, 1, vmstate_lasips2_port, LASIPS2Port), diff --git a/hw/input/lm832x.c b/hw/input/lm832x.c index 19a646d9bb4..59e5567afd5 100644 --- a/hw/input/lm832x.c +++ b/hw/input/lm832x.c @@ -441,7 +441,7 @@ static const VMStateDescription vmstate_lm_kbd = { .version_id = 0, .minimum_version_id = 0, .post_load = lm_kbd_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(parent_obj, LM823KbdState), VMSTATE_UINT8(i2c_dir, LM823KbdState), VMSTATE_UINT8(i2c_cycle, LM823KbdState), diff --git a/hw/input/meson.build b/hw/input/meson.build index 640556bbbcc..3cc8ab85f0c 100644 --- a/hw/input/meson.build +++ b/hw/input/meson.build @@ -11,7 +11,6 @@ system_ss.add(when: 'CONFIG_TSC2005', if_true: files('tsc2005.c')) system_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input.c')) system_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input-hid.c')) system_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host.c')) -system_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input.c')) system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_keypad.c')) system_ss.add(when: 'CONFIG_TSC210X', if_true: files('tsc210x.c')) diff --git a/hw/input/pckbd.c b/hw/input/pckbd.c index b92b63bedca..74f10b640fd 100644 --- a/hw/input/pckbd.c +++ b/hw/input/pckbd.c @@ -510,7 +510,7 @@ static const VMStateDescription vmstate_kbd_outport = { .minimum_version_id = 1, .post_load = kbd_outport_post_load, .needed = kbd_outport_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(outport, KBDState), VMSTATE_END_OF_LIST() } @@ -552,7 +552,7 @@ static const VMStateDescription vmstate_kbd_extended_state = { .post_load = kbd_extended_state_post_load, .pre_save = kbd_extended_state_pre_save, .needed = kbd_extended_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(migration_flags, KBDState), VMSTATE_UINT32(obsrc, KBDState), VMSTATE_UINT8(obdata, KBDState), @@ -619,14 +619,14 @@ static const VMStateDescription vmstate_kbd = { .pre_load = kbd_pre_load, .post_load = kbd_post_load, .pre_save = kbd_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(write_cmd, KBDState), VMSTATE_UINT8(status, KBDState), VMSTATE_UINT8(mode, KBDState), VMSTATE_UINT8(pending_tmp, KBDState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_kbd_outport, &vmstate_kbd_extended_state, NULL @@ -745,7 +745,7 @@ static const VMStateDescription vmstate_kbd_mmio = { 
.name = "pckbd-mmio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(kbd, MMIOKBDState, 0, vmstate_kbd, KBDState), VMSTATE_END_OF_LIST() } @@ -777,16 +777,11 @@ void i8042_isa_mouse_fake_event(ISAKBDState *isa) ps2_mouse_fake_event(&s->ps2mouse); } -void i8042_setup_a20_line(ISADevice *dev, qemu_irq a20_out) -{ - qdev_connect_gpio_out_named(DEVICE(dev), I8042_A20_LINE, 0, a20_out); -} - static const VMStateDescription vmstate_kbd_isa = { .name = "pckbd", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(kbd, ISAKBDState, 0, vmstate_kbd, KBDState), VMSTATE_END_OF_LIST() } diff --git a/hw/input/pl050.c b/hw/input/pl050.c index ec5e19285e3..6519e260ed5 100644 --- a/hw/input/pl050.c +++ b/hw/input/pl050.c @@ -30,7 +30,7 @@ static const VMStateDescription vmstate_pl050 = { .name = "pl050", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cr, PL050State), VMSTATE_UINT32(clk, PL050State), VMSTATE_UINT32(last, PL050State), diff --git a/hw/input/ps2.c b/hw/input/ps2.c index c8fd23cf360..00b695a0b97 100644 --- a/hw/input/ps2.c +++ b/hw/input/ps2.c @@ -1093,7 +1093,7 @@ static const VMStateDescription vmstate_ps2_common = { .name = "PS2 Common State", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(write_cmd, PS2State), VMSTATE_INT32(queue.rptr, PS2State), VMSTATE_INT32(queue.wptr, PS2State), @@ -1124,7 +1124,7 @@ static const VMStateDescription vmstate_ps2_keyboard_ledstate = { .minimum_version_id = 2, .post_load = ps2_kbd_ledstate_post_load, .needed = ps2_keyboard_ledstate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(ledstate, PS2KbdState), VMSTATE_END_OF_LIST() } @@ -1141,7 +1141,7 @@ static const VMStateDescription vmstate_ps2_keyboard_need_high_bit = { .version_id = 1, .minimum_version_id = 1, .needed = ps2_keyboard_need_high_bit_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(need_high_bit, PS2KbdState), VMSTATE_END_OF_LIST() } @@ -1158,7 +1158,7 @@ static bool ps2_keyboard_cqueue_needed(void *opaque) static const VMStateDescription vmstate_ps2_keyboard_cqueue = { .name = "ps2kbd/command_reply_queue", .needed = ps2_keyboard_cqueue_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(parent_obj.queue.cwptr, PS2KbdState), VMSTATE_END_OF_LIST() } @@ -1183,7 +1183,7 @@ static const VMStateDescription vmstate_ps2_keyboard = { .version_id = 3, .minimum_version_id = 2, .post_load = ps2_kbd_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, PS2KbdState, 0, vmstate_ps2_common, PS2State), VMSTATE_INT32(scan_enabled, PS2KbdState), @@ -1191,7 +1191,7 @@ static const VMStateDescription vmstate_ps2_keyboard = { VMSTATE_INT32_V(scancode_set, PS2KbdState, 3), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_ps2_keyboard_ledstate, &vmstate_ps2_keyboard_need_high_bit, &vmstate_ps2_keyboard_cqueue, @@ -1214,7 +1214,7 @@ static const VMStateDescription vmstate_ps2_mouse = { .version_id = 2, .minimum_version_id = 2, .post_load = ps2_mouse_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, 
PS2MouseState, 0, vmstate_ps2_common, PS2State), VMSTATE_UINT8(mouse_status, PS2MouseState), diff --git a/hw/input/pxa2xx_keypad.c b/hw/input/pxa2xx_keypad.c index 3dd03e8c9f4..3858648d9f6 100644 --- a/hw/input/pxa2xx_keypad.c +++ b/hw/input/pxa2xx_keypad.c @@ -288,7 +288,7 @@ static const VMStateDescription vmstate_pxa2xx_keypad = { .name = "pxa2xx_keypad", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(kpc, PXA2xxKeyPadState), VMSTATE_UINT32(kpdk, PXA2xxKeyPadState), VMSTATE_UINT32(kprec, PXA2xxKeyPadState), diff --git a/hw/input/stellaris_gamepad.c b/hw/input/stellaris_gamepad.c index 9dfa620e29a..17ee42b9fce 100644 --- a/hw/input/stellaris_gamepad.c +++ b/hw/input/stellaris_gamepad.c @@ -35,7 +35,7 @@ static const VMStateDescription vmstate_stellaris_gamepad = { .name = "stellaris_gamepad", .version_id = 4, .minimum_version_id = 4, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(pressed, StellarisGamepad, num_buttons, 0, vmstate_info_uint8, uint8_t), VMSTATE_END_OF_LIST() diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c index db2b80e35f5..941f163d364 100644 --- a/hw/input/tsc2005.c +++ b/hw/input/tsc2005.c @@ -454,7 +454,7 @@ static const VMStateDescription vmstate_tsc2005 = { .version_id = 2, .minimum_version_id = 2, .post_load = tsc2005_post_load, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_BOOL(pressure, TSC2005State), VMSTATE_BOOL(irq, TSC2005State), VMSTATE_BOOL(command, TSC2005State), diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c index 950506fb382..c4e32c7a42f 100644 --- a/hw/input/tsc210x.c +++ b/hw/input/tsc210x.c @@ -1017,7 +1017,7 @@ static int tsc210x_post_load(void *opaque, int version_id) return 0; } -static VMStateField vmstatefields_tsc210x[] = { +static const VMStateField vmstatefields_tsc210x[] = { VMSTATE_BOOL(enabled, TSC210xState), VMSTATE_BOOL(host_mode, TSC210xState), VMSTATE_BOOL(irq, TSC210xState), diff --git a/hw/input/vhost-user-input.c b/hw/input/vhost-user-input.c deleted file mode 100644 index 4ee3542106e..00000000000 --- a/hw/input/vhost-user-input.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * This work is licensed under the terms of the GNU GPL, version 2 or - * (at your option) any later version. See the COPYING file in the - * top-level directory. 
- */ - -#include "qemu/osdep.h" -#include "qemu/error-report.h" -#include "qapi/error.h" - -#include "hw/virtio/virtio-input.h" - -static int vhost_input_config_change(struct vhost_dev *dev) -{ - error_report("vhost-user-input: unhandled backend config change"); - return -1; -} - -static const VhostDevConfigOps config_ops = { - .vhost_dev_config_notifier = vhost_input_config_change, -}; - -static void vhost_input_realize(DeviceState *dev, Error **errp) -{ - VHostUserInput *vhi = VHOST_USER_INPUT(dev); - VirtIOInput *vinput = VIRTIO_INPUT(dev); - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - - vhost_dev_set_config_notifier(&vhi->vhost->dev, &config_ops); - vinput->cfg_size = sizeof_field(virtio_input_config, u); - if (vhost_user_backend_dev_init(vhi->vhost, vdev, 2, errp) == -1) { - return; - } -} - -static void vhost_input_change_active(VirtIOInput *vinput) -{ - VHostUserInput *vhi = VHOST_USER_INPUT(vinput); - - if (vinput->active) { - vhost_user_backend_start(vhi->vhost); - } else { - vhost_user_backend_stop(vhi->vhost); - } -} - -static void vhost_input_get_config(VirtIODevice *vdev, uint8_t *config_data) -{ - VirtIOInput *vinput = VIRTIO_INPUT(vdev); - VHostUserInput *vhi = VHOST_USER_INPUT(vdev); - Error *local_err = NULL; - int ret; - - memset(config_data, 0, vinput->cfg_size); - - ret = vhost_dev_get_config(&vhi->vhost->dev, config_data, vinput->cfg_size, - &local_err); - if (ret) { - error_report_err(local_err); - return; - } -} - -static void vhost_input_set_config(VirtIODevice *vdev, - const uint8_t *config_data) -{ - VHostUserInput *vhi = VHOST_USER_INPUT(vdev); - int ret; - - ret = vhost_dev_set_config(&vhi->vhost->dev, config_data, - 0, sizeof(virtio_input_config), - VHOST_SET_CONFIG_TYPE_FRONTEND); - if (ret) { - error_report("vhost-user-input: set device config space failed"); - return; - } - - virtio_notify_config(vdev); -} - -static struct vhost_dev *vhost_input_get_vhost(VirtIODevice *vdev) -{ - VHostUserInput *vhi = VHOST_USER_INPUT(vdev); - return &vhi->vhost->dev; -} - -static const VMStateDescription vmstate_vhost_input = { - .name = "vhost-user-input", - .unmigratable = 1, -}; - -static void vhost_input_class_init(ObjectClass *klass, void *data) -{ - VirtIOInputClass *vic = VIRTIO_INPUT_CLASS(klass); - VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->vmsd = &vmstate_vhost_input; - vdc->get_config = vhost_input_get_config; - vdc->set_config = vhost_input_set_config; - vdc->get_vhost = vhost_input_get_vhost; - vic->realize = vhost_input_realize; - vic->change_active = vhost_input_change_active; -} - -static void vhost_input_init(Object *obj) -{ - VHostUserInput *vhi = VHOST_USER_INPUT(obj); - - vhi->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND)); - object_property_add_alias(obj, "chardev", - OBJECT(vhi->vhost), "chardev"); -} - -static void vhost_input_finalize(Object *obj) -{ - VHostUserInput *vhi = VHOST_USER_INPUT(obj); - - object_unref(OBJECT(vhi->vhost)); -} - -static const TypeInfo vhost_input_info = { - .name = TYPE_VHOST_USER_INPUT, - .parent = TYPE_VIRTIO_INPUT, - .instance_size = sizeof(VHostUserInput), - .instance_init = vhost_input_init, - .instance_finalize = vhost_input_finalize, - .class_init = vhost_input_class_init, -}; - -static void vhost_input_register_types(void) -{ - type_register_static(&vhost_input_info); -} - -type_init(vhost_input_register_types) diff --git a/hw/input/virtio-input.c b/hw/input/virtio-input.c index 5b5398b3cac..3bcdae41b2f 100644 --- a/hw/input/virtio-input.c 
+++ b/hw/input/virtio-input.c @@ -293,7 +293,7 @@ static const VMStateDescription vmstate_virtio_input = { .name = "virtio-input", .minimum_version_id = VIRTIO_INPUT_VM_VERSION, .version_id = VIRTIO_INPUT_VM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c index d0bf8d545ba..cea559c39dd 100644 --- a/hw/intc/allwinner-a10-pic.c +++ b/hw/intc/allwinner-a10-pic.c @@ -142,7 +142,7 @@ static const VMStateDescription vmstate_aw_a10_pic = { .name = "a10.pic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(vector, AwA10PICState), VMSTATE_UINT32(base_addr, AwA10PICState), VMSTATE_UINT32(protect, AwA10PICState), diff --git a/hw/intc/apic.c b/hw/intc/apic.c index ac3d47d2318..4186c57b34c 100644 --- a/hw/intc/apic.c +++ b/hw/intc/apic.c @@ -32,14 +32,13 @@ #include "qapi/error.h" #include "qom/object.h" -#define MAX_APICS 255 -#define MAX_APIC_WORDS 8 - #define SYNC_FROM_VAPIC 0x1 #define SYNC_TO_VAPIC 0x2 #define SYNC_ISR_IRR_TO_VAPIC 0x4 -static APICCommonState *local_apics[MAX_APICS + 1]; +static APICCommonState **local_apics; +static uint32_t max_apics; +static uint32_t max_apic_words; #define TYPE_APIC "apic" /*This is reusing the APICCommonState typedef from APIC_COMMON */ @@ -49,7 +48,19 @@ DECLARE_INSTANCE_CHECKER(APICCommonState, APIC, static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode); static void apic_update_irq(APICCommonState *s); static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask, - uint8_t dest, uint8_t dest_mode); + uint32_t dest, uint8_t dest_mode); + +void apic_set_max_apic_id(uint32_t max_apic_id) +{ + int word_size = 32; + + /* round up the max apic id to next multiple of words */ + max_apics = (max_apic_id + word_size - 1) & ~(word_size - 1); + + local_apics = g_malloc0(sizeof(*local_apics) * max_apics); + max_apic_words = max_apics >> 5; +} + /* Find first bit starting from msb */ static int apic_fls_bit(uint32_t value) @@ -199,10 +210,10 @@ static void apic_external_nmi(APICCommonState *s) #define foreach_apic(apic, deliver_bitmask, code) \ {\ int __i, __j;\ - for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\ + for (__i = 0; __i < max_apic_words; __i++) {\ uint32_t __mask = deliver_bitmask[__i];\ if (__mask) {\ - for(__j = 0; __j < 32; __j++) {\ + for (__j = 0; __j < 32; __j++) {\ if (__mask & (1U << __j)) {\ apic = local_apics[__i * 32 + __j];\ if (apic) {\ @@ -226,7 +237,7 @@ static void apic_bus_deliver(const uint32_t *deliver_bitmask, { int i, d; d = -1; - for(i = 0; i < MAX_APIC_WORDS; i++) { + for (i = 0; i < max_apic_words; i++) { if (deliver_bitmask[i]) { d = i * 32 + apic_ffs_bit(deliver_bitmask[i]); break; @@ -276,10 +287,11 @@ static void apic_bus_deliver(const uint32_t *deliver_bitmask, apic_set_irq(apic_iter, vector_num, trigger_mode) ); } -void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, - uint8_t vector_num, uint8_t trigger_mode) +static void apic_deliver_irq(uint32_t dest, uint8_t dest_mode, + uint8_t delivery_mode, uint8_t vector_num, + uint8_t trigger_mode) { - uint32_t deliver_bitmask[MAX_APIC_WORDS]; + g_autofree uint32_t *deliver_bitmask = g_new(uint32_t, max_apic_words); trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num, trigger_mode); @@ -288,8 +300,56 @@ void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, 
apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode); } -static void apic_set_base(APICCommonState *s, uint64_t val) +bool is_x2apic_mode(DeviceState *dev) +{ + APICCommonState *s = APIC(dev); + + return s->apicbase & MSR_IA32_APICBASE_EXTD; +} + +static int apic_set_base_check(APICCommonState *s, uint64_t val) { + /* Enable x2apic when x2apic is not supported by CPU */ + if (!cpu_has_x2apic_feature(&s->cpu->env) && + val & MSR_IA32_APICBASE_EXTD) { + return -1; + } + + /* + * Transition into invalid state + * (s->apicbase & MSR_IA32_APICBASE_ENABLE == 0) && + * (s->apicbase & MSR_IA32_APICBASE_EXTD) == 1 + */ + if (!(val & MSR_IA32_APICBASE_ENABLE) && + (val & MSR_IA32_APICBASE_EXTD)) { + return -1; + } + + /* Invalid transition from disabled mode to x2APIC */ + if (!(s->apicbase & MSR_IA32_APICBASE_ENABLE) && + !(s->apicbase & MSR_IA32_APICBASE_EXTD) && + (val & MSR_IA32_APICBASE_ENABLE) && + (val & MSR_IA32_APICBASE_EXTD)) { + return -1; + } + + /* Invalid transition from x2APIC to xAPIC */ + if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) && + (s->apicbase & MSR_IA32_APICBASE_EXTD) && + (val & MSR_IA32_APICBASE_ENABLE) && + !(val & MSR_IA32_APICBASE_EXTD)) { + return -1; + } + + return 0; +} + +static int apic_set_base(APICCommonState *s, uint64_t val) +{ + if (apic_set_base_check(s, val) < 0) { + return -1; + } + s->apicbase = (val & 0xfffff000) | (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE)); /* if disabled, cannot be enabled again */ @@ -298,6 +358,25 @@ static void apic_set_base(APICCommonState *s, uint64_t val) cpu_clear_apic_feature(&s->cpu->env); s->spurious_vec &= ~APIC_SV_ENABLE; } + + /* Transition from disabled mode to xAPIC */ + if (!(s->apicbase & MSR_IA32_APICBASE_ENABLE) && + (val & MSR_IA32_APICBASE_ENABLE)) { + s->apicbase |= MSR_IA32_APICBASE_ENABLE; + cpu_set_apic_feature(&s->cpu->env); + } + + /* Transition from xAPIC to x2APIC */ + if (cpu_has_x2apic_feature(&s->cpu->env) && + !(s->apicbase & MSR_IA32_APICBASE_EXTD) && + (val & MSR_IA32_APICBASE_EXTD)) { + s->apicbase |= MSR_IA32_APICBASE_EXTD; + + s->log_dest = ((s->initial_apic_id & 0xffff0) << 16) | + (1 << (s->initial_apic_id & 0xf)); + } + + return 0; } static void apic_set_tpr(APICCommonState *s, uint8_t val) @@ -435,57 +514,123 @@ static void apic_eoi(APICCommonState *s) apic_update_irq(s); } -static int apic_find_dest(uint8_t dest) +static bool apic_match_dest(APICCommonState *apic, uint32_t dest) { - APICCommonState *apic = local_apics[dest]; - int i; + if (is_x2apic_mode(&apic->parent_obj)) { + return apic->initial_apic_id == dest; + } else { + return apic->id == (uint8_t)dest; + } +} - if (apic && apic->id == dest) - return dest; /* shortcut in case apic->id == local_apics[dest]->id */ +static void apic_find_dest(uint32_t *deliver_bitmask, uint32_t dest) +{ + APICCommonState *apic = NULL; + int i; - for (i = 0; i < MAX_APICS; i++) { + for (i = 0; i < max_apics; i++) { apic = local_apics[i]; - if (apic && apic->id == dest) - return i; - if (!apic) - break; + if (apic && apic_match_dest(apic, dest)) { + apic_set_bit(deliver_bitmask, i); + } } +} - return -1; +/* + * Deliver interrupt to x2APIC CPUs if it is x2APIC broadcast. + * Otherwise, deliver interrupt to xAPIC CPUs if it is xAPIC + * broadcast. 
+ */ +static void apic_get_broadcast_bitmask(uint32_t *deliver_bitmask, + bool is_x2apic_broadcast) +{ + int i; + APICCommonState *apic_iter; + + for (i = 0; i < max_apics; i++) { + apic_iter = local_apics[i]; + if (apic_iter) { + bool apic_in_x2apic = is_x2apic_mode(&apic_iter->parent_obj); + + if (is_x2apic_broadcast && apic_in_x2apic) { + apic_set_bit(deliver_bitmask, i); + } else if (!is_x2apic_broadcast && !apic_in_x2apic) { + apic_set_bit(deliver_bitmask, i); + } + } + } } static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask, - uint8_t dest, uint8_t dest_mode) + uint32_t dest, uint8_t dest_mode) { - APICCommonState *apic_iter; + APICCommonState *apic; int i; - if (dest_mode == 0) { - if (dest == 0xff) { - memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t)); + memset(deliver_bitmask, 0x00, max_apic_words * sizeof(uint32_t)); + + /* + * x2APIC broadcast is delivered to all x2APIC CPUs regardless of + * destination mode. In case the destination mode is physical, it is + * broadcasted to all xAPIC CPUs too. Otherwise, if the destination + * mode is logical, we need to continue checking if xAPIC CPUs accepts + * the interrupt. + */ + if (dest == 0xffffffff) { + if (dest_mode == APIC_DESTMODE_PHYSICAL) { + memset(deliver_bitmask, 0xff, max_apic_words * sizeof(uint32_t)); + return; } else { - int idx = apic_find_dest(dest); - memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t)); - if (idx >= 0) - apic_set_bit(deliver_bitmask, idx); + apic_get_broadcast_bitmask(deliver_bitmask, true); + } + } + + if (dest_mode == APIC_DESTMODE_PHYSICAL) { + apic_find_dest(deliver_bitmask, dest); + /* Any APIC in xAPIC mode will interpret 0xFF as broadcast */ + if (dest == 0xff) { + apic_get_broadcast_bitmask(deliver_bitmask, false); } } else { - /* XXX: cluster mode */ - memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t)); - for(i = 0; i < MAX_APICS; i++) { - apic_iter = local_apics[i]; - if (apic_iter) { - if (apic_iter->dest_mode == 0xf) { - if (dest & apic_iter->log_dest) - apic_set_bit(deliver_bitmask, i); - } else if (apic_iter->dest_mode == 0x0) { - if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) && - (dest & apic_iter->log_dest & 0x0f)) { + /* XXX: logical mode */ + for (i = 0; i < max_apics; i++) { + apic = local_apics[i]; + if (apic) { + /* x2APIC logical mode */ + if (apic->apicbase & MSR_IA32_APICBASE_EXTD) { + if ((dest >> 16) == (apic->extended_log_dest >> 16) && + (dest & apic->extended_log_dest & 0xffff)) { apic_set_bit(deliver_bitmask, i); } + continue; } - } else { - break; + + /* xAPIC logical mode */ + dest = (uint8_t)dest; + if (apic->dest_mode == APIC_DESTMODE_LOGICAL_FLAT) { + if (dest & apic->log_dest) { + apic_set_bit(deliver_bitmask, i); + } + } else if (apic->dest_mode == APIC_DESTMODE_LOGICAL_CLUSTER) { + /* + * In cluster model of xAPIC logical mode IPI, 4 higher + * bits are used as cluster address, 4 lower bits are + * the bitmask for local APICs in the cluster. The IPI + * is delivered to an APIC if the cluster address + * matches and the APIC's address bit in the cluster is + * set in bitmask of destination ID in IPI. + * + * The cluster address ranges from 0 - 14, the cluster + * address 15 (0xf) is the broadcast address to all + * clusters. 
+ */ + if ((dest & 0xf0) == 0xf0 || + (dest & 0xf0) == (apic->log_dest & 0xf0)) { + if (dest & apic->log_dest & 0x0f) { + apic_set_bit(deliver_bitmask, i); + } + } + } } } } @@ -509,29 +654,36 @@ void apic_sipi(DeviceState *dev) s->wait_for_sipi = 0; } -static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode, +static void apic_deliver(DeviceState *dev, uint32_t dest, uint8_t dest_mode, uint8_t delivery_mode, uint8_t vector_num, - uint8_t trigger_mode) + uint8_t trigger_mode, uint8_t dest_shorthand) { APICCommonState *s = APIC(dev); - uint32_t deliver_bitmask[MAX_APIC_WORDS]; - int dest_shorthand = (s->icr[0] >> 18) & 3; APICCommonState *apic_iter; + uint32_t deliver_bitmask_size = max_apic_words * sizeof(uint32_t); + g_autofree uint32_t *deliver_bitmask = g_new(uint32_t, max_apic_words); + uint32_t current_apic_id; + + if (is_x2apic_mode(dev)) { + current_apic_id = s->initial_apic_id; + } else { + current_apic_id = s->id; + } switch (dest_shorthand) { case 0: apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode); break; case 1: - memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask)); - apic_set_bit(deliver_bitmask, s->id); + memset(deliver_bitmask, 0x00, deliver_bitmask_size); + apic_set_bit(deliver_bitmask, current_apic_id); break; case 2: - memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); + memset(deliver_bitmask, 0xff, deliver_bitmask_size); break; case 3: - memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); - apic_reset_bit(deliver_bitmask, s->id); + memset(deliver_bitmask, 0xff, deliver_bitmask_size); + apic_reset_bit(deliver_bitmask, current_apic_id); break; } @@ -636,27 +788,26 @@ static void apic_timer(void *opaque) apic_timer_update(s, s->next_time); } -static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size) +static int apic_register_read(int index, uint64_t *value) { DeviceState *dev; APICCommonState *s; uint32_t val; - int index; - - if (size < 4) { - return 0; - } + int ret = 0; dev = cpu_get_current_apic(); if (!dev) { - return 0; + return -1; } s = APIC(dev); - index = (addr >> 4) & 0xff; switch(index) { case 0x02: /* id */ - val = s->id << 24; + if (is_x2apic_mode(dev)) { + val = s->initial_apic_id; + } else { + val = s->id << 24; + } break; case 0x03: /* version */ val = s->version | ((APIC_LVT_NB - 1) << 16); @@ -679,10 +830,19 @@ static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size) val = 0; break; case 0x0d: - val = s->log_dest << 24; + if (is_x2apic_mode(dev)) { + val = s->extended_log_dest; + } else { + val = s->log_dest << 24; + } break; case 0x0e: - val = (s->dest_mode << 28) | 0xfffffff; + if (is_x2apic_mode(dev)) { + val = 0; + ret = -1; + } else { + val = (s->dest_mode << 28) | 0xfffffff; + } break; case 0x0f: val = s->spurious_vec; @@ -718,17 +878,56 @@ static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size) default: s->esr |= APIC_ESR_ILLEGAL_ADDRESS; val = 0; + ret = -1; break; } - trace_apic_mem_readl(addr, val); + + trace_apic_register_read(index, val); + *value = val; + return ret; +} + +static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t val; + int index; + + if (size < 4) { + return 0; + } + + index = (addr >> 4) & 0xff; + apic_register_read(index, &val); + return val; } +int apic_msr_read(int index, uint64_t *val) +{ + DeviceState *dev; + + dev = cpu_get_current_apic(); + if (!dev) { + return -1; + } + + if (!is_x2apic_mode(dev)) { + return -1; + } + + return apic_register_read(index, val); +} + static void apic_send_msi(MSIMessage 
*msi) { uint64_t addr = msi->address; uint32_t data = msi->data; - uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; + uint32_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; + /* + * The higher 3 bytes of the destination id are stored in the higher word + * of the msi address. See x86_iommu_irq_to_msi_message() + */ + dest = dest | (addr >> 32); uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT; uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; @@ -737,38 +936,25 @@ static void apic_send_msi(MSIMessage *msi) apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode); } -static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val, - unsigned size) +static int apic_register_write(int index, uint64_t val) { DeviceState *dev; APICCommonState *s; - int index = (addr >> 4) & 0xff; - - if (size < 4) { - return; - } - - if (addr > 0xfff || !index) { - /* MSI and MMIO APIC are at the same memory location, - * but actually not on the global bus: MSI is on PCI bus - * APIC is connected directly to the CPU. - * Mapping them on the global bus happens to work because - * MSI registers are reserved in APIC MMIO and vice versa. */ - MSIMessage msi = { .address = addr, .data = val }; - apic_send_msi(&msi); - return; - } dev = cpu_get_current_apic(); if (!dev) { - return; + return -1; } s = APIC(dev); - trace_apic_mem_writel(addr, val); + trace_apic_register_write(index, val); switch(index) { case 0x02: + if (is_x2apic_mode(dev)) { + return -1; + } + s->id = (val >> 24); break; case 0x03: @@ -788,9 +974,17 @@ static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val, apic_eoi(s); break; case 0x0d: + if (is_x2apic_mode(dev)) { + return -1; + } + s->log_dest = val >> 24; break; case 0x0e: + if (is_x2apic_mode(dev)) { + return -1; + } + s->dest_mode = val >> 28; break; case 0x0f: @@ -802,13 +996,27 @@ static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val, case 0x20 ... 0x27: case 0x28: break; - case 0x30: + case 0x30: { + uint32_t dest; + s->icr[0] = val; - apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1, + if (is_x2apic_mode(dev)) { + s->icr[1] = val >> 32; + dest = s->icr[1]; + } else { + dest = (s->icr[1] >> 24) & 0xff; + } + + apic_deliver(dev, dest, (s->icr[0] >> 11) & 1, (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff), - (s->icr[0] >> 15) & 1); + (s->icr[0] >> 15) & 1, (s->icr[0] >> 18) & 3); break; + } case 0x31: + if (is_x2apic_mode(dev)) { + return -1; + } + s->icr[1] = val; break; case 0x32 ... 0x37: @@ -837,10 +1045,70 @@ static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val, s->count_shift = (v + 1) & 7; } break; + case 0x3f: { + int vector = val & 0xff; + + if (!is_x2apic_mode(dev)) { + return -1; + } + + /* + * Self IPI is identical to IPI with + * - Destination shorthand: 1 (Self) + * - Trigger mode: 0 (Edge) + * - Delivery mode: 0 (Fixed) + */ + apic_deliver(dev, 0, 0, APIC_DM_FIXED, vector, 0, 1); + + break; + } default: s->esr |= APIC_ESR_ILLEGAL_ADDRESS; - break; + return -1; + } + + return 0; +} + +static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + int index = (addr >> 4) & 0xff; + + if (size < 4) { + return; + } + + if (addr > 0xfff || !index) { + /* + * MSI and MMIO APIC are at the same memory location, + * but actually not on the global bus: MSI is on the PCI bus; + * the APIC is connected directly to the CPU.
+ * Mapping them on the global bus happens to work because + * MSI registers are reserved in APIC MMIO and vice versa. + */ + MSIMessage msi = { .address = addr, .data = val }; + apic_send_msi(&msi); + return; + } + + apic_register_write(index, val); +} + +int apic_msr_write(int index, uint64_t val) +{ + DeviceState *dev; + + dev = cpu_get_current_apic(); + if (!dev) { + return -1; } + + if (!is_x2apic_mode(dev)) { + return -1; + } + + return apic_register_write(index, val); } static void apic_pre_save(APICCommonState *s) @@ -871,12 +1139,6 @@ static void apic_realize(DeviceState *dev, Error **errp) { APICCommonState *s = APIC(dev); - if (s->id >= MAX_APICS) { - error_setg(errp, "%s initialization failed. APIC ID %d is invalid", - object_get_typename(OBJECT(dev)), s->id); - return; - } - if (kvm_enabled()) { warn_report("Userspace local APIC is deprecated for KVM."); warn_report("Do not use kernel-irqchip except for the -M isapc machine type."); @@ -893,7 +1155,16 @@ static void apic_realize(DeviceState *dev, Error **errp) s->io_memory.disable_reentrancy_guard = true; s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s); - local_apics[s->id] = s; + + /* + * The --machine none option does not call apic_set_max_apic_id() before + * creating the APIC, so call it here and set it to 1, which is the maximum + * number of CPUs for machine none. + */ + if (!local_apics) { + apic_set_max_apic_id(1); + } + local_apics[s->initial_apic_id] = s; msi_nonbroken = true; } @@ -903,7 +1174,7 @@ static void apic_unrealize(DeviceState *dev) APICCommonState *s = APIC(dev); timer_free(s->timer); - local_apics[s->id] = NULL; + local_apics[s->initial_apic_id] = NULL; } static void apic_class_init(ObjectClass *klass, void *data) diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c index bccb4241c2d..d8fc1e2815f 100644 --- a/hw/intc/apic_common.c +++ b/hw/intc/apic_common.c @@ -35,20 +35,19 @@ bool apic_report_tpr_access; -void cpu_set_apic_base(DeviceState *dev, uint64_t val) +int cpu_set_apic_base(DeviceState *dev, uint64_t val) { trace_cpu_set_apic_base(val); if (dev) { APICCommonState *s = APIC_COMMON(dev); APICCommonClass *info = APIC_COMMON_GET_CLASS(s); - /* switching to x2APIC, reset possibly modified xAPIC ID */ - if (!(s->apicbase & MSR_IA32_APICBASE_EXTD) && - (val & MSR_IA32_APICBASE_EXTD)) { - s->id = s->initial_apic_id; - } - info->set_base(s, val); + /* Reset possibly modified xAPIC ID */ + s->id = s->initial_apic_id; + return info->set_base(s, val); } + + return 0; } uint64_t cpu_get_apic_base(DeviceState *dev) @@ -63,6 +62,19 @@ uint64_t cpu_get_apic_base(DeviceState *dev) } } +bool cpu_is_apic_enabled(DeviceState *dev) +{ + APICCommonState *s; + + if (!dev) { + return false; + } + + s = APIC_COMMON(dev); + + return s->apicbase & MSR_IA32_APICBASE_ENABLE; +} + void cpu_set_apic_tpr(DeviceState *dev, uint8_t val) { APICCommonState *s; @@ -287,6 +299,10 @@ static void apic_common_realize(DeviceState *dev, Error **errp) } vmstate_register_with_alias_id(NULL, instance_id, &vmstate_apic_common, s, -1, 0, NULL); + + /* APIC LDR in x2APIC mode */ + s->extended_log_dest = ((s->initial_apic_id >> 4) << 16) | + (1 << (s->initial_apic_id & 0xf)); } static void apic_common_unrealize(DeviceState *dev) @@ -349,7 +365,7 @@ static const VMStateDescription vmstate_apic_common_sipi = { .version_id = 1, .minimum_version_id = 1, .needed = apic_common_sipi_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(sipi_vector, APICCommonState), VMSTATE_INT32(wait_for_sipi, APICCommonState),
VMSTATE_END_OF_LIST() @@ -363,7 +379,7 @@ static const VMStateDescription vmstate_apic_common = { .pre_load = apic_pre_load, .pre_save = apic_dispatch_pre_save, .post_load = apic_dispatch_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(apicbase, APICCommonState), VMSTATE_UINT8(id, APICCommonState), VMSTATE_UINT8(arb_id, APICCommonState), @@ -386,7 +402,7 @@ static const VMStateDescription vmstate_apic_common = { APICCommonState), /* open-coded timer state */ VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_apic_common_sipi, NULL } @@ -427,6 +443,11 @@ static void apic_common_set_id(Object *obj, Visitor *v, const char *name, return; } + if (value >= 255 && !cpu_has_x2apic_feature(&s->cpu->env)) { + error_setg(errp, "APIC ID %d requires x2APIC feature in CPU", value); + return; + } + s->initial_apic_id = value; s->id = (uint8_t)value; } diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c index 7c28504ace8..94c173cb071 100644 --- a/hw/intc/arm_gic_common.c +++ b/hw/intc/arm_gic_common.c @@ -62,7 +62,7 @@ static const VMStateDescription vmstate_gic_irq_state = { .name = "arm_gic_irq_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(enabled, gic_irq_state), VMSTATE_UINT8(pending, gic_irq_state), VMSTATE_UINT8(active, gic_irq_state), @@ -79,7 +79,7 @@ static const VMStateDescription vmstate_gic_virt_state = { .version_id = 1, .minimum_version_id = 1, .needed = gic_virt_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Virtual interface */ VMSTATE_UINT32_ARRAY(h_hcr, GICState, GIC_NCPU), VMSTATE_UINT32_ARRAY(h_misr, GICState, GIC_NCPU), @@ -104,7 +104,7 @@ static const VMStateDescription vmstate_gic = { .minimum_version_id = 12, .pre_save = gic_pre_save, .post_load = gic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctlr, GICState), VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, GICState, 0, GIC_NCPU), VMSTATE_STRUCT_ARRAY(irq_state, GICState, GIC_MAXIRQ, 1, @@ -122,7 +122,7 @@ static const VMStateDescription vmstate_gic = { VMSTATE_UINT32_2DARRAY(nsapr, GICState, GIC_NR_APRS, GIC_NCPU), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_gic_virt_state, NULL } diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c index 2ebf880eada..cb55c726810 100644 --- a/hw/intc/arm_gicv3_common.c +++ b/hw/intc/arm_gicv3_common.c @@ -105,7 +105,7 @@ static const VMStateDescription vmstate_gicv3_cpu_virt = { .version_id = 1, .minimum_version_id = 1, .needed = virt_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4), VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState), VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX), @@ -139,7 +139,7 @@ const VMStateDescription vmstate_gicv3_cpu_sre_el1 = { .version_id = 1, .minimum_version_id = 1, .needed = icc_sre_el1_reg_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(icc_sre_el1, GICv3CPUState), VMSTATE_END_OF_LIST() } @@ -157,7 +157,7 @@ const VMStateDescription vmstate_gicv3_gicv4 = { .version_id = 1, .minimum_version_id = 1, .needed = gicv4_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_UINT64(gicr_vpropbaser, GICv3CPUState), VMSTATE_UINT64(gicr_vpendbaser, GICv3CPUState), VMSTATE_END_OF_LIST() @@ -169,7 +169,7 @@ static const VMStateDescription vmstate_gicv3_cpu = { .version_id = 1, .minimum_version_id = 1, .pre_load = vmstate_gicv3_cpu_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, GICv3CPUState), VMSTATE_UINT32(gicr_ctlr, GICv3CPUState), VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2), @@ -192,7 +192,7 @@ static const VMStateDescription vmstate_gicv3_cpu = { VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_gicv3_cpu_virt, &vmstate_gicv3_cpu_sre_el1, &vmstate_gicv3_gicv4, @@ -232,7 +232,7 @@ const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = { .version_id = 1, .minimum_version_id = 1, .needed = needed_always, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State), VMSTATE_END_OF_LIST() } @@ -246,7 +246,7 @@ static const VMStateDescription vmstate_gicv3 = { .pre_save = gicv3_pre_save, .post_load = gicv3_post_load, .priority = MIG_PRI_GICV3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(gicd_ctlr, GICv3State), VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2), VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE), @@ -264,7 +264,7 @@ static const VMStateDescription vmstate_gicv3 = { vmstate_gicv3_cpu, GICv3CPUState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_gicv3_gicd_no_migration_shift_bug, NULL } diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index 258dee1b808..67d8fd07b7f 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -934,7 +934,7 @@ void gicv3_cpuif_update(GICv3CPUState *cs) ARMCPU *cpu = ARM_CPU(cs->cpu); CPUARMState *env = &cpu->env; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq, cs->hppi.grp, cs->hppi.prio); @@ -1067,7 +1067,7 @@ static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env) */ bool irq_is_secure; - if (cs->hppi.prio == 0xff) { + if (icc_no_enabled_hppi(cs)) { return INTID_SPURIOUS; } @@ -1104,7 +1104,7 @@ static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env) */ bool irq_is_secure; - if (cs->hppi.prio == 0xff) { + if (icc_no_enabled_hppi(cs)) { return INTID_SPURIOUS; } @@ -2684,6 +2684,7 @@ static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x480, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2691,6 +2692,7 @@ static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4a0, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2698,6 +2700,7 @@ static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4c0, .access = PL2_RW, .readfn = 
ich_hcr_read, .writefn = ich_hcr_write, @@ -2729,6 +2732,7 @@ static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4c8, .access = PL2_RW, .readfn = ich_vmcr_read, .writefn = ich_vmcr_write, @@ -2739,6 +2743,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = { { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x488, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2746,6 +2751,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = { { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4a8, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2756,6 +2762,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x490, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2763,6 +2770,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x498, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2770,6 +2778,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4b0, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2777,6 +2786,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4b8, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2898,6 +2908,7 @@ void gicv3_init_cpuif(GICv3State *s) .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 12 + (j >> 3), .opc2 = j & 7, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x400 + 8 * j, .access = PL2_RW, .readfn = ich_lr_read, .writefn = ich_lr_write, diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c index fddd6d490c2..331d6b93cc1 100644 --- a/hw/intc/arm_gicv3_its_common.c +++ b/hw/intc/arm_gicv3_its_common.c @@ -54,7 +54,7 @@ static const VMStateDescription vmstate_its = { .pre_save = gicv3_its_pre_save, .post_load = gicv3_its_post_load, .priority = MIG_PRI_GICV3_ITS, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctlr, GICv3ITSState), VMSTATE_UINT32(iidr, GICv3ITSState), VMSTATE_UINT64(cbaser, GICv3ITSState), diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index f7df602cfff..3befc960db2 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" +#include "qemu/error-report.h" #include "hw/intc/arm_gicv3_its_common.h" #include "hw/qdev-properties.h" #include "sysemu/runstate.h" diff --git 
a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 942be7bd112..404a445138a 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -2498,7 +2498,7 @@ static const VMStateDescription vmstate_VecInfo = { .name = "armv7m_nvic_info", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT16(prio, VecInfo), VMSTATE_UINT8(enabled, VecInfo), VMSTATE_UINT8(pending, VecInfo), @@ -2543,7 +2543,7 @@ static const VMStateDescription vmstate_nvic_security = { .minimum_version_id = 1, .needed = nvic_security_needed, .post_load = &nvic_security_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1, vmstate_VecInfo, VecInfo), VMSTATE_UINT32(prigroup[M_REG_S], NVICState), @@ -2557,13 +2557,13 @@ static const VMStateDescription vmstate_nvic = { .version_id = 4, .minimum_version_id = 4, .post_load = &nvic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1, vmstate_VecInfo, VecInfo), VMSTATE_UINT32(prigroup[M_REG_NS], NVICState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_nvic_security, NULL } @@ -2572,6 +2572,11 @@ static const VMStateDescription vmstate_nvic = { static Property props_nvic[] = { /* Number of external IRQ lines (so excluding the 16 internal exceptions) */ DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64), + /* + * Maximum number of priority bits that can be used. 0 means + * use a reasonable default. + */ + DEFINE_PROP_UINT8("num-prio-bits", NVICState, num_prio_bits, 0), DEFINE_PROP_END_OF_LIST() }; @@ -2685,7 +2690,23 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp) /* include space for internal exception vectors */ s->num_irq += NVIC_FIRST_IRQ; - s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2; + if (s->num_prio_bits == 0) { + /* + * If left unspecified, use 2 bits by default on Cortex-M0/M0+/M1 + * and 8 bits otherwise. + */ + s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2; + } else { + uint8_t min_prio_bits = + arm_feature(&s->cpu->env, ARM_FEATURE_V7) ?
3 : 2; + if (s->num_prio_bits < min_prio_bits || s->num_prio_bits > 8) { + error_setg(errp, + "num-prio-bits %d is outside " + "NVIC acceptable range [%d-8]", + s->num_prio_bits, min_prio_bits); + return; + } + } /* * This device provides a single memory region which covers the diff --git a/hw/intc/aspeed_vic.c b/hw/intc/aspeed_vic.c index 5ba06c52628..ba1d953c2cf 100644 --- a/hw/intc/aspeed_vic.c +++ b/hw/intc/aspeed_vic.c @@ -326,7 +326,7 @@ static const VMStateDescription vmstate_aspeed_vic = { .name = "aspeed.new-vic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(level, AspeedVICState), VMSTATE_UINT64(raw, AspeedVICState), VMSTATE_UINT64(select, AspeedVICState), diff --git a/hw/intc/bcm2835_ic.c b/hw/intc/bcm2835_ic.c index 4513fad16f2..2c2e2b1822c 100644 --- a/hw/intc/bcm2835_ic.c +++ b/hw/intc/bcm2835_ic.c @@ -208,7 +208,7 @@ static const VMStateDescription vmstate_bcm2835_ic = { .name = TYPE_BCM2835_IC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(gpu_irq_level, BCM2835ICState), VMSTATE_UINT64(gpu_irq_enable, BCM2835ICState), VMSTATE_UINT8(arm_irq_level, BCM2835ICState), diff --git a/hw/intc/bcm2836_control.c b/hw/intc/bcm2836_control.c index b0589df1881..81faf032b0e 100644 --- a/hw/intc/bcm2836_control.c +++ b/hw/intc/bcm2836_control.c @@ -369,7 +369,7 @@ static const VMStateDescription vmstate_bcm2836_control = { .name = TYPE_BCM2836_CONTROL, .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(mailboxes, BCM2836ControlState, BCM2836_NCORES * BCM2836_MBPERCORE), VMSTATE_UINT8(route_gpu_irq, BCM2836ControlState), diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c index 4ba448fdb19..f0d310a0ebc 100644 --- a/hw/intc/exynos4210_combiner.c +++ b/hw/intc/exynos4210_combiner.c @@ -54,7 +54,7 @@ static const VMStateDescription vmstate_exynos4210_combiner_group_state = { .name = "exynos4210.combiner.groupstate", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(src_mask, CombinerGroupState), VMSTATE_UINT8(src_pending, CombinerGroupState), VMSTATE_END_OF_LIST() @@ -65,7 +65,7 @@ static const VMStateDescription vmstate_exynos4210_combiner = { .name = "exynos4210.combiner", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(group, Exynos4210CombinerState, IIC_NGRP, 0, vmstate_exynos4210_combiner_group_state, CombinerGroupState), VMSTATE_UINT32_ARRAY(reg_set, Exynos4210CombinerState, diff --git a/hw/intc/goldfish_pic.c b/hw/intc/goldfish_pic.c index dfd53275f69..d662dfeb99d 100644 --- a/hw/intc/goldfish_pic.c +++ b/hw/intc/goldfish_pic.c @@ -161,7 +161,7 @@ static const VMStateDescription vmstate_goldfish_pic = { .name = "goldfish_pic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(pending, GoldfishPICState), VMSTATE_UINT32(enabled, GoldfishPICState), VMSTATE_END_OF_LIST() diff --git a/hw/intc/grlib_irqmp.c b/hw/intc/grlib_irqmp.c index 3bfe2544b7c..c6c51a349cc 100644 --- a/hw/intc/grlib_irqmp.c +++ b/hw/intc/grlib_irqmp.c @@ -1,9 +1,11 @@ /* * QEMU GRLIB IRQMP Emulator * - * (Multiprocessor and extended interrupt not supported) + * (Extended interrupt not supported) * - * Copyright (c) 2010-2019 AdaCore + * 
SPDX-License-Identifier: MIT + * + * Copyright (c) 2010-2024 AdaCore * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -29,7 +31,7 @@ #include "hw/sysbus.h" #include "hw/qdev-properties.h" -#include "hw/sparc/grlib.h" +#include "hw/intc/grlib_irqmp.h" #include "trace.h" #include "qapi/error.h" @@ -50,6 +52,10 @@ #define FORCE_OFFSET 0x80 #define EXTENDED_OFFSET 0xC0 +/* Multiprocessor Status Register */ +#define MP_STATUS_CPU_STATUS_MASK ((1 << IRQMP_MAX_CPU)-2) +#define MP_STATUS_NCPU_SHIFT 28 + #define MAX_PILS 16 OBJECT_DECLARE_SIMPLE_TYPE(IRQMP, GRLIB_IRQMP) @@ -61,14 +67,17 @@ struct IRQMP { MemoryRegion iomem; + unsigned int ncpus; IRQMPState *state; - qemu_irq irq; + qemu_irq start_signal[IRQMP_MAX_CPU]; + qemu_irq irq[IRQMP_MAX_CPU]; }; struct IRQMPState { uint32_t level; uint32_t pending; uint32_t clear; + uint32_t mpstatus; uint32_t broadcast; uint32_t mask[IRQMP_MAX_CPU]; @@ -80,37 +89,35 @@ struct IRQMPState { static void grlib_irqmp_check_irqs(IRQMPState *state) { - uint32_t pend = 0; - uint32_t level0 = 0; - uint32_t level1 = 0; + int i; assert(state != NULL); assert(state->parent != NULL); - /* IRQ for CPU 0 (no SMP support) */ - pend = (state->pending | state->force[0]) - & state->mask[0]; - - level0 = pend & ~state->level; - level1 = pend & state->level; + for (i = 0; i < state->parent->ncpus; i++) { + uint32_t pend = (state->pending | state->force[i]) & state->mask[i]; + uint32_t level0 = pend & ~state->level; + uint32_t level1 = pend & state->level; - trace_grlib_irqmp_check_irqs(state->pending, state->force[0], - state->mask[0], level1, level0); + trace_grlib_irqmp_check_irqs(state->pending, state->force[i], + state->mask[i], level1, level0); - /* Trigger level1 interrupt first and level0 if there is no level1 */ - qemu_set_irq(state->parent->irq, level1 ?: level0); + /* Trigger level1 interrupt first and level0 if there is no level1 */ + qemu_set_irq(state->parent->irq[i], level1 ?: level0); + } } -static void grlib_irqmp_ack_mask(IRQMPState *state, uint32_t mask) +static void grlib_irqmp_ack_mask(IRQMPState *state, unsigned int cpu, + uint32_t mask) { /* Clear registers */ state->pending &= ~mask; - state->force[0] &= ~mask; /* Only CPU 0 (No SMP support) */ + state->force[cpu] &= ~mask; grlib_irqmp_check_irqs(state); } -void grlib_irqmp_ack(DeviceState *dev, int intno) +void grlib_irqmp_ack(DeviceState *dev, unsigned int cpu, int intno) { IRQMP *irqmp = GRLIB_IRQMP(dev); IRQMPState *state; @@ -124,7 +131,7 @@ void grlib_irqmp_ack(DeviceState *dev, int intno) trace_grlib_irqmp_ack(intno); - grlib_irqmp_ack_mask(state, mask); + grlib_irqmp_ack_mask(state, cpu, mask); } static void grlib_irqmp_set_irq(void *opaque, int irq, int level) @@ -150,7 +157,6 @@ static void grlib_irqmp_set_irq(void *opaque, int irq, int level) s->pending |= 1 << irq; } grlib_irqmp_check_irqs(s); - } } @@ -179,10 +185,12 @@ static uint64_t grlib_irqmp_read(void *opaque, hwaddr addr, return state->force[0]; case CLEAR_OFFSET: - case MP_STATUS_OFFSET: /* Always read as 0 */ return 0; + case MP_STATUS_OFFSET: + return state->mpstatus; + case BROADCAST_OFFSET: return state->broadcast; @@ -221,8 +229,9 @@ static uint64_t grlib_irqmp_read(void *opaque, hwaddr addr, static void grlib_irqmp_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { - IRQMP *irqmp = opaque; + IRQMP *irqmp = opaque; IRQMPState *state; + int i; assert(irqmp != NULL); state = irqmp->state; @@ -251,11 
+260,24 @@ static void grlib_irqmp_write(void *opaque, hwaddr addr, case CLEAR_OFFSET: value &= ~1; /* clean up the value */ - grlib_irqmp_ack_mask(state, value); + for (i = 0; i < irqmp->ncpus; i++) { + grlib_irqmp_ack_mask(state, i, value); + } return; case MP_STATUS_OFFSET: - /* Read Only (no SMP support) */ + /* + * Writing and reading operations are reversed for the CPU status. + * Writing "1" will start the CPU, but reading "1" means that the CPU + * is powered down. + */ + value &= MP_STATUS_CPU_STATUS_MASK; + for (i = 0; i < irqmp->ncpus; i++) { + if ((value >> i) & 1) { + qemu_set_irq(irqmp->start_signal[i], 1); + state->mpstatus &= ~(1 << i); + } + } return; case BROADCAST_OFFSET: @@ -322,35 +344,56 @@ static void grlib_irqmp_reset(DeviceState *d) memset(irqmp->state, 0, sizeof *irqmp->state); irqmp->state->parent = irqmp; + irqmp->state->mpstatus = ((irqmp->ncpus - 1) << MP_STATUS_NCPU_SHIFT) | + ((1 << irqmp->ncpus) - 2); } -static void grlib_irqmp_init(Object *obj) +static void grlib_irqmp_realize(DeviceState *dev, Error **errp) { - IRQMP *irqmp = GRLIB_IRQMP(obj); - SysBusDevice *dev = SYS_BUS_DEVICE(obj); + IRQMP *irqmp = GRLIB_IRQMP(dev); - qdev_init_gpio_in(DEVICE(obj), grlib_irqmp_set_irq, MAX_PILS); - qdev_init_gpio_out_named(DEVICE(obj), &irqmp->irq, "grlib-irq", 1); - memory_region_init_io(&irqmp->iomem, obj, &grlib_irqmp_ops, irqmp, + if ((!irqmp->ncpus) || (irqmp->ncpus > IRQMP_MAX_CPU)) { + error_setg(errp, "Invalid ncpus property: " + "%u, must be 0 < ncpus <= %u.", irqmp->ncpus, + IRQMP_MAX_CPU); + return; + } + + qdev_init_gpio_in(dev, grlib_irqmp_set_irq, MAX_PILS); + + /* + * Transitioning from 0 to 1 starts the CPUs. The opposite can't + * happen. + */ + qdev_init_gpio_out_named(dev, irqmp->start_signal, "grlib-start-cpu", + IRQMP_MAX_CPU); + qdev_init_gpio_out_named(dev, irqmp->irq, "grlib-irq", irqmp->ncpus); + memory_region_init_io(&irqmp->iomem, OBJECT(dev), &grlib_irqmp_ops, irqmp, "irqmp", IRQMP_REG_SIZE); irqmp->state = g_malloc0(sizeof *irqmp->state); - sysbus_init_mmio(dev, &irqmp->iomem); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &irqmp->iomem); } +static Property grlib_irqmp_properties[] = { + DEFINE_PROP_UINT32("ncpus", IRQMP, ncpus, 1), + DEFINE_PROP_END_OF_LIST(), +}; + static void grlib_irqmp_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = grlib_irqmp_realize; dc->reset = grlib_irqmp_reset; + device_class_set_props(dc, grlib_irqmp_properties); } static const TypeInfo grlib_irqmp_info = { .name = TYPE_GRLIB_IRQMP, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(IRQMP), - .instance_init = grlib_irqmp_init, .class_init = grlib_irqmp_class_init, }; diff --git a/hw/intc/heathrow_pic.c b/hw/intc/heathrow_pic.c index 13048a27354..c2946ba1ad5 100644 --- a/hw/intc/heathrow_pic.c +++ b/hw/intc/heathrow_pic.c @@ -141,7 +141,7 @@ static const VMStateDescription vmstate_heathrow_pic_one = { .name = "heathrow_pic_one", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(events, HeathrowPICState), VMSTATE_UINT32(mask, HeathrowPICState), VMSTATE_UINT32(levels, HeathrowPICState), @@ -154,7 +154,7 @@ static const VMStateDescription vmstate_heathrow = { .name = "heathrow_pic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(pics, HeathrowState, 2, 1, vmstate_heathrow_pic_one, HeathrowPICState), VMSTATE_END_OF_LIST() diff --git a/hw/intc/i8259_common.c
b/hw/intc/i8259_common.c index c931dc2d07c..ee0041115c3 100644 --- a/hw/intc/i8259_common.c +++ b/hw/intc/i8259_common.c @@ -156,7 +156,7 @@ static const VMStateDescription vmstate_pic_ltim = { .version_id = 1, .minimum_version_id = 1, .needed = ltim_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(ltim, PICCommonState), VMSTATE_END_OF_LIST() } @@ -168,7 +168,7 @@ static const VMStateDescription vmstate_pic_common = { .minimum_version_id = 1, .pre_save = pic_dispatch_pre_save, .post_load = pic_dispatch_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(last_irr, PICCommonState), VMSTATE_UINT8(irr, PICCommonState), VMSTATE_UINT8(imr, PICCommonState), @@ -187,7 +187,7 @@ static const VMStateDescription vmstate_pic_common = { VMSTATE_UINT8(elcr, PICCommonState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_pic_ltim, NULL } diff --git a/hw/intc/imx_avic.c b/hw/intc/imx_avic.c index 63fc602a1a9..aedc708bed4 100644 --- a/hw/intc/imx_avic.c +++ b/hw/intc/imx_avic.c @@ -38,7 +38,7 @@ static const VMStateDescription vmstate_imx_avic = { .name = TYPE_IMX_AVIC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(pending, IMXAVICState), VMSTATE_UINT64(enabled, IMXAVICState), VMSTATE_UINT64(is_fiq, IMXAVICState), diff --git a/hw/intc/imx_gpcv2.c b/hw/intc/imx_gpcv2.c index 237d5f97eba..af45e5194c4 100644 --- a/hw/intc/imx_gpcv2.c +++ b/hw/intc/imx_gpcv2.c @@ -96,7 +96,7 @@ static const VMStateDescription vmstate_imx_gpcv2 = { .name = TYPE_IMX_GPCV2, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IMXGPCv2State, GPC_NUM), VMSTATE_END_OF_LIST() }, diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c index b05f436dac2..efbe6958c8d 100644 --- a/hw/intc/ioapic_common.c +++ b/hw/intc/ioapic_common.c @@ -152,6 +152,7 @@ static int ioapic_dispatch_post_load(void *opaque, int version_id) static void ioapic_common_realize(DeviceState *dev, Error **errp) { + ERRP_GUARD(); IOAPICCommonState *s = IOAPIC_COMMON(dev); IOAPICCommonClass *info; @@ -162,6 +163,9 @@ static void ioapic_common_realize(DeviceState *dev, Error **errp) info = IOAPIC_COMMON_GET_CLASS(s); info->realize(dev, errp); + if (*errp) { + return; + } sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io_memory); ioapic_no++; @@ -182,7 +186,7 @@ static const VMStateDescription vmstate_ioapic_common = { .minimum_version_id = 1, .pre_save = ioapic_dispatch_pre_save, .post_load = ioapic_dispatch_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(id, IOAPICCommonState), VMSTATE_UINT8(ioregsel, IOAPICCommonState), VMSTATE_UNUSED_V(2, 8), /* to account for qemu-kvm's v2 format */ diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c index 24fb3af8cc3..0b358548eb4 100644 --- a/hw/intc/loongarch_extioi.c +++ b/hw/intc/loongarch_extioi.c @@ -8,6 +8,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" #include "qemu/log.h" +#include "qapi/error.h" #include "hw/irq.h" #include "hw/sysbus.h" #include "hw/loongarch/virt.h" @@ -32,23 +33,23 @@ static void extioi_update_irq(LoongArchExtIOI *s, int irq, int level) if (((s->enable[irq_index]) & irq_mask) == 0) { return; } - s->coreisr[cpu][irq_index] |= irq_mask; - found = find_first_bit(s->sw_isr[cpu][ipnum], EXTIOI_IRQS); 
- set_bit(irq, s->sw_isr[cpu][ipnum]); + s->cpu[cpu].coreisr[irq_index] |= irq_mask; + found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS); + set_bit(irq, s->cpu[cpu].sw_isr[ipnum]); if (found < EXTIOI_IRQS) { /* other irq is handling, need not update parent irq level */ return; } } else { - s->coreisr[cpu][irq_index] &= ~irq_mask; - clear_bit(irq, s->sw_isr[cpu][ipnum]); - found = find_first_bit(s->sw_isr[cpu][ipnum], EXTIOI_IRQS); + s->cpu[cpu].coreisr[irq_index] &= ~irq_mask; + clear_bit(irq, s->cpu[cpu].sw_isr[ipnum]); + found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS); if (found < EXTIOI_IRQS) { /* other irq is handling, need not update parent irq level */ return; } } - qemu_set_irq(s->parent_irq[cpu][ipnum], level); + qemu_set_irq(s->cpu[cpu].parent_irq[ipnum], level); } static void extioi_setirq(void *opaque, int irq, int level) @@ -96,7 +97,7 @@ static MemTxResult extioi_readw(void *opaque, hwaddr addr, uint64_t *data, index = (offset - EXTIOI_COREISR_START) >> 2; /* using attrs to get current cpu index */ cpu = attrs.requester_id; - *data = s->coreisr[cpu][index]; + *data = s->cpu[cpu].coreisr[index]; break; case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END - 1: index = (offset - EXTIOI_COREMAP_START) >> 2; @@ -129,12 +130,66 @@ static inline void extioi_enable_irq(LoongArchExtIOI *s, int index,\ } } +static inline void extioi_update_sw_coremap(LoongArchExtIOI *s, int irq, + uint64_t val, bool notify) +{ + int i, cpu; + + /* + * loongarch only support little endian, + * so we paresd the value with little endian. + */ + val = cpu_to_le64(val); + + for (i = 0; i < 4; i++) { + cpu = val & 0xff; + cpu = ctz32(cpu); + cpu = (cpu >= 4) ? 0 : cpu; + val = val >> 8; + + if (s->sw_coremap[irq + i] == cpu) { + continue; + } + + if (notify && test_bit(irq + i, (unsigned long *)s->isr)) { + /* + * lower irq at old cpu and raise irq at new cpu + */ + extioi_update_irq(s, irq + i, 0); + s->sw_coremap[irq + i] = cpu; + extioi_update_irq(s, irq + i, 1); + } else { + s->sw_coremap[irq + i] = cpu; + } + } +} + +static inline void extioi_update_sw_ipmap(LoongArchExtIOI *s, int index, + uint64_t val) +{ + int i; + uint8_t ipnum; + + /* + * loongarch only support little endian, + * so we paresd the value with little endian. + */ + val = cpu_to_le64(val); + for (i = 0; i < 4; i++) { + ipnum = val & 0xff; + ipnum = ctz32(ipnum); + ipnum = (ipnum >= 4) ? 0 : ipnum; + s->sw_ipmap[index * 4 + i] = ipnum; + val = val >> 8; + } +} + static MemTxResult extioi_writew(void *opaque, hwaddr addr, uint64_t val, unsigned size, MemTxAttrs attrs) { LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); - int i, cpu, index, old_data, irq; + int cpu, index, old_data, irq; uint32_t offset; trace_loongarch_extioi_writew(addr, val); @@ -152,20 +207,7 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr, */ index = (offset - EXTIOI_IPMAP_START) >> 2; s->ipmap[index] = val; - /* - * loongarch only support little endian, - * so we paresd the value with little endian. - */ - val = cpu_to_le64(val); - for (i = 0; i < 4; i++) { - uint8_t ipnum; - ipnum = val & 0xff; - ipnum = ctz32(ipnum); - ipnum = (ipnum >= 4) ? 0 : ipnum; - s->sw_ipmap[index * 4 + i] = ipnum; - val = val >> 8; - } - + extioi_update_sw_ipmap(s, index, val); break; case EXTIOI_ENABLE_START ... 
EXTIOI_ENABLE_END - 1: index = (offset - EXTIOI_ENABLE_START) >> 2; @@ -189,8 +231,8 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr, index = (offset - EXTIOI_COREISR_START) >> 2; /* using attrs to get current cpu index */ cpu = attrs.requester_id; - old_data = s->coreisr[cpu][index]; - s->coreisr[cpu][index] = old_data & ~val; + old_data = s->cpu[cpu].coreisr[index]; + s->cpu[cpu].coreisr[index] = old_data & ~val; /* write 1 to clear interrupt */ old_data &= val; irq = ctz32(old_data); @@ -204,33 +246,8 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr, irq = offset - EXTIOI_COREMAP_START; index = irq / 4; s->coremap[index] = val; - /* - * loongarch only support little endian, - * so we paresd the value with little endian. - */ - val = cpu_to_le64(val); - - for (i = 0; i < 4; i++) { - cpu = val & 0xff; - cpu = ctz32(cpu); - cpu = (cpu >= 4) ? 0 : cpu; - val = val >> 8; - - if (s->sw_coremap[irq + i] == cpu) { - continue; - } - - if (test_bit(irq, (unsigned long *)s->isr)) { - /* - * lower irq at old cpu and raise irq at new cpu - */ - extioi_update_irq(s, irq + i, 0); - s->sw_coremap[irq + i] = cpu; - extioi_update_irq(s, irq + i, 1); - } else { - s->sw_coremap[irq + i] = cpu; - } - } + + extioi_update_sw_coremap(s, irq, val, true); break; default: break; @@ -248,65 +265,112 @@ static const MemoryRegionOps extioi_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static const VMStateDescription vmstate_loongarch_extioi = { - .name = TYPE_LOONGARCH_EXTIOI, +static void loongarch_extioi_realize(DeviceState *dev, Error **errp) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int i, pin; + + if (s->num_cpu == 0) { + error_setg(errp, "num-cpu must be at least 1"); + return; + } + + for (i = 0; i < EXTIOI_IRQS; i++) { + sysbus_init_irq(sbd, &s->irq[i]); + } + + qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS); + memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, + s, "extioi_system_mem", 0x900); + sysbus_init_mmio(sbd, &s->extioi_system_mem); + s->cpu = g_new0(ExtIOICore, s->num_cpu); + if (s->cpu == NULL) { + error_setg(errp, "Memory allocation for ExtIOICore faile"); + return; + } + + for (i = 0; i < s->num_cpu; i++) { + for (pin = 0; pin < LS3A_INTC_IP; pin++) { + qdev_init_gpio_out(dev, &s->cpu[i].parent_irq[pin], 1); + } + } +} + +static void loongarch_extioi_finalize(Object *obj) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(obj); + + g_free(s->cpu); +} + +static int vmstate_extioi_post_load(void *opaque, int version_id) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + int i, start_irq; + + for (i = 0; i < (EXTIOI_IRQS / 4); i++) { + start_irq = i * 4; + extioi_update_sw_coremap(s, start_irq, s->coremap[i], false); + } + + for (i = 0; i < (EXTIOI_IRQS_IPMAP_SIZE / 4); i++) { + extioi_update_sw_ipmap(s, i, s->ipmap[i]); + } + + return 0; +} + +static const VMStateDescription vmstate_extioi_core = { + .name = "extioi-core", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { + VMSTATE_UINT32_ARRAY(coreisr, ExtIOICore, EXTIOI_IRQS_GROUP_COUNT), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_loongarch_extioi = { + .name = TYPE_LOONGARCH_EXTIOI, + .version_id = 2, + .minimum_version_id = 2, + .post_load = vmstate_extioi_post_load, + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOI, EXTIOI_IRQS_GROUP_COUNT), - VMSTATE_UINT32_2DARRAY(coreisr, LoongArchExtIOI, EXTIOI_CPUS, - 
EXTIOI_IRQS_GROUP_COUNT), VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOI, EXTIOI_IRQS_NODETYPE_COUNT / 2), VMSTATE_UINT32_ARRAY(enable, LoongArchExtIOI, EXTIOI_IRQS / 32), VMSTATE_UINT32_ARRAY(isr, LoongArchExtIOI, EXTIOI_IRQS / 32), VMSTATE_UINT32_ARRAY(ipmap, LoongArchExtIOI, EXTIOI_IRQS_IPMAP_SIZE / 4), VMSTATE_UINT32_ARRAY(coremap, LoongArchExtIOI, EXTIOI_IRQS / 4), - VMSTATE_UINT8_ARRAY(sw_ipmap, LoongArchExtIOI, EXTIOI_IRQS_IPMAP_SIZE), - VMSTATE_UINT8_ARRAY(sw_coremap, LoongArchExtIOI, EXTIOI_IRQS), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongArchExtIOI, num_cpu, + vmstate_extioi_core, ExtIOICore), VMSTATE_END_OF_LIST() } }; -static void loongarch_extioi_instance_init(Object *obj) -{ - SysBusDevice *dev = SYS_BUS_DEVICE(obj); - LoongArchExtIOI *s = LOONGARCH_EXTIOI(obj); - int i, cpu, pin; - - for (i = 0; i < EXTIOI_IRQS; i++) { - sysbus_init_irq(dev, &s->irq[i]); - } - - qdev_init_gpio_in(DEVICE(obj), extioi_setirq, EXTIOI_IRQS); - - for (cpu = 0; cpu < EXTIOI_CPUS; cpu++) { - memory_region_init_io(&s->extioi_iocsr_mem[cpu], OBJECT(s), &extioi_ops, - s, "extioi_iocsr", 0x900); - sysbus_init_mmio(dev, &s->extioi_iocsr_mem[cpu]); - for (pin = 0; pin < LS3A_INTC_IP; pin++) { - qdev_init_gpio_out(DEVICE(obj), &s->parent_irq[cpu][pin], 1); - } - } - memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, - s, "extioi_system_mem", 0x900); - sysbus_init_mmio(dev, &s->extioi_system_mem); -} +static Property extioi_properties[] = { + DEFINE_PROP_UINT32("num-cpu", LoongArchExtIOI, num_cpu, 1), + DEFINE_PROP_END_OF_LIST(), +}; static void loongarch_extioi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = loongarch_extioi_realize; + device_class_set_props(dc, extioi_properties); dc->vmsd = &vmstate_loongarch_extioi; } static const TypeInfo loongarch_extioi_info = { .name = TYPE_LOONGARCH_EXTIOI, .parent = TYPE_SYS_BUS_DEVICE, - .instance_init = loongarch_extioi_instance_init, .instance_size = sizeof(struct LoongArchExtIOI), .class_init = loongarch_extioi_class_init, + .instance_finalize = loongarch_extioi_finalize, }; static void loongarch_extioi_register_types(void) diff --git a/hw/intc/loongarch_ipi.c b/hw/intc/loongarch_ipi.c index 67858b521c6..a184112b092 100644 --- a/hw/intc/loongarch_ipi.c +++ b/hw/intc/loongarch_ipi.c @@ -9,6 +9,7 @@ #include "hw/sysbus.h" #include "hw/intc/loongarch_ipi.h" #include "hw/irq.h" +#include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/log.h" #include "exec/address-spaces.h" @@ -17,14 +18,16 @@ #include "target/loongarch/internals.h" #include "trace.h" -static void loongarch_ipi_writel(void *, hwaddr, uint64_t, unsigned); - -static uint64_t loongarch_ipi_readl(void *opaque, hwaddr addr, unsigned size) +static MemTxResult loongarch_ipi_readl(void *opaque, hwaddr addr, + uint64_t *data, + unsigned size, MemTxAttrs attrs) { - IPICore *s = opaque; + IPICore *s; + LoongArchIPI *ipi = opaque; uint64_t ret = 0; int index = 0; + s = &ipi->cpu[attrs.requester_id]; addr &= 0xff; switch (addr) { case CORE_STATUS_OFF: @@ -49,10 +52,12 @@ static uint64_t loongarch_ipi_readl(void *opaque, hwaddr addr, unsigned size) } trace_loongarch_ipi_read(size, (uint64_t)addr, ret); - return ret; + *data = ret; + return MEMTX_OK; } -static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr) +static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr, + MemTxAttrs attrs) { int i, mask = 0, data = 0; @@ -61,8 +66,8 @@ static void send_ipi_data(CPULoongArchState *env, 
uint64_t val, hwaddr addr) * if the mask is 0, we need not to do anything. */ if ((val >> 27) & 0xf) { - data = address_space_ldl(&env->address_space_iocsr, addr, - MEMTXATTRS_UNSPECIFIED, NULL); + data = address_space_ldl(env->address_space_iocsr, addr, + attrs, NULL); for (i = 0; i < 4; i++) { /* get mask for byte writing */ if (val & (0x1 << (27 + i))) { @@ -73,8 +78,8 @@ static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr) data &= mask; data |= (val >> 32) & ~mask; - address_space_stl(&env->address_space_iocsr, addr, - data, MEMTXATTRS_UNSPECIFIED, NULL); + address_space_stl(env->address_space_iocsr, addr, + data, attrs, NULL); } static int archid_cmp(const void *a, const void *b) @@ -103,80 +108,72 @@ static CPUState *ipi_getcpu(int arch_id) CPUArchId *archid; archid = find_cpu_by_archid(machine, arch_id); - return CPU(archid->cpu); -} - -static void ipi_send(uint64_t val) -{ - uint32_t cpuid; - uint8_t vector; - CPUState *cs; - LoongArchCPU *cpu; - LoongArchIPI *s; - - cpuid = extract32(val, 16, 10); - if (cpuid >= LOONGARCH_MAX_CPUS) { - trace_loongarch_ipi_unsupported_cpuid("IOCSR_IPI_SEND", cpuid); - return; + if (archid) { + return CPU(archid->cpu); } - /* IPI status vector */ - vector = extract8(val, 0, 5); - - cs = ipi_getcpu(cpuid); - cpu = LOONGARCH_CPU(cs); - s = LOONGARCH_IPI(cpu->env.ipistate); - loongarch_ipi_writel(&s->ipi_core, CORE_SET_OFF, BIT(vector), 4); + return NULL; } -static void mail_send(uint64_t val) +static MemTxResult mail_send(uint64_t val, MemTxAttrs attrs) { uint32_t cpuid; hwaddr addr; - CPULoongArchState *env; CPUState *cs; - LoongArchCPU *cpu; cpuid = extract32(val, 16, 10); if (cpuid >= LOONGARCH_MAX_CPUS) { trace_loongarch_ipi_unsupported_cpuid("IOCSR_MAIL_SEND", cpuid); - return; + return MEMTX_DECODE_ERROR; } - addr = 0x1020 + (val & 0x1c); cs = ipi_getcpu(cpuid); - cpu = LOONGARCH_CPU(cs); - env = &cpu->env; - send_ipi_data(env, val, addr); + if (cs == NULL) { + return MEMTX_DECODE_ERROR; + } + + /* override requester_id */ + addr = SMP_IPI_MAILBOX + CORE_BUF_20 + (val & 0x1c); + attrs.requester_id = cs->cpu_index; + send_ipi_data(&LOONGARCH_CPU(cs)->env, val, addr, attrs); + return MEMTX_OK; } -static void any_send(uint64_t val) +static MemTxResult any_send(uint64_t val, MemTxAttrs attrs) { uint32_t cpuid; hwaddr addr; - CPULoongArchState *env; CPUState *cs; - LoongArchCPU *cpu; cpuid = extract32(val, 16, 10); if (cpuid >= LOONGARCH_MAX_CPUS) { trace_loongarch_ipi_unsupported_cpuid("IOCSR_ANY_SEND", cpuid); - return; + return MEMTX_DECODE_ERROR; } - addr = val & 0xffff; cs = ipi_getcpu(cpuid); - cpu = LOONGARCH_CPU(cs); - env = &cpu->env; - send_ipi_data(env, val, addr); + if (cs == NULL) { + return MEMTX_DECODE_ERROR; + } + + /* override requester_id */ + addr = val & 0xffff; + attrs.requester_id = cs->cpu_index; + send_ipi_data(&LOONGARCH_CPU(cs)->env, val, addr, attrs); + return MEMTX_OK; } -static void loongarch_ipi_writel(void *opaque, hwaddr addr, uint64_t val, - unsigned size) +static MemTxResult loongarch_ipi_writel(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) { - IPICore *s = opaque; + LoongArchIPI *ipi = opaque; + IPICore *s; int index = 0; + uint32_t cpuid; + uint8_t vector; + CPUState *cs; + s = &ipi->cpu[attrs.requester_id]; addr &= 0xff; trace_loongarch_ipi_write(size, (uint64_t)addr, val); switch (addr) { @@ -203,17 +200,34 @@ static void loongarch_ipi_writel(void *opaque, hwaddr addr, uint64_t val, s->buf[index] = val; break; case IOCSR_IPI_SEND: - ipi_send(val); + cpuid = 
extract32(val, 16, 10); + if (cpuid >= LOONGARCH_MAX_CPUS) { + trace_loongarch_ipi_unsupported_cpuid("IOCSR_IPI_SEND", cpuid); + return MEMTX_DECODE_ERROR; + } + + /* IPI status vector */ + vector = extract8(val, 0, 5); + cs = ipi_getcpu(cpuid); + if (cs == NULL) { + return MEMTX_DECODE_ERROR; + } + + /* override requester_id */ + attrs.requester_id = cs->cpu_index; + loongarch_ipi_writel(ipi, CORE_SET_OFF, BIT(vector), 4, attrs); break; default: qemu_log_mask(LOG_UNIMP, "invalid write: %x", (uint32_t)addr); break; } + + return MEMTX_OK; } static const MemoryRegionOps loongarch_ipi_ops = { - .read = loongarch_ipi_readl, - .write = loongarch_ipi_writel, + .read_with_attrs = loongarch_ipi_readl, + .write_with_attrs = loongarch_ipi_writel, .impl.min_access_size = 4, .impl.max_access_size = 4, .valid.min_access_size = 4, @@ -222,24 +236,28 @@ static const MemoryRegionOps loongarch_ipi_ops = { }; /* mail send and any send only support writeq */ -static void loongarch_ipi_writeq(void *opaque, hwaddr addr, uint64_t val, - unsigned size) +static MemTxResult loongarch_ipi_writeq(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) { + MemTxResult ret = MEMTX_OK; + addr &= 0xfff; switch (addr) { case MAIL_SEND_OFFSET: - mail_send(val); + ret = mail_send(val, attrs); break; case ANY_SEND_OFFSET: - any_send(val); + ret = any_send(val, attrs); break; default: break; } + + return ret; } static const MemoryRegionOps loongarch_ipi64_ops = { - .write = loongarch_ipi_writeq, + .write_with_attrs = loongarch_ipi_writeq, .impl.min_access_size = 8, .impl.max_access_size = 8, .valid.min_access_size = 8, @@ -247,30 +265,46 @@ static const MemoryRegionOps loongarch_ipi64_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static void loongarch_ipi_init(Object *obj) +static void loongarch_ipi_realize(DeviceState *dev, Error **errp) { - LoongArchIPI *s = LOONGARCH_IPI(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + LoongArchIPI *s = LOONGARCH_IPI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int i; + + if (s->num_cpu == 0) { + error_setg(errp, "num-cpu must be at least 1"); + return; + } - memory_region_init_io(&s->ipi_iocsr_mem, obj, &loongarch_ipi_ops, - &s->ipi_core, "loongarch_ipi_iocsr", 0x48); + memory_region_init_io(&s->ipi_iocsr_mem, OBJECT(dev), &loongarch_ipi_ops, + s, "loongarch_ipi_iocsr", 0x48); /* loongarch_ipi_iocsr performs re-entrant IO through ipi_send */ s->ipi_iocsr_mem.disable_reentrancy_guard = true; sysbus_init_mmio(sbd, &s->ipi_iocsr_mem); - memory_region_init_io(&s->ipi64_iocsr_mem, obj, &loongarch_ipi64_ops, - &s->ipi_core, "loongarch_ipi64_iocsr", 0x118); + memory_region_init_io(&s->ipi64_iocsr_mem, OBJECT(dev), + &loongarch_ipi64_ops, + s, "loongarch_ipi64_iocsr", 0x118); sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem); - qdev_init_gpio_out(DEVICE(obj), &s->ipi_core.irq, 1); + + s->cpu = g_new0(IPICore, s->num_cpu); + if (s->cpu == NULL) { + error_setg(errp, "Memory allocation for ExtIOICore faile"); + return; + } + + for (i = 0; i < s->num_cpu; i++) { + qdev_init_gpio_out(dev, &s->cpu[i].irq, 1); + } } static const VMStateDescription vmstate_ipi_core = { .name = "ipi-single", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(status, IPICore), VMSTATE_UINT32(en, IPICore), VMSTATE_UINT32(set, IPICore), @@ -282,27 +316,42 @@ static const VMStateDescription vmstate_ipi_core = { static const VMStateDescription vmstate_loongarch_ipi = { .name = TYPE_LOONGARCH_IPI, - .version_id = 1, - 
.minimum_version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_STRUCT(ipi_core, LoongArchIPI, 0, vmstate_ipi_core, IPICore), + .version_id = 2, + .minimum_version_id = 2, + .fields = (const VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongArchIPI, num_cpu, + vmstate_ipi_core, IPICore), VMSTATE_END_OF_LIST() } }; +static Property ipi_properties[] = { + DEFINE_PROP_UINT32("num-cpu", LoongArchIPI, num_cpu, 1), + DEFINE_PROP_END_OF_LIST(), +}; + static void loongarch_ipi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = loongarch_ipi_realize; + device_class_set_props(dc, ipi_properties); dc->vmsd = &vmstate_loongarch_ipi; } +static void loongarch_ipi_finalize(Object *obj) +{ + LoongArchIPI *s = LOONGARCH_IPI(obj); + + g_free(s->cpu); +} + static const TypeInfo loongarch_ipi_info = { .name = TYPE_LOONGARCH_IPI, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(LoongArchIPI), - .instance_init = loongarch_ipi_init, .class_init = loongarch_ipi_class_init, + .instance_finalize = loongarch_ipi_finalize, }; static void loongarch_ipi_register_types(void) diff --git a/hw/intc/loongarch_pch_pic.c b/hw/intc/loongarch_pch_pic.c index 6aa4cadfa4a..2d5e65abfff 100644 --- a/hw/intc/loongarch_pch_pic.c +++ b/hw/intc/loongarch_pch_pic.c @@ -420,7 +420,7 @@ static const VMStateDescription vmstate_loongarch_pch_pic = { .name = TYPE_LOONGARCH_PCH_PIC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(int_mask, LoongArchPCHPIC), VMSTATE_UINT64(htmsi_en, LoongArchPCHPIC), VMSTATE_UINT64(intedge, LoongArchPCHPIC), diff --git a/hw/intc/m68k_irqc.c b/hw/intc/m68k_irqc.c index e09705eeaf1..4b11fb9f726 100644 --- a/hw/intc/m68k_irqc.c +++ b/hw/intc/m68k_irqc.c @@ -80,7 +80,7 @@ static const VMStateDescription vmstate_m68k_irqc = { .name = "m68k-irqc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(ipr, M68KIRQCState), VMSTATE_END_OF_LIST() } diff --git a/hw/intc/nios2_vic.c b/hw/intc/nios2_vic.c index cf63212a886..7e2d9d63276 100644 --- a/hw/intc/nios2_vic.c +++ b/hw/intc/nios2_vic.c @@ -275,7 +275,7 @@ static const VMStateDescription nios2_vic_vmstate = { .name = "nios2-vic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT32_ARRAY(int_config, Nios2VIC, 32), VMSTATE_UINT32(vic_config, Nios2VIC), VMSTATE_UINT32(int_raw_status, Nios2VIC), diff --git a/hw/intc/ompic.c b/hw/intc/ompic.c index 1f10314807d..99032ea7f73 100644 --- a/hw/intc/ompic.c +++ b/hw/intc/ompic.c @@ -137,7 +137,7 @@ static const VMStateDescription vmstate_or1k_ompic_cpu = { .name = "or1k_ompic_cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(status, OR1KOMPICCPUState), VMSTATE_UINT32(control, OR1KOMPICCPUState), VMSTATE_END_OF_LIST() @@ -148,7 +148,7 @@ static const VMStateDescription vmstate_or1k_ompic = { .name = TYPE_OR1K_OMPIC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(cpus, OR1KOMPICState, OMPIC_MAX_CPUS, 1, vmstate_or1k_ompic_cpu, OR1KOMPICCPUState), VMSTATE_UINT32(num_cpus, OR1KOMPICState), diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c index a6f91d4bcdf..9792a112240 100644 --- a/hw/intc/openpic.c +++ b/hw/intc/openpic.c @@ -1391,7 +1391,7 @@ static const VMStateDescription 
vmstate_openpic_irq_queue = { .name = "openpic_irq_queue", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size), VMSTATE_INT32(next, IRQQueue), VMSTATE_INT32(priority, IRQQueue), @@ -1403,7 +1403,7 @@ static const VMStateDescription vmstate_openpic_irqdest = { .name = "openpic_irqdest", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(ctpr, IRQDest), VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue, IRQQueue), @@ -1418,7 +1418,7 @@ static const VMStateDescription vmstate_openpic_irqsource = { .name = "openpic_irqsource", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ivpr, IRQSource), VMSTATE_UINT32(idr, IRQSource), VMSTATE_UINT32(destmask, IRQSource), @@ -1432,7 +1432,7 @@ static const VMStateDescription vmstate_openpic_timer = { .name = "openpic_timer", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tccr, OpenPICTimer), VMSTATE_UINT32(tbcr, OpenPICTimer), VMSTATE_END_OF_LIST() @@ -1443,7 +1443,7 @@ static const VMStateDescription vmstate_openpic_msi = { .name = "openpic_msi", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(msir, OpenPICMSI), VMSTATE_END_OF_LIST() } @@ -1468,7 +1468,7 @@ static const VMStateDescription vmstate_openpic = { .version_id = 3, .minimum_version_id = 3, .post_load = openpic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(gcr, OpenPICState), VMSTATE_UINT32(vir, OpenPICState), VMSTATE_UINT32(pir, OpenPICState), diff --git a/hw/intc/pl190.c b/hw/intc/pl190.c index cd88443601e..d79e5d8076f 100644 --- a/hw/intc/pl190.c +++ b/hw/intc/pl190.c @@ -258,7 +258,7 @@ static const VMStateDescription vmstate_pl190 = { .name = "pl190", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, PL190State), VMSTATE_UINT32(soft_level, PL190State), VMSTATE_UINT32(irq_enable, PL190State), diff --git a/hw/intc/ppc-uic.c b/hw/intc/ppc-uic.c index dcf5de5d43c..9a67f7f6511 100644 --- a/hw/intc/ppc-uic.c +++ b/hw/intc/ppc-uic.c @@ -269,7 +269,7 @@ static const VMStateDescription ppc_uic_vmstate = { .name = "ppc-uic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, PPCUIC), VMSTATE_UINT32(uicsr, PPCUIC), VMSTATE_UINT32(uicer, PPCUIC), diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c index ab1a0b4b3ad..e9f0536b1c6 100644 --- a/hw/intc/riscv_aclint.c +++ b/hw/intc/riscv_aclint.c @@ -321,7 +321,7 @@ static const VMStateDescription vmstate_riscv_mtimer = { .name = "riscv_mtimer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(timecmp, RISCVAclintMTimerState, num_harts, 0, vmstate_info_uint64, uint64_t), diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c index c677b5cfbb5..fc5df0d5983 100644 --- a/hw/intc/riscv_aplic.c +++ b/hw/intc/riscv_aplic.c @@ -162,7 +162,7 @@ static bool is_kvm_aia(bool msimode) static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic, uint32_t word) { - uint32_t i, irq, ret = 0; + uint32_t i, irq, sourcecfg, sm, raw_input, irq_inverted, 
ret = 0; for (i = 0; i < 32; i++) { irq = word * 32 + i; @@ -170,7 +170,20 @@ static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic, continue; } - ret |= ((aplic->state[irq] & APLIC_ISTATE_INPUT) ? 1 : 0) << i; + sourcecfg = aplic->sourcecfg[irq]; + if (sourcecfg & APLIC_SOURCECFG_D) { + continue; + } + + sm = sourcecfg & APLIC_SOURCECFG_SM_MASK; + if (sm == APLIC_SOURCECFG_SM_INACTIVE) { + continue; + } + + raw_input = (aplic->state[irq] & APLIC_ISTATE_INPUT) ? 1 : 0; + irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW || + sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0; + ret |= (raw_input ^ irq_inverted) << i; } return ret; @@ -218,13 +231,25 @@ static void riscv_aplic_set_pending(RISCVAPLICState *aplic, } sm = sourcecfg & APLIC_SOURCECFG_SM_MASK; - if ((sm == APLIC_SOURCECFG_SM_INACTIVE) || - ((!aplic->msimode || (aplic->msimode && !pending)) && - ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) || - (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))) { + if (sm == APLIC_SOURCECFG_SM_INACTIVE) { return; } + if ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) || + (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)) { + if (!aplic->msimode || (aplic->msimode && !pending)) { + return; + } + if ((aplic->state[irq] & APLIC_ISTATE_INPUT) && + (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)) { + return; + } + if (!(aplic->state[irq] & APLIC_ISTATE_INPUT) && + (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)) { + return; + } + } + riscv_aplic_set_pending_raw(aplic, irq, pending); } @@ -463,6 +488,7 @@ static uint32_t riscv_aplic_idc_claimi(RISCVAPLICState *aplic, uint32_t idc) if (!topi) { aplic->iforce[idc] = 0; + riscv_aplic_idc_update(aplic, idc); return 0; } @@ -878,7 +904,7 @@ static const VMStateDescription vmstate_riscv_aplic = { .name = "riscv_aplic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(domaincfg, RISCVAPLICState), VMSTATE_UINT32(mmsicfgaddr, RISCVAPLICState), VMSTATE_UINT32(mmsicfgaddrH, RISCVAPLICState), diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c index b31d07980c8..b90f0d731df 100644 --- a/hw/intc/riscv_imsic.c +++ b/hw/intc/riscv_imsic.c @@ -386,7 +386,7 @@ static const VMStateDescription vmstate_riscv_imsic = { .name = "riscv_imsic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState, num_pages, 0, vmstate_info_uint32, uint32_t), diff --git a/hw/intc/rx_icu.c b/hw/intc/rx_icu.c index e5c01807b9a..b2d4338f612 100644 --- a/hw/intc/rx_icu.c +++ b/hw/intc/rx_icu.c @@ -345,7 +345,7 @@ static const VMStateDescription vmstate_rxicu = { .name = "rx-icu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(ir, RXICUState, NR_IRQS), VMSTATE_UINT8_ARRAY(dtcer, RXICUState, NR_IRQS), VMSTATE_UINT8_ARRAY(ier, RXICUState, NR_IRQS / 8), diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c index 74e02858d43..f4a848460b8 100644 --- a/hw/intc/s390_flic.c +++ b/hw/intc/s390_flic.c @@ -106,7 +106,7 @@ static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id, QEMUS390FlicIO *cur, *next; uint8_t isc; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (!(flic->pending & FLIC_PENDING_IO)) { return 0; } @@ -223,7 +223,7 @@ uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic) { uint32_t tmp; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); g_assert(flic->pending & FLIC_PENDING_SERVICE); tmp = 
flic->service_param; flic->service_param = 0; @@ -238,7 +238,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6) QEMUS390FlicIO *io; uint8_t isc; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) { return NULL; } @@ -262,7 +262,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6) void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic) { - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); g_assert(flic->pending & FLIC_PENDING_MCHK_CR); flic->pending &= ~FLIC_PENDING_MCHK_CR; } @@ -271,7 +271,7 @@ static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm) { QEMUS390FLICState *flic = s390_get_qemu_flic(fs); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); /* multiplexing is good enough for sclp - kvm does it internally as well */ flic->service_param |= parm; flic->pending |= FLIC_PENDING_SERVICE; @@ -287,7 +287,7 @@ static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id, QEMUS390FLICState *flic = s390_get_qemu_flic(fs); QEMUS390FlicIO *io; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); io = g_new0(QEMUS390FlicIO, 1); io->id = subchannel_id; io->nr = subchannel_nr; @@ -304,7 +304,7 @@ static void qemu_s390_inject_crw_mchk(S390FLICState *fs) { QEMUS390FLICState *flic = s390_get_qemu_flic(fs); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); flic->pending |= FLIC_PENDING_MCHK_CR; qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR); @@ -330,7 +330,7 @@ bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic) bool qemu_s390_flic_has_any(QEMUS390FLICState *flic) { - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); return !!flic->pending; } @@ -340,7 +340,7 @@ static void qemu_s390_flic_reset(DeviceState *dev) QEMUS390FlicIO *cur, *next; int isc; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); flic->simm = 0; flic->nimm = 0; flic->pending = 0; @@ -366,7 +366,7 @@ static const VMStateDescription qemu_s390_flic_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = ais_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(simm, QEMUS390FLICState), VMSTATE_UINT8(nimm, QEMUS390FLICState), VMSTATE_END_OF_LIST() @@ -465,7 +465,7 @@ const VMStateDescription vmstate_adapter_info_so = { .version_id = 1, .minimum_version_id = 1, .needed = adapter_info_so_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(summary_offset, AdapterInfo), VMSTATE_END_OF_LIST() } @@ -475,7 +475,7 @@ const VMStateDescription vmstate_adapter_info = { .name = "s390_adapter_info", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ind_offset, AdapterInfo), /* * We do not have to migrate neither the id nor the addresses. 
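Stepping back from the individual devices: the change repeated across all of these files is purely mechanical. Field arrays become arrays of const VMStateField, and subsection lists become const pointers to const VMStateDescription. For reference, a complete minimal description written in the new const-correct style looks like the sketch below; FooState, its fields and foo_extra_needed are invented names, not part of this series.

#include "qemu/osdep.h"
#include "migration/vmstate.h"

/* Hypothetical device state, for illustration only. */
typedef struct FooState {
    uint32_t level;
    uint32_t extra;
} FooState;

static bool foo_extra_needed(void *opaque)
{
    FooState *s = opaque;

    return s->extra != 0;
}

static const VMStateDescription vmstate_foo_extra = {
    .name = "foo/extra",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = foo_extra_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(extra, FooState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_foo = {
    .name = "foo",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(level, FooState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_foo_extra,
        NULL
    }
};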
@@ -484,7 +484,7 @@ const VMStateDescription vmstate_adapter_info = { */ VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_adapter_info_so, NULL } @@ -495,7 +495,7 @@ const VMStateDescription vmstate_adapter_routes = { .name = "s390_adapter_routes", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(adapter, AdapterRoutes, 1, vmstate_adapter_info, AdapterInfo), VMSTATE_END_OF_LIST() diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c index 28364b22d65..baaa30dcb73 100644 --- a/hw/intc/s390_flic_kvm.c +++ b/hw/intc/s390_flic_kvm.c @@ -525,7 +525,7 @@ static const VMStateDescription kvm_s390_flic_ais_tmp = { .name = "s390-flic-ais-tmp", .pre_save = kvm_flic_ais_pre_save, .post_load = kvm_flic_ais_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(simm, KVMS390FLICStateMigTmp), VMSTATE_UINT8(nimm, KVMS390FLICStateMigTmp), VMSTATE_END_OF_LIST() @@ -537,7 +537,7 @@ static const VMStateDescription kvm_s390_flic_vmstate_ais = { .version_id = 1, .minimum_version_id = 1, .needed = ais_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP(KVMS390FLICState, KVMS390FLICStateMigTmp, kvm_s390_flic_ais_tmp), VMSTATE_END_OF_LIST() @@ -550,7 +550,7 @@ static const VMStateDescription kvm_s390_flic_vmstate = { .name = "s390-flic", .version_id = FLIC_SAVEVM_VERSION, .minimum_version_id = FLIC_SAVEVM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { { .name = "irqs", .info = &(const VMStateInfo) { @@ -562,7 +562,7 @@ static const VMStateDescription kvm_s390_flic_vmstate = { }, VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &kvm_s390_flic_vmstate_ais, NULL } @@ -646,9 +646,10 @@ static void kvm_s390_flic_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc); + KVMS390FLICStateClass *kfsc = KVM_S390_FLIC_CLASS(oc); - KVM_S390_FLIC_CLASS(oc)->parent_realize = dc->realize; - dc->realize = kvm_s390_flic_realize; + device_class_set_parent_realize(dc, kvm_s390_flic_realize, + &kfsc->parent_realize); dc->vmsd = &kvm_s390_flic_vmstate; dc->reset = kvm_s390_flic_reset; fsc->register_io_adapter = kvm_s390_register_io_adapter; diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c index 5522ede2cf8..e559f118052 100644 --- a/hw/intc/sifive_plic.c +++ b/hw/intc/sifive_plic.c @@ -406,7 +406,7 @@ static const VMStateDescription vmstate_sifive_plic = { .name = "riscv_sifive_plic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(source_priority, SiFivePLICState, num_sources, 0, vmstate_info_uint32, uint32_t), diff --git a/hw/intc/slavio_intctl.c b/hw/intc/slavio_intctl.c index f7e59ba6432..36b4a12f606 100644 --- a/hw/intc/slavio_intctl.c +++ b/hw/intc/slavio_intctl.c @@ -353,7 +353,7 @@ static const VMStateDescription vmstate_intctl_cpu = { .name ="slavio_intctl_cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(intreg_pending, SLAVIO_CPUINTCTLState), VMSTATE_END_OF_LIST() } @@ -364,7 +364,7 @@ static const VMStateDescription vmstate_intctl = { .version_id = 1, .minimum_version_id = 1, .post_load = vmstate_intctl_post_load, 
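The s390_flic_kvm.c hunk above also replaces the open-coded save of dc->realize with device_class_set_parent_realize(), and the same conversion appears further down for pc87312 and the VIA Super I/O. A minimal sketch of that pattern follows, with an invented BarDevice type and its BAR_DEVICE_* class macros standing in for the real classes:

#include "qemu/osdep.h"
#include "hw/qdev-core.h"   /* DeviceClass, device_class_set_parent_realize */
#include "qapi/error.h"     /* error_propagate */

/* Illustrative only: a child class chaining to its parent's realize. */
typedef struct BarDeviceClass {
    DeviceClass parent_class;
    DeviceRealize parent_realize;   /* saved parent implementation */
} BarDeviceClass;

static void bar_realize(DeviceState *dev, Error **errp)
{
    BarDeviceClass *bc = BAR_DEVICE_GET_CLASS(dev);   /* hypothetical macro */
    Error *err = NULL;

    bc->parent_realize(dev, &err);   /* run the parent's realize first */
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* ... child-specific setup goes here ... */
}

static void bar_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    BarDeviceClass *bc = BAR_DEVICE_CLASS(oc);        /* hypothetical macro */

    /*
     * One call replaces the old two-step
     * "bc->parent_realize = dc->realize; dc->realize = bar_realize;" sequence.
     */
    device_class_set_parent_realize(dc, bar_realize, &bc->parent_realize);
}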
- .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(slaves, SLAVIO_INTCTLState, MAX_CPUS, 1, vmstate_intctl_cpu, SLAVIO_CPUINTCTLState), VMSTATE_UINT32(intregm_pending, SLAVIO_INTCTLState), diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 199c261b072..d7e56bfb20e 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -522,7 +522,7 @@ static const VMStateDescription vmstate_spapr_xive_end = { .name = TYPE_SPAPR_XIVE "/end", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(w0, XiveEND), VMSTATE_UINT32(w1, XiveEND), VMSTATE_UINT32(w2, XiveEND), @@ -539,7 +539,7 @@ static const VMStateDescription vmstate_spapr_xive_eas = { .name = TYPE_SPAPR_XIVE "/eas", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT64(w, XiveEAS), VMSTATE_END_OF_LIST() }, @@ -577,7 +577,7 @@ static const VMStateDescription vmstate_spapr_xive = { .minimum_version_id = 1, .pre_save = vmstate_spapr_xive_pre_save, .post_load = NULL, /* handled at the machine level */ - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL), VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs, vmstate_spapr_xive_eas, XiveEAS), diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 36ff71f9475..1ef29d0256a 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -14,8 +14,8 @@ cpu_get_apic_base(uint64_t val) "0x%016"PRIx64 # apic.c apic_local_deliver(int vector, uint32_t lvt) "vector %d delivery mode %d" apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, uint8_t vector_num, uint8_t trigger_mode) "dest %d dest_mode %d delivery_mode %d vector %d trigger_mode %d" -apic_mem_readl(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x" -apic_mem_writel(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x" +apic_register_read(uint8_t reg, uint64_t val) "register 0x%02x = 0x%"PRIx64 +apic_register_write(uint8_t reg, uint64_t val) "register 0x%02x = 0x%"PRIx64 # ioapic.c ioapic_set_remote_irr(int n) "set remote irr for pin %d" diff --git a/hw/intc/xics.c b/hw/intc/xics.c index c77e986136e..700abfa7a62 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -40,6 +40,7 @@ #include "hw/irq.h" #include "sysemu/kvm.h" #include "sysemu/reset.h" +#include "target/ppc/cpu.h" void icp_pic_print_info(ICPState *icp, Monitor *mon) { @@ -273,7 +274,7 @@ static const VMStateDescription vmstate_icp_server = { .minimum_version_id = 1, .pre_save = icp_pre_save, .post_load = icp_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Sanity check */ VMSTATE_UINT32(xirr, ICPState), VMSTATE_UINT8(pending_priority, ICPState), @@ -665,7 +666,7 @@ static const VMStateDescription vmstate_ics_irq = { .name = "ics/irq", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(server, ICSIRQState), VMSTATE_UINT8(priority, ICSIRQState), VMSTATE_UINT8(saved_priority, ICSIRQState), @@ -681,7 +682,7 @@ static const VMStateDescription vmstate_ics = { .minimum_version_id = 1, .pre_save = ics_pre_save, .post_load = ics_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Sanity check */ VMSTATE_UINT32_EQUAL(nr_irqs, ICSState, NULL), diff --git a/hw/intc/xive.c b/hw/intc/xive.c index a3585593d8f..057b308ae92 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ 
-798,7 +798,7 @@ static const VMStateDescription vmstate_xive_tctx = { .minimum_version_id = 1, .pre_save = vmstate_xive_tctx_pre_save, .post_load = vmstate_xive_tctx_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(regs, XiveTCTX), VMSTATE_END_OF_LIST() }, @@ -1271,7 +1271,7 @@ static const VMStateDescription vmstate_xive_source = { .name = TYPE_XIVE_SOURCE, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL), VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs), VMSTATE_END_OF_LIST() diff --git a/hw/intc/xlnx-pmu-iomod-intc.c b/hw/intc/xlnx-pmu-iomod-intc.c index acaa1c3e6f7..12bd1a3fff3 100644 --- a/hw/intc/xlnx-pmu-iomod-intc.c +++ b/hw/intc/xlnx-pmu-iomod-intc.c @@ -526,7 +526,7 @@ static const VMStateDescription vmstate_xlnx_pmu_io_intc = { .name = TYPE_XLNX_PMU_IO_INTC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxPMUIOIntc, XLNXPMUIOINTC_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/intc/xlnx-zynqmp-ipi.c b/hw/intc/xlnx-zynqmp-ipi.c index adc11790141..509ee799cc2 100644 --- a/hw/intc/xlnx-zynqmp-ipi.c +++ b/hw/intc/xlnx-zynqmp-ipi.c @@ -349,7 +349,7 @@ static const VMStateDescription vmstate_zynqmp_pmu_ipi = { .name = TYPE_XLNX_ZYNQMP_IPI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPIPI, R_XLNX_ZYNQMP_IPI_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/ipack/ipack.c b/hw/ipack/ipack.c index ae20f36da68..c39dbb481f2 100644 --- a/hw/ipack/ipack.c +++ b/hw/ipack/ipack.c @@ -93,7 +93,7 @@ const VMStateDescription vmstate_ipack_device = { .name = "ipack_device", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(slot, IPackDevice), VMSTATE_END_OF_LIST() } diff --git a/hw/ipack/tpci200.c b/hw/ipack/tpci200.c index 6b3edbf0176..88eef4b8308 100644 --- a/hw/ipack/tpci200.c +++ b/hw/ipack/tpci200.c @@ -619,7 +619,7 @@ static const VMStateDescription vmstate_tpci200 = { .name = "tpci200", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, TPCI200State), VMSTATE_BOOL_ARRAY(big_endian, TPCI200State, 3), VMSTATE_UINT8_ARRAY(ctrl, TPCI200State, N_MODULES), diff --git a/hw/ipmi/ipmi_bmc_extern.c b/hw/ipmi/ipmi_bmc_extern.c index 2117dad35a9..29c5af3cc36 100644 --- a/hw/ipmi/ipmi_bmc_extern.c +++ b/hw/ipmi/ipmi_bmc_extern.c @@ -479,7 +479,7 @@ static const VMStateDescription vmstate_ipmi_bmc_extern = { .version_id = 1, .minimum_version_id = 1, .post_load = ipmi_bmc_extern_post_migrate, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(send_reset, IPMIBmcExtern), VMSTATE_BOOL(waiting_rsp, IPMIBmcExtern), VMSTATE_END_OF_LIST() diff --git a/hw/ipmi/ipmi_bmc_sim.c b/hw/ipmi/ipmi_bmc_sim.c index 905e091094b..33c839c65aa 100644 --- a/hw/ipmi/ipmi_bmc_sim.c +++ b/hw/ipmi/ipmi_bmc_sim.c @@ -2103,7 +2103,7 @@ static const VMStateDescription vmstate_ipmi_sim = { .name = TYPE_IPMI_BMC_SIMULATOR, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(bmc_global_enables, IPMIBmcSim), VMSTATE_UINT8(msg_flags, IPMIBmcSim), VMSTATE_BOOL(watchdog_initialized, IPMIBmcSim), diff --git a/hw/ipmi/ipmi_bt.c 
b/hw/ipmi/ipmi_bt.c index 22f94fb98da..583fc64730c 100644 --- a/hw/ipmi/ipmi_bt.c +++ b/hw/ipmi/ipmi_bt.c @@ -396,7 +396,7 @@ const VMStateDescription vmstate_IPMIBT = { .version_id = 1, .minimum_version_id = 1, .post_load = ipmi_bt_vmstate_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(obf_irq_set, IPMIBT), VMSTATE_BOOL(atn_irq_set, IPMIBT), VMSTATE_BOOL(irqs_enabled, IPMIBT), diff --git a/hw/ipmi/ipmi_kcs.c b/hw/ipmi/ipmi_kcs.c index a77612946a5..c15977cab4c 100644 --- a/hw/ipmi/ipmi_kcs.c +++ b/hw/ipmi/ipmi_kcs.c @@ -379,7 +379,7 @@ const VMStateDescription vmstate_IPMIKCS = { .version_id = 2, .minimum_version_id = 1, .post_load = ipmi_kcs_vmstate_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(obf_irq_set, IPMIKCS), VMSTATE_BOOL(atn_irq_set, IPMIKCS), VMSTATE_UNUSED_TEST(vmstate_kcs_before_version2, 1), /* Was use_irq */ diff --git a/hw/ipmi/isa_ipmi_bt.c b/hw/ipmi/isa_ipmi_bt.c index aec064d3cd7..7b36d514945 100644 --- a/hw/ipmi/isa_ipmi_bt.c +++ b/hw/ipmi/isa_ipmi_bt.c @@ -77,7 +77,7 @@ static const VMStateDescription vmstate_ISAIPMIBTDevice = { * because it used VMSTATE_VBUFFER_UINT32, but it did not transfer * the buffer length, so random things would happen. */ - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(bt, ISAIPMIBTDevice, 1, vmstate_IPMIBT, IPMIBT), VMSTATE_END_OF_LIST() } diff --git a/hw/ipmi/isa_ipmi_kcs.c b/hw/ipmi/isa_ipmi_kcs.c index b5dcb646166..f52b32e590b 100644 --- a/hw/ipmi/isa_ipmi_kcs.c +++ b/hw/ipmi/isa_ipmi_kcs.c @@ -76,7 +76,7 @@ static const VMStateDescription vmstate_ISAIPMIKCSDevice = { .name = TYPE_IPMI_INTERFACE, .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VSTRUCT_TEST(kcs, ISAIPMIKCSDevice, vmstate_kcs_before_version2, 0, vmstate_IPMIKCS, IPMIKCS, 1), VMSTATE_VSTRUCT_V(kcs, ISAIPMIKCSDevice, 2, vmstate_IPMIKCS, diff --git a/hw/ipmi/pci_ipmi_bt.c b/hw/ipmi/pci_ipmi_bt.c index 633931b8257..afeea6f3031 100644 --- a/hw/ipmi/pci_ipmi_bt.c +++ b/hw/ipmi/pci_ipmi_bt.c @@ -87,7 +87,7 @@ const VMStateDescription vmstate_PCIIPMIBTDevice = { .name = TYPE_IPMI_INTERFACE_PREFIX "pci-bt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCIIPMIBTDevice), VMSTATE_STRUCT(bt, PCIIPMIBTDevice, 1, vmstate_IPMIBT, IPMIBT), VMSTATE_END_OF_LIST() diff --git a/hw/ipmi/pci_ipmi_kcs.c b/hw/ipmi/pci_ipmi_kcs.c index 1a581413c26..05ba97ec58f 100644 --- a/hw/ipmi/pci_ipmi_kcs.c +++ b/hw/ipmi/pci_ipmi_kcs.c @@ -87,7 +87,7 @@ const VMStateDescription vmstate_PCIIPMIKCSDevice = { .name = TYPE_IPMI_INTERFACE_PREFIX "pci-kcs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCIIPMIKCSDevice), VMSTATE_STRUCT(kcs, PCIIPMIKCSDevice, 1, vmstate_IPMIKCS, IPMIKCS), VMSTATE_END_OF_LIST() diff --git a/hw/ipmi/smbus_ipmi.c b/hw/ipmi/smbus_ipmi.c index d0991ab7f93..56865df7dbd 100644 --- a/hw/ipmi/smbus_ipmi.c +++ b/hw/ipmi/smbus_ipmi.c @@ -299,7 +299,7 @@ static const VMStateDescription vmstate_smbus_ipmi = { .name = TYPE_SMBUS_IPMI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SMBUS_DEVICE(parent, SMBusIPMIDevice), VMSTATE_UINT8(waiting_rsp, SMBusIPMIDevice), VMSTATE_UINT32(outlen, SMBusIPMIDevice), diff --git a/hw/isa/Kconfig b/hw/isa/Kconfig index 
040a18c0709..73c6470805c 100644 --- a/hw/isa/Kconfig +++ b/hw/isa/Kconfig @@ -15,9 +15,17 @@ config I82378 config ISA_SUPERIO bool - select ISA_BUS + depends on ISA_BUS select PCKBD + select PARALLEL + select SERIAL_ISA select FDC_ISA + # Some users of ISA_SUPERIO do not use it + #select IDE_ISA + +config FDC37M81X + bool + select ISA_SUPERIO config PC87312 bool @@ -26,9 +34,6 @@ config PC87312 select I8254 select I8257 select MC146818RTC - select SERIAL_ISA - select PARALLEL - select FDC_ISA select IDE_ISA config PIIX @@ -46,11 +51,10 @@ config PIIX config VT82C686 bool + select ISA_BUS select ISA_SUPERIO select ACPI select ACPI_SMBUS - select SERIAL_ISA - select FDC_ISA select USB_UHCI select APM select I8254 @@ -58,14 +62,10 @@ config VT82C686 select I8259 select IDE_VIA select MC146818RTC - select PARALLEL config SMC37C669 bool select ISA_SUPERIO - select SERIAL_ISA - select PARALLEL - select FDC_ISA config LPC_ICH9 bool diff --git a/hw/isa/apm.c b/hw/isa/apm.c index dfe9020d30b..e34edb864cd 100644 --- a/hw/isa/apm.c +++ b/hw/isa/apm.c @@ -68,7 +68,7 @@ const VMStateDescription vmstate_apm = { .name = "APM State", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(apmc, APMState), VMSTATE_UINT8(apms, APMState), VMSTATE_END_OF_LIST() diff --git a/hw/isa/fdc37m81x-superio.c b/hw/isa/fdc37m81x-superio.c new file mode 100644 index 00000000000..55e91fbca17 --- /dev/null +++ b/hw/isa/fdc37m81x-superio.c @@ -0,0 +1,32 @@ +/* + * SMS FDC37M817 Super I/O + * + * Copyright (c) 2018 Philippe Mathieu-Daudé + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/isa/superio.h" + +static void fdc37m81x_class_init(ObjectClass *klass, void *data) +{ + ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); + + sc->serial.count = 2; /* NS16C550A */ + sc->parallel.count = 1; + sc->floppy.count = 1; /* SMSC 82077AA Compatible */ + sc->ide.count = 0; +} + +static const TypeInfo types[] = { + { + .name = TYPE_FDC37M81X_SUPERIO, + .parent = TYPE_ISA_SUPERIO, + .class_init = fdc37m81x_class_init, + }, +}; + +DEFINE_TYPES(types) diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c index 203b92c2645..cbaa152a899 100644 --- a/hw/isa/i82378.c +++ b/hw/isa/i82378.c @@ -40,7 +40,7 @@ static const VMStateDescription vmstate_i82378 = { .name = "pci-i82378", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, I82378State), VMSTATE_END_OF_LIST() }, diff --git a/hw/isa/isa-superio.c b/hw/isa/isa-superio.c index 7dbfc374da3..a8c8c58ef7f 100644 --- a/hw/isa/isa-superio.c +++ b/hw/isa/isa-superio.c @@ -116,7 +116,9 @@ static void isa_superio_realize(DeviceState *dev, Error **errp) } /* Floppy disc */ - if (!k->floppy.is_enabled || k->floppy.is_enabled(sio, 0)) { + assert(k->floppy.count <= 1); + if (k->floppy.count && + (!k->floppy.is_enabled || k->floppy.is_enabled(sio, 0))) { isa = isa_new(TYPE_ISA_FDC); d = DEVICE(isa); if (k->floppy.get_iobase) { @@ -185,30 +187,12 @@ static const TypeInfo isa_superio_type_info = { .abstract = true, .class_size = sizeof(ISASuperIOClass), .class_init = isa_superio_class_init, -}; - -/* SMS FDC37M817 Super I/O */ -static void fdc37m81x_class_init(ObjectClass *klass, void *data) -{ - ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); - - sc->serial.count = 2; /* NS16C550A */ - 
sc->parallel.count = 1; - sc->floppy.count = 1; /* SMSC 82077AA Compatible */ - sc->ide.count = 0; -} - -static const TypeInfo fdc37m81x_type_info = { - .name = TYPE_FDC37M81X_SUPERIO, - .parent = TYPE_ISA_SUPERIO, .instance_size = sizeof(ISASuperIODevice), - .class_init = fdc37m81x_class_init, }; static void isa_superio_register_types(void) { type_register_static(&isa_superio_type_info); - type_register_static(&fdc37m81x_type_info); } type_init(isa_superio_register_types) diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c index 23eba64f226..bd727b2320b 100644 --- a/hw/isa/lpc_ich9.c +++ b/hw/isa/lpc_ich9.c @@ -41,7 +41,6 @@ #include "hw/isa/apm.h" #include "hw/pci/pci.h" #include "hw/southbridge/ich9.h" -#include "hw/i386/pc.h" #include "hw/acpi/acpi.h" #include "hw/acpi/ich9.h" #include "hw/pci/pci_bus.h" @@ -739,7 +738,7 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp) isa_bus_register_input_irqs(isa_bus, lpc->gsi); - i8257_dma_init(isa_bus, 0); + i8257_dma_init(OBJECT(d), isa_bus, 0); /* RTC */ qdev_prop_set_int32(DEVICE(&lpc->rtc), "base_year", 2000); @@ -768,7 +767,7 @@ static const VMStateDescription vmstate_ich9_rst_cnt = { .version_id = 1, .minimum_version_id = 1, .needed = ich9_rst_cnt_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(rst_cnt, ICH9LPCState), VMSTATE_END_OF_LIST() } @@ -788,7 +787,7 @@ static const VMStateDescription vmstate_ich9_smi_feat = { .version_id = 1, .minimum_version_id = 1, .needed = ich9_smi_feat_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(smi_guest_features_le, ICH9LPCState, sizeof(uint64_t)), VMSTATE_UINT8(smi_features_ok, ICH9LPCState), @@ -802,7 +801,7 @@ static const VMStateDescription vmstate_ich9_lpc = { .version_id = 1, .minimum_version_id = 1, .post_load = ich9_lpc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(d, ICH9LPCState), VMSTATE_STRUCT(apm, ICH9LPCState, 0, vmstate_apm, APMState), VMSTATE_STRUCT(pm, ICH9LPCState, 0, vmstate_ich9_pm, ICH9LPCPMRegs), @@ -810,7 +809,7 @@ static const VMStateDescription vmstate_ich9_lpc = { VMSTATE_UINT32(sci_level, ICH9LPCState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_ich9_rst_cnt, &vmstate_ich9_smi_feat, NULL diff --git a/hw/isa/meson.build b/hw/isa/meson.build index 2ab99ce0c6b..32192822170 100644 --- a/hw/isa/meson.build +++ b/hw/isa/meson.build @@ -1,4 +1,5 @@ system_ss.add(when: 'CONFIG_APM', if_true: files('apm.c')) +system_ss.add(when: 'CONFIG_FDC37M81X', if_true: files('fdc37m81x-superio.c')) system_ss.add(when: 'CONFIG_I82378', if_true: files('i82378.c')) system_ss.add(when: 'CONFIG_ISA_BUS', if_true: files('isa-bus.c')) system_ss.add(when: 'CONFIG_ISA_SUPERIO', if_true: files('isa-superio.c')) diff --git a/hw/isa/pc87312.c b/hw/isa/pc87312.c index 8d7b8d3db2b..64dd17b537f 100644 --- a/hw/isa/pc87312.c +++ b/hw/isa/pc87312.c @@ -319,7 +319,7 @@ static const VMStateDescription vmstate_pc87312 = { .version_id = 1, .minimum_version_id = 1, .post_load = pc87312_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(read_id_step, PC87312State), VMSTATE_UINT8(selected_index, PC87312State), VMSTATE_UINT8_ARRAY(regs, PC87312State, 3), @@ -338,10 +338,10 @@ static void pc87312_class_init(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); - 
sc->parent_realize = dc->realize; - dc->realize = pc87312_realize; dc->reset = pc87312_reset; dc->vmsd = &vmstate_pc87312; + device_class_set_parent_realize(dc, pc87312_realize, + &sc->parent_realize); device_class_set_props(dc, pc87312_properties); sc->parallel = (ISASuperIOFuncs){ diff --git a/hw/isa/piix.c b/hw/isa/piix.c index 04ebed5b526..2d30711b178 100644 --- a/hw/isa/piix.c +++ b/hw/isa/piix.c @@ -230,7 +230,7 @@ static const VMStateDescription vmstate_piix3_rcr = { .version_id = 1, .minimum_version_id = 1, .needed = piix3_rcr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(rcr, PIIXState), VMSTATE_END_OF_LIST() } @@ -242,13 +242,13 @@ static const VMStateDescription vmstate_piix3 = { .minimum_version_id = 2, .post_load = piix_post_load, .pre_save = piix3_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PIIXState), VMSTATE_INT32_ARRAY_V(pci_irq_levels_vmstate, PIIXState, PIIX_NUM_PIRQS, 3), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_piix3_rcr, NULL } @@ -259,7 +259,7 @@ static const VMStateDescription vmstate_piix4 = { .version_id = 3, .minimum_version_id = 2, .post_load = piix4_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PIIXState), VMSTATE_UINT8_V(rcr, PIIXState, 3), VMSTATE_END_OF_LIST() @@ -336,7 +336,7 @@ static void pci_piix_realize(PCIDevice *dev, const char *uhci_type, i8254_pit_init(isa_bus, 0x40, 0, NULL); } - i8257_dma_init(isa_bus, 0); + i8257_dma_init(OBJECT(dev), isa_bus, 0); /* RTC */ qdev_prop_set_int32(DEVICE(&d->rtc), "base_year", 2000); diff --git a/hw/isa/smc37c669-superio.c b/hw/isa/smc37c669-superio.c index 18287741cb4..d2e58c9a895 100644 --- a/hw/isa/smc37c669-superio.c +++ b/hw/isa/smc37c669-superio.c @@ -14,11 +14,6 @@ /* UARTs (compatible with NS16450 or PC16550) */ -static bool is_serial_enabled(ISASuperIODevice *sio, uint8_t index) -{ - return index < 2; -} - static uint16_t get_serial_iobase(ISASuperIODevice *sio, uint8_t index) { return index ? 
0x2f8 : 0x3f8; @@ -31,11 +26,6 @@ static unsigned int get_serial_irq(ISASuperIODevice *sio, uint8_t index) /* Parallel port */ -static bool is_parallel_enabled(ISASuperIODevice *sio, uint8_t index) -{ - return index < 1; -} - static uint16_t get_parallel_iobase(ISASuperIODevice *sio, uint8_t index) { return 0x378; @@ -53,11 +43,6 @@ static unsigned int get_parallel_dma(ISASuperIODevice *sio, uint8_t index) /* Diskette controller (Software compatible with the Intel PC8477) */ -static bool is_fdc_enabled(ISASuperIODevice *sio, uint8_t index) -{ - return index < 1; -} - static uint16_t get_fdc_iobase(ISASuperIODevice *sio, uint8_t index) { return 0x3f0; @@ -79,20 +64,17 @@ static void smc37c669_class_init(ObjectClass *klass, void *data) sc->parallel = (ISASuperIOFuncs){ .count = 1, - .is_enabled = is_parallel_enabled, .get_iobase = get_parallel_iobase, .get_irq = get_parallel_irq, .get_dma = get_parallel_dma, }; sc->serial = (ISASuperIOFuncs){ .count = 2, - .is_enabled = is_serial_enabled, .get_iobase = get_serial_iobase, .get_irq = get_serial_irq, }; sc->floppy = (ISASuperIOFuncs){ .count = 1, - .is_enabled = is_fdc_enabled, .get_iobase = get_fdc_iobase, .get_irq = get_fdc_irq, .get_dma = get_fdc_dma, @@ -103,7 +85,6 @@ static void smc37c669_class_init(ObjectClass *klass, void *data) static const TypeInfo smc37c669_type_info = { .name = TYPE_SMC37C669_SUPERIO, .parent = TYPE_ISA_SUPERIO, - .instance_size = sizeof(ISASuperIODevice), .class_size = sizeof(ISASuperIOClass), .class_init = smc37c669_class_init, }; diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c index 9c2333a277d..8582ac0322e 100644 --- a/hw/isa/vt82c686.c +++ b/hw/isa/vt82c686.c @@ -15,6 +15,9 @@ #include "qemu/osdep.h" #include "hw/isa/vt82c686.h" +#include "hw/block/fdc.h" +#include "hw/char/parallel-isa.h" +#include "hw/char/serial.h" #include "hw/pci/pci.h" #include "hw/qdev-properties.h" #include "hw/ide/pci.h" @@ -82,7 +85,7 @@ static const VMStateDescription vmstate_acpi = { .version_id = 1, .minimum_version_id = 1, .post_load = vmstate_acpi_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, ViaPMState), VMSTATE_UINT16(ar.pm1.evt.sts, ViaPMState), VMSTATE_UINT16(ar.pm1.evt.en, ViaPMState), @@ -323,13 +326,24 @@ static uint64_t via_superio_cfg_read(void *opaque, hwaddr addr, unsigned size) return val; } +static void via_superio_devices_enable(ViaSuperIOState *s, uint8_t data) +{ + ISASuperIOClass *ic = ISA_SUPERIO_GET_CLASS(s); + + isa_parallel_set_enabled(s->superio.parallel[0], (data & 0x3) != 3); + for (int i = 0; i < ic->serial.count; i++) { + isa_serial_set_enabled(s->superio.serial[i], data & BIT(i + 2)); + } + isa_fdc_set_enabled(s->superio.floppy, data & BIT(4)); +} + static void via_superio_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); - sc->parent_realize = dc->realize; - dc->realize = via_superio_realize; + device_class_set_parent_realize(dc, via_superio_realize, + &sc->parent_realize); } static const TypeInfo via_superio_info = { @@ -368,7 +382,25 @@ static void vt82c686b_superio_cfg_write(void *opaque, hwaddr addr, case 0xfd ... 0xff: /* ignore write to read only registers */ return; - /* case 0xe6 ... 
0xe8: Should set base port of parallel and serial */ + case 0xe2: + data &= 0x1f; + via_superio_devices_enable(sc, data); + break; + case 0xe3: + data &= 0xfc; + isa_fdc_set_iobase(sc->superio.floppy, data << 2); + break; + case 0xe6: + isa_parallel_set_iobase(sc->superio.parallel[0], data << 2); + break; + case 0xe7: + data &= 0xfe; + isa_serial_set_iobase(sc->superio.serial[0], data << 2); + break; + case 0xe8: + data &= 0xfe; + isa_serial_set_iobase(sc->superio.serial[1], data << 2); + break; default: qemu_log_mask(LOG_UNIMP, "via_superio_cfg: unimplemented register 0x%x\n", idx); @@ -395,9 +427,14 @@ static void vt82c686b_superio_reset(DeviceState *dev) /* Device ID */ vt82c686b_superio_cfg_write(s, 0, 0xe0, 1); vt82c686b_superio_cfg_write(s, 1, 0x3c, 1); - /* Function select - all disabled */ + /* + * Function select - only serial enabled + * Fuloong 2e's rescue-yl prints to the serial console w/o enabling it. This + * suggests that the serial ports are enabled by default, so override the + * datasheet. + */ vt82c686b_superio_cfg_write(s, 0, 0xe2, 1); - vt82c686b_superio_cfg_write(s, 1, 0x03, 1); + vt82c686b_superio_cfg_write(s, 1, 0x0f, 1); /* Floppy ctrl base addr 0x3f0-7 */ vt82c686b_superio_cfg_write(s, 0, 0xe3, 1); vt82c686b_superio_cfg_write(s, 1, 0xfc, 1); @@ -465,6 +502,21 @@ static void vt8231_superio_cfg_write(void *opaque, hwaddr addr, case 0xfd: /* ignore write to read only registers */ return; + case 0xf2: + data &= 0x17; + via_superio_devices_enable(sc, data); + break; + case 0xf4: + data &= 0xfe; + isa_serial_set_iobase(sc->superio.serial[0], data << 2); + break; + case 0xf6: + isa_parallel_set_iobase(sc->superio.parallel[0], data << 2); + break; + case 0xf7: + data &= 0xfc; + isa_fdc_set_iobase(sc->superio.floppy, data << 2); + break; default: qemu_log_mask(LOG_UNIMP, "via_superio_cfg: unimplemented register 0x%x\n", idx); @@ -513,12 +565,6 @@ static void vt8231_superio_init(Object *obj) VIA_SUPERIO(obj)->io_ops = &vt8231_superio_cfg_ops; } -static uint16_t vt8231_superio_serial_iobase(ISASuperIODevice *sio, - uint8_t index) -{ - return 0x2f8; /* FIXME: This should be settable via registers f2-f4 */ -} - static void vt8231_superio_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -526,7 +572,6 @@ static void vt8231_superio_class_init(ObjectClass *klass, void *data) dc->reset = vt8231_superio_reset; sc->serial.count = 1; - sc->serial.get_iobase = vt8231_superio_serial_iobase; sc->parallel.count = 1; sc->ide.count = 0; /* emulated by via-ide */ sc->floppy.count = 1; @@ -563,7 +608,7 @@ static const VMStateDescription vmstate_via = { .name = "via-isa", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, ViaISAState), VMSTATE_END_OF_LIST() } @@ -613,7 +658,7 @@ void via_isa_set_irq(PCIDevice *d, int pin, int level) ViaISAState *s = VIA_ISA(pci_get_function_0(d)); uint8_t irq = d->config[PCI_INTERRUPT_LINE], max_irq = 15; int f = PCI_FUNC(d->devfn); - uint16_t mask = BIT(f); + uint16_t mask; switch (f) { case 0: /* PIRQ/PINT inputs */ @@ -628,6 +673,7 @@ void via_isa_set_irq(PCIDevice *d, int pin, int level) } /* Keep track of the state of all sources */ + mask = BIT(f); if (level) { s->irq_state[0] |= mask; } else { @@ -686,7 +732,7 @@ static void via_isa_realize(PCIDevice *d, Error **errp) s->isa_irqs_in = i8259_init(isa_bus, *isa_irq); isa_bus_register_input_irqs(isa_bus, s->isa_irqs_in); i8254_pit_init(isa_bus, 0x40, 0, NULL); - i8257_dma_init(isa_bus, 
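/*
 * The VT82C686B/VT8231 configuration registers handled above store device
 * I/O bases shifted right by two bits, which is why each handler programs
 * "data << 2".  A standalone illustration of that encoding;
 * via_superio_iobase() is just a name for this sketch, not a helper in the
 * tree.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t via_superio_iobase(uint8_t cfg)
{
    return (uint16_t)cfg << 2;
}

int main(void)
{
    assert(via_superio_iobase(0xfe) == 0x3f8);  /* 0x3f8 (COM1) encodes as 0xfe */
    assert(via_superio_iobase(0xfc) == 0x3f0);  /* 0x3f0 (FDC) encodes as 0xfc  */
    assert(via_superio_iobase(0xde) == 0x378);  /* 0x378 (LPT1) encodes as 0xde */
    return 0;
}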
0); + i8257_dma_init(OBJECT(d), isa_bus, 0); /* RTC */ qdev_prop_set_int32(DEVICE(&s->rtc), "base_year", 2000); diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c index ae292fc5432..e5ab1080af8 100644 --- a/hw/loongarch/acpi-build.c +++ b/hw/loongarch/acpi-build.c @@ -314,16 +314,39 @@ static void build_pci_device_aml(Aml *scope, LoongArchMachineState *lams) static void build_flash_aml(Aml *scope, LoongArchMachineState *lams) { Aml *dev, *crs; + MemoryRegion *flash_mem; - hwaddr flash_base = VIRT_FLASH_BASE; - hwaddr flash_size = VIRT_FLASH_SIZE; + hwaddr flash0_base; + hwaddr flash0_size; + + hwaddr flash1_base; + hwaddr flash1_size; + + flash_mem = pflash_cfi01_get_memory(lams->flash[0]); + flash0_base = flash_mem->addr; + flash0_size = memory_region_size(flash_mem); + + flash_mem = pflash_cfi01_get_memory(lams->flash[1]); + flash1_base = flash_mem->addr; + flash1_size = memory_region_size(flash_mem); dev = aml_device("FLS0"); aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015"))); aml_append(dev, aml_name_decl("_UID", aml_int(0))); crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(flash_base, flash_size, AML_READ_WRITE)); + aml_append(crs, aml_memory32_fixed(flash0_base, flash0_size, + AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + + dev = aml_device("FLS1"); + aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015"))); + aml_append(dev, aml_name_decl("_UID", aml_int(1))); + + crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(flash1_base, flash1_size, + AML_READ_WRITE)); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); } @@ -509,7 +532,7 @@ static void acpi_build(AcpiBuildTables *tables, MachineState *machine) " migration may not work", tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2); error_printf("Try removing CPUs, NUMA nodes, memory slots" - " or PCI bridges."); + " or PCI bridges.\n"); } acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE); @@ -564,7 +587,7 @@ static const VMStateDescription vmstate_acpi_build = { .name = "acpi_build", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(patched, AcpiBuildState), VMSTATE_END_OF_LIST() }, diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c index 4b7dc67a2d7..441d764843d 100644 --- a/hw/loongarch/virt.c +++ b/hw/loongarch/virt.c @@ -54,7 +54,9 @@ struct loaderparams { const char *initrd_filename; }; -static void virt_flash_create(LoongArchMachineState *lams) +static PFlashCFI01 *virt_flash_create1(LoongArchMachineState *lams, + const char *name, + const char *alias_prop_name) { DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01); @@ -66,45 +68,78 @@ static void virt_flash_create(LoongArchMachineState *lams) qdev_prop_set_uint16(dev, "id1", 0x18); qdev_prop_set_uint16(dev, "id2", 0x00); qdev_prop_set_uint16(dev, "id3", 0x00); - qdev_prop_set_string(dev, "name", "virt.flash"); - object_property_add_child(OBJECT(lams), "virt.flash", OBJECT(dev)); - object_property_add_alias(OBJECT(lams), "pflash", + qdev_prop_set_string(dev, "name", name); + object_property_add_child(OBJECT(lams), name, OBJECT(dev)); + object_property_add_alias(OBJECT(lams), alias_prop_name, OBJECT(dev), "drive"); + return PFLASH_CFI01(dev); +} - lams->flash = PFLASH_CFI01(dev); +static void virt_flash_create(LoongArchMachineState *lams) +{ + lams->flash[0] = virt_flash_create1(lams, "virt.flash0", "pflash0"); + lams->flash[1] = virt_flash_create1(lams, 
"virt.flash1", "pflash1"); } -static void virt_flash_map(LoongArchMachineState *lams, - MemoryRegion *sysmem) +static void virt_flash_map1(PFlashCFI01 *flash, + hwaddr base, hwaddr size, + MemoryRegion *sysmem) { - PFlashCFI01 *flash = lams->flash; DeviceState *dev = DEVICE(flash); - hwaddr base = VIRT_FLASH_BASE; - hwaddr size = VIRT_FLASH_SIZE; + BlockBackend *blk; + hwaddr real_size = size; - assert(QEMU_IS_ALIGNED(size, VIRT_FLASH_SECTOR_SIZE)); - assert(size / VIRT_FLASH_SECTOR_SIZE <= UINT32_MAX); + blk = pflash_cfi01_get_blk(flash); + if (blk) { + real_size = blk_getlength(blk); + assert(real_size && real_size <= size); + } - qdev_prop_set_uint32(dev, "num-blocks", size / VIRT_FLASH_SECTOR_SIZE); + assert(QEMU_IS_ALIGNED(real_size, VIRT_FLASH_SECTOR_SIZE)); + assert(real_size / VIRT_FLASH_SECTOR_SIZE <= UINT32_MAX); + + qdev_prop_set_uint32(dev, "num-blocks", real_size / VIRT_FLASH_SECTOR_SIZE); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); memory_region_add_subregion(sysmem, base, sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0)); +} + +static void virt_flash_map(LoongArchMachineState *lams, + MemoryRegion *sysmem) +{ + PFlashCFI01 *flash0 = lams->flash[0]; + PFlashCFI01 *flash1 = lams->flash[1]; + virt_flash_map1(flash0, VIRT_FLASH0_BASE, VIRT_FLASH0_SIZE, sysmem); + virt_flash_map1(flash1, VIRT_FLASH1_BASE, VIRT_FLASH1_SIZE, sysmem); } static void fdt_add_flash_node(LoongArchMachineState *lams) { MachineState *ms = MACHINE(lams); char *nodename; + MemoryRegion *flash_mem; - hwaddr flash_base = VIRT_FLASH_BASE; - hwaddr flash_size = VIRT_FLASH_SIZE; + hwaddr flash0_base; + hwaddr flash0_size; - nodename = g_strdup_printf("/flash@%" PRIx64, flash_base); + hwaddr flash1_base; + hwaddr flash1_size; + + flash_mem = pflash_cfi01_get_memory(lams->flash[0]); + flash0_base = flash_mem->addr; + flash0_size = memory_region_size(flash_mem); + + flash_mem = pflash_cfi01_get_memory(lams->flash[1]); + flash1_base = flash_mem->addr; + flash1_size = memory_region_size(flash_mem); + + nodename = g_strdup_printf("/flash@%" PRIx64, flash0_base); qemu_fdt_add_subnode(ms->fdt, nodename); qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash"); qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", - 2, flash_base, 2, flash_size); + 2, flash0_base, 2, flash0_size, + 2, flash1_base, 2, flash1_size); qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4); g_free(nodename); } @@ -320,10 +355,11 @@ static void virt_build_smbios(LoongArchMachineState *lams) return; } - smbios_set_defaults("QEMU", product, mc->name, false, - true, SMBIOS_ENTRY_POINT_TYPE_64); + smbios_set_defaults("QEMU", product, mc->name, true); - smbios_get_tables(ms, NULL, 0, &smbios_tables, &smbios_tables_len, + smbios_get_tables(ms, SMBIOS_ENTRY_POINT_TYPE_64, + NULL, 0, + &smbios_tables, &smbios_tables_len, &smbios_anchor, &smbios_anchor_len, &error_fatal); if (smbios_anchor) { @@ -504,9 +540,7 @@ static void loongarch_devices_init(DeviceState *pch_pic, LoongArchMachineState * fdt_add_uart_node(lams); /* Network init */ - for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); - } + pci_init_nic_devices(pci_bus, mc->default_nic); /* * There are some invalid guest memory access. 
@@ -535,9 +569,6 @@ static void loongarch_irq_init(LoongArchMachineState *lams) CPUState *cpu_state; int cpu, pin, i, start, num; - extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); - sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); - /* * The connection of interrupts: * +-----+ +---------+ +-------+ @@ -559,41 +590,42 @@ static void loongarch_irq_init(LoongArchMachineState *lams) * | UARTs | | Devices | | Devices | * +--------+ +---------+ +---------+ */ + + /* Create IPI device */ + ipi = qdev_new(TYPE_LOONGARCH_IPI); + qdev_prop_set_uint32(ipi, "num-cpu", ms->smp.cpus); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); + + /* IPI iocsr memory region */ + memory_region_add_subregion(&lams->system_iocsr, SMP_IPI_MAILBOX, + sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 0)); + memory_region_add_subregion(&lams->system_iocsr, MAIL_SEND_ADDR, + sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 1)); + for (cpu = 0; cpu < ms->smp.cpus; cpu++) { cpu_state = qemu_get_cpu(cpu); cpudev = DEVICE(cpu_state); lacpu = LOONGARCH_CPU(cpu_state); env = &(lacpu->env); - - ipi = qdev_new(TYPE_LOONGARCH_IPI); - sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); + env->address_space_iocsr = &lams->as_iocsr; /* connect ipi irq to cpu irq */ - qdev_connect_gpio_out(ipi, 0, qdev_get_gpio_in(cpudev, IRQ_IPI)); - /* IPI iocsr memory region */ - memory_region_add_subregion(&env->system_iocsr, SMP_IPI_MAILBOX, - sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), - 0)); - memory_region_add_subregion(&env->system_iocsr, MAIL_SEND_ADDR, - sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), - 1)); - /* - * extioi iocsr memory region - * only one extioi is added on loongarch virt machine - * external device interrupt can only be routed to cpu 0-3 - */ - if (cpu < EXTIOI_CPUS) - memory_region_add_subregion(&env->system_iocsr, APIC_BASE, - sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), - cpu)); + qdev_connect_gpio_out(ipi, cpu, qdev_get_gpio_in(cpudev, IRQ_IPI)); env->ipistate = ipi; } + /* Create EXTIOI device */ + extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); + qdev_prop_set_uint32(extioi, "num-cpu", ms->smp.cpus); + sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); + memory_region_add_subregion(&lams->system_iocsr, APIC_BASE, + sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 0)); + /* * connect ext irq to the cpu irq * cpu_pin[9:2] <= intc_pin[7:0] */ - for (cpu = 0; cpu < MIN(ms->smp.cpus, EXTIOI_CPUS); cpu++) { + for (cpu = 0; cpu < ms->smp.cpus; cpu++) { cpudev = DEVICE(qemu_get_cpu(cpu)); for (pin = 0; pin < LS3A_INTC_IP; pin++) { qdev_connect_gpio_out(extioi, (cpu * 8 + pin), @@ -641,12 +673,32 @@ static void loongarch_firmware_init(LoongArchMachineState *lams) { char *filename = MACHINE(lams)->firmware; char *bios_name = NULL; - int bios_size; + int bios_size, i; + BlockBackend *pflash_blk0; + MemoryRegion *mr; lams->bios_loaded = false; + /* Map legacy -drive if=pflash to machine properties */ + for (i = 0; i < ARRAY_SIZE(lams->flash); i++) { + pflash_cfi01_legacy_drive(lams->flash[i], + drive_get(IF_PFLASH, 0, i)); + } + virt_flash_map(lams, get_system_memory()); + pflash_blk0 = pflash_cfi01_get_blk(lams->flash[0]); + + if (pflash_blk0) { + if (filename) { + error_report("cannot use both '-bios' and '-drive if=pflash'" + "options at once"); + exit(1); + } + lams->bios_loaded = true; + return; + } + if (filename) { bios_name = qemu_find_file(QEMU_FILE_TYPE_BIOS, filename); if (!bios_name) { @@ -654,21 +706,15 @@ static void loongarch_firmware_init(LoongArchMachineState *lams) exit(1); } - 
bios_size = load_image_targphys(bios_name, VIRT_BIOS_BASE, VIRT_BIOS_SIZE); + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(lams->flash[0]), 0); + bios_size = load_image_mr(bios_name, mr); if (bios_size < 0) { error_report("Could not load ROM image '%s'", bios_name); exit(1); } - g_free(bios_name); - - memory_region_init_ram(&lams->bios, NULL, "loongarch.bios", - VIRT_BIOS_SIZE, &error_fatal); - memory_region_set_readonly(&lams->bios, true); - memory_region_add_subregion(get_system_memory(), VIRT_BIOS_BASE, &lams->bios); lams->bios_loaded = true; } - } static void reset_load_elf(void *opaque) @@ -733,6 +779,43 @@ static void loongarch_direct_kernel_boot(LoongArchMachineState *lams, } } +static void loongarch_qemu_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ +} + +static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size) +{ + switch (addr) { + case VERSION_REG: + return 0x11ULL; + case FEATURE_REG: + return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI | + 1ULL << IOCSRF_CSRIPI; + case VENDOR_REG: + return 0x6e6f73676e6f6f4cULL; /* "Loongson" */ + case CPUNAME_REG: + return 0x303030354133ULL; /* "3A5000" */ + case MISC_FUNC_REG: + return 1ULL << IOCSRM_EXTIOI_EN; + } + return 0ULL; +} + +static const MemoryRegionOps loongarch_qemu_ops = { + .read = loongarch_qemu_read, + .write = loongarch_qemu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + static void loongarch_init(MachineState *machine) { LoongArchCPU *lacpu; @@ -761,13 +844,22 @@ static void loongarch_init(MachineState *machine) exit(1); } create_fdt(lams); - /* Init CPUs */ + /* Create IOCSR space */ + memory_region_init_io(&lams->system_iocsr, OBJECT(machine), NULL, + machine, "iocsr", UINT64_MAX); + address_space_init(&lams->as_iocsr, &lams->system_iocsr, "IOCSR"); + memory_region_init_io(&lams->iocsr_mem, OBJECT(machine), + &loongarch_qemu_ops, + machine, "iocsr_misc", 0x428); + memory_region_add_subregion(&lams->system_iocsr, 0, &lams->iocsr_mem); + + /* Init CPUs */ possible_cpus = mc->possible_cpu_arch_ids(machine); for (i = 0; i < possible_cpus->len; i++) { cpu = cpu_create(machine->cpu_type); cpu->cpu_index = i; - machine->possible_cpus->cpus[i].cpu = OBJECT(cpu); + machine->possible_cpus->cpus[i].cpu = cpu; lacpu = LOONGARCH_CPU(cpu); lacpu->phy_id = machine->possible_cpus->cpus[i].arch_id; } diff --git a/hw/m68k/mcf5206.c b/hw/m68k/mcf5206.c index a46a23538d9..183fd3cc085 100644 --- a/hw/m68k/mcf5206.c +++ b/hw/m68k/mcf5206.c @@ -148,15 +148,11 @@ static void m5206_timer_write(m5206_timer_state *s, uint32_t addr, uint32_t val) m5206_timer_update(s); } -static m5206_timer_state *m5206_timer_init(qemu_irq irq) +static void m5206_timer_init(m5206_timer_state *s, qemu_irq irq) { - m5206_timer_state *s; - - s = g_new0(m5206_timer_state, 1); s->timer = ptimer_init(m5206_timer_trigger, s, PTIMER_POLICY_LEGACY); s->irq = irq; m5206_timer_reset(s); - return s; } /* System Integration Module. 
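/*
 * The magic constants returned by loongarch_qemu_read() above are ASCII
 * strings packed into little-endian 64-bit words.  A standalone check; it
 * assumes a little-endian host so that memcpy() preserves the byte order.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    const uint64_t vendor  = 0x6e6f73676e6f6f4cULL;  /* VENDOR_REG  */
    const uint64_t cpuname = 0x303030354133ULL;      /* CPUNAME_REG */
    char buf[9] = { 0 };

    memcpy(buf, &vendor, sizeof(vendor));
    assert(strcmp(buf, "Loongson") == 0);

    memset(buf, 0, sizeof(buf));
    memcpy(buf, &cpuname, sizeof(cpuname));
    assert(strcmp(buf, "3A5000") == 0);
    return 0;
}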
*/ @@ -167,7 +163,7 @@ typedef struct { M68kCPU *cpu; MemoryRegion iomem; qemu_irq *pic; - m5206_timer_state *timer[2]; + m5206_timer_state timer[2]; DeviceState *uart[2]; uint8_t scr; uint8_t icr[14]; @@ -293,9 +289,9 @@ static uint64_t m5206_mbar_read(m5206_mbar_state *s, uint16_t offset, unsigned size) { if (offset >= 0x100 && offset < 0x120) { - return m5206_timer_read(s->timer[0], offset - 0x100); + return m5206_timer_read(&s->timer[0], offset - 0x100); } else if (offset >= 0x120 && offset < 0x140) { - return m5206_timer_read(s->timer[1], offset - 0x120); + return m5206_timer_read(&s->timer[1], offset - 0x120); } else if (offset >= 0x140 && offset < 0x160) { return mcf_uart_read(s->uart[0], offset - 0x140, size); } else if (offset >= 0x180 && offset < 0x1a0) { @@ -333,10 +329,10 @@ static void m5206_mbar_write(m5206_mbar_state *s, uint16_t offset, uint64_t value, unsigned size) { if (offset >= 0x100 && offset < 0x120) { - m5206_timer_write(s->timer[0], offset - 0x100, value); + m5206_timer_write(&s->timer[0], offset - 0x100, value); return; } else if (offset >= 0x120 && offset < 0x140) { - m5206_timer_write(s->timer[1], offset - 0x120, value); + m5206_timer_write(&s->timer[1], offset - 0x120, value); return; } else if (offset >= 0x140 && offset < 0x160) { mcf_uart_write(s->uart[0], offset - 0x140, value, size); @@ -598,8 +594,8 @@ static void mcf5206_mbar_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); s->pic = qemu_allocate_irqs(m5206_mbar_set_irq, s, 14); - s->timer[0] = m5206_timer_init(s->pic[9]); - s->timer[1] = m5206_timer_init(s->pic[10]); + m5206_timer_init(&s->timer[0], s->pic[9]); + m5206_timer_init(&s->timer[1], s->pic[10]); s->uart[0] = mcf_uart_create(s->pic[12], serial_hd(0)); s->uart[1] = mcf_uart_create(s->pic[13], serial_hd(1)); } diff --git a/hw/m68k/mcf5208.c b/hw/m68k/mcf5208.c index d22d8536db1..ec14096aa43 100644 --- a/hw/m68k/mcf5208.c +++ b/hw/m68k/mcf5208.c @@ -40,6 +40,8 @@ #define PCSR_PRE_SHIFT 8 #define PCSR_PRE_MASK 0x0f00 +#define RCR_SOFTRST 0x80 + typedef struct { MemoryRegion iomem; qemu_irq irq; @@ -185,12 +187,50 @@ static const MemoryRegionOps m5208_sys_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -static void mcf5208_sys_init(MemoryRegion *address_space, qemu_irq *pic) +static uint64_t m5208_rcm_read(void *opaque, hwaddr addr, + unsigned size) +{ + return 0; +} + +static void m5208_rcm_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + M68kCPU *cpu = opaque; + CPUState *cs = CPU(cpu); + switch (addr) { + case 0x0: /* RCR */ + if (value & RCR_SOFTRST) { + cpu_reset(cs); + cpu->env.aregs[7] = ldl_phys(cs->as, 0); + cpu->env.pc = ldl_phys(cs->as, 4); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, addr); + break; + } +} + +static const MemoryRegionOps m5208_rcm_ops = { + .read = m5208_rcm_read, + .write = m5208_rcm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void mcf5208_sys_init(MemoryRegion *address_space, qemu_irq *pic, + M68kCPU *cpu) { MemoryRegion *iomem = g_new(MemoryRegion, 1); + MemoryRegion *iomem_rcm = g_new(MemoryRegion, 1); m5208_timer_state *s; int i; + /* RCM */ + memory_region_init_io(iomem_rcm, NULL, &m5208_rcm_ops, cpu, + "m5208-rcm", 0x00000080); + memory_region_add_subregion(address_space, 0xfc0a0000, iomem_rcm); /* SDRAMC. 
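/*
 * The new m5208 RCM soft-reset path above reloads the initial stack pointer
 * and program counter from the first two longwords of guest memory, matching
 * what a ColdFire hard reset does.  A standalone sketch of the same fetch
 * from a big-endian vector-table image; coldfire_initial_sp_pc() is only an
 * illustrative name.
 */
#include <stdint.h>

static void coldfire_initial_sp_pc(const uint8_t *vectors,
                                   uint32_t *sp, uint32_t *pc)
{
    /* the vector table holds big-endian longwords: SSP at 0, initial PC at 4 */
    *sp = (uint32_t)vectors[0] << 24 | (uint32_t)vectors[1] << 16 |
          (uint32_t)vectors[2] << 8  | (uint32_t)vectors[3];
    *pc = (uint32_t)vectors[4] << 24 | (uint32_t)vectors[5] << 16 |
          (uint32_t)vectors[6] << 8  | (uint32_t)vectors[7];
}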
*/ memory_region_init_io(iomem, NULL, &m5208_sys_ops, NULL, "m5208-sys", 0x00004000); memory_region_add_subregion(address_space, 0xfc0a8000, iomem); @@ -206,16 +246,16 @@ static void mcf5208_sys_init(MemoryRegion *address_space, qemu_irq *pic) } } -static void mcf_fec_init(MemoryRegion *sysmem, NICInfo *nd, hwaddr base, - qemu_irq *irqs) +static void mcf_fec_init(MemoryRegion *sysmem, hwaddr base, qemu_irq *irqs) { DeviceState *dev; SysBusDevice *s; int i; - qemu_check_nic_model(nd, TYPE_MCF_FEC_NET); - dev = qdev_new(TYPE_MCF_FEC_NET); - qdev_set_nic_properties(dev, nd); + dev = qemu_create_nic_device(TYPE_MCF_FEC_NET, true, NULL); + if (!dev) { + return; + } s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); @@ -265,16 +305,9 @@ static void mcf5208evb_init(MachineState *machine) mcf_uart_create_mmap(0xfc064000, pic[27], serial_hd(1)); mcf_uart_create_mmap(0xfc068000, pic[28], serial_hd(2)); - mcf5208_sys_init(address_space_mem, pic); + mcf5208_sys_init(address_space_mem, pic, cpu); - if (nb_nics > 1) { - error_report("Too many NICs"); - exit(1); - } - if (nd_table[0].used) { - mcf_fec_init(address_space_mem, &nd_table[0], - 0xfc030000, pic + 36); - } + mcf_fec_init(address_space_mem, 0xfc030000, pic + 36); g_free(pic); diff --git a/hw/m68k/next-cube.c b/hw/m68k/next-cube.c index fabd861941c..9f6f90d68b4 100644 --- a/hw/m68k/next-cube.c +++ b/hw/m68k/next-cube.c @@ -62,6 +62,7 @@ typedef struct next_dma { } next_dma; typedef struct NextRtc { + int8_t phase; uint8_t ram[32]; uint8_t command; uint8_t value; @@ -73,6 +74,12 @@ typedef struct NextRtc { struct NeXTState { MachineState parent; + MemoryRegion rom; + MemoryRegion rom2; + MemoryRegion dmamem; + MemoryRegion bmapm1; + MemoryRegion bmapm2; + next_dma dma[10]; }; @@ -90,8 +97,10 @@ struct NeXTPC { uint32_t scr1; uint32_t scr2; + uint32_t old_scr2; uint32_t int_mask; uint32_t int_status; + uint32_t led; uint8_t scsi_csr_1; uint8_t scsi_csr_2; @@ -121,49 +130,46 @@ static const uint8_t rtc_ram2[32] = { #define SCR2_RTDATA 0x4 #define SCR2_TOBCD(x) (((x / 10) << 4) + (x % 10)) -static void nextscr2_write(NeXTPC *s, uint32_t val, int size) +static void next_scr2_led_update(NeXTPC *s) { - static int led; - static int phase; - static uint8_t old_scr2; - uint8_t scr2_2; - NextRtc *rtc = &s->rtc; - - if (size == 4) { - scr2_2 = (val >> 8) & 0xFF; - } else { - scr2_2 = val & 0xFF; - } - - if (val & 0x1) { + if (s->scr2 & 0x1) { DPRINTF("fault!\n"); - led++; - if (led == 10) { + s->led++; + if (s->led == 10) { DPRINTF("LED flashing, possible fault!\n"); - led = 0; + s->led = 0; } } +} + +static void next_scr2_rtc_update(NeXTPC *s) +{ + uint8_t old_scr2, scr2_2; + NextRtc *rtc = &s->rtc; + + old_scr2 = extract32(s->old_scr2, 8, 8); + scr2_2 = extract32(s->scr2, 8, 8); if (scr2_2 & 0x1) { - /* DPRINTF("RTC %x phase %i\n", scr2_2, phase); */ - if (phase == -1) { - phase = 0; + /* DPRINTF("RTC %x phase %i\n", scr2_2, rtc->phase); */ + if (rtc->phase == -1) { + rtc->phase = 0; } /* If we are in going down clock... do something */ if (((old_scr2 & SCR2_RTCLK) != (scr2_2 & SCR2_RTCLK)) && ((scr2_2 & SCR2_RTCLK) == 0)) { - if (phase < 8) { + if (rtc->phase < 8) { rtc->command = (rtc->command << 1) | ((scr2_2 & SCR2_RTDATA) ? 1 : 0); } - if (phase >= 8 && phase < 16) { + if (rtc->phase >= 8 && rtc->phase < 16) { rtc->value = (rtc->value << 1) | ((scr2_2 & SCR2_RTDATA) ? 
1 : 0); /* if we read RAM register, output RT_DATA bit */ if (rtc->command <= 0x1F) { scr2_2 = scr2_2 & (~SCR2_RTDATA); - if (rtc->ram[rtc->command] & (0x80 >> (phase - 8))) { + if (rtc->ram[rtc->command] & (0x80 >> (rtc->phase - 8))) { scr2_2 |= SCR2_RTDATA; } @@ -174,7 +180,7 @@ static void nextscr2_write(NeXTPC *s, uint32_t val, int size) if (rtc->command == 0x30) { scr2_2 = scr2_2 & (~SCR2_RTDATA); /* for now status = 0x98 (new rtc + FTU) */ - if (rtc->status & (0x80 >> (phase - 8))) { + if (rtc->status & (0x80 >> (rtc->phase - 8))) { scr2_2 |= SCR2_RTDATA; } @@ -184,7 +190,7 @@ static void nextscr2_write(NeXTPC *s, uint32_t val, int size) /* read the status 0x31 */ if (rtc->command == 0x31) { scr2_2 = scr2_2 & (~SCR2_RTDATA); - if (rtc->control & (0x80 >> (phase - 8))) { + if (rtc->control & (0x80 >> (rtc->phase - 8))) { scr2_2 |= SCR2_RTDATA; } rtc->retval = (rtc->retval << 1) | @@ -220,7 +226,7 @@ static void nextscr2_write(NeXTPC *s, uint32_t val, int size) } - if (ret & (0x80 >> (phase - 8))) { + if (ret & (0x80 >> (rtc->phase - 8))) { scr2_2 |= SCR2_RTDATA; } rtc->retval = (rtc->retval << 1) | @@ -229,8 +235,8 @@ static void nextscr2_write(NeXTPC *s, uint32_t val, int size) } - phase++; - if (phase == 16) { + rtc->phase++; + if (rtc->phase == 16) { if (rtc->command >= 0x80 && rtc->command <= 0x9F) { rtc->ram[rtc->command - 0x80] = rtc->value; } @@ -246,233 +252,172 @@ static void nextscr2_write(NeXTPC *s, uint32_t val, int size) } } else { /* else end or abort */ - phase = -1; + rtc->phase = -1; rtc->command = 0; rtc->value = 0; } - s->scr2 = val & 0xFFFF00FF; - s->scr2 |= scr2_2 << 8; - old_scr2 = scr2_2; -} -static uint32_t mmio_readb(NeXTPC *s, hwaddr addr) -{ - switch (addr) { - case 0xc000: - return (s->scr1 >> 24) & 0xFF; - case 0xc001: - return (s->scr1 >> 16) & 0xFF; - case 0xc002: - return (s->scr1 >> 8) & 0xFF; - case 0xc003: - return (s->scr1 >> 0) & 0xFF; - - case 0xd000: - return (s->scr2 >> 24) & 0xFF; - case 0xd001: - return (s->scr2 >> 16) & 0xFF; - case 0xd002: - return (s->scr2 >> 8) & 0xFF; - case 0xd003: - return (s->scr2 >> 0) & 0xFF; - case 0x14020: - DPRINTF("MMIO Read 0x4020\n"); - return 0x7f; - - default: - DPRINTF("MMIO Read B @ %"HWADDR_PRIx"\n", addr); - return 0x0; - } + s->scr2 = deposit32(s->scr2, 8, 8, scr2_2); } -static uint32_t mmio_readw(NeXTPC *s, hwaddr addr) +static uint64_t next_mmio_read(void *opaque, hwaddr addr, unsigned size) { - switch (addr) { - default: - DPRINTF("MMIO Read W @ %"HWADDR_PRIx"\n", addr); - return 0x0; - } -} + NeXTPC *s = NEXT_PC(opaque); + uint64_t val; -static uint32_t mmio_readl(NeXTPC *s, hwaddr addr) -{ switch (addr) { case 0x7000: /* DPRINTF("Read INT status: %x\n", s->int_status); */ - return s->int_status; + val = s->int_status; + break; case 0x7800: DPRINTF("MMIO Read INT mask: %x\n", s->int_mask); - return s->int_mask; - - case 0xc000: - return s->scr1; + val = s->int_mask; + break; - case 0xd000: - return s->scr2; + case 0xc000 ... 0xc003: + val = extract32(s->scr1, (4 - (addr - 0xc000) - size) << 3, + size << 3); + break; - default: - DPRINTF("MMIO Read L @ %"HWADDR_PRIx"\n", addr); - return 0x0; - } -} + case 0xd000 ... 
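/*
 * The RTC attached to SCR2 above is bit-banged: on every falling edge of
 * SCR2_RTCLK one bit is shifted in, phases 0-7 accumulate the command byte
 * and phases 8-15 the data byte.  A standalone sketch of that bookkeeping;
 * RtcShift and rtc_shift_bit() are illustrative names only.
 */
#include <assert.h>
#include <stdint.h>

typedef struct {
    int phase;
    uint8_t command;
    uint8_t value;
} RtcShift;

static void rtc_shift_bit(RtcShift *r, int bit)
{
    if (r->phase < 8) {
        r->command = (uint8_t)((r->command << 1) | bit);
    } else if (r->phase < 16) {
        r->value = (uint8_t)((r->value << 1) | bit);
    }
    r->phase++;
}

int main(void)
{
    RtcShift r = { 0 };

    /* clock in command 0x30 (read status), most-significant bit first */
    for (int i = 7; i >= 0; i--) {
        rtc_shift_bit(&r, (0x30 >> i) & 1);
    }
    assert(r.command == 0x30 && r.phase == 8);
    return 0;
}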
0xd003: + val = extract32(s->scr2, (4 - (addr - 0xd000) - size) << 3, + size << 3); + break; -static void mmio_writeb(NeXTPC *s, hwaddr addr, uint32_t val) -{ - switch (addr) { - case 0xd003: - nextscr2_write(s, val, 1); + case 0x14020: + val = 0x7f; break; + default: - DPRINTF("MMIO Write B @ %x with %x\n", (unsigned int)addr, val); + val = 0; + DPRINTF("MMIO Read @ 0x%"HWADDR_PRIx" size %d\n", addr, size); + break; } + return val; } -static void mmio_writew(NeXTPC *s, hwaddr addr, uint32_t val) +static void next_mmio_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) { - DPRINTF("MMIO Write W\n"); -} + NeXTPC *s = NEXT_PC(opaque); -static void mmio_writel(NeXTPC *s, hwaddr addr, uint32_t val) -{ switch (addr) { case 0x7000: - DPRINTF("INT Status old: %x new: %x\n", s->int_status, val); + DPRINTF("INT Status old: %x new: %x\n", s->int_status, + (unsigned int)val); s->int_status = val; break; + case 0x7800: - DPRINTF("INT Mask old: %x new: %x\n", s->int_mask, val); + DPRINTF("INT Mask old: %x new: %x\n", s->int_mask, (unsigned int)val); s->int_mask = val; break; - case 0xc000: - DPRINTF("SCR1 Write: %x\n", val); - break; - case 0xd000: - nextscr2_write(s, val, 4); - break; - - default: - DPRINTF("MMIO Write l @ %x with %x\n", (unsigned int)addr, val); - } -} -static uint64_t mmio_readfn(void *opaque, hwaddr addr, unsigned size) -{ - NeXTPC *s = NEXT_PC(opaque); - - switch (size) { - case 1: - return mmio_readb(s, addr); - case 2: - return mmio_readw(s, addr); - case 4: - return mmio_readl(s, addr); - default: - g_assert_not_reached(); - } -} - -static void mmio_writefn(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - NeXTPC *s = NEXT_PC(opaque); - - switch (size) { - case 1: - mmio_writeb(s, addr, value); - break; - case 2: - mmio_writew(s, addr, value); + case 0xc000 ... 0xc003: + DPRINTF("SCR1 Write: %x\n", (unsigned int)val); + s->scr1 = deposit32(s->scr1, (4 - (addr - 0xc000) - size) << 3, + size << 3, val); break; - case 4: - mmio_writel(s, addr, value); + + case 0xd000 ... 
0xd003: + s->scr2 = deposit32(s->scr2, (4 - (addr - 0xd000) - size) << 3, + size << 3, val); + next_scr2_led_update(s); + next_scr2_rtc_update(s); + s->old_scr2 = s->scr2; break; + default: - g_assert_not_reached(); + DPRINTF("MMIO Write @ 0x%"HWADDR_PRIx " with 0x%x size %u\n", addr, + (unsigned int)val, size); } } -static const MemoryRegionOps mmio_ops = { - .read = mmio_readfn, - .write = mmio_writefn, +static const MemoryRegionOps next_mmio_ops = { + .read = next_mmio_read, + .write = next_mmio_write, .valid.min_access_size = 1, .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; -static uint32_t scr_readb(NeXTPC *s, hwaddr addr) +#define SCSICSR_ENABLE 0x01 +#define SCSICSR_RESET 0x02 /* reset scsi dma */ +#define SCSICSR_FIFOFL 0x04 +#define SCSICSR_DMADIR 0x08 /* if set, scsi to mem */ +#define SCSICSR_CPUDMA 0x10 /* if set, dma enabled */ +#define SCSICSR_INTMASK 0x20 /* if set, interrupt enabled */ + +static uint64_t next_scr_readfn(void *opaque, hwaddr addr, unsigned size) { + NeXTPC *s = NEXT_PC(opaque); + uint64_t val; + switch (addr) { case 0x14108: DPRINTF("FD read @ %x\n", (unsigned int)addr); - return 0x40 | 0x04 | 0x2 | 0x1; + val = 0x40 | 0x04 | 0x2 | 0x1; + break; + case 0x14020: DPRINTF("SCSI 4020 STATUS READ %X\n", s->scsi_csr_1); - return s->scsi_csr_1; + val = s->scsi_csr_1; + break; case 0x14021: DPRINTF("SCSI 4021 STATUS READ %X\n", s->scsi_csr_2); - return 0x40; + val = 0x40; + break; /* * These 4 registers are the hardware timer, not sure which register - * is the latch instead of data, but no problems so far + * is the latch instead of data, but no problems so far. + * + * Hack: We need to have the LSB change consistently to make it work */ - case 0x1a000: - return 0xff & (clock() >> 24); - case 0x1a001: - return 0xff & (clock() >> 16); - case 0x1a002: - return 0xff & (clock() >> 8); - case 0x1a003: - /* Hack: We need to have this change consistently to make it work */ - return 0xFF & clock(); + case 0x1a000 ... 
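/*
 * next_mmio_read() and next_mmio_write() above both pick a byte lane out of
 * a 32-bit big-endian register with the expression
 * "(4 - (addr - base) - size) << 3".  A standalone check of that arithmetic;
 * be_lane_shift() is just a name for this sketch.
 */
#include <assert.h>
#include <stdint.h>

static unsigned be_lane_shift(uint32_t offset, unsigned size)
{
    return (4 - offset - size) * 8;
}

int main(void)
{
    assert(be_lane_shift(0, 4) == 0);    /* 32-bit access: whole register   */
    assert(be_lane_shift(0, 2) == 16);   /* 16-bit access at +0: upper half */
    assert(be_lane_shift(3, 1) == 0);    /* byte access at +3: bits 7..0    */
    assert(be_lane_shift(0, 1) == 24);   /* byte access at +0: bits 31..24  */
    return 0;
}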
0x1a003: + val = extract32(clock(), (4 - (addr - 0x1a000) - size) << 3, + size << 3); + break; + + /* For now return dummy byte to allow the Ethernet test to timeout */ + case 0x6000: + val = 0xff; + break; default: - DPRINTF("BMAP Read B @ %x\n", (unsigned int)addr); - return 0; + DPRINTF("BMAP Read @ 0x%x size %u\n", (unsigned int)addr, size); + val = 0; + break; } -} -static uint32_t scr_readw(NeXTPC *s, hwaddr addr) -{ - DPRINTF("BMAP Read W @ %x\n", (unsigned int)addr); - return 0; + return val; } -static uint32_t scr_readl(NeXTPC *s, hwaddr addr) +static void next_scr_writefn(void *opaque, hwaddr addr, uint64_t val, + unsigned size) { - DPRINTF("BMAP Read L @ %x\n", (unsigned int)addr); - return 0; -} - -#define SCSICSR_ENABLE 0x01 -#define SCSICSR_RESET 0x02 /* reset scsi dma */ -#define SCSICSR_FIFOFL 0x04 -#define SCSICSR_DMADIR 0x08 /* if set, scsi to mem */ -#define SCSICSR_CPUDMA 0x10 /* if set, dma enabled */ -#define SCSICSR_INTMASK 0x20 /* if set, interrupt enabled */ + NeXTPC *s = NEXT_PC(opaque); -static void scr_writeb(NeXTPC *s, hwaddr addr, uint32_t value) -{ switch (addr) { case 0x14108: DPRINTF("FDCSR Write: %x\n", value); - - if (value == 0x0) { + if (val == 0x0) { /* qemu_irq_raise(s->fd_irq[0]); */ } break; + case 0x14020: /* SCSI Control Register */ - if (value & SCSICSR_FIFOFL) { + if (val & SCSICSR_FIFOFL) { DPRINTF("SCSICSR FIFO Flush\n"); /* will have to add another irq to the esp if this is needed */ /* esp_puflush_fifo(esp_g); */ - qemu_irq_pulse(s->scsi_dma); } - if (value & SCSICSR_ENABLE) { + if (val & SCSICSR_ENABLE) { DPRINTF("SCSICSR Enable\n"); /* * qemu_irq_raise(s->scsi_dma); @@ -486,17 +431,17 @@ static void scr_writeb(NeXTPC *s, hwaddr addr, uint32_t value) * s->scsi_csr_1 &= ~SCSICSR_ENABLE; */ - if (value & SCSICSR_RESET) { + if (val & SCSICSR_RESET) { DPRINTF("SCSICSR Reset\n"); /* I think this should set DMADIR. 
CPUDMA and INTMASK to 0 */ qemu_irq_raise(s->scsi_reset); s->scsi_csr_1 &= ~(SCSICSR_INTMASK | 0x80 | 0x1); qemu_irq_lower(s->scsi_reset); } - if (value & SCSICSR_DMADIR) { + if (val & SCSICSR_DMADIR) { DPRINTF("SCSICSR DMAdir\n"); } - if (value & SCSICSR_CPUDMA) { + if (val & SCSICSR_CPUDMA) { DPRINTF("SCSICSR CPUDMA\n"); /* qemu_irq_raise(s->scsi_dma); */ s->int_status |= 0x4000000; @@ -505,11 +450,11 @@ static void scr_writeb(NeXTPC *s, hwaddr addr, uint32_t value) s->int_status &= ~(0x4000000); /* qemu_irq_lower(s->scsi_dma); */ } - if (value & SCSICSR_INTMASK) { + if (val & SCSICSR_INTMASK) { DPRINTF("SCSICSR INTMASK\n"); /* * int_mask &= ~0x1000; - * s->scsi_csr_1 |= value; + * s->scsi_csr_1 |= val; * s->scsi_csr_1 &= ~SCSICSR_INTMASK; * if (s->scsi_queued) { * s->scsi_queued = 0; @@ -519,72 +464,28 @@ static void scr_writeb(NeXTPC *s, hwaddr addr, uint32_t value) } else { /* int_mask |= 0x1000; */ } - if (value & 0x80) { + if (val & 0x80) { /* int_mask |= 0x1000; */ /* s->scsi_csr_1 |= 0x80; */ } - DPRINTF("SCSICSR Write: %x\n", value); - /* s->scsi_csr_1 = value; */ - return; + DPRINTF("SCSICSR Write: %x\n", val); + /* s->scsi_csr_1 = val; */ + break; + /* Hardware timer latch - not implemented yet */ case 0x1a000: default: - DPRINTF("BMAP Write B @ %x with %x\n", (unsigned int)addr, value); - } -} - -static void scr_writew(NeXTPC *s, hwaddr addr, uint32_t value) -{ - DPRINTF("BMAP Write W @ %x with %x\n", (unsigned int)addr, value); -} - -static void scr_writel(NeXTPC *s, hwaddr addr, uint32_t value) -{ - DPRINTF("BMAP Write L @ %x with %x\n", (unsigned int)addr, value); -} - -static uint64_t scr_readfn(void *opaque, hwaddr addr, unsigned size) -{ - NeXTPC *s = NEXT_PC(opaque); - - switch (size) { - case 1: - return scr_readb(s, addr); - case 2: - return scr_readw(s, addr); - case 4: - return scr_readl(s, addr); - default: - g_assert_not_reached(); - } -} - -static void scr_writefn(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - NeXTPC *s = NEXT_PC(opaque); - - switch (size) { - case 1: - scr_writeb(s, addr, value); - break; - case 2: - scr_writew(s, addr, value); - break; - case 4: - scr_writel(s, addr, value); - break; - default: - g_assert_not_reached(); + DPRINTF("BMAP Write @ 0x%x with 0x%x size %u\n", (unsigned int)addr, + val, size); } } -static const MemoryRegionOps scr_ops = { - .read = scr_readfn, - .write = scr_writefn, +static const MemoryRegionOps next_scr_ops = { + .read = next_scr_readfn, + .write = next_scr_writefn, .valid.min_access_size = 1, .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; #define NEXTDMA_SCSI(x) (0x10 + x) @@ -599,59 +500,63 @@ static const MemoryRegionOps scr_ops = { #define NEXTDMA_NEXT_INIT 0x4200 #define NEXTDMA_SIZE 0x4204 -static void dma_writel(void *opaque, hwaddr addr, uint64_t value, - unsigned int size) +static void next_dma_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) { NeXTState *next_state = NEXT_MACHINE(opaque); switch (addr) { case NEXTDMA_ENRX(NEXTDMA_CSR): - if (value & DMA_DEV2M) { + if (val & DMA_DEV2M) { next_state->dma[NEXTDMA_ENRX].csr |= DMA_DEV2M; } - if (value & DMA_SETENABLE) { + if (val & DMA_SETENABLE) { /* DPRINTF("SCSI DMA ENABLE\n"); */ next_state->dma[NEXTDMA_ENRX].csr |= DMA_ENABLE; } - if (value & DMA_SETSUPDATE) { + if (val & DMA_SETSUPDATE) { next_state->dma[NEXTDMA_ENRX].csr |= DMA_SUPDATE; } - if (value & DMA_CLRCOMPLETE) { + if (val & DMA_CLRCOMPLETE) { next_state->dma[NEXTDMA_ENRX].csr &= ~DMA_COMPLETE; } - if 
(value & DMA_RESET) { + if (val & DMA_RESET) { next_state->dma[NEXTDMA_ENRX].csr &= ~(DMA_COMPLETE | DMA_SUPDATE | DMA_ENABLE | DMA_DEV2M); } /* DPRINTF("RXCSR \tWrite: %x\n",value); */ break; + case NEXTDMA_ENRX(NEXTDMA_NEXT_INIT): - next_state->dma[NEXTDMA_ENRX].next_initbuf = value; + next_state->dma[NEXTDMA_ENRX].next_initbuf = val; break; + case NEXTDMA_ENRX(NEXTDMA_NEXT): - next_state->dma[NEXTDMA_ENRX].next = value; + next_state->dma[NEXTDMA_ENRX].next = val; break; + case NEXTDMA_ENRX(NEXTDMA_LIMIT): - next_state->dma[NEXTDMA_ENRX].limit = value; + next_state->dma[NEXTDMA_ENRX].limit = val; break; + case NEXTDMA_SCSI(NEXTDMA_CSR): - if (value & DMA_DEV2M) { + if (val & DMA_DEV2M) { next_state->dma[NEXTDMA_SCSI].csr |= DMA_DEV2M; } - if (value & DMA_SETENABLE) { + if (val & DMA_SETENABLE) { /* DPRINTF("SCSI DMA ENABLE\n"); */ next_state->dma[NEXTDMA_SCSI].csr |= DMA_ENABLE; } - if (value & DMA_SETSUPDATE) { + if (val & DMA_SETSUPDATE) { next_state->dma[NEXTDMA_SCSI].csr |= DMA_SUPDATE; } - if (value & DMA_CLRCOMPLETE) { + if (val & DMA_CLRCOMPLETE) { next_state->dma[NEXTDMA_SCSI].csr &= ~DMA_COMPLETE; } - if (value & DMA_RESET) { + if (val & DMA_RESET) { next_state->dma[NEXTDMA_SCSI].csr &= ~(DMA_COMPLETE | DMA_SUPDATE | DMA_ENABLE | DMA_DEV2M); /* DPRINTF("SCSI DMA RESET\n"); */ @@ -660,23 +565,23 @@ static void dma_writel(void *opaque, hwaddr addr, uint64_t value, break; case NEXTDMA_SCSI(NEXTDMA_NEXT): - next_state->dma[NEXTDMA_SCSI].next = value; + next_state->dma[NEXTDMA_SCSI].next = val; break; case NEXTDMA_SCSI(NEXTDMA_LIMIT): - next_state->dma[NEXTDMA_SCSI].limit = value; + next_state->dma[NEXTDMA_SCSI].limit = val; break; case NEXTDMA_SCSI(NEXTDMA_START): - next_state->dma[NEXTDMA_SCSI].start = value; + next_state->dma[NEXTDMA_SCSI].start = val; break; case NEXTDMA_SCSI(NEXTDMA_STOP): - next_state->dma[NEXTDMA_SCSI].stop = value; + next_state->dma[NEXTDMA_SCSI].stop = val; break; case NEXTDMA_SCSI(NEXTDMA_NEXT_INIT): - next_state->dma[NEXTDMA_SCSI].next_initbuf = value; + next_state->dma[NEXTDMA_SCSI].next_initbuf = val; break; default: @@ -684,52 +589,73 @@ static void dma_writel(void *opaque, hwaddr addr, uint64_t value, } } -static uint64_t dma_readl(void *opaque, hwaddr addr, unsigned int size) +static uint64_t next_dma_read(void *opaque, hwaddr addr, unsigned int size) { NeXTState *next_state = NEXT_MACHINE(opaque); + uint64_t val; switch (addr) { case NEXTDMA_SCSI(NEXTDMA_CSR): DPRINTF("SCSI DMA CSR READ\n"); - return next_state->dma[NEXTDMA_SCSI].csr; + val = next_state->dma[NEXTDMA_SCSI].csr; + break; + case NEXTDMA_ENRX(NEXTDMA_CSR): - return next_state->dma[NEXTDMA_ENRX].csr; + val = next_state->dma[NEXTDMA_ENRX].csr; + break; + case NEXTDMA_ENRX(NEXTDMA_NEXT_INIT): - return next_state->dma[NEXTDMA_ENRX].next_initbuf; + val = next_state->dma[NEXTDMA_ENRX].next_initbuf; + break; + case NEXTDMA_ENRX(NEXTDMA_NEXT): - return next_state->dma[NEXTDMA_ENRX].next; + val = next_state->dma[NEXTDMA_ENRX].next; + break; + case NEXTDMA_ENRX(NEXTDMA_LIMIT): - return next_state->dma[NEXTDMA_ENRX].limit; + val = next_state->dma[NEXTDMA_ENRX].limit; + break; case NEXTDMA_SCSI(NEXTDMA_NEXT): - return next_state->dma[NEXTDMA_SCSI].next; + val = next_state->dma[NEXTDMA_SCSI].next; + break; + case NEXTDMA_SCSI(NEXTDMA_NEXT_INIT): - return next_state->dma[NEXTDMA_SCSI].next_initbuf; + val = next_state->dma[NEXTDMA_SCSI].next_initbuf; + break; + case NEXTDMA_SCSI(NEXTDMA_LIMIT): - return next_state->dma[NEXTDMA_SCSI].limit; + val = next_state->dma[NEXTDMA_SCSI].limit; + break; + case 
NEXTDMA_SCSI(NEXTDMA_START): - return next_state->dma[NEXTDMA_SCSI].start; + val = next_state->dma[NEXTDMA_SCSI].start; + break; + case NEXTDMA_SCSI(NEXTDMA_STOP): - return next_state->dma[NEXTDMA_SCSI].stop; + val = next_state->dma[NEXTDMA_SCSI].stop; + break; default: DPRINTF("DMA read @ %x\n", (unsigned int)addr); - return 0; + val = 0; } /* * once the csr's are done, subtract 0x3FEC from the addr, and that will * normalize the upper registers */ + + return val; } -static const MemoryRegionOps dma_ops = { - .read = dma_readl, - .write = dma_writel, +static const MemoryRegionOps next_dma_ops = { + .read = next_dma_read, + .write = next_dma_write, .impl.min_access_size = 4, .valid.min_access_size = 4, .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; static void next_irq(void *opaque, int number, int level) @@ -959,6 +885,7 @@ static void next_pc_reset(DeviceState *dev) /* 0x0000XX00 << vital bits */ s->scr1 = 0x00011102; s->scr2 = 0x00ff0c80; + s->old_scr2 = s->scr2; s->rtc.status = 0x90; @@ -973,9 +900,9 @@ static void next_pc_realize(DeviceState *dev, Error **errp) qdev_init_gpio_in(dev, next_irq, NEXT_NUM_IRQS); - memory_region_init_io(&s->mmiomem, OBJECT(s), &mmio_ops, s, - "next.mmio", 0xD0000); - memory_region_init_io(&s->scrmem, OBJECT(s), &scr_ops, s, + memory_region_init_io(&s->mmiomem, OBJECT(s), &next_mmio_ops, s, + "next.mmio", 0xd0000); + memory_region_init_io(&s->scrmem, OBJECT(s), &next_scr_ops, s, "next.scr", 0x20000); sysbus_init_mmio(sbd, &s->mmiomem); sysbus_init_mmio(sbd, &s->scrmem); @@ -994,9 +921,10 @@ static Property next_pc_properties[] = { static const VMStateDescription next_rtc_vmstate = { .name = "next-rtc", - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { + .version_id = 2, + .minimum_version_id = 2, + .fields = (const VMStateField[]) { + VMSTATE_INT8(phase, NextRtc), VMSTATE_UINT8_ARRAY(ram, NextRtc, 32), VMSTATE_UINT8(command, NextRtc), VMSTATE_UINT8(value, NextRtc), @@ -1009,13 +937,15 @@ static const VMStateDescription next_rtc_vmstate = { static const VMStateDescription next_pc_vmstate = { .name = "next-pc", - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { + .version_id = 2, + .minimum_version_id = 2, + .fields = (const VMStateField[]) { VMSTATE_UINT32(scr1, NeXTPC), VMSTATE_UINT32(scr2, NeXTPC), + VMSTATE_UINT32(old_scr2, NeXTPC), VMSTATE_UINT32(int_mask, NeXTPC), VMSTATE_UINT32(int_status, NeXTPC), + VMSTATE_UINT32(led, NeXTPC), VMSTATE_UINT8(scsi_csr_1, NeXTPC), VMSTATE_UINT8(scsi_csr_2, NeXTPC), VMSTATE_STRUCT(rtc, NeXTPC, 0, next_rtc_vmstate, NextRtc), @@ -1043,13 +973,9 @@ static const TypeInfo next_pc_info = { static void next_cube_init(MachineState *machine) { + NeXTState *m = NEXT_MACHINE(machine); M68kCPU *cpu; CPUM68KState *env; - MemoryRegion *rom = g_new(MemoryRegion, 1); - MemoryRegion *rom2 = g_new(MemoryRegion, 1); - MemoryRegion *dmamem = g_new(MemoryRegion, 1); - MemoryRegion *bmapm1 = g_new(MemoryRegion, 1); - MemoryRegion *bmapm2 = g_new(MemoryRegion, 1); MemoryRegion *sysmem = get_system_memory(); const char *bios_name = machine->firmware ?: ROM_FILE; DeviceState *pcdev; @@ -1084,21 +1010,23 @@ static void next_cube_init(MachineState *machine) sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 1, 0x02100000); /* BMAP memory */ - memory_region_init_ram_flags_nomigrate(bmapm1, NULL, "next.bmapmem", 64, - RAM_SHARED, &error_fatal); - memory_region_add_subregion(sysmem, 0x020c0000, bmapm1); + 
memory_region_init_ram_flags_nomigrate(&m->bmapm1, NULL, "next.bmapmem", + 64, RAM_SHARED, &error_fatal); + memory_region_add_subregion(sysmem, 0x020c0000, &m->bmapm1); /* The Rev_2.5_v66.bin firmware accesses it at 0x820c0020, too */ - memory_region_init_alias(bmapm2, NULL, "next.bmapmem2", bmapm1, 0x0, 64); - memory_region_add_subregion(sysmem, 0x820c0000, bmapm2); + memory_region_init_alias(&m->bmapm2, NULL, "next.bmapmem2", &m->bmapm1, + 0x0, 64); + memory_region_add_subregion(sysmem, 0x820c0000, &m->bmapm2); /* KBD */ sysbus_create_simple(TYPE_NEXTKBD, 0x0200e000, NULL); /* Load ROM here */ - memory_region_init_rom(rom, NULL, "next.rom", 0x20000, &error_fatal); - memory_region_add_subregion(sysmem, 0x01000000, rom); - memory_region_init_alias(rom2, NULL, "next.rom2", rom, 0x0, 0x20000); - memory_region_add_subregion(sysmem, 0x0, rom2); + memory_region_init_rom(&m->rom, NULL, "next.rom", 0x20000, &error_fatal); + memory_region_add_subregion(sysmem, 0x01000000, &m->rom); + memory_region_init_alias(&m->rom2, NULL, "next.rom2", &m->rom, 0x0, + 0x20000); + memory_region_add_subregion(sysmem, 0x0, &m->rom2); if (load_image_targphys(bios_name, 0x01000000, 0x20000) < 8) { if (!qtest_enabled()) { error_report("Failed to load firmware '%s'.", bios_name); @@ -1125,8 +1053,9 @@ static void next_cube_init(MachineState *machine) next_scsi_init(pcdev, cpu); /* DMA */ - memory_region_init_io(dmamem, NULL, &dma_ops, machine, "next.dma", 0x5000); - memory_region_add_subregion(sysmem, 0x02000000, dmamem); + memory_region_init_io(&m->dmamem, NULL, &next_dma_ops, machine, + "next.dma", 0x5000); + memory_region_add_subregion(sysmem, 0x02000000, &m->dmamem); } static void next_machine_class_init(ObjectClass *oc, void *data) diff --git a/hw/m68k/q800-glue.c b/hw/m68k/q800-glue.c index f413b1599a2..b5a7713863f 100644 --- a/hw/m68k/q800-glue.c +++ b/hw/m68k/q800-glue.c @@ -189,7 +189,7 @@ static const VMStateDescription vmstate_glue = { .name = "q800-glue", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(ipr, GLUEState), VMSTATE_UINT8(auxmode, GLUEState), VMSTATE_TIMER_PTR(nmi_release, GLUEState), diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c index 83d1571d02f..fa7683bf76f 100644 --- a/hw/m68k/q800.c +++ b/hw/m68k/q800.c @@ -48,6 +48,7 @@ #include "hw/display/macfb.h" #include "hw/block/swim.h" #include "net/net.h" +#include "net/util.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "sysemu/qtest.h" @@ -253,7 +254,6 @@ static void q800_machine_init(MachineState *machine) int bios_size; ram_addr_t initrd_base; int32_t initrd_size; - MemoryRegion *dp8393x_prom = g_new(MemoryRegion, 1); uint8_t *prom; int i, checksum; MacFbMode *macfb_mode; @@ -271,6 +271,8 @@ static void q800_machine_init(MachineState *machine) BusState *adb_bus; NubusBus *nubus; DriveInfo *dinfo; + NICInfo *nd; + MACAddr mac; uint8_t rng_seed[32]; linux_boot = (kernel_filename != NULL); @@ -371,13 +373,6 @@ static void q800_machine_init(MachineState *machine) /* MACSONIC */ - if (nb_nics > 1) { - error_report("q800 can only have one ethernet interface"); - exit(1); - } - - qemu_check_nic_model(&nd_table[0], "dp83932"); - /* * MacSonic driver needs an Apple MAC address * Valid prefix are: @@ -387,14 +382,21 @@ static void q800_machine_init(MachineState *machine) * 08:00:07 Apple * (Q800 use the last one) */ - nd_table[0].macaddr.a[0] = 0x08; - nd_table[0].macaddr.a[1] = 0x00; - nd_table[0].macaddr.a[2] = 0x07; - object_initialize_child(OBJECT(machine), 
"dp8393x", &m->dp8393x, TYPE_DP8393X); dev = DEVICE(&m->dp8393x); - qdev_set_nic_properties(dev, &nd_table[0]); + nd = qemu_find_nic_info(TYPE_DP8393X, true, "dp83932"); + if (nd) { + qdev_set_nic_properties(dev, nd); + memcpy(mac.a, nd->macaddr.a, sizeof(mac.a)); + } else { + qemu_macaddr_default_if_unset(&mac); + } + mac.a[0] = 0x08; + mac.a[1] = 0x00; + mac.a[2] = 0x07; + qdev_prop_set_macaddr(dev, "mac", mac.a); + qdev_prop_set_uint8(dev, "it_shift", 2); qdev_prop_set_bit(dev, "big_endian", true); object_property_set_link(OBJECT(dev), "dma_mr", @@ -406,16 +408,16 @@ static void q800_machine_init(MachineState *machine) sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(DEVICE(&m->glue), GLUE_IRQ_IN_SONIC)); - memory_region_init_rom(dp8393x_prom, NULL, "dp8393x-q800.prom", + memory_region_init_rom(&m->dp8393x_prom, NULL, "dp8393x-q800.prom", SONIC_PROM_SIZE, &error_fatal); memory_region_add_subregion(get_system_memory(), SONIC_PROM_BASE, - dp8393x_prom); + &m->dp8393x_prom); /* Add MAC address with valid checksum to PROM */ - prom = memory_region_get_ram_ptr(dp8393x_prom); + prom = memory_region_get_ram_ptr(&m->dp8393x_prom); checksum = 0; for (i = 0; i < 6; i++) { - prom[i] = revbit8(nd_table[0].macaddr.a[i]); + prom[i] = revbit8(mac.a[i]); checksum ^= prom[i]; } prom[7] = 0xff - checksum; diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c index 2e49e262ee0..b8e5e102e6b 100644 --- a/hw/m68k/virt.c +++ b/hw/m68k/virt.c @@ -239,9 +239,20 @@ static void virt_init(MachineState *machine) param_ptr = param_blob; BOOTINFO1(param_ptr, BI_MACHTYPE, MACH_VIRT); - BOOTINFO1(param_ptr, BI_FPUTYPE, FPU_68040); - BOOTINFO1(param_ptr, BI_MMUTYPE, MMU_68040); - BOOTINFO1(param_ptr, BI_CPUTYPE, CPU_68040); + if (m68k_feature(&cpu->env, M68K_FEATURE_M68020)) { + BOOTINFO1(param_ptr, BI_CPUTYPE, CPU_68020); + } else if (m68k_feature(&cpu->env, M68K_FEATURE_M68030)) { + BOOTINFO1(param_ptr, BI_MMUTYPE, MMU_68030); + BOOTINFO1(param_ptr, BI_CPUTYPE, CPU_68030); + } else if (m68k_feature(&cpu->env, M68K_FEATURE_M68040)) { + BOOTINFO1(param_ptr, BI_FPUTYPE, FPU_68040); + BOOTINFO1(param_ptr, BI_MMUTYPE, MMU_68040); + BOOTINFO1(param_ptr, BI_CPUTYPE, CPU_68040); + } else if (m68k_feature(&cpu->env, M68K_FEATURE_M68060)) { + BOOTINFO1(param_ptr, BI_FPUTYPE, FPU_68060); + BOOTINFO1(param_ptr, BI_MMUTYPE, MMU_68060); + BOOTINFO1(param_ptr, BI_CPUTYPE, CPU_68060); + } BOOTINFO2(param_ptr, BI_MEMCHUNK, 0, ram_size); BOOTINFO1(param_ptr, BI_VIRT_QEMU_VERSION, @@ -346,10 +357,17 @@ type_init(virt_machine_register_types) } \ type_init(machvirt_machine_##major##_##minor##_init); +static void virt_machine_9_0_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE(9, 0, true) + static void virt_machine_8_2_options(MachineClass *mc) { + virt_machine_9_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); } -DEFINE_VIRT_MACHINE(8, 2, true) +DEFINE_VIRT_MACHINE(8, 2, false) static void virt_machine_8_1_options(MachineClass *mc) { diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c index 52647b4ac7b..b0a7e9f11b6 100644 --- a/hw/mem/cxl_type3.c +++ b/hw/mem/cxl_type3.c @@ -42,21 +42,18 @@ enum { CT3_CDAT_NUM_ENTRIES }; -static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, - int dsmad_handle, MemoryRegion *mr, - bool is_pmem, uint64_t dpa_base) +static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, + int dsmad_handle, MemoryRegion *mr, + bool is_pmem, uint64_t dpa_base) { - g_autofree CDATDsmas *dsmas = NULL; - g_autofree CDATDslbis *dslbis0 = NULL; - g_autofree CDATDslbis 
*dslbis1 = NULL; - g_autofree CDATDslbis *dslbis2 = NULL; - g_autofree CDATDslbis *dslbis3 = NULL; - g_autofree CDATDsemts *dsemts = NULL; + CDATDsmas *dsmas; + CDATDslbis *dslbis0; + CDATDslbis *dslbis1; + CDATDslbis *dslbis2; + CDATDslbis *dslbis3; + CDATDsemts *dsemts; dsmas = g_malloc(sizeof(*dsmas)); - if (!dsmas) { - return -ENOMEM; - } *dsmas = (CDATDsmas) { .header = { .type = CDAT_TYPE_DSMAS, @@ -70,9 +67,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, /* For now, no memory side cache, plausiblish numbers */ dslbis0 = g_malloc(sizeof(*dslbis0)); - if (!dslbis0) { - return -ENOMEM; - } *dslbis0 = (CDATDslbis) { .header = { .type = CDAT_TYPE_DSLBIS, @@ -86,9 +80,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, }; dslbis1 = g_malloc(sizeof(*dslbis1)); - if (!dslbis1) { - return -ENOMEM; - } *dslbis1 = (CDATDslbis) { .header = { .type = CDAT_TYPE_DSLBIS, @@ -102,9 +93,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, }; dslbis2 = g_malloc(sizeof(*dslbis2)); - if (!dslbis2) { - return -ENOMEM; - } *dslbis2 = (CDATDslbis) { .header = { .type = CDAT_TYPE_DSLBIS, @@ -118,9 +106,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, }; dslbis3 = g_malloc(sizeof(*dslbis3)); - if (!dslbis3) { - return -ENOMEM; - } *dslbis3 = (CDATDslbis) { .header = { .type = CDAT_TYPE_DSLBIS, @@ -134,9 +119,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, }; dsemts = g_malloc(sizeof(*dsemts)); - if (!dsemts) { - return -ENOMEM; - } *dsemts = (CDATDsemts) { .header = { .type = CDAT_TYPE_DSEMTS, @@ -153,14 +135,12 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, }; /* Header always at start of structure */ - cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas); - cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0); - cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1); - cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2); - cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3); - cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts); - - return 0; + cdat_table[CT3_CDAT_DSMAS] = (CDATSubHeader *)dsmas; + cdat_table[CT3_CDAT_DSLBIS0] = (CDATSubHeader *)dslbis0; + cdat_table[CT3_CDAT_DSLBIS1] = (CDATSubHeader *)dslbis1; + cdat_table[CT3_CDAT_DSLBIS2] = (CDATSubHeader *)dslbis2; + cdat_table[CT3_CDAT_DSLBIS3] = (CDATSubHeader *)dslbis3; + cdat_table[CT3_CDAT_DSEMTS] = (CDATSubHeader *)dsemts; } static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv) @@ -171,7 +151,6 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv) int dsmad_handle = 0; int cur_ent = 0; int len = 0; - int rc, i; if (!ct3d->hostpmem && !ct3d->hostvmem) { return 0; @@ -194,27 +173,18 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv) } table = g_malloc0(len * sizeof(*table)); - if (!table) { - return -ENOMEM; - } /* Now fill them in */ if (volatile_mr) { - rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr, - false, 0); - if (rc < 0) { - return rc; - } + ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr, + false, 0); cur_ent = CT3_CDAT_NUM_ENTRIES; } if (nonvolatile_mr) { uint64_t base = volatile_mr ? 
memory_region_size(volatile_mr) : 0; - rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++, - nonvolatile_mr, true, base); - if (rc < 0) { - goto error_cleanup; - } + ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++, + nonvolatile_mr, true, base); cur_ent += CT3_CDAT_NUM_ENTRIES; } assert(len == cur_ent); @@ -222,11 +192,6 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv) *cdat_table = g_steal_pointer(&table); return len; -error_cleanup: - for (i = 0; i < cur_ent; i++) { - g_free(table[i]); - } - return rc; } static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv) @@ -354,7 +319,7 @@ static void build_dvsecs(CXLType3Dev *ct3d) cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, PCIE_CXL_DEVICE_DVSEC_LENGTH, PCIE_CXL_DEVICE_DVSEC, - PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec); + PCIE_CXL31_DEVICE_DVSEC_REVID, dvsec); dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){ .rsvd = 0, @@ -381,9 +346,9 @@ static void build_dvsecs(CXLType3Dev *ct3d) .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */ }; cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, - PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0, + PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH, PCIE_FLEXBUS_PORT_DVSEC, - PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec); + PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec); } static void hdm_decoder_commit(CXLType3Dev *ct3d, int which) @@ -680,6 +645,7 @@ static DOEProtocol doe_cdat_prot[] = { static void ct3_realize(PCIDevice *pci_dev, Error **errp) { + ERRP_GUARD(); CXLType3Dev *ct3d = CXL_TYPE3(pci_dev); CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; ComponentRegisters *regs = &cxl_cstate->crb; @@ -829,8 +795,13 @@ static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa) } if (((uint64_t)host_addr < decoder_base) || (hpa_offset >= decoder_size)) { - dpa_base += decoder_size / - cxl_interleave_ways_dec(iw, &error_fatal); + int decoded_iw = cxl_interleave_ways_dec(iw, &error_fatal); + + if (decoded_iw == 0) { + return false; + } + + dpa_base += decoder_size / decoded_iw; continue; } @@ -1168,9 +1139,6 @@ void qmp_cxl_inject_uncorrectable_errors(const char *path, } cxl_err = g_malloc0(sizeof(*cxl_err)); - if (!cxl_err) { - return; - } cxl_err->type = cxl_err_code; while (header && header_count < 32) { diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c index a1b1af26bc4..e098585cda8 100644 --- a/hw/mem/memory-device.c +++ b/hw/mem/memory-device.c @@ -374,6 +374,20 @@ void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms, goto out; } + /* + * We always want the memory region size to be multiples of the memory + * region alignment: for example, DIMMs with 1G+1byte size don't make + * any sense. Note that we don't check that the size is multiples + * of any additional alignment requirements the memory device might + * have when it comes to the address in physical address space. 
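/*
 * Plain-C restatement of the size check added just below: QEMU_IS_ALIGNED()
 * is a power-of-two mask test, so a backend whose length is not a multiple
 * of the region alignment is rejected.  backend_size_ok() and the 2 MiB
 * alignment used in the example are illustrative only.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool backend_size_ok(uint64_t size, uint64_t align)
{
    return align == 0 || (size & (align - 1)) == 0;
}

int main(void)
{
    assert(backend_size_ok(1ULL << 30, 2 << 20));          /* 1 GiB is fine     */
    assert(!backend_size_ok((1ULL << 30) + 1, 2 << 20));   /* 1 GiB + 1 B fails */
    return 0;
}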
+ */ + if (!QEMU_IS_ALIGNED(memory_region_size(mr), + memory_region_get_alignment(mr))) { + error_setg(errp, "backend memory size must be multiple of 0x%" + PRIx64, memory_region_get_alignment(mr)); + return; + } + if (legacy_align) { align = *legacy_align; } else { diff --git a/hw/mem/meson.build b/hw/mem/meson.build index ec26ef55443..faee1fe9360 100644 --- a/hw/mem/meson.build +++ b/hw/mem/meson.build @@ -5,7 +5,6 @@ mem_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_mc.c')) mem_ss.add(when: 'CONFIG_NVDIMM', if_true: files('nvdimm.c')) mem_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_true: files('cxl_type3.c')) system_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_false: files('cxl_type3_stubs.c')) -system_ss.add(when: 'CONFIG_ALL', if_true: files('cxl_type3_stubs.c')) system_ss.add_all(when: 'CONFIG_MEM_DEVICE', if_true: mem_ss) diff --git a/hw/meson.build b/hw/meson.build index f01fac4617c..463d7026830 100644 --- a/hw/meson.build +++ b/hw/meson.build @@ -44,6 +44,7 @@ subdir('virtio') subdir('watchdog') subdir('xen') subdir('xenpv') +subdir('fsi') subdir('alpha') subdir('arm') diff --git a/hw/microblaze/petalogix_ml605_mmu.c b/hw/microblaze/petalogix_ml605_mmu.c index fb7889cf67c..0f5fabc32e3 100644 --- a/hw/microblaze/petalogix_ml605_mmu.c +++ b/hw/microblaze/petalogix_ml605_mmu.c @@ -133,7 +133,6 @@ petalogix_ml605_init(MachineState *machine) sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]); /* axi ethernet and dma initialization. */ - qemu_check_nic_model(&nd_table[0], "xlnx.axi-ethernet"); eth0 = qdev_new("xlnx.axi-ethernet"); dma = qdev_new("xlnx.axi-dma"); @@ -145,7 +144,7 @@ petalogix_ml605_init(MachineState *machine) "axistream-connected-target", NULL); cs = object_property_get_link(OBJECT(dma), "axistream-control-connected-target", NULL); - qdev_set_nic_properties(eth0, &nd_table[0]); + qemu_configure_nic_device(eth0, true, NULL); qdev_prop_set_uint32(eth0, "rxmem", 0x1000); qdev_prop_set_uint32(eth0, "txmem", 0x1000); object_property_set_link(OBJECT(eth0), "axistream-connected", ds, diff --git a/hw/microblaze/petalogix_s3adsp1800_mmu.c b/hw/microblaze/petalogix_s3adsp1800_mmu.c index 505639c2980..dad46bd7f98 100644 --- a/hw/microblaze/petalogix_s3adsp1800_mmu.c +++ b/hw/microblaze/petalogix_s3adsp1800_mmu.c @@ -114,9 +114,8 @@ petalogix_s3adsp1800_init(MachineState *machine) sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]); - qemu_check_nic_model(&nd_table[0], "xlnx.xps-ethernetlite"); dev = qdev_new("xlnx.xps-ethernetlite"); - qdev_set_nic_properties(dev, &nd_table[0]); + qemu_configure_nic_device(dev, true, NULL); qdev_prop_set_uint32(dev, "tx-ping-pong", 0); qdev_prop_set_uint32(dev, "rx-ping-pong", 0); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); diff --git a/hw/microblaze/xlnx-zynqmp-pmu.c b/hw/microblaze/xlnx-zynqmp-pmu.c index 5a2016672a3..1bfc9641d29 100644 --- a/hw/microblaze/xlnx-zynqmp-pmu.c +++ b/hw/microblaze/xlnx-zynqmp-pmu.c @@ -125,6 +125,8 @@ static void xlnx_zynqmp_pmu_soc_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); + /* xlnx-zynqmp-pmu-soc causes crashes when cold-plugged twice */ + dc->user_creatable = false; dc->realize = xlnx_zynqmp_pmu_soc_realize; } diff --git a/hw/mips/Kconfig b/hw/mips/Kconfig index 505381a0bba..5c83ef49cf6 100644 --- a/hw/mips/Kconfig +++ b/hw/mips/Kconfig @@ -1,13 +1,19 @@ config MALTA bool + imply PCNET_PCI + imply PCI_DEVICES + imply TEST_DEVICES + select FDC37M81X select GT64120 - select ISA_SUPERIO + 
select MIPS_CPS select PIIX + select PFLASH_CFI01 + select SERIAL + select SMBUS_EEPROM config MIPSSIM bool - select ISA_BUS - select SERIAL_ISA + select SERIAL select MIPSNET config JAZZ @@ -32,17 +38,26 @@ config JAZZ config FULOONG bool + imply PCI_DEVICES + imply TEST_DEVICES + imply ATI_VGA + imply RTL8139_PCI select PCI_BONITO + select SMBUS_EEPROM select VT82C686 config LOONGSON3V bool + imply PCI_DEVICES + imply TEST_DEVICES + imply VIRTIO_PCI + imply VIRTIO_NET imply VIRTIO_VGA imply QXL if SPICE + imply USB_OHCI_PCI select SERIAL select GOLDFISH_RTC select LOONGSON_LIOINTC - select PCI_DEVICES select PCI_EXPRESS_GENERIC_BRIDGE select MSI_NONBROKEN select FW_CFG_MIPS @@ -50,11 +65,12 @@ config LOONGSON3V config MIPS_CPS bool - select PTIMER select MIPS_ITU config MIPS_BOSTON bool + imply PCI_DEVICES + imply TEST_DEVICES select FITLOADER select MIPS_CPS select PCI_EXPRESS_XILINX diff --git a/hw/mips/boston.c b/hw/mips/boston.c index 4e11ff6cd6d..1b44fb354c4 100644 --- a/hw/mips/boston.c +++ b/hw/mips/boston.c @@ -24,7 +24,7 @@ #include "hw/boards.h" #include "hw/char/serial.h" #include "hw/ide/pci.h" -#include "hw/ide/ahci.h" +#include "hw/ide/ahci-pci.h" #include "hw/loader.h" #include "hw/loader-fit.h" #include "hw/mips/bootloader.h" @@ -677,7 +677,8 @@ static void boston_mach_init(MachineState *machine) MemoryRegion *flash, *ddr_low_alias, *lcd, *platreg; MemoryRegion *sys_mem = get_system_memory(); XilinxPCIEHost *pcie2; - PCIDevice *ahci; + PCIDevice *pdev; + AHCIPCIState *ich9; DriveInfo *hd[6]; Chardev *chr; int fw_size, fit_err; @@ -769,11 +770,12 @@ static void boston_mach_init(MachineState *machine) qemu_chr_fe_set_handlers(&s->lcd_display, NULL, NULL, boston_lcd_event, NULL, s, NULL, true); - ahci = pci_create_simple_multifunction(&PCI_BRIDGE(&pcie2->root)->sec_bus, + pdev = pci_create_simple_multifunction(&PCI_BRIDGE(&pcie2->root)->sec_bus, PCI_DEVFN(0, 0), TYPE_ICH9_AHCI); - g_assert(ARRAY_SIZE(hd) == ahci_get_num_ports(ahci)); - ide_drive_get(hd, ahci_get_num_ports(ahci)); - ahci_ide_create_devs(ahci, hd); + ich9 = ICH9_AHCI(pdev); + g_assert(ARRAY_SIZE(hd) == ich9->ahci.ports); + ide_drive_get(hd, ich9->ahci.ports); + ahci_ide_create_devs(&ich9->ahci, hd); if (machine->firmware) { fw_size = load_image_targphys(machine->firmware, diff --git a/hw/mips/cps.c b/hw/mips/cps.c index b6612c1762e..07b73b0a1f4 100644 --- a/hw/mips/cps.c +++ b/hw/mips/cps.c @@ -78,10 +78,9 @@ static void mips_cps_realize(DeviceState *dev, Error **errp) CPUMIPSState *env = &cpu->env; /* All VPs are halted on reset. Leave powering up to CPC. 
*/ - if (!object_property_set_bool(OBJECT(cpu), "start-powered-off", true, - errp)) { - return; - } + object_property_set_bool(OBJECT(cpu), "start-powered-off", true, + &error_abort); + /* All cores use the same clock tree */ qdev_connect_clock_in(DEVICE(cpu), "clk-in", s->clock); @@ -97,7 +96,6 @@ static void mips_cps_realize(DeviceState *dev, Error **errp) itu_present = true; /* Attach ITC Tag to the VP */ env->itc_tag = mips_itu_get_tag_region(&s->itu); - env->itu = &s->itu; } qemu_register_reset(main_cpu_reset, cpu); } @@ -105,8 +103,6 @@ static void mips_cps_realize(DeviceState *dev, Error **errp) /* Inter-Thread Communication Unit */ if (itu_present) { object_initialize_child(OBJECT(dev), "itu", &s->itu, TYPE_MIPS_ITU); - object_property_set_link(OBJECT(&s->itu), "cpu[0]", - OBJECT(first_cpu), &error_abort); object_property_set_uint(OBJECT(&s->itu), "num-fifo", 16, &error_abort); object_property_set_uint(OBJECT(&s->itu), "num-semaphores", 16, diff --git a/hw/mips/fuloong2e.c b/hw/mips/fuloong2e.c index 97b2c8ed8e4..a45aac368c3 100644 --- a/hw/mips/fuloong2e.c +++ b/hw/mips/fuloong2e.c @@ -201,19 +201,9 @@ static void main_cpu_reset(void *opaque) /* Network support */ static void network_init(PCIBus *pci_bus) { - int i; - - for (i = 0; i < nb_nics; i++) { - NICInfo *nd = &nd_table[i]; - const char *default_devaddr = NULL; - - if (i == 0 && (!nd->model || strcmp(nd->model, "rtl8139") == 0)) { - /* The Fuloong board has a RTL8139 card using PCI SLOT 7 */ - default_devaddr = "07"; - } - - pci_nic_init_nofail(nd, pci_bus, "rtl8139", default_devaddr); - } + /* The Fuloong board has a RTL8139 card using PCI SLOT 7 */ + pci_init_nic_in_slot(pci_bus, "rtl8139", NULL, "07"); + pci_init_nic_devices(pci_bus, "rtl8139"); } static void mips_fuloong2e_init(MachineState *machine) diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c index d33a76ad4d1..1bc17e69d3a 100644 --- a/hw/mips/jazz.c +++ b/hw/mips/jazz.c @@ -36,7 +36,6 @@ #include "hw/boards.h" #include "net/net.h" #include "hw/scsi/esp.h" -#include "hw/mips/bios.h" #include "hw/loader.h" #include "hw/rtc/mc146818rtc.h" #include "hw/timer/i8254.h" @@ -53,12 +52,19 @@ #ifdef CONFIG_TCG #include "hw/core/tcg-cpu-ops.h" #endif /* CONFIG_TCG */ +#include "cpu.h" enum jazz_model_e { JAZZ_MAGNUM, JAZZ_PICA61, }; +#if TARGET_BIG_ENDIAN +#define BIOS_FILENAME "mips_bios.bin" +#else +#define BIOS_FILENAME "mipsel_bios.bin" +#endif + static void main_cpu_reset(void *opaque) { MIPSCPU *cpu = opaque; @@ -113,15 +119,19 @@ static const MemoryRegionOps dma_dummy_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -static void mips_jazz_init_net(NICInfo *nd, IOMMUMemoryRegion *rc4030_dma_mr, +static void mips_jazz_init_net(IOMMUMemoryRegion *rc4030_dma_mr, DeviceState *rc4030, MemoryRegion *dp8393x_prom) { DeviceState *dev; SysBusDevice *sysbus; int checksum, i; uint8_t *prom; + NICInfo *nd; - qemu_check_nic_model(nd, "dp83932"); + nd = qemu_find_nic_info("dp8393x", true, "dp82932"); + if (!nd) { + return; + } dev = qdev_new("dp8393x"); qdev_set_nic_properties(dev, nd); @@ -147,6 +157,8 @@ static void mips_jazz_init_net(NICInfo *nd, IOMMUMemoryRegion *rc4030_dma_mr, prom[7] = 0xff - checksum; } +#define BIOS_SIZE (4 * MiB) + #define MAGNUM_BIOS_SIZE_MAX 0x7e000 #define MAGNUM_BIOS_SIZE \ (BIOS_SIZE < MAGNUM_BIOS_SIZE_MAX ? 
BIOS_SIZE : MAGNUM_BIOS_SIZE_MAX) @@ -277,7 +289,7 @@ static void mips_jazz_init(MachineState *machine, /* ISA devices */ i8259 = i8259_init(isa_bus, env->irq[4]); isa_bus_register_input_irqs(isa_bus, i8259); - i8257_dma_init(isa_bus, 0); + i8257_dma_init(OBJECT(rc4030), isa_bus, 0); pit = i8254_pit_init(isa_bus, 0x40, 0, NULL); pcspk = isa_new(TYPE_PC_SPEAKER); object_property_set_link(OBJECT(pcspk), "pit", OBJECT(pit), &error_fatal); @@ -316,12 +328,7 @@ static void mips_jazz_init(MachineState *machine, } /* Network controller */ - if (nb_nics == 1) { - mips_jazz_init_net(&nd_table[0], rc4030_dma_mr, rc4030, dp8393x_prom); - } else if (nb_nics > 1) { - error_report("This machine only supports one NIC"); - exit(1); - } + mips_jazz_init_net(rc4030_dma_mr, rc4030, dp8393x_prom); /* SCSI adapter */ dev = qdev_new(TYPE_SYSBUS_ESP); diff --git a/hw/mips/loongson3_bootp.h b/hw/mips/loongson3_bootp.h index d525ab745a6..1b0dd3b5917 100644 --- a/hw/mips/loongson3_bootp.h +++ b/hw/mips/loongson3_bootp.h @@ -25,7 +25,7 @@ struct efi_memory_map_loongson { uint16_t vers; /* version of efi_memory_map */ uint32_t nr_map; /* number of memory_maps */ - uint32_t mem_freq; /* memory frequence */ + uint32_t mem_freq; /* memory frequency */ struct mem_map { uint32_t node_id; /* node_id which memory attached to */ uint32_t mem_type; /* system memory, pci memory, pci io, etc. */ @@ -156,7 +156,7 @@ struct board_devices { struct loongson_special_attribute { uint16_t vers; /* version of this special */ - char special_name[64]; /* special_atribute_name */ + char special_name[64]; /* special_attribute_name */ uint32_t loongson_special_type; /* type of special device */ /* for each device's resource */ struct resource_loongson resource[MAX_RESOURCE_NUMBER]; diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c index 33eae01eca2..b10a611a98f 100644 --- a/hw/mips/loongson3_virt.c +++ b/hw/mips/loongson3_virt.c @@ -446,14 +446,16 @@ static inline void loongson3_virt_devices_init(MachineState *machine, pci_vga_init(pci_bus); if (defaults_enabled() && object_class_by_name("pci-ohci")) { + USBBus *usb_bus; + pci_create_simple(pci_bus, -1, "pci-ohci"); - usb_create_simple(usb_bus_find(-1), "usb-kbd"); - usb_create_simple(usb_bus_find(-1), "usb-tablet"); + usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS, + &error_abort)); + usb_create_simple(usb_bus, "usb-kbd"); + usb_create_simple(usb_bus, "usb-tablet"); } - for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); - } + pci_init_nic_devices(pci_bus, mc->default_nic); } static void mips_loongson3_virt_init(MachineState *machine) diff --git a/hw/mips/malta.c b/hw/mips/malta.c index 049de46a9e1..af74008c827 100644 --- a/hw/mips/malta.c +++ b/hw/mips/malta.c @@ -40,7 +40,6 @@ #include "hw/pci/pci.h" #include "hw/pci/pci_bus.h" #include "qemu/log.h" -#include "hw/mips/bios.h" #include "hw/ide/pci.h" #include "hw/irq.h" #include "hw/loader.h" @@ -59,6 +58,7 @@ #include "hw/qdev-clock.h" #include "target/mips/internal.h" #include "trace.h" +#include "cpu.h" #define ENVP_PADDR 0x2000 #define ENVP_VADDR cpu_mips_phys_to_kseg0(NULL, ENVP_PADDR) @@ -71,6 +71,7 @@ #define RESET_ADDRESS 0x1fc00000ULL #define FLASH_SIZE 0x400000 +#define BIOS_SIZE (4 * MiB) #define PIIX4_PCI_DEVFN PCI_DEVFN(10, 0) @@ -91,6 +92,12 @@ typedef struct { bool display_inited; } MaltaFPGAState; +#if TARGET_BIG_ENDIAN +#define BIOS_FILENAME "mips_bios.bin" +#else +#define BIOS_FILENAME "mipsel_bios.bin" +#endif + #define TYPE_MIPS_MALTA 
"mips-malta" OBJECT_DECLARE_SIMPLE_TYPE(MaltaState, MIPS_MALTA) @@ -605,18 +612,9 @@ static MaltaFPGAState *malta_fpga_init(MemoryRegion *address_space, /* Network support */ static void network_init(PCIBus *pci_bus) { - int i; - - for (i = 0; i < nb_nics; i++) { - NICInfo *nd = &nd_table[i]; - const char *default_devaddr = NULL; - - if (i == 0 && (!nd->model || strcmp(nd->model, "pcnet") == 0)) - /* The malta board has a PCNet card using PCI SLOT 11 */ - default_devaddr = "0b"; - - pci_nic_init_nofail(nd, pci_bus, "pcnet", default_devaddr); - } + /* The malta board has a PCNet card using PCI SLOT 11 */ + pci_init_nic_in_slot(pci_bus, "pcnet", NULL, "0b"); + pci_init_nic_devices(pci_bus, "pcnet"); } static void bl_setup_gt64120_jump_kernel(void **p, uint64_t run_addr, diff --git a/hw/mips/meson.build b/hw/mips/meson.build index 900613fc087..f06d88f3430 100644 --- a/hw/mips/meson.build +++ b/hw/mips/meson.build @@ -5,7 +5,7 @@ mips_ss.add(when: 'CONFIG_LOONGSON3V', if_true: files('loongson3_bootp.c', 'loon mips_ss.add(when: 'CONFIG_MALTA', if_true: files('malta.c')) mips_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('cps.c')) -if 'CONFIG_TCG' in config_all +if 'CONFIG_TCG' in config_all_accel mips_ss.add(when: 'CONFIG_JAZZ', if_true: files('jazz.c')) mips_ss.add(when: 'CONFIG_MIPSSIM', if_true: files('mipssim.c')) mips_ss.add(when: 'CONFIG_FULOONG', if_true: files('fuloong2e.c')) diff --git a/hw/mips/mips_int.c b/hw/mips/mips_int.c index 6c32e466a3b..eef2fd2cd11 100644 --- a/hw/mips/mips_int.c +++ b/hw/mips/mips_int.c @@ -36,7 +36,7 @@ static void cpu_mips_irq_request(void *opaque, int irq, int level) return; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); if (level) { env->CP0_Cause |= 1 << (irq + CP0Ca_IP); diff --git a/hw/mips/mipssim.c b/hw/mips/mipssim.c index 4f743f37eb5..9170d6c474b 100644 --- a/hw/mips/mipssim.c +++ b/hw/mips/mipssim.c @@ -28,14 +28,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/datadir.h" +#include "exec/address-spaces.h" #include "hw/clock.h" #include "hw/mips/mips.h" #include "hw/char/serial.h" -#include "hw/isa/isa.h" #include "net/net.h" #include "sysemu/sysemu.h" #include "hw/boards.h" -#include "hw/mips/bios.h" #include "hw/loader.h" #include "elf.h" #include "hw/sysbus.h" @@ -43,6 +42,15 @@ #include "qemu/error-report.h" #include "sysemu/qtest.h" #include "sysemu/reset.h" +#include "cpu.h" + +#define BIOS_SIZE (4 * MiB) + +#if TARGET_BIG_ENDIAN +#define BIOS_FILENAME "mips_bios.bin" +#else +#define BIOS_FILENAME "mipsel_bios.bin" +#endif static struct _loaderparams { int ram_size; @@ -110,13 +118,15 @@ static void main_cpu_reset(void *opaque) } } -static void mipsnet_init(int base, qemu_irq irq, NICInfo *nd) +static void mipsnet_init(int base, qemu_irq irq) { DeviceState *dev; SysBusDevice *s; - dev = qdev_new("mipsnet"); - qdev_set_nic_properties(dev, nd); + dev = qemu_create_nic_device("mipsnet", true, NULL); + if (!dev) { + return; + } s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); @@ -196,7 +206,11 @@ mips_mipssim_init(MachineState *machine) cpu_mips_irq_init_cpu(cpu); cpu_mips_clock_init(cpu); - /* Register 64 KB of ISA IO space at 0x1fd00000. */ + /* + * Register 64 KB of ISA IO space at 0x1fd00000. But without interrupts + * (except for the hardcoded serial port interrupt) -device cannot work, + * so do not expose the ISA bus to the user. 
+ */ memory_region_init_alias(isa, NULL, "isa_mmio", get_system_io(), 0, 0x00010000); memory_region_add_subregion(get_system_memory(), 0x1fd00000, isa); @@ -213,13 +227,12 @@ mips_mipssim_init(MachineState *machine) qdev_prop_set_uint8(dev, "endianness", DEVICE_LITTLE_ENDIAN); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, env->irq[4]); - sysbus_add_io(SYS_BUS_DEVICE(dev), 0x3f8, + memory_region_add_subregion(get_system_io(), 0x3f8, sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0)); } - if (nd_table[0].used) - /* MIPSnet uses the MIPS CPU INT0, which is interrupt 2. */ - mipsnet_init(0x4200, env->irq[2], &nd_table[0]); + /* MIPSnet uses the MIPS CPU INT0, which is interrupt 2. */ + mipsnet_init(0x4200, env->irq[2]); } static void mips_mipssim_machine_init(MachineClass *mc) diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig index cc8a8c1418f..1e08785b832 100644 --- a/hw/misc/Kconfig +++ b/hw/misc/Kconfig @@ -34,6 +34,10 @@ config PCA9552 bool depends on I2C +config PCA9554 + bool + depends on I2C + config I2C_ECHO bool default y if TEST_DEVICES @@ -87,6 +91,15 @@ config STM32F4XX_SYSCFG config STM32F4XX_EXTI bool +config STM32L4X5_EXTI + bool + +config STM32L4X5_SYSCFG + bool + +config STM32L4X5_RCC + bool + config MIPS_ITU bool diff --git a/hw/misc/a9scu.c b/hw/misc/a9scu.c index a375ebc9878..04225dfb78d 100644 --- a/hw/misc/a9scu.c +++ b/hw/misc/a9scu.c @@ -116,7 +116,7 @@ static const VMStateDescription vmstate_a9_scu = { .name = "a9-scu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, A9SCUState), VMSTATE_UINT32(status, A9SCUState), VMSTATE_END_OF_LIST() diff --git a/hw/misc/allwinner-a10-ccm.c b/hw/misc/allwinner-a10-ccm.c index 68146ee3401..575b0189524 100644 --- a/hw/misc/allwinner-a10-ccm.c +++ b/hw/misc/allwinner-a10-ccm.c @@ -193,7 +193,7 @@ static const VMStateDescription allwinner_a10_ccm_vmstate = { .name = "allwinner-a10-ccm", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwA10ClockCtlState, AW_A10_CCM_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/allwinner-a10-dramc.c b/hw/misc/allwinner-a10-dramc.c index e118b0c2fd4..a7c58fa6d06 100644 --- a/hw/misc/allwinner-a10-dramc.c +++ b/hw/misc/allwinner-a10-dramc.c @@ -147,7 +147,7 @@ static const VMStateDescription allwinner_a10_dramc_vmstate = { .name = "allwinner-a10-dramc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwA10DramControllerState, AW_A10_DRAMC_REGS_NUM), VMSTATE_END_OF_LIST() diff --git a/hw/misc/allwinner-cpucfg.c b/hw/misc/allwinner-cpucfg.c index bbd33a7dac8..31b97809695 100644 --- a/hw/misc/allwinner-cpucfg.c +++ b/hw/misc/allwinner-cpucfg.c @@ -250,7 +250,7 @@ static const VMStateDescription allwinner_cpucfg_vmstate = { .name = "allwinner-cpucfg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(gen_ctrl, AwCpuCfgState), VMSTATE_UINT32(super_standby, AwCpuCfgState), VMSTATE_UINT32(entry_addr, AwCpuCfgState), diff --git a/hw/misc/allwinner-h3-ccu.c b/hw/misc/allwinner-h3-ccu.c index 18d10745458..cfc68522d33 100644 --- a/hw/misc/allwinner-h3-ccu.c +++ b/hw/misc/allwinner-h3-ccu.c @@ -212,7 +212,7 @@ static const VMStateDescription allwinner_h3_ccu_vmstate = { .name = "allwinner-h3-ccu", .version_id = 1, 
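Several hunks in this series (hw/mips/mips_int.c just above, and later hw/misc/edu.c and the i.MX SRC models) switch from the old iothread-lock names to bql_lock()/bql_unlock() and BQL_LOCK_GUARD(), the guard form taking the lock for the remainder of the enclosing scope. The sketch below shows that scope-guard idea in standalone C using a pthread mutex and the GCC/Clang cleanup attribute; BIG_LOCK_GUARD(), big_lock and set_irq() are invented names, not QEMU's implementation.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void big_lock_release(int *unused)
{
    (void)unused;
    pthread_mutex_unlock(&big_lock);
}

#define GLUE_(a, b) a##b
#define GLUE(a, b)  GLUE_(a, b)

/* Hold big_lock until the end of the enclosing scope. */
#define BIG_LOCK_GUARD() \
    __attribute__((cleanup(big_lock_release), unused)) \
    int GLUE(big_lock_guard_, __LINE__) = (pthread_mutex_lock(&big_lock), 0)

static int irq_level;

static void set_irq(int level)
{
    BIG_LOCK_GUARD();      /* lock taken here */
    irq_level = level;     /* ...critical section... */
}                          /* lock dropped automatically on return */

int main(void)
{
    set_irq(1);
    printf("irq level is %d\n", irq_level);
    return 0;
}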
.minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwH3ClockCtlState, AW_H3_CCU_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/allwinner-h3-dramc.c b/hw/misc/allwinner-h3-dramc.c index 1d37cf422cd..e168ffe6233 100644 --- a/hw/misc/allwinner-h3-dramc.c +++ b/hw/misc/allwinner-h3-dramc.c @@ -324,7 +324,7 @@ static const VMStateDescription allwinner_h3_dramc_vmstate = { .name = "allwinner-h3-dramc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(dramcom, AwH3DramCtlState, AW_H3_DRAMCOM_REGS_NUM), VMSTATE_UINT32_ARRAY(dramctl, AwH3DramCtlState, AW_H3_DRAMCTL_REGS_NUM), VMSTATE_UINT32_ARRAY(dramphy, AwH3DramCtlState, AW_H3_DRAMPHY_REGS_NUM), diff --git a/hw/misc/allwinner-h3-sysctrl.c b/hw/misc/allwinner-h3-sysctrl.c index 1d07efa880d..2d29be83e3a 100644 --- a/hw/misc/allwinner-h3-sysctrl.c +++ b/hw/misc/allwinner-h3-sysctrl.c @@ -110,7 +110,7 @@ static const VMStateDescription allwinner_h3_sysctrl_vmstate = { .name = "allwinner-h3-sysctrl", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwH3SysCtrlState, AW_H3_SYSCTRL_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/allwinner-r40-ccu.c b/hw/misc/allwinner-r40-ccu.c index d82fee12db6..33baf4429dd 100644 --- a/hw/misc/allwinner-r40-ccu.c +++ b/hw/misc/allwinner-r40-ccu.c @@ -179,7 +179,7 @@ static const VMStateDescription allwinner_r40_ccu_vmstate = { .name = "allwinner-r40-ccu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwR40ClockCtlState, AW_R40_CCU_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/allwinner-r40-dramc.c b/hw/misc/allwinner-r40-dramc.c index 3d81ddb2e18..75b0bef4fd9 100644 --- a/hw/misc/allwinner-r40-dramc.c +++ b/hw/misc/allwinner-r40-dramc.c @@ -474,7 +474,7 @@ static const VMStateDescription allwinner_r40_dramc_vmstate = { .name = "allwinner-r40-dramc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(dramcom, AwR40DramCtlState, AW_R40_DRAMCOM_REGS_NUM), VMSTATE_UINT32_ARRAY(dramctl, AwR40DramCtlState, diff --git a/hw/misc/allwinner-sid.c b/hw/misc/allwinner-sid.c index 6d61f55b1d4..e5cd431743b 100644 --- a/hw/misc/allwinner-sid.c +++ b/hw/misc/allwinner-sid.c @@ -136,7 +136,7 @@ static const VMStateDescription allwinner_sid_vmstate = { .name = "allwinner-sid", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, AwSidState), VMSTATE_UINT32(rdkey, AwSidState), VMSTATE_UINT8_ARRAY_V(identifier.data, AwSidState, sizeof(QemuUUID), 1), diff --git a/hw/misc/allwinner-sramc.c b/hw/misc/allwinner-sramc.c index d76c24d081f..cf10ca8ffe8 100644 --- a/hw/misc/allwinner-sramc.c +++ b/hw/misc/allwinner-sramc.c @@ -116,7 +116,7 @@ static const VMStateDescription allwinner_sramc_vmstate = { .name = "allwinner-sramc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(sram_ver, AwSRAMCState), VMSTATE_UINT32(sram_soft_entry_reg0, AwSRAMCState), VMSTATE_END_OF_LIST() diff --git a/hw/misc/applesmc.c b/hw/misc/applesmc.c index 72300d0cbc3..14e3ef667d8 100644 --- a/hw/misc/applesmc.c +++ b/hw/misc/applesmc.c @@ -274,6 +274,7 @@ static void 
qdev_applesmc_isa_reset(DeviceState *dev) /* Remove existing entries */ QLIST_FOREACH_SAFE(d, &s->data_def, node, next) { QLIST_REMOVE(d, node); + g_free(d); } s->status = 0x00; s->status_1e = 0x00; @@ -342,7 +343,6 @@ static void applesmc_isa_realize(DeviceState *dev, Error **errp) } QLIST_INIT(&s->data_def); - qdev_applesmc_isa_reset(dev); } static Property applesmc_isa_properties[] = { diff --git a/hw/misc/arm_l2x0.c b/hw/misc/arm_l2x0.c index 75c3eb8982f..b14d0a26767 100644 --- a/hw/misc/arm_l2x0.c +++ b/hw/misc/arm_l2x0.c @@ -49,7 +49,7 @@ static const VMStateDescription vmstate_l2x0 = { .name = "l2x0", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctrl, L2x0State), VMSTATE_UINT32(aux_ctrl, L2x0State), VMSTATE_UINT32(data_ctrl, L2x0State), diff --git a/hw/misc/arm_sysctl.c b/hw/misc/arm_sysctl.c index 3e4f4b05244..5108f3eda92 100644 --- a/hw/misc/arm_sysctl.c +++ b/hw/misc/arm_sysctl.c @@ -57,7 +57,7 @@ static const VMStateDescription vmstate_arm_sysctl = { .name = "realview_sysctl", .version_id = 4, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(leds, arm_sysctl_state), VMSTATE_UINT16(lockval, arm_sysctl_state), VMSTATE_UINT32(cfgdata1, arm_sysctl_state), diff --git a/hw/misc/armsse-cpu-pwrctrl.c b/hw/misc/armsse-cpu-pwrctrl.c index 42fc38879f2..bfc51d175cb 100644 --- a/hw/misc/armsse-cpu-pwrctrl.c +++ b/hw/misc/armsse-cpu-pwrctrl.c @@ -109,7 +109,7 @@ static const VMStateDescription pwrctrl_vmstate = { .name = "armsse-cpu-pwrctrl", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cpupwrcfg, ARMSSECPUPwrCtrl), VMSTATE_END_OF_LIST() }, diff --git a/hw/misc/armsse-mhu.c b/hw/misc/armsse-mhu.c index 0be7f0fc874..55625b2cca8 100644 --- a/hw/misc/armsse-mhu.c +++ b/hw/misc/armsse-mhu.c @@ -157,7 +157,7 @@ static const VMStateDescription armsse_mhu_vmstate = { .name = "armsse-mhu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cpu0intr, ARMSSEMHU), VMSTATE_UINT32(cpu1intr, ARMSSEMHU), VMSTATE_END_OF_LIST() diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c index b07506ec04e..c06c04ddc66 100644 --- a/hw/misc/aspeed_hace.c +++ b/hw/misc/aspeed_hace.c @@ -433,7 +433,7 @@ static const VMStateDescription vmstate_aspeed_hace = { .name = TYPE_ASPEED_HACE, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS), VMSTATE_UINT32(total_req_len, AspeedHACEState), VMSTATE_UINT32(iov_count, AspeedHACEState), diff --git a/hw/misc/aspeed_i3c.c b/hw/misc/aspeed_i3c.c index d1ff6176716..827c9e522d3 100644 --- a/hw/misc/aspeed_i3c.c +++ b/hw/misc/aspeed_i3c.c @@ -168,7 +168,7 @@ static const VMStateDescription aspeed_i3c_device_vmstate = { .name = TYPE_ASPEED_I3C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT32_ARRAY(regs, AspeedI3CDevice, ASPEED_I3C_DEVICE_NR_REGS), VMSTATE_END_OF_LIST(), } @@ -349,7 +349,7 @@ static const VMStateDescription vmstate_aspeed_i3c = { .name = TYPE_ASPEED_I3C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedI3CState, ASPEED_I3C_NR_REGS), VMSTATE_STRUCT_ARRAY(devices, AspeedI3CState, 
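The hw/misc/applesmc.c change above plugs a leak in qdev_applesmc_isa_reset(): entries were unlinked with QLIST_REMOVE() but never freed, so g_free(d) is added inside the safe-iteration loop (and the explicit reset call at the end of realize is dropped). The safe-iteration idiom is to grab the next pointer before freeing the current node; here it is in plain C with a minimal hand-rolled list (struct entry, free_all) standing in for QEMU's QLIST macros.

#include <stdio.h>
#include <stdlib.h>

struct entry {
    int key;
    struct entry *next;
};

/* Free every node; 'next' is saved before free() so the walk never touches
 * freed memory -- the same reason QLIST_FOREACH_SAFE exists. */
static void free_all(struct entry **head)
{
    struct entry *e = *head, *next;

    while (e) {
        next = e->next;
        free(e);
        e = next;
    }
    *head = NULL;
}

int main(void)
{
    struct entry *head = NULL;

    for (int i = 0; i < 3; i++) {
        struct entry *e = calloc(1, sizeof(*e));
        e->key = i;
        e->next = head;
        head = e;
    }
    free_all(&head);
    printf("list emptied without leaking\n");
    return 0;
}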
ASPEED_I3C_NR_DEVICES, 1, aspeed_i3c_device_vmstate, AspeedI3CDevice), diff --git a/hw/misc/aspeed_lpc.c b/hw/misc/aspeed_lpc.c index 2dddb27c35d..193f0dea591 100644 --- a/hw/misc/aspeed_lpc.c +++ b/hw/misc/aspeed_lpc.c @@ -447,7 +447,7 @@ static const VMStateDescription vmstate_aspeed_lpc = { .name = TYPE_ASPEED_LPC, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedLPCState, ASPEED_LPC_NR_REGS), VMSTATE_UINT32(subdevice_irqs_pending, AspeedLPCState), VMSTATE_END_OF_LIST(), diff --git a/hw/misc/aspeed_sbc.c b/hw/misc/aspeed_sbc.c index c6f328e3be2..8bb1f90e4e7 100644 --- a/hw/misc/aspeed_sbc.c +++ b/hw/misc/aspeed_sbc.c @@ -130,7 +130,7 @@ static const VMStateDescription vmstate_aspeed_sbc = { .name = TYPE_ASPEED_SBC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedSBCState, ASPEED_SBC_NR_REGS), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c index 83353649064..1ac04b6cb02 100644 --- a/hw/misc/aspeed_scu.c +++ b/hw/misc/aspeed_scu.c @@ -531,7 +531,7 @@ static const VMStateDescription vmstate_aspeed_scu = { .name = "aspeed.scu", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedSCUState, ASPEED_AST2600_SCU_NR_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/aspeed_sdmc.c b/hw/misc/aspeed_sdmc.c index abb27279339..64cd1a81dcd 100644 --- a/hw/misc/aspeed_sdmc.c +++ b/hw/misc/aspeed_sdmc.c @@ -243,7 +243,7 @@ static const VMStateDescription vmstate_aspeed_sdmc = { .name = "aspeed.sdmc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedSDMCState, ASPEED_SDMC_NR_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/aspeed_xdma.c b/hw/misc/aspeed_xdma.c index 1c21577c98c..76ab8467ddb 100644 --- a/hw/misc/aspeed_xdma.c +++ b/hw/misc/aspeed_xdma.c @@ -144,7 +144,7 @@ static void aspeed_xdma_reset(DeviceState *dev) static const VMStateDescription aspeed_xdma_vmstate = { .name = TYPE_ASPEED_XDMA, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedXDMAState, ASPEED_XDMA_NUM_REGS), VMSTATE_END_OF_LIST(), }, diff --git a/hw/misc/axp2xx.c b/hw/misc/axp2xx.c index 41538c1cd78..af646878cd2 100644 --- a/hw/misc/axp2xx.c +++ b/hw/misc/axp2xx.c @@ -217,7 +217,7 @@ static int axp2xx_tx(I2CSlave *i2c, uint8_t data) static const VMStateDescription vmstate_axp2xx = { .name = TYPE_AXP2XX, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(regs, AXP2xxI2CState, NR_REGS), VMSTATE_UINT8(ptr, AXP2xxI2CState), VMSTATE_UINT8(count, AXP2xxI2CState), diff --git a/hw/misc/bcm2835_cprman.c b/hw/misc/bcm2835_cprman.c index 75e6c574d46..91c8f7bd170 100644 --- a/hw/misc/bcm2835_cprman.c +++ b/hw/misc/bcm2835_cprman.c @@ -125,7 +125,7 @@ static const VMStateDescription pll_vmstate = { .name = TYPE_CPRMAN_PLL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(xosc_in, CprmanPllState), VMSTATE_END_OF_LIST() } @@ -229,7 +229,7 @@ static const VMStateDescription pll_channel_vmstate = { .name = TYPE_CPRMAN_PLL_CHANNEL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_CLOCK(pll_in, CprmanPllChannelState), VMSTATE_END_OF_LIST() } @@ -349,7 +349,7 @@ static const VMStateDescription clock_mux_vmstate = { .name = TYPE_CPRMAN_CLOCK_MUX, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_CLOCK(srcs, CprmanClockMuxState, CPRMAN_NUM_CLOCK_MUX_SRC), VMSTATE_END_OF_LIST() @@ -404,7 +404,7 @@ static const VMStateDescription dsi0hsck_mux_vmstate = { .name = TYPE_CPRMAN_DSI0HSCK_MUX, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(plla_in, CprmanDsi0HsckMuxState), VMSTATE_CLOCK(plld_in, CprmanDsi0HsckMuxState), VMSTATE_END_OF_LIST() @@ -772,7 +772,7 @@ static const VMStateDescription cprman_vmstate = { .name = TYPE_BCM2835_CPRMAN, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, BCM2835CprmanState, CPRMAN_NUM_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/bcm2835_mbox.c b/hw/misc/bcm2835_mbox.c index 1e4e061bc1f..67bfc3bd719 100644 --- a/hw/misc/bcm2835_mbox.c +++ b/hw/misc/bcm2835_mbox.c @@ -257,7 +257,7 @@ static const VMStateDescription vmstate_bcm2835_mbox_box = { .name = TYPE_BCM2835_MBOX "_box", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, BCM2835Mbox, MBOX_SIZE), VMSTATE_UINT32(count, BCM2835Mbox), VMSTATE_UINT32(status, BCM2835Mbox), @@ -271,7 +271,7 @@ static const VMStateDescription vmstate_bcm2835_mbox = { .name = TYPE_BCM2835_MBOX, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL_ARRAY(available, BCM2835MboxState, MBOX_CHAN_COUNT), VMSTATE_STRUCT_ARRAY(mbox, BCM2835MboxState, 2, 1, vmstate_bcm2835_mbox_box, BCM2835Mbox), diff --git a/hw/misc/bcm2835_mphi.c b/hw/misc/bcm2835_mphi.c index 0428e10ba5c..f1eeda27862 100644 --- a/hw/misc/bcm2835_mphi.c +++ b/hw/misc/bcm2835_mphi.c @@ -156,7 +156,7 @@ const VMStateDescription vmstate_mphi_state = { .name = "mphi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(outdda, BCM2835MphiState), VMSTATE_UINT32(outddb, BCM2835MphiState), VMSTATE_UINT32(ctrl, BCM2835MphiState), diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c index 976f3d34e5e..1649da86689 100644 --- a/hw/misc/bcm2835_powermgt.c +++ b/hw/misc/bcm2835_powermgt.c @@ -109,7 +109,7 @@ static const VMStateDescription vmstate_bcm2835_powermgt = { .name = TYPE_BCM2835_POWERMGT, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(rstc, BCM2835PowerMgtState), VMSTATE_UINT32(rsts, BCM2835PowerMgtState), VMSTATE_UINT32(wdog, BCM2835PowerMgtState), diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c index ff55a4e2cd2..bdd9a6bbcec 100644 --- a/hw/misc/bcm2835_property.c +++ b/hw/misc/bcm2835_property.c @@ -19,6 +19,8 @@ #include "trace.h" #include "hw/arm/raspi_platform.h" +#define VCHI_BUSADDR_SIZE sizeof(uint32_t) + /* https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface */ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) @@ -138,6 +140,13 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) resplen = 8; break; + case RPI_FWREQ_GET_CLOCKS: + /* TODO: add more clock IDs if needed */ + 
stl_le_phys(&s->dma_as, value + 12, 0); + stl_le_phys(&s->dma_as, value + 16, RPI_FIRMWARE_ARM_CLK_ID); + resplen = 8; + break; + case RPI_FWREQ_SET_CLOCK_RATE: case RPI_FWREQ_SET_MAX_CLOCK_RATE: case RPI_FWREQ_SET_MIN_CLOCK_RATE: @@ -276,6 +285,7 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) stl_le_phys(&s->dma_as, value + 12, 0); resplen = 4; break; + case RPI_FWREQ_FRAMEBUFFER_GET_NUM_DISPLAYS: stl_le_phys(&s->dma_as, value + 12, 1); resplen = 4; @@ -301,6 +311,17 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) resplen); break; + case RPI_FWREQ_GET_THROTTLED: + stl_le_phys(&s->dma_as, value + 12, 0); + resplen = 4; + break; + + case RPI_FWREQ_VCHIQ_INIT: + stl_le_phys(&s->dma_as, + value + offsetof(rpi_firmware_prop_request_t, payload), + 0); + resplen = VCHI_BUSADDR_SIZE; + break; default: qemu_log_mask(LOG_UNIMP, "bcm2835_property: unhandled tag 0x%08x\n", tag); @@ -384,7 +405,7 @@ static const VMStateDescription vmstate_bcm2835_property = { .name = TYPE_BCM2835_PROPERTY, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_MACADDR(macaddr, BCM2835PropertyState), VMSTATE_UINT32(addr, BCM2835PropertyState), VMSTATE_BOOL(pending, BCM2835PropertyState), diff --git a/hw/misc/bcm2835_rng.c b/hw/misc/bcm2835_rng.c index b3c80cf1867..10e741b11d1 100644 --- a/hw/misc/bcm2835_rng.c +++ b/hw/misc/bcm2835_rng.c @@ -99,7 +99,7 @@ static const VMStateDescription vmstate_bcm2835_rng = { .name = TYPE_BCM2835_RNG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(rng_ctrl, BCM2835RngState), VMSTATE_UINT32(rng_status, BCM2835RngState), VMSTATE_END_OF_LIST() diff --git a/hw/misc/bcm2835_thermal.c b/hw/misc/bcm2835_thermal.c index c6f3b1ad605..ee7816b8a5d 100644 --- a/hw/misc/bcm2835_thermal.c +++ b/hw/misc/bcm2835_thermal.c @@ -105,7 +105,7 @@ static const VMStateDescription bcm2835_thermal_vmstate = { .name = "bcm2835_thermal", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctl, Bcm2835ThermalState), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/djmemc.c b/hw/misc/djmemc.c index fd02640838b..9b69656c3a8 100644 --- a/hw/misc/djmemc.c +++ b/hw/misc/djmemc.c @@ -107,7 +107,7 @@ static const VMStateDescription vmstate_djmemc = { .name = "djMEMC", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, DJMEMCState, DJMEMC_NUM_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/eccmemctl.c b/hw/misc/eccmemctl.c index c65806e3d9a..5a14a489991 100644 --- a/hw/misc/eccmemctl.c +++ b/hw/misc/eccmemctl.c @@ -272,7 +272,7 @@ static const VMStateDescription vmstate_ecc = { .name ="ECC", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, ECCState, ECC_NREGS), VMSTATE_BUFFER(diag, ECCState), VMSTATE_UINT32(version, ECCState), diff --git a/hw/misc/edu.c b/hw/misc/edu.c index e64a246d3fe..2a976ca2b15 100644 --- a/hw/misc/edu.c +++ b/hw/misc/edu.c @@ -355,9 +355,9 @@ static void *edu_fact_thread(void *opaque) smp_mb__after_rmw(); if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) { - qemu_mutex_lock_iothread(); + bql_lock(); edu_raise_irq(edu, FACT_IRQ); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } diff --git a/hw/misc/exynos4210_clk.c b/hw/misc/exynos4210_clk.c 
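The bcm2835_property additions above (RPI_FWREQ_GET_CLOCKS, GET_THROTTLED, VCHIQ_INIT) all follow the same pattern: treat 'value' as the guest address of the current property tag, store the answer at value + 12 (and value + 16 for GET_CLOCKS), then set resplen. The sketch below builds one such tag in a local buffer; the layout (+0 tag id, +4 value-buffer size, +8 request/response code, +12.. payload), the tag id and the clock id are taken as assumptions from the documented mailbox property interface, and stl_le()/ldl_le() are hand-rolled stand-ins for stl_le_phys()/ldl_le_phys().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Little-endian store/load helpers. */
static void stl_le(uint8_t *buf, uint32_t off, uint32_t val)
{
    buf[off] = val & 0xff;
    buf[off + 1] = (val >> 8) & 0xff;
    buf[off + 2] = (val >> 16) & 0xff;
    buf[off + 3] = (val >> 24) & 0xff;
}

static uint32_t ldl_le(const uint8_t *buf, uint32_t off)
{
    return (uint32_t)buf[off] | (uint32_t)buf[off + 1] << 8 |
           (uint32_t)buf[off + 2] << 16 | (uint32_t)buf[off + 3] << 24;
}

int main(void)
{
    uint8_t tag[20];
    const uint32_t TAG_GET_CLOCKS = 0x00010007;  /* assumed tag id */
    const uint32_t ARM_CLK_ID = 0x00000003;      /* assumed clock id */

    memset(tag, 0, sizeof(tag));
    stl_le(tag, 0, TAG_GET_CLOCKS);
    stl_le(tag, 4, 8);               /* value buffer is 8 bytes long */
    stl_le(tag, 8, (1u << 31) | 8);  /* response flag + response length */
    stl_le(tag, 12, 0);              /* parent clock id (none) */
    stl_le(tag, 16, ARM_CLK_ID);     /* first clock in the list */

    printf("response: parent=%u clock=%u\n", ldl_le(tag, 12), ldl_le(tag, 16));
    return 0;
}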
index 58cec282f75..4566a426faa 100644 --- a/hw/misc/exynos4210_clk.c +++ b/hw/misc/exynos4210_clk.c @@ -135,7 +135,7 @@ static const VMStateDescription exynos4210_clk_vmstate = { .name = TYPE_EXYNOS4210_CLK, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, Exynos4210ClkState, EXYNOS4210_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/exynos4210_pmu.c b/hw/misc/exynos4210_pmu.c index e24139c630b..7e28e790d7c 100644 --- a/hw/misc/exynos4210_pmu.c +++ b/hw/misc/exynos4210_pmu.c @@ -492,7 +492,7 @@ static const VMStateDescription exynos4210_pmu_vmstate = { .name = "exynos4210.pmu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, Exynos4210PmuState, PMU_NUM_OF_REGISTERS), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/exynos4210_rng.c b/hw/misc/exynos4210_rng.c index 9214ec14cc0..0756bd32059 100644 --- a/hw/misc/exynos4210_rng.c +++ b/hw/misc/exynos4210_rng.c @@ -243,7 +243,7 @@ static const VMStateDescription exynos4210_rng_vmstate = { .name = TYPE_EXYNOS4210_RNG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_ARRAY(randr_value, Exynos4210RngState, EXYNOS4210_RNG_PRNG_NUM), VMSTATE_UINT32(seed_set, Exynos4210RngState), diff --git a/hw/misc/imx25_ccm.c b/hw/misc/imx25_ccm.c index ff996e2f2ca..d888966014d 100644 --- a/hw/misc/imx25_ccm.c +++ b/hw/misc/imx25_ccm.c @@ -101,7 +101,7 @@ static const VMStateDescription vmstate_imx25_ccm = { .name = TYPE_IMX25_CCM, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, IMX25CCMState, IMX25_CCM_MAX_REG), VMSTATE_END_OF_LIST() }, diff --git a/hw/misc/imx31_ccm.c b/hw/misc/imx31_ccm.c index ad30a4b2c0c..a9059bb1f72 100644 --- a/hw/misc/imx31_ccm.c +++ b/hw/misc/imx31_ccm.c @@ -98,7 +98,7 @@ static const VMStateDescription vmstate_imx31_ccm = { .name = TYPE_IMX31_CCM, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, IMX31CCMState, IMX31_CCM_MAX_REG), VMSTATE_END_OF_LIST() }, diff --git a/hw/misc/imx6_ccm.c b/hw/misc/imx6_ccm.c index 85af466c2be..56489d8b57b 100644 --- a/hw/misc/imx6_ccm.c +++ b/hw/misc/imx6_ccm.c @@ -235,7 +235,7 @@ static const VMStateDescription vmstate_imx6_ccm = { .name = TYPE_IMX6_CCM, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(ccm, IMX6CCMState, CCM_MAX), VMSTATE_UINT32_ARRAY(analog, IMX6CCMState, CCM_ANALOG_MAX), VMSTATE_END_OF_LIST() diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c index a9c64d06ebc..0c6003559f5 100644 --- a/hw/misc/imx6_src.c +++ b/hw/misc/imx6_src.c @@ -77,7 +77,7 @@ static const VMStateDescription vmstate_imx6_src = { .name = TYPE_IMX6_SRC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IMX6SRCState, SRC_MAX), VMSTATE_END_OF_LIST() }, @@ -131,7 +131,7 @@ static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data) struct SRCSCRResetInfo *ri = data.host_ptr; IMX6SRCState *s = ri->s; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0); DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", diff --git a/hw/misc/imx6ul_ccm.c b/hw/misc/imx6ul_ccm.c 
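The pattern repeated throughout hw/misc/ in these hunks -- .fields = (VMStateField[]) { ... } becoming .fields = (const VMStateField[]) { ... } -- changes the type of a compound literal used to initialize a static descriptor. At file scope a compound literal has static storage duration, so adding const lets the array be placed in read-only data instead of a writable section. A tiny standalone example of the same construct follows; FieldDesc, StateDesc and demo_desc are made-up types for illustration, not QEMU's.

#include <stdio.h>

typedef struct FieldDesc {
    const char *name;
    unsigned offset;
} FieldDesc;

typedef struct StateDesc {
    const char *name;
    const FieldDesc *fields;   /* points at a read-only field list */
} StateDesc;

/* File-scope compound literal: static storage duration, and with 'const'
 * the array can live in .rodata. */
static const StateDesc demo_desc = {
    .name = "demo-device",
    .fields = (const FieldDesc[]) {
        { .name = "ctrl",   .offset = 0 },
        { .name = "status", .offset = 4 },
        { .name = NULL }   /* terminator, in the spirit of VMSTATE_END_OF_LIST() */
    },
};

int main(void)
{
    for (const FieldDesc *f = demo_desc.fields; f->name; f++) {
        printf("%s at +%u\n", f->name, f->offset);
    }
    return 0;
}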
index e01bb68ac72..bbc0be99211 100644 --- a/hw/misc/imx6ul_ccm.c +++ b/hw/misc/imx6ul_ccm.c @@ -285,7 +285,7 @@ static const VMStateDescription vmstate_imx6ul_ccm = { .name = TYPE_IMX6UL_CCM, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(ccm, IMX6ULCCMState, CCM_MAX), VMSTATE_UINT32_ARRAY(analog, IMX6ULCCMState, CCM_ANALOG_MAX), VMSTATE_END_OF_LIST() diff --git a/hw/misc/imx7_ccm.c b/hw/misc/imx7_ccm.c index 7539f7fb457..88354f020ee 100644 --- a/hw/misc/imx7_ccm.c +++ b/hw/misc/imx7_ccm.c @@ -214,7 +214,7 @@ static const VMStateDescription vmstate_imx7_ccm = { .name = TYPE_IMX7_CCM, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(ccm, IMX7CCMState, CCM_MAX), VMSTATE_END_OF_LIST() }, @@ -286,7 +286,7 @@ static const VMStateDescription vmstate_imx7_analog = { .name = TYPE_IMX7_ANALOG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(analog, IMX7AnalogState, ANALOG_MAX), VMSTATE_UINT32_ARRAY(pmu, IMX7AnalogState, PMU_MAX), VMSTATE_END_OF_LIST() diff --git a/hw/misc/imx7_snvs.c b/hw/misc/imx7_snvs.c index a245f96cd4e..edb2df215a6 100644 --- a/hw/misc/imx7_snvs.c +++ b/hw/misc/imx7_snvs.c @@ -13,28 +13,100 @@ */ #include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/timer.h" +#include "migration/vmstate.h" #include "hw/misc/imx7_snvs.h" +#include "qemu/cutils.h" #include "qemu/module.h" +#include "sysemu/sysemu.h" +#include "sysemu/rtc.h" #include "sysemu/runstate.h" #include "trace.h" +#define RTC_FREQ 32768ULL + +static const VMStateDescription vmstate_imx7_snvs = { + .name = TYPE_IMX7_SNVS, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT64(tick_offset, IMX7SNVSState), + VMSTATE_UINT64(lpcr, IMX7SNVSState), + VMSTATE_END_OF_LIST() + } +}; + +static uint64_t imx7_snvs_get_count(IMX7SNVSState *s) +{ + uint64_t ticks = muldiv64(qemu_clock_get_ns(rtc_clock), RTC_FREQ, + NANOSECONDS_PER_SECOND); + return s->tick_offset + ticks; +} + static uint64_t imx7_snvs_read(void *opaque, hwaddr offset, unsigned size) { - trace_imx7_snvs_read(offset, 0); + IMX7SNVSState *s = IMX7_SNVS(opaque); + uint64_t ret = 0; + + switch (offset) { + case SNVS_LPSRTCMR: + ret = extract64(imx7_snvs_get_count(s), 32, 15); + break; + case SNVS_LPSRTCLR: + ret = extract64(imx7_snvs_get_count(s), 0, 32); + break; + case SNVS_LPCR: + ret = s->lpcr; + break; + } - return 0; + trace_imx7_snvs_read(offset, ret, size); + + return ret; +} + +static void imx7_snvs_reset(DeviceState *dev) +{ + IMX7SNVSState *s = IMX7_SNVS(dev); + + s->lpcr = 0; } static void imx7_snvs_write(void *opaque, hwaddr offset, uint64_t v, unsigned size) { - const uint32_t value = v; - const uint32_t mask = SNVS_LPCR_TOP | SNVS_LPCR_DP_EN; + trace_imx7_snvs_write(offset, v, size); + + IMX7SNVSState *s = IMX7_SNVS(opaque); - trace_imx7_snvs_write(offset, value); + uint64_t new_value = 0, snvs_count = 0; - if (offset == SNVS_LPCR && ((value & mask) == mask)) { - qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + if (offset == SNVS_LPSRTCMR || offset == SNVS_LPSRTCLR) { + snvs_count = imx7_snvs_get_count(s); + } + + switch (offset) { + case SNVS_LPSRTCMR: + new_value = deposit64(snvs_count, 32, 32, v); + break; + case SNVS_LPSRTCLR: + new_value = deposit64(snvs_count, 0, 32, v); + break; + case SNVS_LPCR: { + s->lpcr = v; + + const uint32_t mask = 
SNVS_LPCR_TOP | SNVS_LPCR_DP_EN; + + if ((v & mask) == mask) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } + break; + } + } + + if (offset == SNVS_LPSRTCMR || offset == SNVS_LPSRTCLR) { + s->tick_offset += new_value - snvs_count; } } @@ -59,17 +131,24 @@ static void imx7_snvs_init(Object *obj) { SysBusDevice *sd = SYS_BUS_DEVICE(obj); IMX7SNVSState *s = IMX7_SNVS(obj); + struct tm tm; memory_region_init_io(&s->mmio, obj, &imx7_snvs_ops, s, TYPE_IMX7_SNVS, 0x1000); sysbus_init_mmio(sd, &s->mmio); + + qemu_get_timedate(&tm, 0); + s->tick_offset = mktimegm(&tm) - + qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND; } static void imx7_snvs_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->reset = imx7_snvs_reset; + dc->vmsd = &vmstate_imx7_snvs; dc->desc = "i.MX7 Secure Non-Volatile Storage Module"; } diff --git a/hw/misc/imx7_src.c b/hw/misc/imx7_src.c index 983251e86f7..b3725ff6e72 100644 --- a/hw/misc/imx7_src.c +++ b/hw/misc/imx7_src.c @@ -84,7 +84,7 @@ static const VMStateDescription vmstate_imx7_src = { .name = TYPE_IMX7_SRC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IMX7SRCState, SRC_MAX), VMSTATE_END_OF_LIST() }, @@ -136,7 +136,7 @@ static void imx7_clear_reset_bit(CPUState *cpu, run_on_cpu_data data) struct SRCSCRResetInfo *ri = data.host_ptr; IMX7SRCState *s = ri->s; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); s->regs[SRC_A7RCR0] = deposit32(s->regs[SRC_A7RCR0], ri->reset_bit, 1, 0); diff --git a/hw/misc/imx_rngc.c b/hw/misc/imx_rngc.c index 082c6980ad5..ab7775e0952 100644 --- a/hw/misc/imx_rngc.c +++ b/hw/misc/imx_rngc.c @@ -245,7 +245,7 @@ static const VMStateDescription vmstate_imx_rngc = { .name = RNGC_NAME, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(op_self_test, IMXRNGCState), VMSTATE_UINT8(op_seed, IMXRNGCState), VMSTATE_UINT8(mask, IMXRNGCState), diff --git a/hw/misc/iosb.c b/hw/misc/iosb.c index e7e9dcca476..e20305e8013 100644 --- a/hw/misc/iosb.c +++ b/hw/misc/iosb.c @@ -105,7 +105,7 @@ static const VMStateDescription vmstate_iosb = { .name = "IOSB", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IOSBState, IOSB_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/iotkit-secctl.c b/hw/misc/iotkit-secctl.c index b5a9e30a2c6..f9c45f60bf3 100644 --- a/hw/misc/iotkit-secctl.c +++ b/hw/misc/iotkit-secctl.c @@ -753,7 +753,7 @@ static const VMStateDescription iotkit_secctl_ppc_vmstate = { .name = "iotkit-secctl-ppc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ns, IoTKitSecCtlPPC), VMSTATE_UINT32(sp, IoTKitSecCtlPPC), VMSTATE_UINT32(nsp, IoTKitSecCtlPPC), @@ -765,7 +765,7 @@ static const VMStateDescription iotkit_secctl_mpcintstatus_vmstate = { .name = "iotkit-secctl-mpcintstatus", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(mpcintstatus, IoTKitSecCtl), VMSTATE_END_OF_LIST() } @@ -781,7 +781,7 @@ static const VMStateDescription iotkit_secctl_msc_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = needed_always, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(secmscintstat, IoTKitSecCtl), VMSTATE_UINT32(secmscinten, IoTKitSecCtl), 
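The imx7_snvs rework above turns the stubbed SNVS block into a working RTC: a 32768 Hz count is derived from qemu_clock_get_ns(rtc_clock), tick_offset biases it toward the host's wall-clock date, reads expose the count split across LPSRTCMR (bits 46:32) and LPSRTCLR (bits 31:0) via extract64(), and guest writes fold the delta back into tick_offset via deposit64(). The standalone sketch below keeps everything in ticks for simplicity; extract64()/deposit64()/rtc_count() here are hand-rolled stand-ins, and the epoch seed and uptime are invented values.

#include <stdint.h>
#include <stdio.h>

#define RTC_FREQ   32768ULL
#define NS_PER_SEC 1000000000ULL

/* Minimal stand-ins for QEMU's extract64()/deposit64(). */
static uint64_t extract64(uint64_t v, int start, int len)
{
    return (v >> start) & (~0ULL >> (64 - len));
}

static uint64_t deposit64(uint64_t v, int start, int len, uint64_t field)
{
    uint64_t mask = (~0ULL >> (64 - len)) << start;
    return (v & ~mask) | ((field << start) & mask);
}

static uint64_t tick_offset;   /* RTC ticks when the clock reads 0 ns */

static uint64_t rtc_count(uint64_t clock_ns)
{
    /* poor man's muldiv64(clock_ns, RTC_FREQ, NS_PER_SEC) */
    return tick_offset + clock_ns / NS_PER_SEC * RTC_FREQ
           + (clock_ns % NS_PER_SEC) * RTC_FREQ / NS_PER_SEC;
}

int main(void)
{
    uint64_t now_ns = 5 * NS_PER_SEC;        /* pretend 5 s of uptime */

    tick_offset = 1700000000ULL * RTC_FREQ;  /* seed from an assumed epoch time */

    uint64_t count = rtc_count(now_ns);
    printf("LPSRTCMR=0x%llx LPSRTCLR=0x%08llx\n",
           (unsigned long long)extract64(count, 32, 15),
           (unsigned long long)extract64(count, 0, 32));

    /* Guest writes a new low word: fold the change back into tick_offset. */
    uint64_t new_count = deposit64(count, 0, 32, 0x12345678);
    tick_offset += new_count - count;
    printf("after write, count=0x%llx\n", (unsigned long long)rtc_count(now_ns));
    return 0;
}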
VMSTATE_UINT32(nsmscexp, IoTKitSecCtl), @@ -793,7 +793,7 @@ static const VMStateDescription iotkit_secctl_vmstate = { .name = "iotkit-secctl", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(secppcintstat, IoTKitSecCtl), VMSTATE_UINT32(secppcinten, IoTKitSecCtl), VMSTATE_UINT32(secrespcfg, IoTKitSecCtl), @@ -807,7 +807,7 @@ static const VMStateDescription iotkit_secctl_vmstate = { iotkit_secctl_ppc_vmstate, IoTKitSecCtlPPC), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &iotkit_secctl_mpcintstatus_vmstate, &iotkit_secctl_msc_vmstate, NULL diff --git a/hw/misc/iotkit-sysctl.c b/hw/misc/iotkit-sysctl.c index e664215ee67..45393e84ba4 100644 --- a/hw/misc/iotkit-sysctl.c +++ b/hw/misc/iotkit-sysctl.c @@ -777,7 +777,7 @@ static const VMStateDescription iotkit_sysctl_sse300_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = sse300_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(pwrctrl, IoTKitSysCtl), VMSTATE_UINT32(pdcm_pd_cpu0_sense, IoTKitSysCtl), VMSTATE_UINT32(pdcm_pd_vmr0_sense, IoTKitSysCtl), @@ -798,7 +798,7 @@ static const VMStateDescription iotkit_sysctl_sse200_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = sse200_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(scsecctrl, IoTKitSysCtl), VMSTATE_UINT32(fclk_div, IoTKitSysCtl), VMSTATE_UINT32(sysclk_div, IoTKitSysCtl), @@ -818,7 +818,7 @@ static const VMStateDescription iotkit_sysctl_vmstate = { .name = "iotkit-sysctl", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(secure_debug, IoTKitSysCtl), VMSTATE_UINT32(reset_syndrome, IoTKitSysCtl), VMSTATE_UINT32(reset_mask, IoTKitSysCtl), @@ -828,7 +828,7 @@ static const VMStateDescription iotkit_sysctl_vmstate = { VMSTATE_UINT32(wicctrl, IoTKitSysCtl), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &iotkit_sysctl_sse200_vmstate, &iotkit_sysctl_sse300_vmstate, NULL diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c index 04478880298..de49d1b8a82 100644 --- a/hw/misc/ivshmem.c +++ b/hw/misc/ivshmem.c @@ -476,7 +476,6 @@ static void setup_interrupt(IVShmemState *s, int vector, Error **errp) static void process_msg_shmem(IVShmemState *s, int fd, Error **errp) { - Error *local_err = NULL; struct stat buf; size_t size; @@ -496,10 +495,9 @@ static void process_msg_shmem(IVShmemState *s, int fd, Error **errp) size = buf.st_size; /* mmap the region and map into the BAR2 */ - memory_region_init_ram_from_fd(&s->server_bar2, OBJECT(s), "ivshmem.bar2", - size, RAM_SHARED, fd, 0, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram_from_fd(&s->server_bar2, OBJECT(s), + "ivshmem.bar2", size, RAM_SHARED, + fd, 0, errp)) { return; } @@ -834,6 +832,7 @@ static void ivshmem_write_config(PCIDevice *pdev, uint32_t address, static void ivshmem_common_realize(PCIDevice *dev, Error **errp) { + ERRP_GUARD(); IVShmemState *s = IVSHMEM_COMMON(dev); Error *err = NULL; uint8_t *pci_conf; @@ -1015,7 +1014,7 @@ static const VMStateDescription ivshmem_plain_vmsd = { .minimum_version_id = 0, .pre_load = ivshmem_pre_load, .post_load = ivshmem_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, 
IVShmemState), VMSTATE_UINT32(intrstatus, IVShmemState), VMSTATE_UINT32(intrmask, IVShmemState), @@ -1069,7 +1068,7 @@ static const VMStateDescription ivshmem_doorbell_vmsd = { .minimum_version_id = 0, .pre_load = ivshmem_pre_load, .post_load = ivshmem_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, IVShmemState), VMSTATE_MSIX(parent_obj, IVShmemState), VMSTATE_UINT32(intrstatus, IVShmemState), diff --git a/hw/misc/lasi.c b/hw/misc/lasi.c index ff9dc893ae6..970fc98b5c7 100644 --- a/hw/misc/lasi.c +++ b/hw/misc/lasi.c @@ -36,9 +36,13 @@ static bool lasi_chip_mem_valid(void *opaque, hwaddr addr, case LASI_IAR: case LASI_LPT: + case LASI_AUDIO: + case LASI_AUDIO + 4: case LASI_UART: case LASI_LAN: + case LASI_LAN + 12: /* LASI LAN MAC */ case LASI_RTC: + case LASI_FDC: case LASI_PCR ... LASI_AMR: ret = true; @@ -78,6 +82,8 @@ static MemTxResult lasi_chip_read_with_attrs(void *opaque, hwaddr addr, case LASI_LPT: case LASI_UART: case LASI_LAN: + case LASI_LAN + 12: + case LASI_FDC: val = 0; break; case LASI_RTC: @@ -143,12 +149,19 @@ static MemTxResult lasi_chip_write_with_attrs(void *opaque, hwaddr addr, case LASI_LPT: /* XXX: reset parallel port */ break; + case LASI_AUDIO: + case LASI_AUDIO + 4: + /* XXX: reset audio port */ + break; case LASI_UART: /* XXX: reset serial port */ break; case LASI_LAN: /* XXX: reset LAN card */ break; + case LASI_FDC: + /* XXX: reset Floppy controller */ + break; case LASI_RTC: s->rtc_ref = val - time(NULL); break; @@ -196,7 +209,7 @@ static const VMStateDescription vmstate_lasi = { .name = "Lasi", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(irr, LasiState), VMSTATE_UINT32(imr, LasiState), VMSTATE_UINT32(ipr, LasiState), diff --git a/hw/misc/led.c b/hw/misc/led.c index 42bb43a39a2..d9998ab8954 100644 --- a/hw/misc/led.c +++ b/hw/misc/led.c @@ -77,7 +77,7 @@ static const VMStateDescription vmstate_led = { .name = TYPE_LED, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(intensity_percent, LEDState), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c index b6206ef73ca..db6142b5f41 100644 --- a/hw/misc/mac_via.c +++ b/hw/misc/mac_via.c @@ -1292,7 +1292,7 @@ static const VMStateDescription vmstate_q800_via1 = { .version_id = 0, .minimum_version_id = 0, .post_load = via1_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, MOS6522Q800VIA1State, 0, vmstate_mos6522, MOS6522State), VMSTATE_UINT8(last_b, MOS6522Q800VIA1State), @@ -1411,7 +1411,7 @@ static const VMStateDescription vmstate_q800_via2 = { .name = "q800-via2", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, MOS6522Q800VIA2State, 0, vmstate_mos6522, MOS6522State), VMSTATE_END_OF_LIST() diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c index 6336dcb1948..41934e2cf8e 100644 --- a/hw/misc/macio/cuda.c +++ b/hw/misc/macio/cuda.c @@ -487,7 +487,7 @@ static const VMStateDescription vmstate_cuda = { .name = "cuda", .version_id = 6, .minimum_version_id = 6, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(mos6522_cuda.parent_obj, CUDAState, 0, vmstate_mos6522, MOS6522State), VMSTATE_UINT8(last_b, CUDAState), diff --git a/hw/misc/macio/gpio.c b/hw/misc/macio/gpio.c index 4deb3304719..549563747dc 
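In the ivshmem hunk above, process_msg_shmem() drops the local_err/error_propagate() dance because memory_region_init_ram_from_fd() is used as a bool-returning function alongside its Error ** argument, so the caller simply tests the return value and forwards errp. The sketch below mirrors that calling convention with a toy error type; Error, error_set(), map_region() and setup() are invented for the example and are not QEMU's Error API from qapi/error.h.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct Error { char msg[128]; } Error;

static void error_set(Error **errp, const char *msg)
{
    if (errp) {
        *errp = calloc(1, sizeof(**errp));
        strncpy((*errp)->msg, msg, sizeof((*errp)->msg) - 1);
    }
}

/* Returns false and fills *errp on failure -- callers need no local error. */
static bool map_region(size_t size, Error **errp)
{
    if (size == 0) {
        error_set(errp, "region size must not be zero");
        return false;
    }
    return true;
}

static bool setup(size_t size, Error **errp)
{
    if (!map_region(size, errp)) {   /* just forward errp, no propagation step */
        return false;
    }
    printf("mapped %zu bytes\n", size);
    return true;
}

int main(void)
{
    Error *err = NULL;

    if (!setup(0, &err)) {
        fprintf(stderr, "setup failed: %s\n", err->msg);
        free(err);
        return 1;
    }
    return 0;
}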
100644 --- a/hw/misc/macio/gpio.c +++ b/hw/misc/macio/gpio.c @@ -168,7 +168,7 @@ static const VMStateDescription vmstate_macio_gpio = { .name = "macio_gpio", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(gpio_levels, MacIOGPIOState, 8), VMSTATE_UINT8_ARRAY(gpio_regs, MacIOGPIOState, 36), VMSTATE_END_OF_LIST() diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c index 80a789f32b8..2a528ea08ca 100644 --- a/hw/misc/macio/mac_dbdma.c +++ b/hw/misc/macio/mac_dbdma.c @@ -807,7 +807,7 @@ static const VMStateDescription vmstate_dbdma_io = { .name = "dbdma_io", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(addr, struct DBDMA_io), VMSTATE_INT32(len, struct DBDMA_io), VMSTATE_INT32(is_last, struct DBDMA_io), @@ -821,7 +821,7 @@ static const VMStateDescription vmstate_dbdma_cmd = { .name = "dbdma_cmd", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(req_count, dbdma_cmd), VMSTATE_UINT16(command, dbdma_cmd), VMSTATE_UINT32(phy_addr, dbdma_cmd), @@ -836,7 +836,7 @@ static const VMStateDescription vmstate_dbdma_channel = { .name = "dbdma_channel", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS), VMSTATE_STRUCT(io, struct DBDMA_channel, 0, vmstate_dbdma_io, DBDMA_io), VMSTATE_STRUCT(current, struct DBDMA_channel, 0, vmstate_dbdma_cmd, @@ -849,7 +849,7 @@ static const VMStateDescription vmstate_dbdma = { .name = "dbdma", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1, vmstate_dbdma_channel, DBDMA_channel), VMSTATE_END_OF_LIST() diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c index 265c0bbd8db..3f449f91c00 100644 --- a/hw/misc/macio/macio.c +++ b/hw/misc/macio/macio.c @@ -123,14 +123,17 @@ static bool macio_realize_ide(MacIOState *s, MACIOIDEState *ide, { SysBusDevice *sbd = SYS_BUS_DEVICE(ide); - sysbus_connect_irq(sbd, 0, irq0); - sysbus_connect_irq(sbd, 1, irq1); qdev_prop_set_uint32(DEVICE(ide), "channel", dmaid); object_property_set_link(OBJECT(ide), "dbdma", OBJECT(&s->dbdma), &error_abort); macio_ide_register_dma(ide); + if (!qdev_realize(DEVICE(ide), BUS(&s->macio_bus), errp)) { + return false; + } + sysbus_connect_irq(sbd, 0, irq0); + sysbus_connect_irq(sbd, 1, irq1); - return qdev_realize(DEVICE(ide), BUS(&s->macio_bus), errp); + return true; } static void macio_oldworld_realize(PCIDevice *d, Error **errp) @@ -376,7 +379,7 @@ static const VMStateDescription vmstate_macio_oldworld = { .name = "macio-oldworld", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj.parent, OldWorldMacIOState), VMSTATE_END_OF_LIST() } @@ -396,7 +399,7 @@ static const VMStateDescription vmstate_macio_newworld = { .name = "macio-newworld", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj.parent, NewWorldMacIOState), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c index 58316d18713..e40c51bf529 100644 --- a/hw/misc/macio/pmu.c +++ b/hw/misc/macio/pmu.c @@ -668,7 +668,7 @@ static const VMStateDescription 
vmstate_pmu_adb = { .version_id = 1, .minimum_version_id = 1, .needed = pmu_adb_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(adb_reply_size, PMUState), VMSTATE_BUFFER(adb_reply, PMUState), VMSTATE_END_OF_LIST() @@ -679,7 +679,7 @@ static const VMStateDescription vmstate_pmu = { .name = "pmu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(mos6522_pmu.parent_obj, PMUState, 0, vmstate_mos6522, MOS6522State), VMSTATE_UINT8(last_b, PMUState), @@ -698,7 +698,7 @@ static const VMStateDescription vmstate_pmu = { VMSTATE_INT64(one_sec_target, PMUState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_pmu_adb, NULL } @@ -737,8 +737,7 @@ static void pmu_realize(DeviceState *dev, Error **errp) timer_mod(s->one_sec_timer, s->one_sec_target); if (s->has_adb) { - qbus_init(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, - dev, "adb.0"); + qbus_init(adb_bus, sizeof(*adb_bus), TYPE_ADB_BUS, dev, "adb.0"); adb_register_autopoll_callback(adb_bus, pmu_adb_poll, s); } } diff --git a/hw/misc/meson.build b/hw/misc/meson.build index 36c20d5637f..86596a38881 100644 --- a/hw/misc/meson.build +++ b/hw/misc/meson.build @@ -3,7 +3,6 @@ system_ss.add(when: 'CONFIG_EDU', if_true: files('edu.c')) system_ss.add(when: 'CONFIG_FW_CFG_DMA', if_true: files('vmcoreinfo.c')) system_ss.add(when: 'CONFIG_ISA_DEBUG', if_true: files('debugexit.c')) system_ss.add(when: 'CONFIG_ISA_TESTDEV', if_true: files('pc-testdev.c')) -system_ss.add(when: 'CONFIG_PCA9552', if_true: files('pca9552.c')) system_ss.add(when: 'CONFIG_PCI_TESTDEV', if_true: files('pci-testdev.c')) system_ss.add(when: 'CONFIG_UNIMP', if_true: files('unimp.c')) system_ss.add(when: 'CONFIG_EMPTY_SLOT', if_true: files('empty_slot.c')) @@ -96,8 +95,8 @@ system_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c')) system_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c')) system_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-crf.c')) system_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-apu-ctrl.c')) -specific_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-crl.c')) system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files( + 'xlnx-versal-crl.c', 'xlnx-versal-xramc.c', 'xlnx-versal-pmc-iou-slcr.c', 'xlnx-versal-cfu.c', @@ -110,6 +109,9 @@ system_ss.add(when: 'CONFIG_XLNX_VERSAL_TRNG', if_true: files( system_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c')) system_ss.add(when: 'CONFIG_STM32F4XX_SYSCFG', if_true: files('stm32f4xx_syscfg.c')) system_ss.add(when: 'CONFIG_STM32F4XX_EXTI', if_true: files('stm32f4xx_exti.c')) +system_ss.add(when: 'CONFIG_STM32L4X5_EXTI', if_true: files('stm32l4x5_exti.c')) +system_ss.add(when: 'CONFIG_STM32L4X5_SYSCFG', if_true: files('stm32l4x5_syscfg.c')) +system_ss.add(when: 'CONFIG_STM32L4X5_RCC', if_true: files('stm32l4x5_rcc.c')) system_ss.add(when: 'CONFIG_MPS2_FPGAIO', if_true: files('mps2-fpgaio.c')) system_ss.add(when: 'CONFIG_MPS2_SCC', if_true: files('mps2-scc.c')) diff --git a/hw/misc/mips_cmgcr.c b/hw/misc/mips_cmgcr.c index 66eb11662c7..2703040f459 100644 --- a/hw/misc/mips_cmgcr.c +++ b/hw/misc/mips_cmgcr.c @@ -205,7 +205,7 @@ static const VMStateDescription vmstate_mips_gcr = { .name = "mips-gcr", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_UINT64(cpc_base, MIPSGCRState), VMSTATE_END_OF_LIST() }, diff --git a/hw/misc/mips_cpc.c b/hw/misc/mips_cpc.c index 4a94c870546..1e8fd2e6996 100644 --- a/hw/misc/mips_cpc.c +++ b/hw/misc/mips_cpc.c @@ -157,7 +157,7 @@ static const VMStateDescription vmstate_mips_cpc = { .name = "mips-cpc", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(vp_running, MIPSCPCState), VMSTATE_END_OF_LIST() }, diff --git a/hw/misc/mips_itu.c b/hw/misc/mips_itu.c index 5a83ccc4e8b..f8acfb3ee26 100644 --- a/hw/misc/mips_itu.c +++ b/hw/misc/mips_itu.c @@ -22,9 +22,10 @@ #include "qemu/log.h" #include "qemu/module.h" #include "qapi/error.h" -#include "exec/exec-all.h" +#include "hw/core/cpu.h" #include "hw/misc/mips_itu.h" #include "hw/qdev-properties.h" +#include "target/mips/cpu.h" #define ITC_TAG_ADDRSPACE_SZ (ITC_ADDRESSMAP_NUM * 8) /* Initialize as 4kB area to fit all 32 cells with default 128B grain. @@ -85,7 +86,7 @@ static uint64_t itc_tag_read(void *opaque, hwaddr addr, unsigned size) return tag->ITCAddressMap[index]; } -void itc_reconfigure(MIPSITUState *tag) +static void itc_reconfigure(MIPSITUState *tag) { uint64_t *am = &tag->ITCAddressMap[0]; MemoryRegion *mr = &tag->storage_io; @@ -93,12 +94,6 @@ void itc_reconfigure(MIPSITUState *tag) uint64_t size = (1 * KiB) + (am[1] & ITC_AM1_ADDR_MASK_MASK); bool is_enabled = (am[0] & ITC_AM0_EN_MASK) != 0; - if (tag->saar) { - address = (tag->saar[0] & 0xFFFFFFFFE000ULL) << 4; - size = 1ULL << ((tag->saar[0] >> 1) & 0x1f); - is_enabled = tag->saar[0] & 1; - } - memory_region_transaction_begin(); if (!(size & (size - 1))) { memory_region_set_size(mr, size); @@ -157,12 +152,7 @@ static inline ITCView get_itc_view(hwaddr addr) static inline int get_cell_stride_shift(const MIPSITUState *s) { /* Minimum interval (for EntryGain = 0) is 128 B */ - if (s->saar) { - return 7 + ((s->icr0 >> ITC_ICR0_BLK_GRAIN) & - ITC_ICR0_BLK_GRAIN_MASK); - } else { - return 7 + (s->ITCAddressMap[1] & ITC_AM1_ENTRY_GRAIN_MASK); - } + return 7 + (s->ITCAddressMap[1] & ITC_AM1_ENTRY_GRAIN_MASK); } static inline ITCStorageCell *get_cell(MIPSITUState *s, @@ -515,7 +505,6 @@ static void mips_itu_init(Object *obj) static void mips_itu_realize(DeviceState *dev, Error **errp) { MIPSITUState *s = MIPS_ITU(dev); - CPUMIPSState *env; if (s->num_fifo > ITC_FIFO_NUM_MAX) { error_setg(errp, "Exceed maximum number of FIFO cells: %d", @@ -527,15 +516,6 @@ static void mips_itu_realize(DeviceState *dev, Error **errp) s->num_semaphores); return; } - if (!s->cpu0) { - error_setg(errp, "Missing 'cpu[0]' property"); - return; - } - - env = &MIPS_CPU(s->cpu0)->env; - if (env->saarp) { - s->saar = env->CP0_SAAR; - } s->cell = g_new(ITCStorageCell, get_num_cells(s)); } @@ -544,15 +524,10 @@ static void mips_itu_reset(DeviceState *dev) { MIPSITUState *s = MIPS_ITU(dev); - if (s->saar) { - s->saar[0] = 0x11 << 1; - s->icr0 = get_num_cells(s) << ITC_ICR0_CELL_NUM; - } else { - s->ITCAddressMap[0] = 0; - s->ITCAddressMap[1] = + s->ITCAddressMap[0] = 0; + s->ITCAddressMap[1] = ((ITC_STORAGE_ADDRSPACE_SZ - 1) & ITC_AM1_ADDR_MASK_MASK) | (get_num_cells(s) << ITC_AM1_NUMENTRIES_OFS); - } itc_reconfigure(s); itc_reset_cells(s); @@ -563,7 +538,6 @@ static Property mips_itu_properties[] = { ITC_FIFO_NUM_MAX), DEFINE_PROP_UINT32("num-semaphores", MIPSITUState, num_semaphores, ITC_SEMAPH_NUM_MAX), - DEFINE_PROP_LINK("cpu[0]", MIPSITUState, cpu0, TYPE_MIPS_CPU, ArchCPU *), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/misc/mos6522.c 
b/hw/misc/mos6522.c index d6ba47bde97..e3fe87c20ca 100644 --- a/hw/misc/mos6522.c +++ b/hw/misc/mos6522.c @@ -611,7 +611,7 @@ static const VMStateDescription vmstate_mos6522_timer = { .name = "mos6522_timer", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(latch, MOS6522Timer), VMSTATE_UINT16(counter_value, MOS6522Timer), VMSTATE_INT64(load_time, MOS6522Timer), @@ -625,7 +625,7 @@ const VMStateDescription vmstate_mos6522 = { .name = "mos6522", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(a, MOS6522State), VMSTATE_UINT8(b, MOS6522State), VMSTATE_UINT8(dira, MOS6522State), diff --git a/hw/misc/mps2-fpgaio.c b/hw/misc/mps2-fpgaio.c index 07b8cbdad28..aa1bb83e721 100644 --- a/hw/misc/mps2-fpgaio.c +++ b/hw/misc/mps2-fpgaio.c @@ -305,7 +305,7 @@ static const VMStateDescription mps2_fpgaio_vmstate = { .name = "mps2-fpgaio", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(led0, MPS2FPGAIO), VMSTATE_UINT32(prescale, MPS2FPGAIO), VMSTATE_UINT32(misc, MPS2FPGAIO), diff --git a/hw/misc/mps2-scc.c b/hw/misc/mps2-scc.c index fe5034db140..18be74157ee 100644 --- a/hw/misc/mps2-scc.c +++ b/hw/misc/mps2-scc.c @@ -37,6 +37,7 @@ REG32(CFG3, 0xc) REG32(CFG4, 0x10) REG32(CFG5, 0x14) REG32(CFG6, 0x18) +REG32(CFG7, 0x1c) REG32(CFGDATA_RTN, 0xa0) REG32(CFGDATA_OUT, 0xa4) REG32(CFGCTRL, 0xa8) @@ -59,6 +60,51 @@ static int scc_partno(MPS2SCC *s) return extract32(s->id, 4, 8); } +/* Is CFG_REG2 present? */ +static bool have_cfg2(MPS2SCC *s) +{ + return scc_partno(s) == 0x524 || scc_partno(s) == 0x547 || + scc_partno(s) == 0x536; +} + +/* Is CFG_REG3 present? */ +static bool have_cfg3(MPS2SCC *s) +{ + return scc_partno(s) != 0x524 && scc_partno(s) != 0x547 && + scc_partno(s) != 0x536; +} + +/* Is CFG_REG5 present? */ +static bool have_cfg5(MPS2SCC *s) +{ + return scc_partno(s) == 0x524 || scc_partno(s) == 0x547 || + scc_partno(s) == 0x536; +} + +/* Is CFG_REG6 present? */ +static bool have_cfg6(MPS2SCC *s) +{ + return scc_partno(s) == 0x524 || scc_partno(s) == 0x536; +} + +/* Is CFG_REG7 present? */ +static bool have_cfg7(MPS2SCC *s) +{ + return scc_partno(s) == 0x536; +} + +/* Does CFG_REG0 drive the 'remap' GPIO output? */ +static bool cfg0_is_remap(MPS2SCC *s) +{ + return scc_partno(s) != 0x536; +} + +/* Is CFG_REG1 driving a set of LEDs? */ +static bool cfg1_is_leds(MPS2SCC *s) +{ + return scc_partno(s) != 0x536; +} + /* Handle a write via the SYS_CFG channel to the specified function/device. * Return false on error (reported to guest via SYS_CFGCTRL ERROR bit). */ @@ -111,19 +157,25 @@ static uint64_t mps2_scc_read(void *opaque, hwaddr offset, unsigned size) r = s->cfg1; break; case A_CFG2: - if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) { - /* CFG2 reserved on other boards */ + if (!have_cfg2(s)) { goto bad_offset; } r = s->cfg2; break; case A_CFG3: - if (scc_partno(s) == 0x524 && scc_partno(s) == 0x547) { - /* CFG3 reserved on AN524 */ + if (!have_cfg3(s)) { goto bad_offset; } - /* These are user-settable DIP switches on the board. We don't + /* + * These are user-settable DIP switches on the board. We don't * model that, so just return zeroes. + * + * TODO: for AN536 this is MCC_MSB_ADDR "additional MCC addressing + * bits". These change which part of the DDR4 the motherboard + * configuration controller can see in its memory map (see the + * appnote section 2.4). 
QEMU doesn't model the MCC at all, so these + bits are not interesting to us; read-as-zero is as good as anything + else. */ r = 0; break; @@ -131,19 +183,23 @@ static uint64_t mps2_scc_read(void *opaque, hwaddr offset, unsigned size) r = s->cfg4; break; case A_CFG5: - if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) { - /* CFG5 reserved on other boards */ + if (!have_cfg5(s)) { goto bad_offset; } r = s->cfg5; break; case A_CFG6: - if (scc_partno(s) != 0x524) { - /* CFG6 reserved on other boards */ + if (!have_cfg6(s)) { goto bad_offset; } r = s->cfg6; break; + case A_CFG7: + if (!have_cfg7(s)) { + goto bad_offset; + } + r = s->cfg7; + break; case A_CFGDATA_RTN: r = s->cfgdata_rtn; break; @@ -191,38 +247,58 @@ static void mps2_scc_write(void *opaque, hwaddr offset, uint64_t value, * we always reflect bit 0 in the 'remap' GPIO output line, * and let the board wire it up or not as it chooses. * TODO on some boards bit 1 is CPU_WAIT. + * + * TODO: on the AN536 this register controls reset and halt + * for both CPUs. For the moment we don't implement this, so the + * register just reads as written. */ s->cfg0 = value; - qemu_set_irq(s->remap, s->cfg0 & 1); + if (cfg0_is_remap(s)) { + qemu_set_irq(s->remap, s->cfg0 & 1); + } break; case A_CFG1: s->cfg1 = value; - for (size_t i = 0; i < ARRAY_SIZE(s->led); i++) { - led_set_state(s->led[i], extract32(value, i, 1)); + /* + * On most boards this register drives LEDs. + * + * TODO: for AN536 this controls whether flash and ATCM are + * enabled or disabled on reset. QEMU doesn't model this, and + * always wires up RAM in the ATCM area and ROM in the flash area. + */ + if (cfg1_is_leds(s)) { + for (size_t i = 0; i < ARRAY_SIZE(s->led); i++) { + led_set_state(s->led[i], extract32(value, i, 1)); + } } break; case A_CFG2: - if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) { - /* CFG2 reserved on other boards */ + if (!have_cfg2(s)) { goto bad_offset; } - /* AN524: QSPI Select signal */ + /* AN524, AN536: QSPI Select signal */ s->cfg2 = value; break; case A_CFG5: - if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) { - /* CFG5 reserved on other boards */ + if (!have_cfg5(s)) { goto bad_offset; } - /* AN524: ACLK frequency in Hz */ + /* AN524, AN536: ACLK frequency in Hz */ s->cfg5 = value; break; case A_CFG6: - if (scc_partno(s) != 0x524) { - /* CFG6 reserved on other boards */ + if (!have_cfg6(s)) { goto bad_offset; } /* AN524: Clock divider for BRAM */ + /* AN536: Core 0 vector table base address */ + s->cfg6 = value; + break; + case A_CFG7: + if (!have_cfg7(s)) { + goto bad_offset; + } + /* AN536: Core 1 vector table base address */ s->cfg7 = value; + break; case A_CFGDATA_OUT: @@ -336,11 +412,29 @@ static void mps2_scc_finalize(Object *obj) g_free(s->oscclk_reset); } +static bool cfg7_needed(void *opaque) +{ + MPS2SCC *s = opaque; + + return have_cfg7(s); +} + +static const VMStateDescription vmstate_cfg7 = { + .name = "mps2-scc/cfg7", + .version_id = 1, + .minimum_version_id = 1, + .needed = cfg7_needed, + .fields = (const VMStateField[]) { + VMSTATE_UINT32(cfg7, MPS2SCC), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription mps2_scc_vmstate = { .name = "mps2-scc", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cfg0, MPS2SCC), VMSTATE_UINT32(cfg1, MPS2SCC), VMSTATE_UINT32(cfg2, MPS2SCC), @@ -355,6 +449,10 @@ static const VMStateDescription mps2_scc_vmstate = { VMSTATE_VARRAY_UINT32(oscclk, MPS2SCC, num_oscclk, 0, vmstate_info_uint32, uint32_t), 
VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * const []) { + &vmstate_cfg7, + NULL } }; diff --git a/hw/misc/msf2-sysreg.c b/hw/misc/msf2-sysreg.c index 2dce55c3649..f54382a816c 100644 --- a/hw/misc/msf2-sysreg.c +++ b/hw/misc/msf2-sysreg.c @@ -112,7 +112,7 @@ static const VMStateDescription vmstate_msf2_sysreg = { .name = TYPE_MSF2_SYSREG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, MSF2SysregState, MSF2_SYSREG_MMIO_SIZE / 4), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/mst_fpga.c b/hw/misc/mst_fpga.c index 7692825867d..2d7bfa5ad9e 100644 --- a/hw/misc/mst_fpga.c +++ b/hw/misc/mst_fpga.c @@ -227,7 +227,7 @@ static const VMStateDescription vmstate_mst_fpga_regs = { .version_id = 0, .minimum_version_id = 0, .post_load = mst_fpga_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(prev_level, mst_irq_state), VMSTATE_UINT32(leddat1, mst_irq_state), VMSTATE_UINT32(leddat2, mst_irq_state), diff --git a/hw/misc/npcm7xx_clk.c b/hw/misc/npcm7xx_clk.c index bc2b879feb5..ac1622c38aa 100644 --- a/hw/misc/npcm7xx_clk.c +++ b/hw/misc/npcm7xx_clk.c @@ -976,7 +976,7 @@ static const VMStateDescription vmstate_npcm7xx_clk_pll = { .name = "npcm7xx-clock-pll", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clock_in, NPCM7xxClockPLLState), VMSTATE_END_OF_LIST(), }, @@ -986,7 +986,7 @@ static const VMStateDescription vmstate_npcm7xx_clk_sel = { .name = "npcm7xx-clock-sel", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(clock_in, NPCM7xxClockSELState, NPCM7XX_CLK_SEL_MAX_INPUT, 0, vmstate_clock, Clock), VMSTATE_END_OF_LIST(), @@ -997,7 +997,7 @@ static const VMStateDescription vmstate_npcm7xx_clk_divider = { .name = "npcm7xx-clock-divider", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clock_in, NPCM7xxClockDividerState), VMSTATE_END_OF_LIST(), }, @@ -1008,7 +1008,7 @@ static const VMStateDescription vmstate_npcm7xx_clk = { .version_id = 1, .minimum_version_id = 1, .post_load = npcm7xx_clk_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, NPCM7xxCLKState, NPCM7XX_CLK_NR_REGS), VMSTATE_INT64(ref_ns, NPCM7xxCLKState), VMSTATE_CLOCK(clkref, NPCM7xxCLKState), diff --git a/hw/misc/npcm7xx_gcr.c b/hw/misc/npcm7xx_gcr.c index eace9e1967a..9252f9d1488 100644 --- a/hw/misc/npcm7xx_gcr.c +++ b/hw/misc/npcm7xx_gcr.c @@ -227,7 +227,7 @@ static const VMStateDescription vmstate_npcm7xx_gcr = { .name = "npcm7xx-gcr", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, NPCM7xxGCRState, NPCM7XX_GCR_NR_REGS), VMSTATE_END_OF_LIST(), }, diff --git a/hw/misc/npcm7xx_mft.c b/hw/misc/npcm7xx_mft.c index a30583a1b05..9a848584e18 100644 --- a/hw/misc/npcm7xx_mft.c +++ b/hw/misc/npcm7xx_mft.c @@ -503,7 +503,7 @@ static const VMStateDescription vmstate_npcm7xx_mft = { .name = "npcm7xx-mft-module", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clock_in, NPCM7xxMFTState), VMSTATE_CLOCK(clock_1, NPCM7xxMFTState), VMSTATE_CLOCK(clock_2, NPCM7xxMFTState), diff --git a/hw/misc/npcm7xx_pwm.c b/hw/misc/npcm7xx_pwm.c index 
2be5bd25c6c..fca2dd2e5af 100644 --- a/hw/misc/npcm7xx_pwm.c +++ b/hw/misc/npcm7xx_pwm.c @@ -511,7 +511,7 @@ static const VMStateDescription vmstate_npcm7xx_pwm = { .name = "npcm7xx-pwm", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(running, NPCM7xxPWM), VMSTATE_BOOL(inverted, NPCM7xxPWM), VMSTATE_UINT8(index, NPCM7xxPWM), @@ -529,7 +529,7 @@ static const VMStateDescription vmstate_npcm7xx_pwm_module = { .name = "npcm7xx-pwm-module", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clock, NPCM7xxPWMState), VMSTATE_STRUCT_ARRAY(pwm, NPCM7xxPWMState, NPCM7XX_PWM_PER_MODULE, 0, vmstate_npcm7xx_pwm, diff --git a/hw/misc/npcm7xx_rng.c b/hw/misc/npcm7xx_rng.c index b01df7cdb25..7f7e5eca626 100644 --- a/hw/misc/npcm7xx_rng.c +++ b/hw/misc/npcm7xx_rng.c @@ -150,7 +150,7 @@ static const VMStateDescription vmstate_npcm7xx_rng = { .name = "npcm7xx-rng", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(rngcs, NPCM7xxRNGState), VMSTATE_UINT8(rngd, NPCM7xxRNGState), VMSTATE_UINT8(rngmode, NPCM7xxRNGState), diff --git a/hw/misc/nrf51_rng.c b/hw/misc/nrf51_rng.c index fc86e1b6979..2d76c457182 100644 --- a/hw/misc/nrf51_rng.c +++ b/hw/misc/nrf51_rng.c @@ -231,7 +231,7 @@ static const VMStateDescription vmstate_rng = { .name = "nrf51_soc.rng", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(active, NRF51RNGState), VMSTATE_UINT32(event_valrdy, NRF51RNGState), VMSTATE_UINT32(shortcut_stop_on_valrdy, NRF51RNGState), diff --git a/hw/misc/pvpanic-pci.c b/hw/misc/pvpanic-pci.c index fbcaa50731b..83be95d0d24 100644 --- a/hw/misc/pvpanic-pci.c +++ b/hw/misc/pvpanic-pci.c @@ -37,7 +37,7 @@ static const VMStateDescription vmstate_pvpanic_pci = { .name = "pvpanic-pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PVPanicPCIState), VMSTATE_END_OF_LIST() } @@ -48,7 +48,7 @@ static void pvpanic_pci_realizefn(PCIDevice *dev, Error **errp) PVPanicPCIState *s = PVPANIC_PCI_DEVICE(dev); PVPanicState *ps = &s->pvpanic; - pvpanic_setup_io(&s->pvpanic, DEVICE(s), 2); + pvpanic_setup_io(ps, DEVICE(s), 2); pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &ps->mr); } diff --git a/hw/misc/slavio_misc.c b/hw/misc/slavio_misc.c index e8eb71570a8..94369e4cc88 100644 --- a/hw/misc/slavio_misc.c +++ b/hw/misc/slavio_misc.c @@ -408,7 +408,7 @@ static const VMStateDescription vmstate_misc = { .name ="slavio_misc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(dummy, MiscState), VMSTATE_UINT8(config, MiscState), VMSTATE_UINT8(aux1, MiscState), diff --git a/hw/misc/stm32f4xx_exti.c b/hw/misc/stm32f4xx_exti.c index 02e78100461..7bd3afcd7cc 100644 --- a/hw/misc/stm32f4xx_exti.c +++ b/hw/misc/stm32f4xx_exti.c @@ -153,7 +153,7 @@ static const VMStateDescription vmstate_stm32f4xx_exti = { .name = TYPE_STM32F4XX_EXTI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(exti_imr, STM32F4xxExtiState), VMSTATE_UINT32(exti_emr, STM32F4xxExtiState), VMSTATE_UINT32(exti_rtsr, STM32F4xxExtiState), diff --git a/hw/misc/stm32f4xx_syscfg.c b/hw/misc/stm32f4xx_syscfg.c index f960e4ea1ee..854fce6a952 100644 --- 
a/hw/misc/stm32f4xx_syscfg.c +++ b/hw/misc/stm32f4xx_syscfg.c @@ -137,7 +137,7 @@ static const VMStateDescription vmstate_stm32f4xx_syscfg = { .name = TYPE_STM32F4XX_SYSCFG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(syscfg_memrmp, STM32F4xxSyscfgState), VMSTATE_UINT32(syscfg_pmc, STM32F4xxSyscfgState), VMSTATE_UINT32_ARRAY(syscfg_exticr, STM32F4xxSyscfgState, diff --git a/hw/misc/stm32l4x5_exti.c b/hw/misc/stm32l4x5_exti.c new file mode 100644 index 00000000000..9fd859160d4 --- /dev/null +++ b/hw/misc/stm32l4x5_exti.c @@ -0,0 +1,290 @@ +/* + * STM32L4x5 EXTI (Extended interrupts and events controller) + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Samuel Tardieu + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This work is based on the stm32f4xx_exti by Alistair Francis. + * Original code is licensed under the MIT License: + * + * Copyright (c) 2014 Alistair Francis + */ + +/* + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. + * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/misc/stm32l4x5_exti.h" + +#define EXTI_IMR1 0x00 +#define EXTI_EMR1 0x04 +#define EXTI_RTSR1 0x08 +#define EXTI_FTSR1 0x0C +#define EXTI_SWIER1 0x10 +#define EXTI_PR1 0x14 +#define EXTI_IMR2 0x20 +#define EXTI_EMR2 0x24 +#define EXTI_RTSR2 0x28 +#define EXTI_FTSR2 0x2C +#define EXTI_SWIER2 0x30 +#define EXTI_PR2 0x34 + +#define EXTI_NUM_GPIO_EVENT_IN_LINES 16 +#define EXTI_MAX_IRQ_PER_BANK 32 +#define EXTI_IRQS_BANK0 32 +#define EXTI_IRQS_BANK1 8 + +static const unsigned irqs_per_bank[EXTI_NUM_REGISTER] = { + EXTI_IRQS_BANK0, + EXTI_IRQS_BANK1, +}; + +static const uint32_t exti_romask[EXTI_NUM_REGISTER] = { + 0xff820000, /* 0b11111111_10000010_00000000_00000000 */ + 0x00000087, /* 0b00000000_00000000_00000000_10000111 */ +}; + +static unsigned regbank_index_by_irq(unsigned irq) +{ + return irq >= EXTI_MAX_IRQ_PER_BANK ? 1 : 0; +} + +static unsigned regbank_index_by_addr(hwaddr addr) +{ + return addr >= EXTI_IMR2 ? 1 : 0; +} + +static unsigned valid_mask(unsigned bank) +{ + return MAKE_64BIT_MASK(0, irqs_per_bank[bank]); +} + +static unsigned configurable_mask(unsigned bank) +{ + return valid_mask(bank) & ~exti_romask[bank]; +} + +static void stm32l4x5_exti_reset_hold(Object *obj) +{ + Stm32l4x5ExtiState *s = STM32L4X5_EXTI(obj); + + for (unsigned bank = 0; bank < EXTI_NUM_REGISTER; bank++) { + s->imr[bank] = exti_romask[bank]; + s->emr[bank] = 0x00000000; + s->rtsr[bank] = 0x00000000; + s->ftsr[bank] = 0x00000000; + s->swier[bank] = 0x00000000; + s->pr[bank] = 0x00000000; + } +} + +static void stm32l4x5_exti_set_irq(void *opaque, int irq, int level) +{ + Stm32l4x5ExtiState *s = opaque; + const unsigned bank = regbank_index_by_irq(irq); + const int oirq = irq; + + trace_stm32l4x5_exti_set_irq(irq, level); + + /* Shift the value to enable access in x2 registers. 
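+ * (Lines 32 to 39 belong to the second bank and map to bits 0 to 7 of the IMR2/PR2/... registers, hence the reduction modulo EXTI_MAX_IRQ_PER_BANK below.)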
*/ + irq %= EXTI_MAX_IRQ_PER_BANK; + + /* If the interrupt is masked, pr won't be raised */ + if (!extract32(s->imr[bank], irq, 1)) { + return; + } + + if (((1 << irq) & s->rtsr[bank]) && level) { + /* Rising Edge */ + s->pr[bank] |= 1 << irq; + qemu_irq_pulse(s->irq[oirq]); + } else if (((1 << irq) & s->ftsr[bank]) && !level) { + /* Falling Edge */ + s->pr[bank] |= 1 << irq; + qemu_irq_pulse(s->irq[oirq]); + } + /* + * In the following situations : + * - falling edge but rising trigger selected + * - rising edge but falling trigger selected + * - no trigger selected + * No action is required + */ +} + +static uint64_t stm32l4x5_exti_read(void *opaque, hwaddr addr, + unsigned int size) +{ + Stm32l4x5ExtiState *s = opaque; + uint32_t r = 0; + const unsigned bank = regbank_index_by_addr(addr); + + switch (addr) { + case EXTI_IMR1: + case EXTI_IMR2: + r = s->imr[bank]; + break; + case EXTI_EMR1: + case EXTI_EMR2: + r = s->emr[bank]; + break; + case EXTI_RTSR1: + case EXTI_RTSR2: + r = s->rtsr[bank]; + break; + case EXTI_FTSR1: + case EXTI_FTSR2: + r = s->ftsr[bank]; + break; + case EXTI_SWIER1: + case EXTI_SWIER2: + r = s->swier[bank]; + break; + case EXTI_PR1: + case EXTI_PR2: + r = s->pr[bank]; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "STM32L4X5_exti_read: Bad offset 0x%" HWADDR_PRIx "\n", + addr); + break; + } + + trace_stm32l4x5_exti_read(addr, r); + + return r; +} + +static void stm32l4x5_exti_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + Stm32l4x5ExtiState *s = opaque; + const unsigned bank = regbank_index_by_addr(addr); + + trace_stm32l4x5_exti_write(addr, val64); + + switch (addr) { + case EXTI_IMR1: + case EXTI_IMR2: + s->imr[bank] = val64 & valid_mask(bank); + return; + case EXTI_EMR1: + case EXTI_EMR2: + s->emr[bank] = val64 & valid_mask(bank); + return; + case EXTI_RTSR1: + case EXTI_RTSR2: + s->rtsr[bank] = val64 & configurable_mask(bank); + return; + case EXTI_FTSR1: + case EXTI_FTSR2: + s->ftsr[bank] = val64 & configurable_mask(bank); + return; + case EXTI_SWIER1: + case EXTI_SWIER2: { + const uint32_t set = val64 & configurable_mask(bank); + const uint32_t pend = set & ~s->swier[bank] & s->imr[bank] & + ~s->pr[bank]; + s->swier[bank] = set; + s->pr[bank] |= pend; + for (unsigned i = 0; i < irqs_per_bank[bank]; i++) { + if (extract32(pend, i, 1)) { + qemu_irq_pulse(s->irq[i + 32 * bank]); + } + } + return; + } + case EXTI_PR1: + case EXTI_PR2: { + const uint32_t cleared = s->pr[bank] & val64 & configurable_mask(bank); + /* This bit is cleared by writing a 1 to it */ + s->pr[bank] &= ~cleared; + /* Software triggered interrupts are cleared as well */ + s->swier[bank] &= ~cleared; + return; + } + default: + qemu_log_mask(LOG_GUEST_ERROR, + "STM32L4X5_exti_write: Bad offset 0x%" HWADDR_PRIx "\n", + addr); + } +} + +static const MemoryRegionOps stm32l4x5_exti_ops = { + .read = stm32l4x5_exti_read, + .write = stm32l4x5_exti_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .impl.unaligned = false, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .valid.unaligned = false, +}; + +static void stm32l4x5_exti_init(Object *obj) +{ + Stm32l4x5ExtiState *s = STM32L4X5_EXTI(obj); + + for (size_t i = 0; i < EXTI_NUM_INTERRUPT_OUT_LINES; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq[i]); + } + + memory_region_init_io(&s->mmio, obj, &stm32l4x5_exti_ops, s, + TYPE_STM32L4X5_EXTI, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + + qdev_init_gpio_in(DEVICE(obj), 
stm32l4x5_exti_set_irq, + EXTI_NUM_GPIO_EVENT_IN_LINES); +} + +static const VMStateDescription vmstate_stm32l4x5_exti = { + .name = TYPE_STM32L4X5_EXTI, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(imr, Stm32l4x5ExtiState, EXTI_NUM_REGISTER), + VMSTATE_UINT32_ARRAY(emr, Stm32l4x5ExtiState, EXTI_NUM_REGISTER), + VMSTATE_UINT32_ARRAY(rtsr, Stm32l4x5ExtiState, EXTI_NUM_REGISTER), + VMSTATE_UINT32_ARRAY(ftsr, Stm32l4x5ExtiState, EXTI_NUM_REGISTER), + VMSTATE_UINT32_ARRAY(swier, Stm32l4x5ExtiState, EXTI_NUM_REGISTER), + VMSTATE_UINT32_ARRAY(pr, Stm32l4x5ExtiState, EXTI_NUM_REGISTER), + VMSTATE_END_OF_LIST() + } +}; + +static void stm32l4x5_exti_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->vmsd = &vmstate_stm32l4x5_exti; + rc->phases.hold = stm32l4x5_exti_reset_hold; +} + +static const TypeInfo stm32l4x5_exti_types[] = { + { + .name = TYPE_STM32L4X5_EXTI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Stm32l4x5ExtiState), + .instance_init = stm32l4x5_exti_init, + .class_init = stm32l4x5_exti_class_init, + } +}; + +DEFINE_TYPES(stm32l4x5_exti_types) diff --git a/hw/misc/stm32l4x5_rcc.c b/hw/misc/stm32l4x5_rcc.c new file mode 100644 index 00000000000..ed2dbd9dc3f --- /dev/null +++ b/hw/misc/stm32l4x5_rcc.c @@ -0,0 +1,1462 @@ +/* + * STM32L4X5 RCC (Reset and clock control) + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. + * + * Inspired by the BCM2835 CPRMAN clock manager implementation by Luc Michel. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/misc/stm32l4x5_rcc.h" +#include "hw/misc/stm32l4x5_rcc_internals.h" +#include "hw/clock.h" +#include "hw/irq.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/registerfields.h" +#include "trace.h" + +#define HSE_DEFAULT_FRQ 48000000ULL +#define HSI_FRQ 16000000ULL +#define MSI_DEFAULT_FRQ 4000000ULL +#define LSE_FRQ 32768ULL +#define LSI_FRQ 32000ULL + +/* + * Function to simply acknowledge and propagate changes in a clock mux + * frequency. + * `bypass_source` allows to bypass the period of the current source and just + * consider it equal to 0. This is useful during the hold phase of reset. + */ +static void clock_mux_update(RccClockMuxState *mux, bool bypass_source) +{ + uint64_t src_freq; + Clock *current_source = mux->srcs[mux->src]; + uint32_t freq_multiplier = 0; + bool clk_changed = false; + + /* + * To avoid rounding errors, we use the clock period instead of the + * frequency. + * This means that the multiplier of the mux becomes the divider of + * the clock and the divider of the mux becomes the multiplier of the + * clock. 
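+ * For example, a mux with multiplier 1 and divider 8 (output frequency one eighth of the source) is modelled by giving the output clock a period multiplier of 8 and a divider of 1.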
+ */ + if (!bypass_source && mux->enabled && mux->divider) { + freq_multiplier = mux->divider; + } + + clk_changed |= clock_set_mul_div(mux->out, freq_multiplier, mux->multiplier); + clk_changed |= clock_set(mux->out, clock_get(current_source)); + if (clk_changed) { + clock_propagate(mux->out); + } + + src_freq = clock_get_hz(current_source); + /* TODO: can we simply detect if the config changed so that we reduce log spam ? */ + trace_stm32l4x5_rcc_mux_update(mux->id, mux->src, src_freq, + mux->multiplier, mux->divider); +} + +static void clock_mux_src_update(void *opaque, ClockEvent event) +{ + RccClockMuxState **backref = opaque; + RccClockMuxState *s = *backref; + /* + * The backref value is equal to: + * s->backref + (sizeof(RccClockMuxState *) * update_src). + * By subtracting we can get back the index of the updated clock. + */ + const uint32_t update_src = backref - s->backref; + /* Only update if the clock that was updated is the current source */ + if (update_src == s->src) { + clock_mux_update(s, false); + } +} + +static void clock_mux_init(Object *obj) +{ + RccClockMuxState *s = RCC_CLOCK_MUX(obj); + size_t i; + + for (i = 0; i < RCC_NUM_CLOCK_MUX_SRC; i++) { + char *name = g_strdup_printf("srcs[%zu]", i); + s->backref[i] = s; + s->srcs[i] = qdev_init_clock_in(DEVICE(s), name, + clock_mux_src_update, + &s->backref[i], + ClockUpdate); + g_free(name); + } + + s->out = qdev_init_clock_out(DEVICE(s), "out"); +} + +static void clock_mux_reset_enter(Object *obj, ResetType type) +{ + RccClockMuxState *s = RCC_CLOCK_MUX(obj); + set_clock_mux_init_info(s, s->id); +} + +static void clock_mux_reset_hold(Object *obj) +{ + RccClockMuxState *s = RCC_CLOCK_MUX(obj); + clock_mux_update(s, true); +} + +static void clock_mux_reset_exit(Object *obj) +{ + RccClockMuxState *s = RCC_CLOCK_MUX(obj); + clock_mux_update(s, false); +} + +static const VMStateDescription clock_mux_vmstate = { + .name = TYPE_RCC_CLOCK_MUX, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(id, RccClockMuxState), + VMSTATE_ARRAY_CLOCK(srcs, RccClockMuxState, + RCC_NUM_CLOCK_MUX_SRC), + VMSTATE_BOOL(enabled, RccClockMuxState), + VMSTATE_UINT32(src, RccClockMuxState), + VMSTATE_UINT32(multiplier, RccClockMuxState), + VMSTATE_UINT32(divider, RccClockMuxState), + VMSTATE_END_OF_LIST() + } +}; + +static void clock_mux_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.enter = clock_mux_reset_enter; + rc->phases.hold = clock_mux_reset_hold; + rc->phases.exit = clock_mux_reset_exit; + dc->vmsd = &clock_mux_vmstate; +} + +static void clock_mux_set_enable(RccClockMuxState *mux, bool enabled) +{ + if (mux->enabled == enabled) { + return; + } + + if (enabled) { + trace_stm32l4x5_rcc_mux_enable(mux->id); + } else { + trace_stm32l4x5_rcc_mux_disable(mux->id); + } + + mux->enabled = enabled; + clock_mux_update(mux, false); +} + +static void clock_mux_set_factor(RccClockMuxState *mux, + uint32_t multiplier, uint32_t divider) +{ + if (mux->multiplier == multiplier && mux->divider == divider) { + return; + } + trace_stm32l4x5_rcc_mux_set_factor(mux->id, + mux->multiplier, multiplier, mux->divider, divider); + + mux->multiplier = multiplier; + mux->divider = divider; + clock_mux_update(mux, false); +} + +static void clock_mux_set_source(RccClockMuxState *mux, RccClockMuxSource src) +{ + if (mux->src == src) { + return; + } + + trace_stm32l4x5_rcc_mux_set_src(mux->id, mux->src, src); + mux->src = src; + 
clock_mux_update(mux, false); +} + +/* + * Acknowledge and propagate changes in a PLL frequency. + * `bypass_source` allows to bypass the period of the current source and just + * consider it equal to 0. This is useful during the hold phase of reset. + */ +static void pll_update(RccPllState *pll, bool bypass_source) +{ + uint64_t vco_freq, old_channel_freq, channel_freq; + int i; + + /* The common PLLM factor is handled by the PLL mux */ + vco_freq = muldiv64(clock_get_hz(pll->in), pll->vco_multiplier, 1); + + for (i = 0; i < RCC_NUM_CHANNEL_PLL_OUT; i++) { + if (!pll->channel_exists[i]) { + continue; + } + + old_channel_freq = clock_get_hz(pll->channels[i]); + if (bypass_source || + !pll->enabled || + !pll->channel_enabled[i] || + !pll->channel_divider[i]) { + channel_freq = 0; + } else { + channel_freq = muldiv64(vco_freq, + 1, + pll->channel_divider[i]); + } + + /* No change, early continue to avoid log spam and useless propagation */ + if (old_channel_freq == channel_freq) { + continue; + } + + clock_update_hz(pll->channels[i], channel_freq); + trace_stm32l4x5_rcc_pll_update(pll->id, i, vco_freq, + old_channel_freq, channel_freq); + } +} + +static void pll_src_update(void *opaque, ClockEvent event) +{ + RccPllState *s = opaque; + pll_update(s, false); +} + +static void pll_init(Object *obj) +{ + RccPllState *s = RCC_PLL(obj); + size_t i; + + s->in = qdev_init_clock_in(DEVICE(s), "in", + pll_src_update, s, ClockUpdate); + + const char *names[] = { + "out-p", "out-q", "out-r", + }; + + for (i = 0; i < RCC_NUM_CHANNEL_PLL_OUT; i++) { + s->channels[i] = qdev_init_clock_out(DEVICE(s), names[i]); + } +} + +static void pll_reset_enter(Object *obj, ResetType type) +{ + RccPllState *s = RCC_PLL(obj); + set_pll_init_info(s, s->id); +} + +static void pll_reset_hold(Object *obj) +{ + RccPllState *s = RCC_PLL(obj); + pll_update(s, true); +} + +static void pll_reset_exit(Object *obj) +{ + RccPllState *s = RCC_PLL(obj); + pll_update(s, false); +} + +static const VMStateDescription pll_vmstate = { + .name = TYPE_RCC_PLL, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(id, RccPllState), + VMSTATE_CLOCK(in, RccPllState), + VMSTATE_ARRAY_CLOCK(channels, RccPllState, + RCC_NUM_CHANNEL_PLL_OUT), + VMSTATE_BOOL(enabled, RccPllState), + VMSTATE_UINT32(vco_multiplier, RccPllState), + VMSTATE_BOOL_ARRAY(channel_enabled, RccPllState, RCC_NUM_CHANNEL_PLL_OUT), + VMSTATE_BOOL_ARRAY(channel_exists, RccPllState, RCC_NUM_CHANNEL_PLL_OUT), + VMSTATE_UINT32_ARRAY(channel_divider, RccPllState, RCC_NUM_CHANNEL_PLL_OUT), + VMSTATE_END_OF_LIST() + } +}; + +static void pll_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.enter = pll_reset_enter; + rc->phases.hold = pll_reset_hold; + rc->phases.exit = pll_reset_exit; + dc->vmsd = &pll_vmstate; +} + +static void pll_set_vco_multiplier(RccPllState *pll, uint32_t vco_multiplier) +{ + if (pll->vco_multiplier == vco_multiplier) { + return; + } + + if (vco_multiplier < 8 || vco_multiplier > 86) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: VCO multiplier is out of bound (%u) for PLL %u\n", + __func__, vco_multiplier, pll->id); + return; + } + + trace_stm32l4x5_rcc_pll_set_vco_multiplier(pll->id, + pll->vco_multiplier, vco_multiplier); + + pll->vco_multiplier = vco_multiplier; + pll_update(pll, false); +} + +static void pll_set_enable(RccPllState *pll, bool enabled) +{ + if (pll->enabled == enabled) { + return; + } + + pll->enabled = 
enabled; + pll_update(pll, false); +} + +static void pll_set_channel_enable(RccPllState *pll, + PllCommonChannels channel, + bool enabled) +{ + if (pll->channel_enabled[channel] == enabled) { + return; + } + + if (enabled) { + trace_stm32l4x5_rcc_pll_channel_enable(pll->id, channel); + } else { + trace_stm32l4x5_rcc_pll_channel_disable(pll->id, channel); + } + + pll->channel_enabled[channel] = enabled; + pll_update(pll, false); +} + +static void pll_set_channel_divider(RccPllState *pll, + PllCommonChannels channel, + uint32_t divider) +{ + if (pll->channel_divider[channel] == divider) { + return; + } + + trace_stm32l4x5_rcc_pll_set_channel_divider(pll->id, + channel, pll->channel_divider[channel], divider); + + pll->channel_divider[channel] = divider; + pll_update(pll, false); +} + +static void rcc_update_irq(Stm32l4x5RccState *s) +{ + /* + * TODO: Handle LSECSSF and CSSF flags when the CSS is implemented. + */ + if (s->cifr & CIFR_IRQ_MASK) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static void rcc_update_msi(Stm32l4x5RccState *s, uint32_t previous_value) +{ + uint32_t val; + + static const uint32_t msirange[] = { + 100000, 200000, 400000, 800000, 1000000, 2000000, + 4000000, 8000000, 16000000, 24000000, 32000000, 48000000 + }; + /* MSIRANGE and MSIRGSEL */ + val = extract32(s->cr, R_CR_MSIRGSEL_SHIFT, R_CR_MSIRGSEL_LENGTH); + if (val) { + /* MSIRGSEL is set, use the MSIRANGE field */ + val = extract32(s->cr, R_CR_MSIRANGE_SHIFT, R_CR_MSIRANGE_LENGTH); + } else { + /* MSIRGSEL is not set, use the MSISRANGE field */ + val = extract32(s->csr, R_CSR_MSISRANGE_SHIFT, R_CSR_MSISRANGE_LENGTH); + } + + if (val < ARRAY_SIZE(msirange)) { + clock_update_hz(s->msi_rc, msirange[val]); + } else { + /* + * There is a hardware write protection if the value is out of bound. + * Restore the previous value. + */ + s->cr = (s->cr & ~R_CSR_MSISRANGE_MASK) | + (previous_value & R_CSR_MSISRANGE_MASK); + } +} + +/* + * TODO: Add write-protection for all registers: + * DONE: CR + */ + +static void rcc_update_cr_register(Stm32l4x5RccState *s, uint32_t previous_value) +{ + int val; + const RccClockMuxSource current_pll_src = + CLOCK_MUX_INIT_INFO[RCC_CLOCK_MUX_PLL_INPUT].src_mapping[ + s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT].src]; + + /* PLLSAI2ON and update PLLSAI2RDY */ + val = FIELD_EX32(s->cr, CR, PLLSAI2ON); + pll_set_enable(&s->plls[RCC_PLL_PLLSAI2], val); + s->cr = (s->cr & ~R_CR_PLLSAI2RDY_MASK) | + (val << R_CR_PLLSAI2RDY_SHIFT); + if (s->cier & R_CIER_PLLSAI2RDYIE_MASK) { + s->cifr |= R_CIFR_PLLSAI2RDYF_MASK; + } + + /* PLLSAI1ON and update PLLSAI1RDY */ + val = FIELD_EX32(s->cr, CR, PLLSAI1ON); + pll_set_enable(&s->plls[RCC_PLL_PLLSAI1], val); + s->cr = (s->cr & ~R_CR_PLLSAI1RDY_MASK) | + (val << R_CR_PLLSAI1RDY_SHIFT); + if (s->cier & R_CIER_PLLSAI1RDYIE_MASK) { + s->cifr |= R_CIFR_PLLSAI1RDYF_MASK; + } + + /* + * PLLON and update PLLRDY + * PLLON cannot be reset if the PLL clock is used as the system clock. + */ + val = FIELD_EX32(s->cr, CR, PLLON); + if (FIELD_EX32(s->cfgr, CFGR, SWS) != 0b11) { + pll_set_enable(&s->plls[RCC_PLL_PLL], val); + s->cr = (s->cr & ~R_CR_PLLRDY_MASK) | + (val << R_CR_PLLRDY_SHIFT); + if (s->cier & R_CIER_PLLRDYIE_MASK) { + s->cifr |= R_CIFR_PLLRDYF_MASK; + } + } else { + s->cr |= R_CR_PLLON_MASK; + } + + /* CSSON: TODO */ + /* HSEBYP: TODO */ + + /* + * HSEON and update HSERDY. + * HSEON cannot be reset if the HSE oscillator is used directly or + * indirectly as the system clock. 
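+ * ("Indirectly" covers the case where the PLL selected as SYSCLK is itself fed by HSE, which is what the current_pll_src check below handles.)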
+ */ + val = FIELD_EX32(s->cr, CR, HSEON); + if (FIELD_EX32(s->cfgr, CFGR, SWS) != 0b10 && + current_pll_src != RCC_CLOCK_MUX_SRC_HSE) { + s->cr = (s->cr & ~R_CR_HSERDY_MASK) | + (val << R_CR_HSERDY_SHIFT); + if (val) { + clock_update_hz(s->hse, s->hse_frequency); + if (s->cier & R_CIER_HSERDYIE_MASK) { + s->cifr |= R_CIFR_HSERDYF_MASK; + } + } else { + clock_update(s->hse, 0); + } + } else { + s->cr |= R_CR_HSEON_MASK; + } + + /* HSIAFS: TODO*/ + /* HSIKERON: TODO*/ + + /* + * HSION and update HSIRDY + * HSION is set by hardware if the HSI16 is used directly + * or indirectly as system clock. + */ + if (FIELD_EX32(s->cfgr, CFGR, SWS) == 0b01 || + current_pll_src == RCC_CLOCK_MUX_SRC_HSI) { + s->cr |= (R_CR_HSION_MASK | R_CR_HSIRDY_MASK); + clock_update_hz(s->hsi16_rc, HSI_FRQ); + if (s->cier & R_CIER_HSIRDYIE_MASK) { + s->cifr |= R_CIFR_HSIRDYF_MASK; + } + } else { + val = FIELD_EX32(s->cr, CR, HSION); + if (val) { + clock_update_hz(s->hsi16_rc, HSI_FRQ); + s->cr |= R_CR_HSIRDY_MASK; + if (s->cier & R_CIER_HSIRDYIE_MASK) { + s->cifr |= R_CIFR_HSIRDYF_MASK; + } + } else { + clock_update(s->hsi16_rc, 0); + s->cr &= ~R_CR_HSIRDY_MASK; + } + } + + /* MSIPLLEN: TODO */ + + /* + * MSION and update MSIRDY + * Set by hardware when used directly or indirectly as system clock. + */ + if (FIELD_EX32(s->cfgr, CFGR, SWS) == 0b00 || + current_pll_src == RCC_CLOCK_MUX_SRC_MSI) { + s->cr |= (R_CR_MSION_MASK | R_CR_MSIRDY_MASK); + if (!(previous_value & R_CR_MSION_MASK) && (s->cier & R_CIER_MSIRDYIE_MASK)) { + s->cifr |= R_CIFR_MSIRDYF_MASK; + } + rcc_update_msi(s, previous_value); + } else { + val = FIELD_EX32(s->cr, CR, MSION); + if (val) { + s->cr |= R_CR_MSIRDY_MASK; + rcc_update_msi(s, previous_value); + if (s->cier & R_CIER_MSIRDYIE_MASK) { + s->cifr |= R_CIFR_MSIRDYF_MASK; + } + } else { + s->cr &= ~R_CR_MSIRDY_MASK; + clock_update(s->msi_rc, 0); + } + } + rcc_update_irq(s); +} + +static void rcc_update_cfgr_register(Stm32l4x5RccState *s) +{ + uint32_t val; + /* MCOPRE */ + val = FIELD_EX32(s->cfgr, CFGR, MCOPRE); + assert(val <= 0b100); + clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_MCO], + 1, 1 << val); + + /* MCOSEL */ + val = FIELD_EX32(s->cfgr, CFGR, MCOSEL); + assert(val <= 0b111); + if (val == 0) { + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], false); + } else { + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], true); + clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_MCO], + val - 1); + } + + /* STOPWUCK */ + /* TODO */ + + /* PPRE2 */ + val = FIELD_EX32(s->cfgr, CFGR, PPRE2); + if (val < 0b100) { + clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_PCLK2], + 1, 1); + } else { + clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_PCLK2], + 1, 1 << (val - 0b11)); + } + + /* PPRE1 */ + val = FIELD_EX32(s->cfgr, CFGR, PPRE1); + if (val < 0b100) { + clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_PCLK1], + 1, 1); + } else { + clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_PCLK1], + 1, 1 << (val - 0b11)); + } + + /* HPRE */ + val = FIELD_EX32(s->cfgr, CFGR, HPRE); + if (val < 0b1000) { + clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_HCLK], + 1, 1); + } else { + clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_HCLK], + 1, 1 << (val - 0b111)); + } + + /* Update SWS */ + val = FIELD_EX32(s->cfgr, CFGR, SW); + clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_SYSCLK], + val); + s->cfgr &= ~R_CFGR_SWS_MASK; + s->cfgr |= val << R_CFGR_SWS_SHIFT; +} + +static void rcc_update_ahb1enr(Stm32l4x5RccState *s) +{ + #define AHB1ENR_SET_ENABLE(_peripheral_name) \ + 
clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \ + FIELD_EX32(s->ahb1enr, AHB1ENR, _peripheral_name##EN)) + + /* DMA2DEN: reserved for STM32L475xx */ + AHB1ENR_SET_ENABLE(TSC); + AHB1ENR_SET_ENABLE(CRC); + AHB1ENR_SET_ENABLE(FLASH); + AHB1ENR_SET_ENABLE(DMA2); + AHB1ENR_SET_ENABLE(DMA1); + + #undef AHB1ENR_SET_ENABLE +} + +static void rcc_update_ahb2enr(Stm32l4x5RccState *s) +{ + #define AHB2ENR_SET_ENABLE(_peripheral_name) \ + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \ + FIELD_EX32(s->ahb2enr, AHB2ENR, _peripheral_name##EN)) + + AHB2ENR_SET_ENABLE(RNG); + /* HASHEN: reserved for STM32L475xx */ + AHB2ENR_SET_ENABLE(AES); + /* DCMIEN: reserved for STM32L475xx */ + AHB2ENR_SET_ENABLE(ADC); + AHB2ENR_SET_ENABLE(OTGFS); + /* GPIOIEN: reserved for STM32L475xx */ + AHB2ENR_SET_ENABLE(GPIOA); + AHB2ENR_SET_ENABLE(GPIOB); + AHB2ENR_SET_ENABLE(GPIOC); + AHB2ENR_SET_ENABLE(GPIOD); + AHB2ENR_SET_ENABLE(GPIOE); + AHB2ENR_SET_ENABLE(GPIOF); + AHB2ENR_SET_ENABLE(GPIOG); + AHB2ENR_SET_ENABLE(GPIOH); + + #undef AHB2ENR_SET_ENABLE +} + +static void rcc_update_ahb3enr(Stm32l4x5RccState *s) +{ + #define AHB3ENR_SET_ENABLE(_peripheral_name) \ + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \ + FIELD_EX32(s->ahb3enr, AHB3ENR, _peripheral_name##EN)) + + AHB3ENR_SET_ENABLE(QSPI); + AHB3ENR_SET_ENABLE(FMC); + + #undef AHB3ENR_SET_ENABLE +} + +static void rcc_update_apb1enr(Stm32l4x5RccState *s) +{ + #define APB1ENR1_SET_ENABLE(_peripheral_name) \ + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \ + FIELD_EX32(s->apb1enr1, APB1ENR1, _peripheral_name##EN)) + #define APB1ENR2_SET_ENABLE(_peripheral_name) \ + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \ + FIELD_EX32(s->apb1enr2, APB1ENR2, _peripheral_name##EN)) + + /* APB1ENR1 */ + APB1ENR1_SET_ENABLE(LPTIM1); + APB1ENR1_SET_ENABLE(OPAMP); + APB1ENR1_SET_ENABLE(DAC1); + APB1ENR1_SET_ENABLE(PWR); + /* CAN2: reserved for STM32L4x5 */ + APB1ENR1_SET_ENABLE(CAN1); + /* CRSEN: reserved for STM32L4x5 */ + APB1ENR1_SET_ENABLE(I2C3); + APB1ENR1_SET_ENABLE(I2C2); + APB1ENR1_SET_ENABLE(I2C1); + APB1ENR1_SET_ENABLE(UART5); + APB1ENR1_SET_ENABLE(UART4); + APB1ENR1_SET_ENABLE(USART3); + APB1ENR1_SET_ENABLE(USART2); + APB1ENR1_SET_ENABLE(SPI3); + APB1ENR1_SET_ENABLE(SPI2); + APB1ENR1_SET_ENABLE(WWDG); + /* RTCAPB: reserved for STM32L4x5 */ + APB1ENR1_SET_ENABLE(LCD); + APB1ENR1_SET_ENABLE(TIM7); + APB1ENR1_SET_ENABLE(TIM6); + APB1ENR1_SET_ENABLE(TIM5); + APB1ENR1_SET_ENABLE(TIM4); + APB1ENR1_SET_ENABLE(TIM3); + APB1ENR1_SET_ENABLE(TIM2); + + /* APB1ENR2 */ + APB1ENR2_SET_ENABLE(LPTIM2); + APB1ENR2_SET_ENABLE(SWPMI1); + /* I2C4EN: reserved for STM32L4x5 */ + APB1ENR2_SET_ENABLE(LPUART1); + + #undef APB1ENR1_SET_ENABLE + #undef APB1ENR2_SET_ENABLE +} + +static void rcc_update_apb2enr(Stm32l4x5RccState *s) +{ + #define APB2ENR_SET_ENABLE(_peripheral_name) \ + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \ + FIELD_EX32(s->apb2enr, APB2ENR, _peripheral_name##EN)) + + APB2ENR_SET_ENABLE(DFSDM1); + APB2ENR_SET_ENABLE(SAI2); + APB2ENR_SET_ENABLE(SAI1); + APB2ENR_SET_ENABLE(TIM17); + APB2ENR_SET_ENABLE(TIM16); + APB2ENR_SET_ENABLE(TIM15); + APB2ENR_SET_ENABLE(USART1); + APB2ENR_SET_ENABLE(TIM8); + APB2ENR_SET_ENABLE(SPI1); + APB2ENR_SET_ENABLE(TIM1); + APB2ENR_SET_ENABLE(SDMMC1); + APB2ENR_SET_ENABLE(FW); + APB2ENR_SET_ENABLE(SYSCFG); + + #undef APB2ENR_SET_ENABLE +} + +/* + * The 3 PLLs share the same register layout + * so we 
can use the same function for all of them + * Note: no frequency bounds checking is done here. + */ +static void rcc_update_pllsaixcfgr(Stm32l4x5RccState *s, RccPll pll_id) +{ + uint32_t reg, val; + switch (pll_id) { + case RCC_PLL_PLL: + reg = s->pllcfgr; + break; + case RCC_PLL_PLLSAI1: + reg = s->pllsai1cfgr; + break; + case RCC_PLL_PLLSAI2: + reg = s->pllsai2cfgr; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid PLL ID: %u\n", __func__, pll_id); + return; + } + + /* PLLPDIV */ + val = FIELD_EX32(reg, PLLCFGR, PLLPDIV); + /* 1 is a reserved value */ + if (val == 0) { + /* Get PLLP value */ + val = FIELD_EX32(reg, PLLCFGR, PLLP); + pll_set_channel_divider(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_P, + (val ? 17 : 7)); + } else if (val > 1) { + pll_set_channel_divider(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_P, + val); + } + + + /* PLLR */ + val = FIELD_EX32(reg, PLLCFGR, PLLR); + pll_set_channel_divider(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_R, + 2 * (val + 1)); + + /* PLLREN */ + val = FIELD_EX32(reg, PLLCFGR, PLLREN); + pll_set_channel_enable(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_R, val); + + /* PLLQ */ + val = FIELD_EX32(reg, PLLCFGR, PLLQ); + pll_set_channel_divider(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_Q, + 2 * (val + 1)); + + /* PLLQEN */ + val = FIELD_EX32(reg, PLLCFGR, PLLQEN); + pll_set_channel_enable(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_Q, val); + + /* PLLPEN */ + val = FIELD_EX32(reg, PLLCFGR, PLLPEN); + pll_set_channel_enable(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_P, val); + + /* PLLN */ + val = FIELD_EX32(reg, PLLCFGR, PLLN); + pll_set_vco_multiplier(&s->plls[pll_id], val); +} + +static void rcc_update_pllcfgr(Stm32l4x5RccState *s) +{ + int val; + + /* Use common layout */ + rcc_update_pllsaixcfgr(s, RCC_PLL_PLL); + + /* Fetch specific fields for pllcfgr */ + + /* PLLM */ + val = FIELD_EX32(s->pllcfgr, PLLCFGR, PLLM); + clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT], 1, (val + 1)); + + /* PLLSRC */ + val = FIELD_EX32(s->pllcfgr, PLLCFGR, PLLSRC); + if (val == 0) { + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT], false); + } else { + clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT], val - 1); + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT], true); + } +} + +static void rcc_update_ccipr(Stm32l4x5RccState *s) +{ + #define CCIPR_SET_SOURCE(_peripheral_name) \ + clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \ + FIELD_EX32(s->ccipr, CCIPR, _peripheral_name##SEL)) + + CCIPR_SET_SOURCE(DFSDM1); + CCIPR_SET_SOURCE(SWPMI1); + CCIPR_SET_SOURCE(ADC); + CCIPR_SET_SOURCE(CLK48); + CCIPR_SET_SOURCE(SAI2); + CCIPR_SET_SOURCE(SAI1); + CCIPR_SET_SOURCE(LPTIM2); + CCIPR_SET_SOURCE(LPTIM1); + CCIPR_SET_SOURCE(I2C3); + CCIPR_SET_SOURCE(I2C2); + CCIPR_SET_SOURCE(I2C1); + CCIPR_SET_SOURCE(LPUART1); + CCIPR_SET_SOURCE(UART5); + CCIPR_SET_SOURCE(UART4); + CCIPR_SET_SOURCE(USART3); + CCIPR_SET_SOURCE(USART2); + CCIPR_SET_SOURCE(USART1); + + #undef CCIPR_SET_SOURCE +} + +static void rcc_update_bdcr(Stm32l4x5RccState *s) +{ + int val; + + /* LSCOSEL */ + val = FIELD_EX32(s->bdcr, BDCR, LSCOSEL); + clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_LSCO], val); + + val = FIELD_EX32(s->bdcr, BDCR, LSCOEN); + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_LSCO], val); + + /* BDRST */ + /* + * The documentation is not clear if the RTCEN flag disables the RTC and + * the LCD common mux or if it only affects the RTC. 
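+ * (The LCDEN flag mentioned below is the LCD clock enable bit in APB1ENR1, handled by rcc_update_apb1enr() above.)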
+ * As the LCDEN flag exists, we assume here that it only affects the RTC. + */ + val = FIELD_EX32(s->bdcr, BDCR, RTCEN); + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_RTC], val); + /* LCD and RTC share the same clock */ + val = FIELD_EX32(s->bdcr, BDCR, RTCSEL); + clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_LCD_AND_RTC_COMMON], val); + + /* LSECSSON */ + /* LSEDRV[1:0] */ + /* LSEBYP */ + + /* LSEON: Update LSERDY at the same time */ + val = FIELD_EX32(s->bdcr, BDCR, LSEON); + if (val) { + clock_update_hz(s->lse_crystal, LSE_FRQ); + s->bdcr |= R_BDCR_LSERDY_MASK; + if (s->cier & R_CIER_LSERDYIE_MASK) { + s->cifr |= R_CIFR_LSERDYF_MASK; + } + } else { + clock_update(s->lse_crystal, 0); + s->bdcr &= ~R_BDCR_LSERDY_MASK; + } + + rcc_update_irq(s); +} + +static void rcc_update_csr(Stm32l4x5RccState *s) +{ + int val; + + /* Reset flags: Not implemented */ + /* MSISRANGE: Not implemented after reset */ + + /* LSION: Update LSIRDY at the same time */ + val = FIELD_EX32(s->csr, CSR, LSION); + if (val) { + clock_update_hz(s->lsi_rc, LSI_FRQ); + s->csr |= R_CSR_LSIRDY_MASK; + if (s->cier & R_CIER_LSIRDYIE_MASK) { + s->cifr |= R_CIFR_LSIRDYF_MASK; + } + } else { + /* + * TODO: Handle when the LSI is set independently of LSION. + * E.g. when the LSI is set by the RTC. + * See the reference manual for more details. + */ + clock_update(s->lsi_rc, 0); + s->csr &= ~R_CSR_LSIRDY_MASK; + } + + rcc_update_irq(s); +} + +static void stm32l4x5_rcc_reset_hold(Object *obj) +{ + Stm32l4x5RccState *s = STM32L4X5_RCC(obj); + s->cr = 0x00000063; + /* + * Factory-programmed calibration data + * From the reference manual: 0x10XX 00XX + * Value taken from a real card. + */ + s->icscr = 0x106E0082; + s->cfgr = 0x0; + s->pllcfgr = 0x00001000; + s->pllsai1cfgr = 0x00001000; + s->pllsai2cfgr = 0x00001000; + s->cier = 0x0; + s->cifr = 0x0; + s->ahb1rstr = 0x0; + s->ahb2rstr = 0x0; + s->ahb3rstr = 0x0; + s->apb1rstr1 = 0x0; + s->apb1rstr2 = 0x0; + s->apb2rstr = 0x0; + s->ahb1enr = 0x00000100; + s->ahb2enr = 0x0; + s->ahb3enr = 0x0; + s->apb1enr1 = 0x0; + s->apb1enr2 = 0x0; + s->apb2enr = 0x0; + s->ahb1smenr = 0x00011303; + s->ahb2smenr = 0x000532FF; + s->ahb3smenr = 0x00000101; + s->apb1smenr1 = 0xF2FECA3F; + s->apb1smenr2 = 0x00000025; + s->apb2smenr = 0x01677C01; + s->ccipr = 0x0; + s->bdcr = 0x0; + s->csr = 0x0C000600; +} + +static uint64_t stm32l4x5_rcc_read(void *opaque, hwaddr addr, + unsigned int size) +{ + Stm32l4x5RccState *s = opaque; + uint64_t retvalue = 0; + + switch (addr) { + case A_CR: + retvalue = s->cr; + break; + case A_ICSCR: + retvalue = s->icscr; + break; + case A_CFGR: + retvalue = s->cfgr; + break; + case A_PLLCFGR: + retvalue = s->pllcfgr; + break; + case A_PLLSAI1CFGR: + retvalue = s->pllsai1cfgr; + break; + case A_PLLSAI2CFGR: + retvalue = s->pllsai2cfgr; + break; + case A_CIER: + retvalue = s->cier; + break; + case A_CIFR: + retvalue = s->cifr; + break; + case A_CICR: + /* CICR is write only, return the reset value = 0 */ + break; + case A_AHB1RSTR: + retvalue = s->ahb1rstr; + break; + case A_AHB2RSTR: + retvalue = s->ahb2rstr; + break; + case A_AHB3RSTR: + retvalue = s->ahb3rstr; + break; + case A_APB1RSTR1: + retvalue = s->apb1rstr1; + break; + case A_APB1RSTR2: + retvalue = s->apb1rstr2; + break; + case A_APB2RSTR: + retvalue = s->apb2rstr; + break; + case A_AHB1ENR: + retvalue = s->ahb1enr; + break; + case A_AHB2ENR: + retvalue = s->ahb2enr; + break; + case A_AHB3ENR: + retvalue = s->ahb3enr; + break; + case A_APB1ENR1: + retvalue = s->apb1enr1; + break; + case A_APB1ENR2: + 
retvalue = s->apb1enr2; + break; + case A_APB2ENR: + retvalue = s->apb2enr; + break; + case A_AHB1SMENR: + retvalue = s->ahb1smenr; + break; + case A_AHB2SMENR: + retvalue = s->ahb2smenr; + break; + case A_AHB3SMENR: + retvalue = s->ahb3smenr; + break; + case A_APB1SMENR1: + retvalue = s->apb1smenr1; + break; + case A_APB1SMENR2: + retvalue = s->apb1smenr2; + break; + case A_APB2SMENR: + retvalue = s->apb2smenr; + break; + case A_CCIPR: + retvalue = s->ccipr; + break; + case A_BDCR: + retvalue = s->bdcr; + break; + case A_CSR: + retvalue = s->csr; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + break; + } + + trace_stm32l4x5_rcc_read(addr, retvalue); + + return retvalue; +} + +static void stm32l4x5_rcc_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + Stm32l4x5RccState *s = opaque; + uint32_t previous_value = 0; + const uint32_t value = val64; + + trace_stm32l4x5_rcc_write(addr, value); + + switch (addr) { + case A_CR: + previous_value = s->cr; + s->cr = (s->cr & CR_READ_SET_MASK) | + (value & (CR_READ_SET_MASK | ~CR_READ_ONLY_MASK)); + rcc_update_cr_register(s, previous_value); + break; + case A_ICSCR: + s->icscr = value & ~ICSCR_READ_ONLY_MASK; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for ICSCR\n", __func__); + break; + case A_CFGR: + s->cfgr = value & ~CFGR_READ_ONLY_MASK; + rcc_update_cfgr_register(s); + break; + case A_PLLCFGR: + s->pllcfgr = value; + rcc_update_pllcfgr(s); + break; + case A_PLLSAI1CFGR: + s->pllsai1cfgr = value; + rcc_update_pllsaixcfgr(s, RCC_PLL_PLLSAI1); + break; + case A_PLLSAI2CFGR: + s->pllsai2cfgr = value; + rcc_update_pllsaixcfgr(s, RCC_PLL_PLLSAI2); + break; + case A_CIER: + s->cier = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for CIER\n", __func__); + break; + case A_CIFR: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write attempt into read-only register (CIFR) 0x%"PRIx32"\n", + __func__, value); + break; + case A_CICR: + /* Clear interrupt flags by writing a 1 to the CICR register */ + s->cifr &= ~value; + rcc_update_irq(s); + break; + /* Reset behaviors are not implemented */ + case A_AHB1RSTR: + s->ahb1rstr = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for AHB1RSTR\n", __func__); + break; + case A_AHB2RSTR: + s->ahb2rstr = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for AHB2RSTR\n", __func__); + break; + case A_AHB3RSTR: + s->ahb3rstr = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for AHB3RSTR\n", __func__); + break; + case A_APB1RSTR1: + s->apb1rstr1 = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for APB1RSTR1\n", __func__); + break; + case A_APB1RSTR2: + s->apb1rstr2 = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for APB1RSTR2\n", __func__); + break; + case A_APB2RSTR: + s->apb2rstr = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for APB2RSTR\n", __func__); + break; + case A_AHB1ENR: + s->ahb1enr = value; + rcc_update_ahb1enr(s); + break; + case A_AHB2ENR: + s->ahb2enr = value; + rcc_update_ahb2enr(s); + break; + case A_AHB3ENR: + s->ahb3enr = value; + rcc_update_ahb3enr(s); + break; + case A_APB1ENR1: + s->apb1enr1 = value; + rcc_update_apb1enr(s); + break; + case A_APB1ENR2: + s->apb1enr2 = value; + rcc_update_apb1enr(s); + break; + case A_APB2ENR: + s->apb2enr = (s->apb2enr & APB2ENR_READ_SET_MASK) | value; + rcc_update_apb2enr(s); + break; + /* Behaviors for Sleep and 
Stop modes are not implemented */ + case A_AHB1SMENR: + s->ahb1smenr = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for AHB1SMENR\n", __func__); + break; + case A_AHB2SMENR: + s->ahb2smenr = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for AHB2SMENR\n", __func__); + break; + case A_AHB3SMENR: + s->ahb3smenr = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for AHB3SMENR\n", __func__); + break; + case A_APB1SMENR1: + s->apb1smenr1 = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for APB1SMENR1\n", __func__); + break; + case A_APB1SMENR2: + s->apb1smenr2 = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for APB1SMENR2\n", __func__); + break; + case A_APB2SMENR: + s->apb2smenr = value; + qemu_log_mask(LOG_UNIMP, + "%s: Side-effects not implemented for APB2SMENR\n", __func__); + break; + case A_CCIPR: + s->ccipr = value; + rcc_update_ccipr(s); + break; + case A_BDCR: + s->bdcr = value & ~BDCR_READ_ONLY_MASK; + rcc_update_bdcr(s); + break; + case A_CSR: + s->csr = value & ~CSR_READ_ONLY_MASK; + rcc_update_csr(s); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + } +} + +static const MemoryRegionOps stm32l4x5_rcc_ops = { + .read = stm32l4x5_rcc_read, + .write = stm32l4x5_rcc_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .max_access_size = 4, + .min_access_size = 4, + .unaligned = false + }, + .impl = { + .max_access_size = 4, + .min_access_size = 4, + .unaligned = false + }, +}; + +static const ClockPortInitArray stm32l4x5_rcc_clocks = { + QDEV_CLOCK_IN(Stm32l4x5RccState, hsi16_rc, NULL, 0), + QDEV_CLOCK_IN(Stm32l4x5RccState, msi_rc, NULL, 0), + QDEV_CLOCK_IN(Stm32l4x5RccState, hse, NULL, 0), + QDEV_CLOCK_IN(Stm32l4x5RccState, lsi_rc, NULL, 0), + QDEV_CLOCK_IN(Stm32l4x5RccState, lse_crystal, NULL, 0), + QDEV_CLOCK_IN(Stm32l4x5RccState, sai1_extclk, NULL, 0), + QDEV_CLOCK_IN(Stm32l4x5RccState, sai2_extclk, NULL, 0), + QDEV_CLOCK_END +}; + + +static void stm32l4x5_rcc_init(Object *obj) +{ + Stm32l4x5RccState *s = STM32L4X5_RCC(obj); + size_t i; + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->mmio, obj, &stm32l4x5_rcc_ops, s, + TYPE_STM32L4X5_RCC, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + + qdev_init_clocks(DEVICE(s), stm32l4x5_rcc_clocks); + + for (i = 0; i < RCC_NUM_PLL; i++) { + object_initialize_child(obj, PLL_INIT_INFO[i].name, + &s->plls[i], TYPE_RCC_PLL); + set_pll_init_info(&s->plls[i], i); + } + + for (i = 0; i < RCC_NUM_CLOCK_MUX; i++) { + char *alias; + + object_initialize_child(obj, CLOCK_MUX_INIT_INFO[i].name, + &s->clock_muxes[i], + TYPE_RCC_CLOCK_MUX); + set_clock_mux_init_info(&s->clock_muxes[i], i); + + if (!CLOCK_MUX_INIT_INFO[i].hidden) { + /* Expose muxes output as RCC outputs */ + alias = g_strdup_printf("%s-out", CLOCK_MUX_INIT_INFO[i].name); + qdev_alias_clock(DEVICE(&s->clock_muxes[i]), "out", DEVICE(obj), alias); + g_free(alias); + } + } + + s->gnd = clock_new(obj, "gnd"); +} + +static void connect_mux_sources(Stm32l4x5RccState *s, + RccClockMuxState *mux, + const RccClockMuxSource *clk_mapping) +{ + size_t i; + + Clock * const CLK_SRC_MAPPING[] = { + [RCC_CLOCK_MUX_SRC_GND] = s->gnd, + [RCC_CLOCK_MUX_SRC_HSI] = s->hsi16_rc, + [RCC_CLOCK_MUX_SRC_HSE] = s->hse, + [RCC_CLOCK_MUX_SRC_MSI] = s->msi_rc, + [RCC_CLOCK_MUX_SRC_LSI] = s->lsi_rc, + [RCC_CLOCK_MUX_SRC_LSE] = s->lse_crystal, + [RCC_CLOCK_MUX_SRC_SAI1_EXTCLK] = s->sai1_extclk, + 
[RCC_CLOCK_MUX_SRC_SAI2_EXTCLK] = s->sai2_extclk, + [RCC_CLOCK_MUX_SRC_PLL] = + s->plls[RCC_PLL_PLL].channels[RCC_PLL_CHANNEL_PLLCLK], + [RCC_CLOCK_MUX_SRC_PLLSAI1] = + s->plls[RCC_PLL_PLLSAI1].channels[RCC_PLLSAI1_CHANNEL_PLLSAI1CLK], + [RCC_CLOCK_MUX_SRC_PLLSAI2] = + s->plls[RCC_PLL_PLLSAI2].channels[RCC_PLLSAI2_CHANNEL_PLLSAI2CLK], + [RCC_CLOCK_MUX_SRC_PLLSAI3] = + s->plls[RCC_PLL_PLL].channels[RCC_PLL_CHANNEL_PLLSAI3CLK], + [RCC_CLOCK_MUX_SRC_PLL48M1] = + s->plls[RCC_PLL_PLL].channels[RCC_PLL_CHANNEL_PLL48M1CLK], + [RCC_CLOCK_MUX_SRC_PLL48M2] = + s->plls[RCC_PLL_PLLSAI1].channels[RCC_PLLSAI1_CHANNEL_PLL48M2CLK], + [RCC_CLOCK_MUX_SRC_PLLADC1] = + s->plls[RCC_PLL_PLLSAI1].channels[RCC_PLLSAI1_CHANNEL_PLLADC1CLK], + [RCC_CLOCK_MUX_SRC_PLLADC2] = + s->plls[RCC_PLL_PLLSAI2] .channels[RCC_PLLSAI2_CHANNEL_PLLADC2CLK], + [RCC_CLOCK_MUX_SRC_SYSCLK] = s->clock_muxes[RCC_CLOCK_MUX_SYSCLK].out, + [RCC_CLOCK_MUX_SRC_HCLK] = s->clock_muxes[RCC_CLOCK_MUX_HCLK].out, + [RCC_CLOCK_MUX_SRC_PCLK1] = s->clock_muxes[RCC_CLOCK_MUX_PCLK1].out, + [RCC_CLOCK_MUX_SRC_PCLK2] = s->clock_muxes[RCC_CLOCK_MUX_PCLK2].out, + [RCC_CLOCK_MUX_SRC_HSE_OVER_32] = s->clock_muxes[RCC_CLOCK_MUX_HSE_OVER_32].out, + [RCC_CLOCK_MUX_SRC_LCD_AND_RTC_COMMON] = + s->clock_muxes[RCC_CLOCK_MUX_LCD_AND_RTC_COMMON].out, + }; + + assert(ARRAY_SIZE(CLK_SRC_MAPPING) == RCC_CLOCK_MUX_SRC_NUMBER); + + for (i = 0; i < RCC_NUM_CLOCK_MUX_SRC; i++) { + RccClockMuxSource mapping = clk_mapping[i]; + clock_set_source(mux->srcs[i], CLK_SRC_MAPPING[mapping]); + } +} + + +static const VMStateDescription vmstate_stm32l4x5_rcc = { + .name = TYPE_STM32L4X5_RCC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cr, Stm32l4x5RccState), + VMSTATE_UINT32(icscr, Stm32l4x5RccState), + VMSTATE_UINT32(cfgr, Stm32l4x5RccState), + VMSTATE_UINT32(pllcfgr, Stm32l4x5RccState), + VMSTATE_UINT32(pllsai1cfgr, Stm32l4x5RccState), + VMSTATE_UINT32(pllsai2cfgr, Stm32l4x5RccState), + VMSTATE_UINT32(cier, Stm32l4x5RccState), + VMSTATE_UINT32(cifr, Stm32l4x5RccState), + VMSTATE_UINT32(ahb1rstr, Stm32l4x5RccState), + VMSTATE_UINT32(ahb2rstr, Stm32l4x5RccState), + VMSTATE_UINT32(ahb3rstr, Stm32l4x5RccState), + VMSTATE_UINT32(apb1rstr1, Stm32l4x5RccState), + VMSTATE_UINT32(apb1rstr2, Stm32l4x5RccState), + VMSTATE_UINT32(apb2rstr, Stm32l4x5RccState), + VMSTATE_UINT32(ahb1enr, Stm32l4x5RccState), + VMSTATE_UINT32(ahb2enr, Stm32l4x5RccState), + VMSTATE_UINT32(ahb3enr, Stm32l4x5RccState), + VMSTATE_UINT32(apb1enr1, Stm32l4x5RccState), + VMSTATE_UINT32(apb1enr2, Stm32l4x5RccState), + VMSTATE_UINT32(apb2enr, Stm32l4x5RccState), + VMSTATE_UINT32(ahb1smenr, Stm32l4x5RccState), + VMSTATE_UINT32(ahb2smenr, Stm32l4x5RccState), + VMSTATE_UINT32(ahb3smenr, Stm32l4x5RccState), + VMSTATE_UINT32(apb1smenr1, Stm32l4x5RccState), + VMSTATE_UINT32(apb1smenr2, Stm32l4x5RccState), + VMSTATE_UINT32(apb2smenr, Stm32l4x5RccState), + VMSTATE_UINT32(ccipr, Stm32l4x5RccState), + VMSTATE_UINT32(bdcr, Stm32l4x5RccState), + VMSTATE_UINT32(csr, Stm32l4x5RccState), + VMSTATE_CLOCK(hsi16_rc, Stm32l4x5RccState), + VMSTATE_CLOCK(msi_rc, Stm32l4x5RccState), + VMSTATE_CLOCK(hse, Stm32l4x5RccState), + VMSTATE_CLOCK(lsi_rc, Stm32l4x5RccState), + VMSTATE_CLOCK(lse_crystal, Stm32l4x5RccState), + VMSTATE_CLOCK(sai1_extclk, Stm32l4x5RccState), + VMSTATE_CLOCK(sai2_extclk, Stm32l4x5RccState), + VMSTATE_END_OF_LIST() + } +}; + + +static void stm32l4x5_rcc_realize(DeviceState *dev, Error **errp) +{ + Stm32l4x5RccState *s = STM32L4X5_RCC(dev); + size_t i; + + if (s->hse_frequency < 
4000000ULL || + s->hse_frequency > 48000000ULL) { + error_setg(errp, + "HSE frequency is outside of the allowed [4-48]Mhz range: %" PRIx64 "", + s->hse_frequency); + return; + } + + for (i = 0; i < RCC_NUM_PLL; i++) { + RccPllState *pll = &s->plls[i]; + + clock_set_source(pll->in, s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT].out); + + if (!qdev_realize(DEVICE(pll), NULL, errp)) { + return; + } + } + + for (i = 0; i < RCC_NUM_CLOCK_MUX; i++) { + RccClockMuxState *clock_mux = &s->clock_muxes[i]; + + connect_mux_sources(s, clock_mux, CLOCK_MUX_INIT_INFO[i].src_mapping); + + if (!qdev_realize(DEVICE(clock_mux), NULL, errp)) { + return; + } + } + + /* + * Start clocks after everything is connected + * to propagate the frequencies along the tree. + */ + clock_update_hz(s->msi_rc, MSI_DEFAULT_FRQ); + clock_update_hz(s->sai1_extclk, s->sai1_extclk_frequency); + clock_update_hz(s->sai2_extclk, s->sai2_extclk_frequency); + clock_update(s->gnd, 0); +} + +static Property stm32l4x5_rcc_properties[] = { + DEFINE_PROP_UINT64("hse_frequency", Stm32l4x5RccState, + hse_frequency, HSE_DEFAULT_FRQ), + DEFINE_PROP_UINT64("sai1_extclk_frequency", Stm32l4x5RccState, + sai1_extclk_frequency, 0), + DEFINE_PROP_UINT64("sai2_extclk_frequency", Stm32l4x5RccState, + sai2_extclk_frequency, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void stm32l4x5_rcc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + assert(ARRAY_SIZE(CLOCK_MUX_INIT_INFO) == RCC_NUM_CLOCK_MUX); + + rc->phases.hold = stm32l4x5_rcc_reset_hold; + device_class_set_props(dc, stm32l4x5_rcc_properties); + dc->realize = stm32l4x5_rcc_realize; + dc->vmsd = &vmstate_stm32l4x5_rcc; +} + +static const TypeInfo stm32l4x5_rcc_types[] = { + { + .name = TYPE_STM32L4X5_RCC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Stm32l4x5RccState), + .instance_init = stm32l4x5_rcc_init, + .class_init = stm32l4x5_rcc_class_init, + }, { + .name = TYPE_RCC_CLOCK_MUX, + .parent = TYPE_DEVICE, + .instance_size = sizeof(RccClockMuxState), + .instance_init = clock_mux_init, + .class_init = clock_mux_class_init, + }, { + .name = TYPE_RCC_PLL, + .parent = TYPE_DEVICE, + .instance_size = sizeof(RccPllState), + .instance_init = pll_init, + .class_init = pll_class_init, + } +}; + +DEFINE_TYPES(stm32l4x5_rcc_types) diff --git a/hw/misc/stm32l4x5_syscfg.c b/hw/misc/stm32l4x5_syscfg.c new file mode 100644 index 00000000000..3dafc00b49d --- /dev/null +++ b/hw/misc/stm32l4x5_syscfg.c @@ -0,0 +1,267 @@ +/* + * STM32L4x5 SYSCFG (System Configuration Controller) + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This work is based on the stm32f4xx_syscfg by Alistair Francis. + * Original code is licensed under the MIT License: + * + * Copyright (c) 2014 Alistair Francis + */ + +/* + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. 
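Before moving on to SYSCFG: the RCC realize()/property code above can be exercised from a board model roughly as follows; the surrounding field names and error handling are illustrative only, and realize() fails for any HSE value outside the 4-48 MHz window checked above:

    DeviceState *rcc = DEVICE(&s->rcc);                           /* hypothetical field */
    qdev_prop_set_uint64(rcc, "hse_frequency", 16 * 1000 * 1000); /* in range */
    qdev_prop_set_uint64(rcc, "sai1_extclk_frequency", 48000);    /* optional */
    if (!sysbus_realize(SYS_BUS_DEVICE(rcc), errp)) {
        return;    /* e.g. 2 MHz or 50 MHz would be rejected by the check above */
    }
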
+ * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/misc/stm32l4x5_syscfg.h" +#include "hw/gpio/stm32l4x5_gpio.h" + +#define SYSCFG_MEMRMP 0x00 +#define SYSCFG_CFGR1 0x04 +#define SYSCFG_EXTICR1 0x08 +#define SYSCFG_EXTICR2 0x0C +#define SYSCFG_EXTICR3 0x10 +#define SYSCFG_EXTICR4 0x14 +#define SYSCFG_SCSR 0x18 +#define SYSCFG_CFGR2 0x1C +#define SYSCFG_SWPR 0x20 +#define SYSCFG_SKR 0x24 +#define SYSCFG_SWPR2 0x28 + +/* 00000000_00000000_00000001_00000111 */ +#define ACTIVABLE_BITS_MEMRP 0x00000107 + +/* 11111100_11111111_00000001_00000000 */ +#define ACTIVABLE_BITS_CFGR1 0xFCFF0100 +/* 00000000_00000000_00000000_00000001 */ +#define FIREWALL_DISABLE_CFGR1 0x00000001 + +/* 00000000_00000000_11111111_11111111 */ +#define ACTIVABLE_BITS_EXTICR 0x0000FFFF + +/* 00000000_00000000_00000000_00000011 */ +/* #define ACTIVABLE_BITS_SCSR 0x00000003 */ + +/* 00000000_00000000_00000000_00001111 */ +#define ECC_LOCK_CFGR2 0x0000000F +/* 00000000_00000000_00000001_00000000 */ +#define SRAM2_PARITY_ERROR_FLAG_CFGR2 0x00000100 + +/* 00000000_00000000_00000000_11111111 */ +#define ACTIVABLE_BITS_SKR 0x000000FF + +#define NUM_LINES_PER_EXTICR_REG 4 + +static void stm32l4x5_syscfg_hold_reset(Object *obj) +{ + Stm32l4x5SyscfgState *s = STM32L4X5_SYSCFG(obj); + + s->memrmp = 0x00000000; + s->cfgr1 = 0x7C000001; + s->exticr[0] = 0x00000000; + s->exticr[1] = 0x00000000; + s->exticr[2] = 0x00000000; + s->exticr[3] = 0x00000000; + s->scsr = 0x00000000; + s->cfgr2 = 0x00000000; + s->swpr = 0x00000000; + s->skr = 0x00000000; + s->swpr2 = 0x00000000; +} + +static void stm32l4x5_syscfg_set_irq(void *opaque, int irq, int level) +{ + Stm32l4x5SyscfgState *s = opaque; + const uint8_t gpio = irq / GPIO_NUM_PINS; + const int line = irq % GPIO_NUM_PINS; + + const int exticr_reg = line / NUM_LINES_PER_EXTICR_REG; + const int startbit = (line % NUM_LINES_PER_EXTICR_REG) * 4; + + g_assert(gpio < NUM_GPIOS); + trace_stm32l4x5_syscfg_set_irq(gpio, line, level); + + if (extract32(s->exticr[exticr_reg], startbit, 4) == gpio) { + trace_stm32l4x5_syscfg_forward_exti(line); + qemu_set_irq(s->gpio_out[line], level); + } +} + +static uint64_t stm32l4x5_syscfg_read(void *opaque, hwaddr addr, + unsigned int size) +{ + Stm32l4x5SyscfgState *s = opaque; + + trace_stm32l4x5_syscfg_read(addr); + + switch (addr) { + case SYSCFG_MEMRMP: + return s->memrmp; + case SYSCFG_CFGR1: + return s->cfgr1; + case SYSCFG_EXTICR1...SYSCFG_EXTICR4: + return s->exticr[(addr - SYSCFG_EXTICR1) / 4]; + case SYSCFG_SCSR: + return s->scsr; + case SYSCFG_CFGR2: + return s->cfgr2; + case SYSCFG_SWPR: + return s->swpr; + case SYSCFG_SKR: + return s->skr; + case SYSCFG_SWPR2: + return s->swpr2; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr); + return 0; + } +} +static void stm32l4x5_syscfg_write(void *opaque, hwaddr addr, + uint64_t value, unsigned int size) +{ + Stm32l4x5SyscfgState *s = opaque; + + trace_stm32l4x5_syscfg_write(addr, value); + + switch (addr) { + case SYSCFG_MEMRMP: + qemu_log_mask(LOG_UNIMP, + "%s: Changing the memory mapping isn't supported\n", + __func__); + s->memrmp = value & ACTIVABLE_BITS_MEMRP; + return; + case SYSCFG_CFGR1: + qemu_log_mask(LOG_UNIMP, + "%s: Functions in CFGRx aren't supported\n", + __func__); + /* bit 0 (firewall dis.) is cleared by software, set only by reset. 
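A worked example for stm32l4x5_syscfg_set_irq() above, assuming GPIO_NUM_PINS is 16 and ports are numbered A=0, B=1, C=2, ...: a pulse on PC13 arrives as

    irq        = 2 * 16 + 13  = 45    (port C, pin 13)
    gpio       = 45 / 16      = 2
    line       = 45 % 16      = 13
    exticr_reg = 13 / 4       = 3     -> SYSCFG_EXTICR4
    startbit   = (13 % 4) * 4 = 4

and the pulse is forwarded to EXTI line 13 only if EXTICR4[7:4] equals 2, i.e. the guest routed that EXTI line to port C.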
*/ + s->cfgr1 = (s->cfgr1 & value & FIREWALL_DISABLE_CFGR1) | + (value & ACTIVABLE_BITS_CFGR1); + return; + case SYSCFG_EXTICR1...SYSCFG_EXTICR4: + s->exticr[(addr - SYSCFG_EXTICR1) / 4] = + (value & ACTIVABLE_BITS_EXTICR); + return; + case SYSCFG_SCSR: + qemu_log_mask(LOG_UNIMP, + "%s: Erasing SRAM2 isn't supported\n", + __func__); + /* + * only non reserved bits are : + * bit 0 (write-protected by a passkey), bit 1 (meant to be read) + * so it serves no purpose yet to add : + * s->scsr = value & 0x3; + */ + return; + case SYSCFG_CFGR2: + qemu_log_mask(LOG_UNIMP, + "%s: Functions in CFGRx aren't supported\n", + __func__); + /* bit 8 (SRAM2 PEF) is cleared by software by writing a '1'.*/ + /* bits[3:0] (ECC Lock) are set by software, cleared only by reset.*/ + s->cfgr2 = (s->cfgr2 | (value & ECC_LOCK_CFGR2)) & + ~(value & SRAM2_PARITY_ERROR_FLAG_CFGR2); + return; + case SYSCFG_SWPR: + qemu_log_mask(LOG_UNIMP, + "%s: Write protecting SRAM2 isn't supported\n", + __func__); + /* These bits are set by software and cleared only by reset.*/ + s->swpr |= value; + return; + case SYSCFG_SKR: + qemu_log_mask(LOG_UNIMP, + "%s: Erasing SRAM2 isn't supported\n", + __func__); + s->skr = value & ACTIVABLE_BITS_SKR; + return; + case SYSCFG_SWPR2: + qemu_log_mask(LOG_UNIMP, + "%s: Write protecting SRAM2 isn't supported\n", + __func__); + /* These bits are set by software and cleared only by reset.*/ + s->swpr2 |= value; + return; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr); + } +} + +static const MemoryRegionOps stm32l4x5_syscfg_ops = { + .read = stm32l4x5_syscfg_read, + .write = stm32l4x5_syscfg_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .impl.unaligned = false, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .valid.unaligned = false, +}; + +static void stm32l4x5_syscfg_init(Object *obj) +{ + Stm32l4x5SyscfgState *s = STM32L4X5_SYSCFG(obj); + + memory_region_init_io(&s->mmio, obj, &stm32l4x5_syscfg_ops, s, + TYPE_STM32L4X5_SYSCFG, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + + qdev_init_gpio_in(DEVICE(obj), stm32l4x5_syscfg_set_irq, + GPIO_NUM_PINS * NUM_GPIOS); + qdev_init_gpio_out(DEVICE(obj), s->gpio_out, GPIO_NUM_PINS); +} + +static const VMStateDescription vmstate_stm32l4x5_syscfg = { + .name = TYPE_STM32L4X5_SYSCFG, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(memrmp, Stm32l4x5SyscfgState), + VMSTATE_UINT32(cfgr1, Stm32l4x5SyscfgState), + VMSTATE_UINT32_ARRAY(exticr, Stm32l4x5SyscfgState, + SYSCFG_NUM_EXTICR), + VMSTATE_UINT32(scsr, Stm32l4x5SyscfgState), + VMSTATE_UINT32(cfgr2, Stm32l4x5SyscfgState), + VMSTATE_UINT32(swpr, Stm32l4x5SyscfgState), + VMSTATE_UINT32(skr, Stm32l4x5SyscfgState), + VMSTATE_UINT32(swpr2, Stm32l4x5SyscfgState), + VMSTATE_END_OF_LIST() + } +}; + +static void stm32l4x5_syscfg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->vmsd = &vmstate_stm32l4x5_syscfg; + rc->phases.hold = stm32l4x5_syscfg_hold_reset; +} + +static const TypeInfo stm32l4x5_syscfg_info[] = { + { + .name = TYPE_STM32L4X5_SYSCFG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Stm32l4x5SyscfgState), + .instance_init = stm32l4x5_syscfg_init, + .class_init = stm32l4x5_syscfg_class_init, + } +}; + +DEFINE_TYPES(stm32l4x5_syscfg_info) diff --git a/hw/misc/trace-events b/hw/misc/trace-events index 
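A worked example for the CFGR2 write handling just above: with cfgr2 currently 0x00000100 (SRAM2 parity error flag set) and a guest write of 0x00000103,

    set-only ECC lock bits:  0x00000100 |  (0x00000103 & 0x0000000F) = 0x00000103
    write-1-to-clear flag:   0x00000103 & ~(0x00000103 & 0x00000100) = 0x00000003

so two ECC lock bits latch on and the parity error flag is cleared, matching the set-only and write-1-to-clear semantics described in the comments.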
05ff692441b..5d241cb40aa 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -116,8 +116,8 @@ imx7_gpr_read(uint64_t offset) "addr 0x%08" PRIx64 imx7_gpr_write(uint64_t offset, uint64_t value) "addr 0x%08" PRIx64 "value 0x%08" PRIx64 # imx7_snvs.c -imx7_snvs_read(uint64_t offset, uint32_t value) "addr 0x%08" PRIx64 "value 0x%08" PRIx32 -imx7_snvs_write(uint64_t offset, uint32_t value) "addr 0x%08" PRIx64 "value 0x%08" PRIx32 +imx7_snvs_read(uint64_t offset, uint64_t value, unsigned size) "i.MX SNVS read: offset 0x%08" PRIx64 " value 0x%08" PRIx64 " size %u" +imx7_snvs_write(uint64_t offset, uint64_t value, unsigned size) "i.MX SNVS write: offset 0x%08" PRIx64 " value 0x%08" PRIx64 " size %u" # mos6522.c mos6522_set_counter(int index, unsigned int val) "T%d.counter=%d" @@ -163,6 +163,31 @@ stm32f4xx_exti_set_irq(int irq, int level) "Set EXTI: %d to %d" stm32f4xx_exti_read(uint64_t addr) "reg read: addr: 0x%" PRIx64 " " stm32f4xx_exti_write(uint64_t addr, uint64_t data) "reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 "" +# stm32l4x5_syscfg.c +stm32l4x5_syscfg_set_irq(int gpio, int line, int level) "irq from GPIO: %d, line: %d, level: %d" +stm32l4x5_syscfg_forward_exti(int irq) "irq %d forwarded to EXTI" +stm32l4x5_syscfg_read(uint64_t addr) "reg read: addr: 0x%" PRIx64 " " +stm32l4x5_syscfg_write(uint64_t addr, uint64_t data) "reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 "" + +# stm32l4x5_exti.c +stm32l4x5_exti_set_irq(int irq, int level) "Set EXTI: %d to %d" +stm32l4x5_exti_read(uint64_t addr, uint64_t data) "reg read: addr: 0x%" PRIx64 " val: 0x%" PRIx64 "" +stm32l4x5_exti_write(uint64_t addr, uint64_t data) "reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 "" + +# stm32l4x5_rcc.c +stm32l4x5_rcc_read(uint64_t addr, uint32_t data) "RCC: Read <0x%" PRIx64 "> -> 0x%" PRIx32 +stm32l4x5_rcc_write(uint64_t addr, uint32_t data) "RCC: Write <0x%" PRIx64 "> <- 0x%" PRIx32 +stm32l4x5_rcc_mux_enable(uint32_t mux_id) "RCC: Mux %d enabled" +stm32l4x5_rcc_mux_disable(uint32_t mux_id) "RCC: Mux %d disabled" +stm32l4x5_rcc_mux_set_factor(uint32_t mux_id, uint32_t old_multiplier, uint32_t new_multiplier, uint32_t old_divider, uint32_t new_divider) "RCC: Mux %d factor changed: multiplier (%u -> %u), divider (%u -> %u)" +stm32l4x5_rcc_mux_set_src(uint32_t mux_id, uint32_t old_src, uint32_t new_src) "RCC: Mux %d source changed: from %u to %u" +stm32l4x5_rcc_mux_update(uint32_t mux_id, uint32_t src, uint64_t src_freq, uint32_t multiplier, uint32_t divider) "RCC: Mux %d src %d update: src_freq %" PRIu64 " multiplier %" PRIu32 " divider %" PRIu32 +stm32l4x5_rcc_pll_set_vco_multiplier(uint32_t pll_id, uint32_t old_multiplier, uint32_t new_multiplier) "RCC: PLL %u: vco_multiplier changed (%u -> %u)" +stm32l4x5_rcc_pll_channel_enable(uint32_t pll_id, uint32_t channel_id) "RCC: PLL %u, channel %u enabled" +stm32l4x5_rcc_pll_channel_disable(uint32_t pll_id, uint32_t channel_id) "RCC: PLL %u, channel %u disabled" +stm32l4x5_rcc_pll_set_channel_divider(uint32_t pll_id, uint32_t channel_id, uint32_t old_divider, uint32_t new_divider) "RCC: PLL %u, channel %u: divider changed (%u -> %u)" +stm32l4x5_rcc_pll_update(uint32_t pll_id, uint32_t channel_id, uint64_t vco_freq, uint64_t old_freq, uint64_t new_freq) "RCC: PLL %d channel %d update: vco_freq %" PRIu64 " old_freq %" PRIu64 " new_freq %" PRIu64 + # tz-mpc.c tz_mpc_reg_read(uint32_t offset, uint64_t data, unsigned size) "TZ MPC regs read: offset 0x%x data 0x%" PRIx64 " size %u" tz_mpc_reg_write(uint32_t offset, uint64_t data, unsigned size) "TZ MPC regs 
write: offset 0x%x data 0x%" PRIx64 " size %u" @@ -302,10 +327,6 @@ grlib_apb_pnp_read(uint64_t addr, unsigned size, uint32_t value) "APB PnP read a led_set_intensity(const char *color, const char *desc, uint8_t intensity_percent) "LED desc:'%s' color:%s intensity: %u%%" led_change_intensity(const char *color, const char *desc, uint8_t old_intensity_percent, uint8_t new_intensity_percent) "LED desc:'%s' color:%s intensity %u%% -> %u%%" -# pca9552.c -pca955x_gpio_status(const char *description, const char *buf) "%s GPIOs 0-15 [%s]" -pca955x_gpio_change(const char *description, unsigned id, unsigned prev_state, unsigned current_state) "%s GPIO id:%u status: %u -> %u" - # bcm2835_cprman.c bcm2835_cprman_read(uint64_t offset, uint64_t value) "offset:0x%" PRIx64 " value:0x%" PRIx64 bcm2835_cprman_write(uint64_t offset, uint64_t value) "offset:0x%" PRIx64 " value:0x%" PRIx64 diff --git a/hw/misc/tz-mpc.c b/hw/misc/tz-mpc.c index 30481e1c909..92b994919be 100644 --- a/hw/misc/tz-mpc.c +++ b/hw/misc/tz-mpc.c @@ -574,7 +574,7 @@ static const VMStateDescription tz_mpc_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = tz_mpc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctrl, TZMPC), VMSTATE_UINT32(blk_idx, TZMPC), VMSTATE_UINT32(int_stat, TZMPC), diff --git a/hw/misc/tz-msc.c b/hw/misc/tz-msc.c index acbe94400ba..de5a3126cca 100644 --- a/hw/misc/tz-msc.c +++ b/hw/misc/tz-msc.c @@ -269,7 +269,7 @@ static const VMStateDescription tz_msc_vmstate = { .name = "tz-msc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(cfg_nonsec, TZMSC), VMSTATE_BOOL(cfg_sec_resp, TZMSC), VMSTATE_BOOL(irq_clear, TZMSC), diff --git a/hw/misc/tz-ppc.c b/hw/misc/tz-ppc.c index 36495c68e76..64507787209 100644 --- a/hw/misc/tz-ppc.c +++ b/hw/misc/tz-ppc.c @@ -290,7 +290,7 @@ static const VMStateDescription tz_ppc_vmstate = { .name = "tz-ppc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL_ARRAY(cfg_nonsec, TZPPC, 16), VMSTATE_BOOL_ARRAY(cfg_ap, TZPPC, 16), VMSTATE_BOOL(cfg_sec_resp, TZPPC), diff --git a/hw/misc/virt_ctrl.c b/hw/misc/virt_ctrl.c index e75d1e7e17b..1a6c744bac2 100644 --- a/hw/misc/virt_ctrl.c +++ b/hw/misc/virt_ctrl.c @@ -108,7 +108,7 @@ static const VMStateDescription vmstate_virt_ctrl = { .name = "virt-ctrl", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(irq_enabled, VirtCtrlState), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/vmcoreinfo.c b/hw/misc/vmcoreinfo.c index a9d718fc236..833773ade52 100644 --- a/hw/misc/vmcoreinfo.c +++ b/hw/misc/vmcoreinfo.c @@ -73,7 +73,7 @@ static const VMStateDescription vmstate_vmcoreinfo = { .name = "vmcoreinfo", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(has_vmcoreinfo, VMCoreInfoState), VMSTATE_UINT16(vmcoreinfo.host_format, VMCoreInfoState), VMSTATE_UINT16(vmcoreinfo.guest_format, VMCoreInfoState), diff --git a/hw/misc/xlnx-versal-cframe-reg.c b/hw/misc/xlnx-versal-cframe-reg.c index 8e8ec0715ab..a6ab287b019 100644 --- a/hw/misc/xlnx-versal-cframe-reg.c +++ b/hw/misc/xlnx-versal-cframe-reg.c @@ -697,7 +697,7 @@ static const VMStateDescription vmstate_cframe = { .name = "cframe", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_UINT32_ARRAY(data, XlnxCFrame, FRAME_NUM_WORDS), VMSTATE_END_OF_LIST() } @@ -707,7 +707,7 @@ static const VMStateDescription vmstate_cframe_reg = { .name = TYPE_XLNX_VERSAL_CFRAME_REG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFrameReg, 4), VMSTATE_UINT32_ARRAY(regs, XlnxVersalCFrameReg, CFRAME_REG_R_MAX), VMSTATE_BOOL(rowon, XlnxVersalCFrameReg), @@ -765,7 +765,7 @@ static const VMStateDescription vmstate_cframe_bcast_reg = { .name = TYPE_XLNX_VERSAL_CFRAME_BCAST_REG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFrameBcastReg, 4), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/xlnx-versal-cfu.c b/hw/misc/xlnx-versal-cfu.c index 8e588ac1d83..6bb82e51c15 100644 --- a/hw/misc/xlnx-versal-cfu.c +++ b/hw/misc/xlnx-versal-cfu.c @@ -463,7 +463,7 @@ static const VMStateDescription vmstate_cfu_apb = { .name = TYPE_XLNX_VERSAL_CFU_APB, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFUAPB, 4), VMSTATE_UINT32_ARRAY(regs, XlnxVersalCFUAPB, R_MAX), VMSTATE_UINT8(fdri_row_addr, XlnxVersalCFUAPB), @@ -475,7 +475,7 @@ static const VMStateDescription vmstate_cfu_fdro = { .name = TYPE_XLNX_VERSAL_CFU_FDRO, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO32(fdro_data, XlnxVersalCFUFDRO), VMSTATE_END_OF_LIST(), } @@ -485,7 +485,7 @@ static const VMStateDescription vmstate_cfu_sfr = { .name = TYPE_XLNX_VERSAL_CFU_SFR, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFUSFR, 4), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/xlnx-versal-crl.c b/hw/misc/xlnx-versal-crl.c index 767106b7a30..1f1762ef163 100644 --- a/hw/misc/xlnx-versal-crl.c +++ b/hw/misc/xlnx-versal-crl.c @@ -19,6 +19,7 @@ #include "hw/resettable.h" #include "target/arm/arm-powerctl.h" +#include "target/arm/multiprocessing.h" #include "hw/misc/xlnx-versal-crl.h" #ifndef XLNX_VERSAL_CRL_ERR_DEBUG @@ -67,9 +68,9 @@ static void crl_reset_cpu(XlnxVersalCRL *s, ARMCPU *armcpu, bool rst_old, bool rst_new) { if (rst_new) { - arm_set_cpu_off(armcpu->mp_affinity); + arm_set_cpu_off(arm_cpu_mp_affinity(armcpu)); } else { - arm_set_cpu_on_and_reset(armcpu->mp_affinity); + arm_set_cpu_on_and_reset(arm_cpu_mp_affinity(armcpu)); } } @@ -387,7 +388,7 @@ static const VMStateDescription vmstate_crl = { .name = TYPE_XLNX_VERSAL_CRL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxVersalCRL, CRL_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/xlnx-versal-pmc-iou-slcr.c b/hw/misc/xlnx-versal-pmc-iou-slcr.c index 07b7ebc2173..60e13a78ab8 100644 --- a/hw/misc/xlnx-versal-pmc-iou-slcr.c +++ b/hw/misc/xlnx-versal-pmc-iou-slcr.c @@ -1412,7 +1412,7 @@ static const VMStateDescription vmstate_pmc_iou_slcr = { .name = TYPE_XILINX_VERSAL_PMC_IOU_SLCR, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxVersalPmcIouSlcr, XILINX_VERSAL_PMC_IOU_SLCR_R_MAX), VMSTATE_END_OF_LIST(), diff --git a/hw/misc/xlnx-versal-trng.c b/hw/misc/xlnx-versal-trng.c index 4d41c262c48..6495188dc74 100644 --- 
a/hw/misc/xlnx-versal-trng.c +++ b/hw/misc/xlnx-versal-trng.c @@ -644,8 +644,7 @@ static void trng_prop_fault_event_set(Object *obj, Visitor *v, Property *prop = opaque; uint32_t *events = object_field_prop_ptr(obj, prop); - visit_type_uint32(v, name, events, errp); - if (*errp) { + if (!visit_type_uint32(v, name, events, errp)) { return; } @@ -674,7 +673,7 @@ static const VMStateDescription vmstate_trng = { .name = TYPE_XLNX_VERSAL_TRNG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(rand_count, XlnxVersalTRng), VMSTATE_UINT64(rand_reseed, XlnxVersalTRng), VMSTATE_UINT64(forced_prng_count, XlnxVersalTRng), diff --git a/hw/misc/xlnx-versal-xramc.c b/hw/misc/xlnx-versal-xramc.c index e5b719a0ed4..a5f78c190eb 100644 --- a/hw/misc/xlnx-versal-xramc.c +++ b/hw/misc/xlnx-versal-xramc.c @@ -212,7 +212,7 @@ static const VMStateDescription vmstate_xram_ctrl = { .name = TYPE_XLNX_XRAM_CTRL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxXramCtrl, XRAM_CTRL_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/xlnx-zynqmp-apu-ctrl.c b/hw/misc/xlnx-zynqmp-apu-ctrl.c index 3d2be95e6db..1d441b41dfe 100644 --- a/hw/misc/xlnx-zynqmp-apu-ctrl.c +++ b/hw/misc/xlnx-zynqmp-apu-ctrl.c @@ -218,7 +218,7 @@ static const VMStateDescription vmstate_zynqmp_apu = { .name = TYPE_XLNX_ZYNQMP_APU_CTRL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPAPUCtrl, APU_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/xlnx-zynqmp-crf.c b/hw/misc/xlnx-zynqmp-crf.c index 57bc8cf49ae..a83efb44e31 100644 --- a/hw/misc/xlnx-zynqmp-crf.c +++ b/hw/misc/xlnx-zynqmp-crf.c @@ -233,7 +233,7 @@ static const VMStateDescription vmstate_crf = { .name = TYPE_XLNX_ZYNQMP_CRF, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPCRF, CRF_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c index 41f38a98e98..d2ac2e77f26 100644 --- a/hw/misc/zynq_slcr.c +++ b/hw/misc/zynq_slcr.c @@ -603,7 +603,7 @@ static const VMStateDescription vmstate_zynq_slcr = { .name = "zynq_slcr", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, ZynqSLCRState, ZYNQ_SLCR_NUM_REGS), VMSTATE_CLOCK_V(ps_clk, ZynqSLCRState, 3), VMSTATE_END_OF_LIST() diff --git a/hw/net/allwinner-sun8i-emac.c b/hw/net/allwinner-sun8i-emac.c index cc350d40e59..108ae9c8535 100644 --- a/hw/net/allwinner-sun8i-emac.c +++ b/hw/net/allwinner-sun8i-emac.c @@ -851,7 +851,7 @@ static const VMStateDescription vmstate_aw_emac = { .version_id = 1, .minimum_version_id = 1, .post_load = allwinner_sun8i_emac_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(mii_phy_addr, AwSun8iEmacState), VMSTATE_UINT32(mii_cmd, AwSun8iEmacState), VMSTATE_UINT32(mii_data, AwSun8iEmacState), diff --git a/hw/net/allwinner_emac.c b/hw/net/allwinner_emac.c index e10965de140..989839784a9 100644 --- a/hw/net/allwinner_emac.c +++ b/hw/net/allwinner_emac.c @@ -472,7 +472,7 @@ static const VMStateDescription vmstate_mii = { .name = "rtl8201cp", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(bmcr, RTL8201CPState), 
VMSTATE_UINT16(bmsr, RTL8201CPState), VMSTATE_UINT16(anar, RTL8201CPState), @@ -495,7 +495,7 @@ static const VMStateDescription vmstate_aw_emac = { .version_id = 1, .minimum_version_id = 1, .post_load = aw_emac_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(mii, AwEmacState, 1, vmstate_mii, RTL8201CPState), VMSTATE_UINT32(ctl, AwEmacState), VMSTATE_UINT32(tx_mode, AwEmacState), diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c index 472ce9c8cfd..ec7bf562e57 100644 --- a/hw/net/cadence_gem.c +++ b/hw/net/cadence_gem.c @@ -1771,7 +1771,7 @@ static const VMStateDescription vmstate_cadence_gem = { .name = "cadence_gem", .version_id = 4, .minimum_version_id = 4, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, CadenceGEMState, CADENCE_GEM_MAXREG), VMSTATE_UINT16_ARRAY(phy_regs, CadenceGEMState, 32), VMSTATE_UINT8(phy_loop, CadenceGEMState), diff --git a/hw/net/can/can_kvaser_pci.c b/hw/net/can/can_kvaser_pci.c index 2cd90cef1e1..bf41e6b2612 100644 --- a/hw/net/can/can_kvaser_pci.c +++ b/hw/net/can/can_kvaser_pci.c @@ -266,7 +266,7 @@ static const VMStateDescription vmstate_kvaser_pci = { .name = "kvaser_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, KvaserPCIState), /* Load this before sja_state. */ VMSTATE_UINT32(s5920_intcsr, KvaserPCIState), diff --git a/hw/net/can/can_mioe3680_pci.c b/hw/net/can/can_mioe3680_pci.c index b9918773b3f..308b17e0c00 100644 --- a/hw/net/can/can_mioe3680_pci.c +++ b/hw/net/can/can_mioe3680_pci.c @@ -203,7 +203,7 @@ static const VMStateDescription vmstate_mioe3680_pci = { .name = "mioe3680_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, Mioe3680PCIState), VMSTATE_STRUCT(sja_state[0], Mioe3680PCIState, 0, vmstate_can_sja, CanSJA1000State), diff --git a/hw/net/can/can_pcm3680_pci.c b/hw/net/can/can_pcm3680_pci.c index 8ef3e4659cc..e4c8d93b984 100644 --- a/hw/net/can/can_pcm3680_pci.c +++ b/hw/net/can/can_pcm3680_pci.c @@ -204,7 +204,7 @@ static const VMStateDescription vmstate_pcm3680i_pci = { .name = "pcm3680i_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, Pcm3680iPCIState), VMSTATE_STRUCT(sja_state[0], Pcm3680iPCIState, 0, vmstate_can_sja, CanSJA1000State), diff --git a/hw/net/can/can_sja1000.c b/hw/net/can/can_sja1000.c index 575df7d2f8d..6694d7bfd84 100644 --- a/hw/net/can/can_sja1000.c +++ b/hw/net/can/can_sja1000.c @@ -929,7 +929,7 @@ const VMStateDescription vmstate_qemu_can_filter = { .name = "qemu_can_filter", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(can_id, qemu_can_filter), VMSTATE_UINT32(can_mask, qemu_can_filter), VMSTATE_END_OF_LIST() @@ -953,7 +953,7 @@ const VMStateDescription vmstate_can_sja = { .version_id = 1, .minimum_version_id = 1, .post_load = can_sja_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(mode, CanSJA1000State), VMSTATE_UINT8(status_pel, CanSJA1000State), diff --git a/hw/net/can/ctucan_core.c b/hw/net/can/ctucan_core.c index f2c3b6a7061..812b83e93e1 100644 --- a/hw/net/can/ctucan_core.c +++ b/hw/net/can/ctucan_core.c @@ -617,7 +617,7 @@ const VMStateDescription vmstate_qemu_ctucan_tx_buffer = { .name = "qemu_ctucan_tx_buffer", 
.version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(data, CtuCanCoreMsgBuffer, CTUCAN_CORE_MSG_MAX_LEN), VMSTATE_END_OF_LIST() } @@ -636,7 +636,7 @@ const VMStateDescription vmstate_ctucan = { .version_id = 1, .minimum_version_id = 1, .post_load = ctucan_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(mode_settings.u32, CtuCanCoreState), VMSTATE_UINT32(status.u32, CtuCanCoreState), VMSTATE_UINT32(int_stat.u32, CtuCanCoreState), diff --git a/hw/net/can/ctucan_pci.c b/hw/net/can/ctucan_pci.c index ea079e2af56..d8f7344ddc6 100644 --- a/hw/net/can/ctucan_pci.c +++ b/hw/net/can/ctucan_pci.c @@ -215,7 +215,7 @@ static const VMStateDescription vmstate_ctucan_pci = { .name = "ctucan_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, CtuCanPCIState), VMSTATE_STRUCT(ctucan_state[0], CtuCanPCIState, 0, vmstate_ctucan, CtuCanCoreState), diff --git a/hw/net/can/xlnx-versal-canfd.c b/hw/net/can/xlnx-versal-canfd.c index 5b8ce0a285e..47a14cfe633 100644 --- a/hw/net/can/xlnx-versal-canfd.c +++ b/hw/net/can/xlnx-versal-canfd.c @@ -2060,7 +2060,7 @@ static const VMStateDescription vmstate_canfd = { .name = TYPE_XILINX_CANFD, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxVersalCANFDState, XLNX_VERSAL_CANFD_R_MAX), VMSTATE_PTIMER(canfd_timer, XlnxVersalCANFDState), diff --git a/hw/net/can/xlnx-zynqmp-can.c b/hw/net/can/xlnx-zynqmp-can.c index f60e480c3ab..ca0ce4e8bbf 100644 --- a/hw/net/can/xlnx-zynqmp-can.c +++ b/hw/net/can/xlnx-zynqmp-can.c @@ -1159,7 +1159,7 @@ static const VMStateDescription vmstate_can = { .name = TYPE_XLNX_ZYNQMP_CAN, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO32(rx_fifo, XlnxZynqMPCANState), VMSTATE_FIFO32(tx_fifo, XlnxZynqMPCANState), VMSTATE_FIFO32(txhpb_fifo, XlnxZynqMPCANState), diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c index b16b18b3c3c..bf0652da1b4 100644 --- a/hw/net/dp8393x.c +++ b/hw/net/dp8393x.c @@ -924,7 +924,7 @@ static const VMStateDescription vmstate_dp8393x = { .name = "dp8393x", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT16_2DARRAY(cam, dp8393xState, 16, 3), VMSTATE_UINT16_ARRAY(regs, dp8393xState, SONIC_REG_COUNT), VMSTATE_END_OF_LIST() diff --git a/hw/net/e1000.c b/hw/net/e1000.c index 8ffe1077f19..43f3a4a7011 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -1437,7 +1437,7 @@ static const VMStateDescription vmstate_e1000_mit_state = { .name = "e1000/mit_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(mac_reg[RDTR], E1000State), VMSTATE_UINT32(mac_reg[RADV], E1000State), VMSTATE_UINT32(mac_reg[TADV], E1000State), @@ -1452,7 +1452,7 @@ static const VMStateDescription vmstate_e1000_full_mac_state = { .version_id = 1, .minimum_version_id = 1, .needed = e1000_full_mac_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000), VMSTATE_END_OF_LIST() } @@ -1464,7 +1464,7 @@ static const VMStateDescription vmstate_e1000_tx_tso_state = { .minimum_version_id = 1, .needed = e1000_tso_state_needed, .post_load = e1000_tx_tso_post_load, - .fields = 
(VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(tx.tso_props.ipcss, E1000State), VMSTATE_UINT8(tx.tso_props.ipcso, E1000State), VMSTATE_UINT16(tx.tso_props.ipcse, E1000State), @@ -1486,7 +1486,7 @@ static const VMStateDescription vmstate_e1000 = { .minimum_version_id = 1, .pre_save = e1000_pre_save, .post_load = e1000_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, E1000State), VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */ VMSTATE_UNUSED(4), /* Was mmio_base. */ @@ -1558,7 +1558,7 @@ static const VMStateDescription vmstate_e1000 = { E1000_VLAN_FILTER_TBL_SIZE), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_e1000_mit_state, &vmstate_e1000_full_mac_state, &vmstate_e1000_tx_tso_state, diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c index e41a6c10385..7c6f6029518 100644 --- a/hw/net/e1000e.c +++ b/hw/net/e1000e.c @@ -564,7 +564,7 @@ static const VMStateDescription e1000e_vmstate_tx = { .name = "e1000e-tx", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(sum_needed, struct e1000e_tx), VMSTATE_UINT8(props.ipcss, struct e1000e_tx), VMSTATE_UINT8(props.ipcso, struct e1000e_tx), @@ -588,7 +588,7 @@ static const VMStateDescription e1000e_vmstate_intr_timer = { .name = "e1000e-intr-timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, E1000IntrDelayTimer), VMSTATE_BOOL(running, E1000IntrDelayTimer), VMSTATE_END_OF_LIST() @@ -609,7 +609,7 @@ static const VMStateDescription e1000e_vmstate = { .minimum_version_id = 1, .pre_save = e1000e_pre_save, .post_load = e1000e_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, E1000EState), VMSTATE_MSIX(parent_obj, E1000EState), diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index e324c02dd58..3ae2a184d5d 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -123,14 +123,6 @@ e1000e_intmgr_timer_resume(E1000IntrDelayTimer *timer) } } -static void -e1000e_intmgr_timer_pause(E1000IntrDelayTimer *timer) -{ - if (timer->running) { - timer_del(timer->timer); - } -} - static inline void e1000e_intrmgr_stop_timer(E1000IntrDelayTimer *timer) { @@ -398,24 +390,6 @@ e1000e_intrmgr_resume(E1000ECore *core) } } -static void -e1000e_intrmgr_pause(E1000ECore *core) -{ - int i; - - e1000e_intmgr_timer_pause(&core->radv); - e1000e_intmgr_timer_pause(&core->rdtr); - e1000e_intmgr_timer_pause(&core->raid); - e1000e_intmgr_timer_pause(&core->tidv); - e1000e_intmgr_timer_pause(&core->tadv); - - e1000e_intmgr_timer_pause(&core->itr); - - for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { - e1000e_intmgr_timer_pause(&core->eitr[i]); - } -} - static void e1000e_intrmgr_reset(E1000ECore *core) { @@ -3334,12 +3308,6 @@ e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size) return 0; } -static inline void -e1000e_autoneg_pause(E1000ECore *core) -{ - timer_del(core->autoneg_timer); -} - static void e1000e_autoneg_resume(E1000ECore *core) { @@ -3351,22 +3319,6 @@ e1000e_autoneg_resume(E1000ECore *core) } } -static void -e1000e_vm_state_change(void *opaque, bool running, RunState state) -{ - E1000ECore *core = opaque; - - if (running) { - trace_e1000e_vm_state_running(); - e1000e_intrmgr_resume(core); - e1000e_autoneg_resume(core); - } else { - 
trace_e1000e_vm_state_stopped(); - e1000e_autoneg_pause(core); - e1000e_intrmgr_pause(core); - } -} - void e1000e_core_pci_realize(E1000ECore *core, const uint16_t *eeprom_templ, @@ -3379,9 +3331,6 @@ e1000e_core_pci_realize(E1000ECore *core, e1000e_autoneg_timer, core); e1000e_intrmgr_pci_realize(core); - core->vmstate = - qemu_add_vm_change_state_handler(e1000e_vm_state_change, core); - for (i = 0; i < E1000E_NUM_QUEUES; i++) { net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS); } @@ -3405,8 +3354,6 @@ e1000e_core_pci_uninit(E1000ECore *core) e1000e_intrmgr_pci_unint(core); - qemu_del_vm_change_state_handler(core->vmstate); - for (i = 0; i < E1000E_NUM_QUEUES; i++) { net_tx_pkt_uninit(core->tx[i].tx_pkt); } @@ -3576,5 +3523,12 @@ e1000e_core_post_load(E1000ECore *core) */ nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0; + /* + * we need to restart intrmgr timers, as an older version of + * QEMU can have stopped them before migration + */ + e1000e_intrmgr_resume(core); + e1000e_autoneg_resume(core); + return 0; } diff --git a/hw/net/e1000e_core.h b/hw/net/e1000e_core.h index 66b025cc43f..01510ca78b4 100644 --- a/hw/net/e1000e_core.h +++ b/hw/net/e1000e_core.h @@ -98,8 +98,6 @@ struct E1000Core { E1000IntrDelayTimer eitr[E1000E_MSIX_VEC_NUM]; - VMChangeStateEntry *vmstate; - uint32_t itr_guest_value; uint32_t eitr_guest_value[E1000E_MSIX_VEC_NUM]; diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c index 69e1c4bb891..d9a70c4544d 100644 --- a/hw/net/eepro100.c +++ b/hw/net/eepro100.c @@ -1772,7 +1772,7 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size) static const VMStateDescription vmstate_eepro100 = { .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, EEPRO100State), VMSTATE_UNUSED(32), VMSTATE_BUFFER(mult, EEPRO100State), diff --git a/hw/net/etraxfs_eth.c b/hw/net/etraxfs_eth.c index ba57a978d15..5faf20c782c 100644 --- a/hw/net/etraxfs_eth.c +++ b/hw/net/etraxfs_eth.c @@ -647,15 +647,14 @@ static void etraxfs_eth_class_init(ObjectClass *klass, void *data) /* Instantiate an ETRAXFS Ethernet MAC. 
*/ DeviceState * -etraxfs_eth_init(NICInfo *nd, hwaddr base, int phyaddr, +etraxfs_eth_init(hwaddr base, int phyaddr, struct etraxfs_dma_client *dma_out, struct etraxfs_dma_client *dma_in) { DeviceState *dev; - qemu_check_nic_model(nd, "fseth"); dev = qdev_new("etraxfs-eth"); - qdev_set_nic_properties(dev, nd); + qemu_configure_nic_device(dev, true, "fseth"); qdev_prop_set_uint32(dev, "phyaddr", phyaddr); /* diff --git a/hw/net/ftgmac100.c b/hw/net/ftgmac100.c index 78e594afa4e..74b6c3d9a75 100644 --- a/hw/net/ftgmac100.c +++ b/hw/net/ftgmac100.c @@ -1119,7 +1119,7 @@ static const VMStateDescription vmstate_ftgmac100 = { .name = TYPE_FTGMAC100, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(irq_state, FTGMAC100State), VMSTATE_UINT32(isr, FTGMAC100State), VMSTATE_UINT32(ier, FTGMAC100State), @@ -1304,7 +1304,7 @@ static const VMStateDescription vmstate_aspeed_mii = { .name = TYPE_ASPEED_MII, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(phycr, FTGMAC100State), VMSTATE_UINT32(phydata, FTGMAC100State), VMSTATE_END_OF_LIST() diff --git a/hw/net/i82596.c b/hw/net/i82596.c index a907f0df8c4..6cc8292a65a 100644 --- a/hw/net/i82596.c +++ b/hw/net/i82596.c @@ -713,7 +713,7 @@ const VMStateDescription vmstate_i82596 = { .name = "i82596", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(lnkst, I82596State), VMSTATE_TIMER_PTR(flush_queue_timer, I82596State), VMSTATE_END_OF_LIST() diff --git a/hw/net/igb.c b/hw/net/igb.c index 8089acfea41..9b37523d6df 100644 --- a/hw/net/igb.c +++ b/hw/net/igb.c @@ -488,12 +488,10 @@ static void igb_pci_uninit(PCIDevice *pci_dev) static void igb_qdev_reset_hold(Object *obj) { - PCIDevice *d = PCI_DEVICE(obj); IGBState *s = IGB(obj); trace_e1000e_cb_qdev_reset_hold(); - pcie_sriov_pf_disable_vfs(d); igb_core_reset(&s->core); } @@ -520,7 +518,7 @@ static const VMStateDescription igb_vmstate_tx_ctx = { .name = "igb-tx-ctx", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(vlan_macip_lens, struct e1000_adv_tx_context_desc), VMSTATE_UINT32(seqnum_seed, struct e1000_adv_tx_context_desc), VMSTATE_UINT32(type_tucmd_mlhl, struct e1000_adv_tx_context_desc), @@ -533,7 +531,7 @@ static const VMStateDescription igb_vmstate_tx = { .name = "igb-tx", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(ctx, struct igb_tx, 2, 0, igb_vmstate_tx_ctx, struct e1000_adv_tx_context_desc), VMSTATE_UINT32(first_cmd_type_len, struct igb_tx), @@ -548,7 +546,7 @@ static const VMStateDescription igb_vmstate_intr_timer = { .name = "igb-intr-timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, IGBIntrDelayTimer), VMSTATE_BOOL(running, IGBIntrDelayTimer), VMSTATE_END_OF_LIST() @@ -569,7 +567,7 @@ static const VMStateDescription igb_vmstate = { .minimum_version_id = 1, .pre_save = igb_pre_save, .post_load = igb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, IGBState), VMSTATE_MSIX(parent_obj, IGBState), diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c index 2a7a11aa9ed..bcd5f6cd9cd 100644 --- a/hw/net/igb_core.c +++ b/hw/net/igb_core.c @@ -160,14 +160,6 @@ 
igb_intmgr_timer_resume(IGBIntrDelayTimer *timer) } } -static void -igb_intmgr_timer_pause(IGBIntrDelayTimer *timer) -{ - if (timer->running) { - timer_del(timer->timer); - } -} - static void igb_intrmgr_on_msix_throttling_timer(void *opaque) { @@ -212,16 +204,6 @@ igb_intrmgr_resume(IGBCore *core) } } -static void -igb_intrmgr_pause(IGBCore *core) -{ - int i; - - for (i = 0; i < IGB_INTR_NUM; i++) { - igb_intmgr_timer_pause(&core->eitr[i]); - } -} - static void igb_intrmgr_reset(IGBCore *core) { @@ -4290,12 +4272,6 @@ igb_core_read(IGBCore *core, hwaddr addr, unsigned size) return 0; } -static inline void -igb_autoneg_pause(IGBCore *core) -{ - timer_del(core->autoneg_timer); -} - static void igb_autoneg_resume(IGBCore *core) { @@ -4307,22 +4283,6 @@ igb_autoneg_resume(IGBCore *core) } } -static void -igb_vm_state_change(void *opaque, bool running, RunState state) -{ - IGBCore *core = opaque; - - if (running) { - trace_e1000e_vm_state_running(); - igb_intrmgr_resume(core); - igb_autoneg_resume(core); - } else { - trace_e1000e_vm_state_stopped(); - igb_autoneg_pause(core); - igb_intrmgr_pause(core); - } -} - void igb_core_pci_realize(IGBCore *core, const uint16_t *eeprom_templ, @@ -4335,8 +4295,6 @@ igb_core_pci_realize(IGBCore *core, igb_autoneg_timer, core); igb_intrmgr_pci_realize(core); - core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core); - for (i = 0; i < IGB_NUM_QUEUES; i++) { net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS); } @@ -4360,8 +4318,6 @@ igb_core_pci_uninit(IGBCore *core) igb_intrmgr_pci_unint(core); - qemu_del_vm_change_state_handler(core->vmstate); - for (i = 0; i < IGB_NUM_QUEUES; i++) { net_tx_pkt_uninit(core->tx[i].tx_pkt); } @@ -4586,5 +4542,12 @@ igb_core_post_load(IGBCore *core) */ nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0; + /* + * we need to restart intrmgr timers, as an older version of + * QEMU can have stopped them before migration + */ + igb_intrmgr_resume(core); + igb_autoneg_resume(core); + return 0; } diff --git a/hw/net/igb_core.h b/hw/net/igb_core.h index bf8c46f26b5..d70b54e318f 100644 --- a/hw/net/igb_core.h +++ b/hw/net/igb_core.h @@ -90,8 +90,6 @@ struct IGBCore { IGBIntrDelayTimer eitr[IGB_INTR_NUM]; - VMChangeStateEntry *vmstate; - uint32_t eitr_guest_value[IGB_INTR_NUM]; uint8_t permanent_mac[ETH_ALEN]; diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c index 6881e3e4f05..cee84af7bab 100644 --- a/hw/net/imx_fec.c +++ b/hw/net/imx_fec.c @@ -195,7 +195,7 @@ static const VMStateDescription vmstate_imx_eth_txdescs = { .version_id = 1, .minimum_version_id = 1, .needed = imx_eth_is_multi_tx_ring, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tx_descriptor[1], IMXFECState), VMSTATE_UINT32(tx_descriptor[2], IMXFECState), VMSTATE_END_OF_LIST() @@ -206,7 +206,7 @@ static const VMStateDescription vmstate_imx_eth = { .name = TYPE_IMX_FEC, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX), VMSTATE_UINT32(rx_descriptor, IMXFECState), VMSTATE_UINT32(tx_descriptor[0], IMXFECState), @@ -217,7 +217,7 @@ static const VMStateDescription vmstate_imx_eth = { VMSTATE_UINT32(phy_int_mask, IMXFECState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_imx_eth_txdescs, NULL }, diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c index cf7b8c897a0..91d81b410b5 100644 --- a/hw/net/lan9118.c +++ 
b/hw/net/lan9118.c @@ -150,6 +150,12 @@ do { printf("lan9118: " fmt , ## __VA_ARGS__); } while (0) #define GPT_TIMER_EN 0x20000000 +/* + * The MAC Interface Layer (MIL), within the MAC, contains a 2K Byte transmit + * and a 128 Byte receive FIFO which is separate from the TX and RX FIFOs. + */ +#define MIL_TXFIFO_SIZE 2048 + enum tx_state { TX_IDLE, TX_B, @@ -166,14 +172,14 @@ typedef struct { int32_t pad; int32_t fifo_used; int32_t len; - uint8_t data[2048]; + uint8_t data[MIL_TXFIFO_SIZE]; } LAN9118Packet; static const VMStateDescription vmstate_lan9118_packet = { .name = "lan9118_packet", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(state, LAN9118Packet), VMSTATE_UINT32(cmd_a, LAN9118Packet), VMSTATE_UINT32(cmd_b, LAN9118Packet), @@ -182,7 +188,7 @@ static const VMStateDescription vmstate_lan9118_packet = { VMSTATE_INT32(pad, LAN9118Packet), VMSTATE_INT32(fifo_used, LAN9118Packet), VMSTATE_INT32(len, LAN9118Packet), - VMSTATE_UINT8_ARRAY(data, LAN9118Packet, 2048), + VMSTATE_UINT8_ARRAY(data, LAN9118Packet, MIL_TXFIFO_SIZE), VMSTATE_END_OF_LIST() } }; @@ -271,7 +277,7 @@ static const VMStateDescription vmstate_lan9118 = { .name = "lan9118", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(timer, lan9118_state), VMSTATE_UINT32(irq_cfg, lan9118_state), VMSTATE_UINT32(int_sts, lan9118_state), @@ -544,7 +550,7 @@ static ssize_t lan9118_receive(NetClientState *nc, const uint8_t *buf, return -1; } - if (size >= 2048 || size < 14) { + if (size >= MIL_TXFIFO_SIZE || size < 14) { return -1; } @@ -793,8 +799,22 @@ static void tx_fifo_push(lan9118_state *s, uint32_t val) /* Documentation is somewhat unclear on the ordering of bytes in FIFO words. Empirical results show it to be little-endian. */ - /* TODO: FIFO overflow checking. */ while (n--) { + if (s->txp->len == MIL_TXFIFO_SIZE) { + /* + * No more space in the FIFO. The datasheet is not + * precise about this case. We choose what is easiest + * to model: the packet is truncated, and TXE is raised. + * + * Note, it could be a fragmented packet, but we currently + * do not handle that (see earlier TX_B case). + */ + qemu_log_mask(LOG_GUEST_ERROR, + "MIL TX FIFO overrun, discarding %u byte%s\n", + n, n > 1 ? "s" : ""); + s->int_sts |= TXE_INT; + break; + } s->txp->data[s->txp->len] = val & 0xff; s->txp->len++; val >>= 8; @@ -1408,14 +1428,13 @@ static void lan9118_register_types(void) /* Legacy helper function. Should go away when machine config files are implemented. 
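The NIC-helper conversions in this patch (etraxfs_eth_init() above, lan9118_init() and lasi_82596_init() just below) all follow the same pattern: drop qemu_check_nic_model()/qdev_set_nic_properties() and let qemu_create_nic_device()/qemu_configure_nic_device() pick up the -nic configuration. A sketch of the same pattern for a made-up board helper; the device type "hypothetical-eth" and the "mini" alias are invented, only the call sequence mirrors the conversions here:

    static DeviceState *mini_board_eth_init(hwaddr base, qemu_irq irq)
    {
        DeviceState *dev = qemu_create_nic_device("hypothetical-eth", true, "mini");

        if (!dev) {
            return NULL;    /* no -nic option matched this device model */
        }
        sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
        sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
        return dev;
    }
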
*/ -void lan9118_init(NICInfo *nd, uint32_t base, qemu_irq irq) +void lan9118_init(uint32_t base, qemu_irq irq) { DeviceState *dev; SysBusDevice *s; - qemu_check_nic_model(nd, "lan9118"); dev = qdev_new(TYPE_LAN9118); - qdev_set_nic_properties(dev, nd); + qemu_configure_nic_device(dev, true, NULL); s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); sysbus_mmio_map(s, 0, base); diff --git a/hw/net/lance.c b/hw/net/lance.c index 4c5f01baad9..e1ed24c2cea 100644 --- a/hw/net/lance.c +++ b/hw/net/lance.c @@ -94,7 +94,7 @@ static const VMStateDescription vmstate_lance = { .name = "pcnet", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, SysBusPCNetState, 0, vmstate_pcnet, PCNetState), VMSTATE_END_OF_LIST() } diff --git a/hw/net/lasi_i82596.c b/hw/net/lasi_i82596.c index e37f7fabe95..fcf7fae9411 100644 --- a/hw/net/lasi_i82596.c +++ b/hw/net/lasi_i82596.c @@ -14,6 +14,7 @@ #include "qapi/error.h" #include "qemu/timer.h" #include "hw/sysbus.h" +#include "sysemu/sysemu.h" #include "net/eth.h" #include "hw/net/lasi_82596.h" #include "hw/net/i82596.h" @@ -99,7 +100,7 @@ static const VMStateDescription vmstate_lasi_82596 = { .name = "i82596", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, SysBusI82596State, 0, vmstate_i82596, I82596State), VMSTATE_END_OF_LIST() @@ -117,19 +118,21 @@ static void lasi_82596_realize(DeviceState *dev, Error **errp) i82596_common_init(dev, s, &net_lasi_82596_info); } -SysBusI82596State *lasi_82596_init(MemoryRegion *addr_space, - hwaddr hpa, qemu_irq lan_irq) +SysBusI82596State *lasi_82596_init(MemoryRegion *addr_space, hwaddr hpa, + qemu_irq lan_irq, gboolean match_default) { DeviceState *dev; SysBusI82596State *s; static const MACAddr HP_MAC = { .a = { 0x08, 0x00, 0x09, 0xef, 0x34, 0xf6 } }; - qemu_check_nic_model(&nd_table[0], TYPE_LASI_82596); - dev = qdev_new(TYPE_LASI_82596); + dev = qemu_create_nic_device(TYPE_LASI_82596, match_default, "lasi"); + if (!dev) { + return NULL; + } + s = SYSBUS_I82596(dev); s->state.irq = lan_irq; - qdev_set_nic_properties(dev, &nd_table[0]); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); s->state.conf.macaddr = HP_MAC; /* set HP MAC prefix */ diff --git a/hw/net/meson.build b/hw/net/meson.build index f64651c467e..b7426870e8d 100644 --- a/hw/net/meson.build +++ b/hw/net/meson.build @@ -33,12 +33,12 @@ system_ss.add(when: 'CONFIG_MARVELL_88W8618', if_true: files('mv88w8618_eth.c')) system_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_gem.c')) system_ss.add(when: 'CONFIG_STELLARIS_ENET', if_true: files('stellaris_enet.c')) system_ss.add(when: 'CONFIG_LANCE', if_true: files('lance.c')) -system_ss.add(when: 'CONFIG_LASI_I82596', if_true: files('lasi_i82596.c')) +system_ss.add(when: 'CONFIG_LASI_82596', if_true: files('lasi_i82596.c')) system_ss.add(when: 'CONFIG_I82596_COMMON', if_true: files('i82596.c')) system_ss.add(when: 'CONFIG_SUNHME', if_true: files('sunhme.c')) system_ss.add(when: 'CONFIG_FTGMAC100', if_true: files('ftgmac100.c')) system_ss.add(when: 'CONFIG_SUNGEM', if_true: files('sungem.c')) -system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_emc.c')) +system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_emc.c', 'npcm_gmac.c')) system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_eth.c')) system_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_fec.c')) @@ -50,7 +50,6 @@ specific_ss.add(when: 
'CONFIG_VIRTIO_NET', if_true: files('virtio-net.c')) if have_vhost_net system_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost_net.c'), if_false: files('vhost_net-stub.c')) - system_ss.add(when: 'CONFIG_ALL', if_true: files('vhost_net-stub.c')) else system_ss.add(files('vhost_net-stub.c')) endif @@ -69,7 +68,6 @@ system_ss.add(when: 'CONFIG_ROCKER', if_true: files( 'rocker/rocker_of_dpa.c', 'rocker/rocker_world.c', ), if_false: files('rocker/qmp-norocker.c')) -system_ss.add(when: 'CONFIG_ALL', if_true: files('rocker/qmp-norocker.c')) system_ss.add(files('rocker/rocker-hmp-cmds.c')) subdir('can') diff --git a/hw/net/mipsnet.c b/hw/net/mipsnet.c index 8e925de867c..df5101aed73 100644 --- a/hw/net/mipsnet.c +++ b/hw/net/mipsnet.c @@ -218,7 +218,7 @@ static const VMStateDescription vmstate_mipsnet = { .name = "mipsnet", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(busy, MIPSnetState), VMSTATE_UINT32(rx_count, MIPSnetState), VMSTATE_UINT32(rx_read, MIPSnetState), diff --git a/hw/net/msf2-emac.c b/hw/net/msf2-emac.c index 145a5e46ab4..c1fc10de2ab 100644 --- a/hw/net/msf2-emac.c +++ b/hw/net/msf2-emac.c @@ -557,7 +557,7 @@ static const VMStateDescription vmstate_msf2_emac = { .name = TYPE_MSS_EMAC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(mac_addr, MSF2EmacState, ETH_ALEN), VMSTATE_UINT32(rx_desc, MSF2EmacState), VMSTATE_UINT16_ARRAY(phy_regs, MSF2EmacState, PHY_MAX_REGS), diff --git a/hw/net/mv88w8618_eth.c b/hw/net/mv88w8618_eth.c index 2185f1131a9..96c65f4d462 100644 --- a/hw/net/mv88w8618_eth.c +++ b/hw/net/mv88w8618_eth.c @@ -358,7 +358,7 @@ static const VMStateDescription mv88w8618_eth_vmsd = { .name = "mv88w8618_eth", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(smir, mv88w8618_eth_state), VMSTATE_UINT32(icr, mv88w8618_eth_state), VMSTATE_UINT32(imr, mv88w8618_eth_state), diff --git a/hw/net/ne2000-isa.c b/hw/net/ne2000-isa.c index a79f7fad1f1..26980e087ee 100644 --- a/hw/net/ne2000-isa.c +++ b/hw/net/ne2000-isa.c @@ -53,7 +53,7 @@ static const VMStateDescription vmstate_isa_ne2000 = { .name = "ne2000", .version_id = 2, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(ne2000, ISANE2000State, 0, vmstate_ne2000, NE2000State), VMSTATE_END_OF_LIST() } diff --git a/hw/net/ne2000-pci.c b/hw/net/ne2000-pci.c index fee93c6ec0d..74773069c69 100644 --- a/hw/net/ne2000-pci.c +++ b/hw/net/ne2000-pci.c @@ -39,7 +39,7 @@ static const VMStateDescription vmstate_pci_ne2000 = { .name = "ne2000", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCINE2000State), VMSTATE_STRUCT(ne2000, PCINE2000State, 0, vmstate_ne2000, NE2000State), VMSTATE_END_OF_LIST() diff --git a/hw/net/ne2000.c b/hw/net/ne2000.c index d79c884d50a..b482c5f3af7 100644 --- a/hw/net/ne2000.c +++ b/hw/net/ne2000.c @@ -606,7 +606,7 @@ const VMStateDescription vmstate_ne2000 = { .version_id = 2, .minimum_version_id = 0, .post_load = ne2000_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_V(rxcr, NE2000State, 2), VMSTATE_UINT8(cmd, NE2000State), VMSTATE_UINT32(start, NE2000State), diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c index 2e5f58b3c9c..b7b1de816dc 100644 --- a/hw/net/net_tx_pkt.c +++ 
b/hw/net/net_tx_pkt.c @@ -141,6 +141,10 @@ bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt) uint32_t csum = 0; struct iovec *pl_start_frag = pkt->vec + NET_TX_PKT_PL_START_FRAG; + if (iov_size(pl_start_frag, pkt->payload_frags) < 8 + sizeof(csum)) { + return false; + } + if (iov_from_buf(pl_start_frag, pkt->payload_frags, 8, &csum, sizeof(csum)) < sizeof(csum)) { return false; } @@ -833,6 +837,7 @@ bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload, if (offload || gso_type == VIRTIO_NET_HDR_GSO_NONE) { if (!offload && pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + pkt->virt_hdr.flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM; net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG], pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1, pkt->payload_len); diff --git a/hw/net/npcm7xx_emc.c b/hw/net/npcm7xx_emc.c index 1d4e8f59f35..d1583b6f9b3 100644 --- a/hw/net/npcm7xx_emc.c +++ b/hw/net/npcm7xx_emc.c @@ -837,7 +837,7 @@ static const VMStateDescription vmstate_npcm7xx_emc = { .name = TYPE_NPCM7XX_EMC, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(emc_num, NPCM7xxEMCState), VMSTATE_UINT32_ARRAY(regs, NPCM7xxEMCState, NPCM7XX_NUM_EMC_REGS), VMSTATE_BOOL(tx_active, NPCM7xxEMCState), diff --git a/hw/net/npcm_gmac.c b/hw/net/npcm_gmac.c new file mode 100644 index 00000000000..1b71e2526e3 --- /dev/null +++ b/hw/net/npcm_gmac.c @@ -0,0 +1,942 @@ +/* + * Nuvoton NPCM7xx/8xx GMAC Module + * + * Copyright 2024 Google LLC + * Authors: + * Hao Wu + * Nabih Estefan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Unsupported/unimplemented features: + * - MII is not implemented, MII_ADDR.BUSY and MII_DATA always return zero + * - Precision timestamp (PTP) is not implemented. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/registerfields.h" +#include "hw/net/mii.h" +#include "hw/net/npcm_gmac.h" +#include "migration/vmstate.h" +#include "net/checksum.h" +#include "net/eth.h" +#include "net/net.h" +#include "qemu/cutils.h" +#include "qemu/log.h" +#include "qemu/units.h" +#include "sysemu/dma.h" +#include "trace.h" + +REG32(NPCM_DMA_BUS_MODE, 0x1000) +REG32(NPCM_DMA_XMT_POLL_DEMAND, 0x1004) +REG32(NPCM_DMA_RCV_POLL_DEMAND, 0x1008) +REG32(NPCM_DMA_RX_BASE_ADDR, 0x100c) +REG32(NPCM_DMA_TX_BASE_ADDR, 0x1010) +REG32(NPCM_DMA_STATUS, 0x1014) +REG32(NPCM_DMA_CONTROL, 0x1018) +REG32(NPCM_DMA_INTR_ENA, 0x101c) +REG32(NPCM_DMA_MISSED_FRAME_CTR, 0x1020) +REG32(NPCM_DMA_HOST_TX_DESC, 0x1048) +REG32(NPCM_DMA_HOST_RX_DESC, 0x104c) +REG32(NPCM_DMA_CUR_TX_BUF_ADDR, 0x1050) +REG32(NPCM_DMA_CUR_RX_BUF_ADDR, 0x1054) +REG32(NPCM_DMA_HW_FEATURE, 0x1058) + +REG32(NPCM_GMAC_MAC_CONFIG, 0x0) +REG32(NPCM_GMAC_FRAME_FILTER, 0x4) +REG32(NPCM_GMAC_HASH_HIGH, 0x8) +REG32(NPCM_GMAC_HASH_LOW, 0xc) +REG32(NPCM_GMAC_MII_ADDR, 0x10) +REG32(NPCM_GMAC_MII_DATA, 0x14) +REG32(NPCM_GMAC_FLOW_CTRL, 0x18) +REG32(NPCM_GMAC_VLAN_FLAG, 0x1c) +REG32(NPCM_GMAC_VERSION, 0x20) +REG32(NPCM_GMAC_WAKEUP_FILTER, 0x28) +REG32(NPCM_GMAC_PMT, 0x2c) +REG32(NPCM_GMAC_LPI_CTRL, 0x30) +REG32(NPCM_GMAC_TIMER_CTRL, 0x34) +REG32(NPCM_GMAC_INT_STATUS, 0x38) +REG32(NPCM_GMAC_INT_MASK, 0x3c) +REG32(NPCM_GMAC_MAC0_ADDR_HI, 0x40) +REG32(NPCM_GMAC_MAC0_ADDR_LO, 0x44) +REG32(NPCM_GMAC_MAC1_ADDR_HI, 0x48) +REG32(NPCM_GMAC_MAC1_ADDR_LO, 0x4c) +REG32(NPCM_GMAC_MAC2_ADDR_HI, 0x50) +REG32(NPCM_GMAC_MAC2_ADDR_LO, 0x54) +REG32(NPCM_GMAC_MAC3_ADDR_HI, 0x58) +REG32(NPCM_GMAC_MAC3_ADDR_LO, 0x5c) +REG32(NPCM_GMAC_RGMII_STATUS, 0xd8) +REG32(NPCM_GMAC_WATCHDOG, 0xdc) +REG32(NPCM_GMAC_PTP_TCR, 0x700) +REG32(NPCM_GMAC_PTP_SSIR, 0x704) +REG32(NPCM_GMAC_PTP_STSR, 0x708) +REG32(NPCM_GMAC_PTP_STNSR, 0x70c) +REG32(NPCM_GMAC_PTP_STSUR, 0x710) +REG32(NPCM_GMAC_PTP_STNSUR, 0x714) +REG32(NPCM_GMAC_PTP_TAR, 0x718) +REG32(NPCM_GMAC_PTP_TTSR, 0x71c) + +/* Register Fields */ +#define NPCM_GMAC_MII_ADDR_BUSY BIT(0) +#define NPCM_GMAC_MII_ADDR_WRITE BIT(1) +#define NPCM_GMAC_MII_ADDR_GR(rv) extract16((rv), 6, 5) +#define NPCM_GMAC_MII_ADDR_PA(rv) extract16((rv), 11, 5) + +#define NPCM_GMAC_INT_MASK_LPIIM BIT(10) +#define NPCM_GMAC_INT_MASK_PMTM BIT(3) +#define NPCM_GMAC_INT_MASK_RGIM BIT(0) + +#define NPCM_DMA_BUS_MODE_SWR BIT(0) + +static const uint32_t npcm_gmac_cold_reset_values[NPCM_GMAC_NR_REGS] = { + /* Reduce version to 3.2 so that the kernel can enable interrupt. 
*/ + [R_NPCM_GMAC_VERSION] = 0x00001032, + [R_NPCM_GMAC_TIMER_CTRL] = 0x03e80000, + [R_NPCM_GMAC_MAC0_ADDR_HI] = 0x8000ffff, + [R_NPCM_GMAC_MAC0_ADDR_LO] = 0xffffffff, + [R_NPCM_GMAC_MAC1_ADDR_HI] = 0x0000ffff, + [R_NPCM_GMAC_MAC1_ADDR_LO] = 0xffffffff, + [R_NPCM_GMAC_MAC2_ADDR_HI] = 0x0000ffff, + [R_NPCM_GMAC_MAC2_ADDR_LO] = 0xffffffff, + [R_NPCM_GMAC_MAC3_ADDR_HI] = 0x0000ffff, + [R_NPCM_GMAC_MAC3_ADDR_LO] = 0xffffffff, + [R_NPCM_GMAC_PTP_TCR] = 0x00002000, + [R_NPCM_DMA_BUS_MODE] = 0x00020101, + [R_NPCM_DMA_HW_FEATURE] = 0x100d4f37, +}; + +static const uint16_t phy_reg_init[] = { + [MII_BMCR] = MII_BMCR_AUTOEN | MII_BMCR_FD | MII_BMCR_SPEED1000, + [MII_BMSR] = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD | + MII_BMSR_10T_HD | MII_BMSR_EXTSTAT | MII_BMSR_AUTONEG | + MII_BMSR_LINK_ST | MII_BMSR_EXTCAP, + [MII_PHYID1] = 0x0362, + [MII_PHYID2] = 0x5e6a, + [MII_ANAR] = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD | + MII_ANAR_10 | MII_ANAR_CSMACD, + [MII_ANLPAR] = MII_ANLPAR_ACK | MII_ANLPAR_PAUSE | + MII_ANLPAR_TXFD | MII_ANLPAR_TX | MII_ANLPAR_10FD | + MII_ANLPAR_10 | MII_ANLPAR_CSMACD, + [MII_ANER] = 0x64 | MII_ANER_NWAY, + [MII_ANNP] = 0x2001, + [MII_CTRL1000] = MII_CTRL1000_FULL, + [MII_STAT1000] = MII_STAT1000_FULL, + [MII_EXTSTAT] = 0x3000, /* 1000BASTE_T full-duplex capable */ +}; + +static void npcm_gmac_soft_reset(NPCMGMACState *gmac) +{ + memcpy(gmac->regs, npcm_gmac_cold_reset_values, + NPCM_GMAC_NR_REGS * sizeof(uint32_t)); + /* Clear reset bits */ + gmac->regs[R_NPCM_DMA_BUS_MODE] &= ~NPCM_DMA_BUS_MODE_SWR; +} + +static void gmac_phy_set_link(NPCMGMACState *gmac, bool active) +{ + /* Autonegotiation status mirrors link status. */ + if (active) { + gmac->phy_regs[0][MII_BMSR] |= (MII_BMSR_LINK_ST | MII_BMSR_AN_COMP); + } else { + gmac->phy_regs[0][MII_BMSR] &= ~(MII_BMSR_LINK_ST | MII_BMSR_AN_COMP); + } +} + +static bool gmac_can_receive(NetClientState *nc) +{ + NPCMGMACState *gmac = NPCM_GMAC(qemu_get_nic_opaque(nc)); + + /* If GMAC receive is disabled. */ + if (!(gmac->regs[R_NPCM_GMAC_MAC_CONFIG] & NPCM_GMAC_MAC_CONFIG_RX_EN)) { + return false; + } + + /* If GMAC DMA RX is stopped. 
*/ + if (!(gmac->regs[R_NPCM_DMA_CONTROL] & NPCM_DMA_CONTROL_START_STOP_RX)) { + return false; + } + return true; +} + +/* + * Function that updates the GMAC IRQ + * It find the logical OR of the enabled bits for NIS (if enabled) + * It find the logical OR of the enabled bits for AIS (if enabled) + */ +static void gmac_update_irq(NPCMGMACState *gmac) +{ + /* + * Check if the normal interrupts summary is enabled + * if so, add the bits for the summary that are enabled + */ + if (gmac->regs[R_NPCM_DMA_INTR_ENA] & gmac->regs[R_NPCM_DMA_STATUS] & + (NPCM_DMA_INTR_ENAB_NIE_BITS)) { + gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_NIS; + } + /* + * Check if the abnormal interrupts summary is enabled + * if so, add the bits for the summary that are enabled + */ + if (gmac->regs[R_NPCM_DMA_INTR_ENA] & gmac->regs[R_NPCM_DMA_STATUS] & + (NPCM_DMA_INTR_ENAB_AIE_BITS)) { + gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_AIS; + } + + /* Get the logical OR of both normal and abnormal interrupts */ + int level = !!((gmac->regs[R_NPCM_DMA_STATUS] & + gmac->regs[R_NPCM_DMA_INTR_ENA] & + NPCM_DMA_STATUS_NIS) | + (gmac->regs[R_NPCM_DMA_STATUS] & + gmac->regs[R_NPCM_DMA_INTR_ENA] & + NPCM_DMA_STATUS_AIS)); + + /* Set the IRQ */ + trace_npcm_gmac_update_irq(DEVICE(gmac)->canonical_path, + gmac->regs[R_NPCM_DMA_STATUS], + gmac->regs[R_NPCM_DMA_INTR_ENA], + level); + qemu_set_irq(gmac->irq, level); +} + +static int gmac_read_rx_desc(dma_addr_t addr, struct NPCMGMACRxDesc *desc) +{ + if (dma_memory_read(&address_space_memory, addr, desc, + sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%" + HWADDR_PRIx "\n", __func__, addr); + return -1; + } + desc->rdes0 = le32_to_cpu(desc->rdes0); + desc->rdes1 = le32_to_cpu(desc->rdes1); + desc->rdes2 = le32_to_cpu(desc->rdes2); + desc->rdes3 = le32_to_cpu(desc->rdes3); + return 0; +} + +static int gmac_write_rx_desc(dma_addr_t addr, struct NPCMGMACRxDesc *desc) +{ + struct NPCMGMACRxDesc le_desc; + le_desc.rdes0 = cpu_to_le32(desc->rdes0); + le_desc.rdes1 = cpu_to_le32(desc->rdes1); + le_desc.rdes2 = cpu_to_le32(desc->rdes2); + le_desc.rdes3 = cpu_to_le32(desc->rdes3); + if (dma_memory_write(&address_space_memory, addr, &le_desc, + sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%" + HWADDR_PRIx "\n", __func__, addr); + return -1; + } + return 0; +} + +static int gmac_read_tx_desc(dma_addr_t addr, struct NPCMGMACTxDesc *desc) +{ + if (dma_memory_read(&address_space_memory, addr, desc, + sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%" + HWADDR_PRIx "\n", __func__, addr); + return -1; + } + desc->tdes0 = le32_to_cpu(desc->tdes0); + desc->tdes1 = le32_to_cpu(desc->tdes1); + desc->tdes2 = le32_to_cpu(desc->tdes2); + desc->tdes3 = le32_to_cpu(desc->tdes3); + return 0; +} + +static int gmac_write_tx_desc(dma_addr_t addr, struct NPCMGMACTxDesc *desc) +{ + struct NPCMGMACTxDesc le_desc; + le_desc.tdes0 = cpu_to_le32(desc->tdes0); + le_desc.tdes1 = cpu_to_le32(desc->tdes1); + le_desc.tdes2 = cpu_to_le32(desc->tdes2); + le_desc.tdes3 = cpu_to_le32(desc->tdes3); + if (dma_memory_write(&address_space_memory, addr, &le_desc, + sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%" + HWADDR_PRIx "\n", __func__, addr); + return -1; + } + return 0; +} + +static int gmac_rx_transfer_frame_to_buffer(uint32_t rx_buf_len, + uint32_t 
*left_frame, + uint32_t rx_buf_addr, + bool *eof_transferred, + const uint8_t **frame_ptr, + uint16_t *transferred) +{ + uint32_t to_transfer; + /* + * Check that buffer is bigger than the frame being transfered + * If bigger then transfer only whats left of frame + * Else, fill frame with all the content possible + */ + if (rx_buf_len >= *left_frame) { + to_transfer = *left_frame; + *eof_transferred = true; + } else { + to_transfer = rx_buf_len; + } + + /* write frame part to memory */ + if (dma_memory_write(&address_space_memory, (uint64_t) rx_buf_addr, + *frame_ptr, to_transfer, MEMTXATTRS_UNSPECIFIED)) { + return -1; + } + + /* update frame pointer and size of whats left of frame */ + *frame_ptr += to_transfer; + *left_frame -= to_transfer; + *transferred += to_transfer; + + return 0; +} + +static void gmac_dma_set_state(NPCMGMACState *gmac, int shift, uint32_t state) +{ + gmac->regs[R_NPCM_DMA_STATUS] = deposit32(gmac->regs[R_NPCM_DMA_STATUS], + shift, 3, state); +} + +static ssize_t gmac_receive(NetClientState *nc, const uint8_t *buf, size_t len) +{ + /* + * Comments have steps that relate to the + * receiving process steps in pg 386 + */ + NPCMGMACState *gmac = NPCM_GMAC(qemu_get_nic_opaque(nc)); + uint32_t left_frame = len; + const uint8_t *frame_ptr = buf; + uint32_t desc_addr; + uint32_t rx_buf_len, rx_buf_addr; + struct NPCMGMACRxDesc rx_desc; + uint16_t transferred = 0; + bool eof_transferred = false; + + trace_npcm_gmac_packet_receive(DEVICE(gmac)->canonical_path, len); + if (!gmac_can_receive(nc)) { + qemu_log_mask(LOG_GUEST_ERROR, "GMAC Currently is not able for Rx"); + return -1; + } + if (!gmac->regs[R_NPCM_DMA_HOST_RX_DESC]) { + gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = + NPCM_DMA_HOST_RX_DESC_MASK(gmac->regs[R_NPCM_DMA_RX_BASE_ADDR]); + } + desc_addr = NPCM_DMA_HOST_RX_DESC_MASK(gmac->regs[R_NPCM_DMA_HOST_RX_DESC]); + + /* step 1 */ + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_RX_RUNNING_FETCHING_STATE); + trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path, desc_addr); + if (gmac_read_rx_desc(desc_addr, &rx_desc)) { + qemu_log_mask(LOG_GUEST_ERROR, "RX Descriptor @ 0x%x cant be read\n", + desc_addr); + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_RX_SUSPENDED_STATE); + return -1; + } + + /* step 2 */ + if (!(rx_desc.rdes0 & RX_DESC_RDES0_OWN)) { + qemu_log_mask(LOG_GUEST_ERROR, + "RX Descriptor @ 0x%x is owned by software\n", + desc_addr); + gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RU; + gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RI; + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_RX_SUSPENDED_STATE); + gmac_update_irq(gmac); + return len; + } + /* step 3 */ + /* + * TODO -- + * Implement all frame filtering and processing (with its own interrupts) + */ + trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc, + rx_desc.rdes0, rx_desc.rdes1, rx_desc.rdes2, + rx_desc.rdes3); + /* Clear rdes0 for the incoming descriptor and set FS in first descriptor.*/ + rx_desc.rdes0 = RX_DESC_RDES0_FIRST_DESC_MASK; + + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_RX_RUNNING_TRANSFERRING_STATE); + + /* Pad the frame with FCS as the kernel driver will strip it away. 
*/ + left_frame += ETH_FCS_LEN; + + /* repeat while we still have frame to transfer to memory */ + while (!eof_transferred) { + /* Return descriptor no matter what happens */ + rx_desc.rdes0 &= ~RX_DESC_RDES0_OWN; + /* Set the frame to be an IPv4/IPv6 frame. */ + rx_desc.rdes0 |= RX_DESC_RDES0_FRM_TYPE_MASK; + + /* step 4 */ + rx_buf_len = RX_DESC_RDES1_BFFR1_SZ_MASK(rx_desc.rdes1); + rx_buf_addr = rx_desc.rdes2; + gmac->regs[R_NPCM_DMA_CUR_RX_BUF_ADDR] = rx_buf_addr; + gmac_rx_transfer_frame_to_buffer(rx_buf_len, &left_frame, rx_buf_addr, + &eof_transferred, &frame_ptr, + &transferred); + + trace_npcm_gmac_packet_receiving_buffer(DEVICE(gmac)->canonical_path, + rx_buf_len, rx_buf_addr); + /* if we still have frame left and the second buffer is not chained */ + if (!(rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) && \ + !eof_transferred) { + /* repeat process from above on buffer 2 */ + rx_buf_len = RX_DESC_RDES1_BFFR2_SZ_MASK(rx_desc.rdes1); + rx_buf_addr = rx_desc.rdes3; + gmac->regs[R_NPCM_DMA_CUR_RX_BUF_ADDR] = rx_buf_addr; + gmac_rx_transfer_frame_to_buffer(rx_buf_len, &left_frame, + rx_buf_addr, &eof_transferred, + &frame_ptr, &transferred); + trace_npcm_gmac_packet_receiving_buffer( \ + DEVICE(gmac)->canonical_path, + rx_buf_len, rx_buf_addr); + } + /* update address for descriptor */ + gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = rx_buf_addr; + /* Return descriptor */ + rx_desc.rdes0 &= ~RX_DESC_RDES0_OWN; + /* Update frame length transferred */ + rx_desc.rdes0 |= ((uint32_t)transferred) + << RX_DESC_RDES0_FRAME_LEN_SHIFT; + trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc, + rx_desc.rdes0, rx_desc.rdes1, + rx_desc.rdes2, rx_desc.rdes3); + + /* step 5 */ + gmac_write_rx_desc(desc_addr, &rx_desc); + trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, + &rx_desc, rx_desc.rdes0, + rx_desc.rdes1, rx_desc.rdes2, + rx_desc.rdes3); + /* read new descriptor into rx_desc if needed*/ + if (!eof_transferred) { + /* Get next descriptor address (chained or sequential) */ + if (rx_desc.rdes1 & RX_DESC_RDES1_RC_END_RING_MASK) { + desc_addr = gmac->regs[R_NPCM_DMA_RX_BASE_ADDR]; + } else if (rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) { + desc_addr = rx_desc.rdes3; + } else { + desc_addr += sizeof(rx_desc); + } + trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path, + desc_addr); + if (gmac_read_rx_desc(desc_addr, &rx_desc)) { + qemu_log_mask(LOG_GUEST_ERROR, + "RX Descriptor @ 0x%x cant be read\n", + desc_addr); + gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RU; + gmac_update_irq(gmac); + return len; + } + + /* step 6 */ + if (!(rx_desc.rdes0 & RX_DESC_RDES0_OWN)) { + if (!(gmac->regs[R_NPCM_DMA_CONTROL] & \ + NPCM_DMA_CONTROL_FLUSH_MASK)) { + rx_desc.rdes0 |= RX_DESC_RDES0_DESC_ERR_MASK; + } + eof_transferred = true; + } + /* Clear rdes0 for the incoming descriptor */ + rx_desc.rdes0 = 0; + } + } + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_RX_RUNNING_CLOSING_STATE); + + rx_desc.rdes0 |= RX_DESC_RDES0_LAST_DESC_MASK; + if (!(rx_desc.rdes1 & RX_DESC_RDES1_DIS_INTR_COMP_MASK)) { + gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RI; + gmac_update_irq(gmac); + } + trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc, + rx_desc.rdes0, rx_desc.rdes1, rx_desc.rdes2, + rx_desc.rdes3); + + /* step 8 */ + gmac->regs[R_NPCM_DMA_CONTROL] |= NPCM_DMA_CONTROL_FLUSH_MASK; + + /* step 9 */ + trace_npcm_gmac_packet_received(DEVICE(gmac)->canonical_path, left_frame); + gmac_dma_set_state(gmac, 
NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE); + gmac_write_rx_desc(desc_addr, &rx_desc); + + /* Get next descriptor address (chained or sequential) */ + if (rx_desc.rdes1 & RX_DESC_RDES1_RC_END_RING_MASK) { + desc_addr = gmac->regs[R_NPCM_DMA_RX_BASE_ADDR]; + } else if (rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) { + desc_addr = rx_desc.rdes3; + } else { + desc_addr += sizeof(rx_desc); + } + gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = desc_addr; + return len; +} + +static int gmac_tx_get_csum(uint32_t tdes1) +{ + uint32_t mask = TX_DESC_TDES1_CHKSM_INS_CTRL_MASK(tdes1); + int csum = 0; + + if (likely(mask > 0)) { + csum |= CSUM_IP; + } + if (likely(mask > 1)) { + csum |= CSUM_TCP | CSUM_UDP; + } + + return csum; +} + +static void gmac_try_send_next_packet(NPCMGMACState *gmac) +{ + /* + * Comments about steps refer to steps for + * transmitting in page 384 of datasheet + */ + uint16_t tx_buffer_size = 2048; + g_autofree uint8_t *tx_send_buffer = g_malloc(tx_buffer_size); + uint32_t desc_addr; + struct NPCMGMACTxDesc tx_desc; + uint32_t tx_buf_addr, tx_buf_len; + uint16_t length = 0; + uint8_t *buf = tx_send_buffer; + uint32_t prev_buf_size = 0; + int csum = 0; + + /* steps 1&2 */ + if (!gmac->regs[R_NPCM_DMA_HOST_TX_DESC]) { + gmac->regs[R_NPCM_DMA_HOST_TX_DESC] = + NPCM_DMA_HOST_TX_DESC_MASK(gmac->regs[R_NPCM_DMA_TX_BASE_ADDR]); + } + desc_addr = gmac->regs[R_NPCM_DMA_HOST_TX_DESC]; + + while (true) { + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_TX_RUNNING_FETCHING_STATE); + if (gmac_read_tx_desc(desc_addr, &tx_desc)) { + qemu_log_mask(LOG_GUEST_ERROR, + "TX Descriptor @ 0x%x can't be read\n", + desc_addr); + return; + } + /* step 3 */ + + trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path, + desc_addr); + trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &tx_desc, + tx_desc.tdes0, tx_desc.tdes1, tx_desc.tdes2, tx_desc.tdes3); + + /* 1 = DMA Owned, 0 = Software Owned */ + if (!(tx_desc.tdes0 & TX_DESC_TDES0_OWN)) { + qemu_log_mask(LOG_GUEST_ERROR, + "TX Descriptor @ 0x%x is owned by software\n", + desc_addr); + gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TU; + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_TX_SUSPENDED_STATE); + gmac_update_irq(gmac); + return; + } + + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_TX_RUNNING_READ_STATE); + /* Give the descriptor back regardless of what happens. */ + tx_desc.tdes0 &= ~TX_DESC_TDES0_OWN; + + if (tx_desc.tdes1 & TX_DESC_TDES1_FIRST_SEG_MASK) { + csum = gmac_tx_get_csum(tx_desc.tdes1); + } + + /* step 4 */ + tx_buf_addr = tx_desc.tdes2; + gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr; + tx_buf_len = TX_DESC_TDES1_BFFR1_SZ_MASK(tx_desc.tdes1); + buf = &tx_send_buffer[prev_buf_size]; + + if ((prev_buf_size + tx_buf_len) > sizeof(buf)) { + tx_buffer_size = prev_buf_size + tx_buf_len; + tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size); + buf = &tx_send_buffer[prev_buf_size]; + } + + /* step 5 */ + if (dma_memory_read(&address_space_memory, tx_buf_addr, buf, + tx_buf_len, MEMTXATTRS_UNSPECIFIED)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n", + __func__, tx_buf_addr); + return; + } + length += tx_buf_len; + prev_buf_size += tx_buf_len; + + /* If not chained we'll have a second buffer. 
*/ + if (!(tx_desc.tdes1 & TX_DESC_TDES1_SEC_ADDR_CHND_MASK)) { + tx_buf_addr = tx_desc.tdes3; + gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr; + tx_buf_len = TX_DESC_TDES1_BFFR2_SZ_MASK(tx_desc.tdes1); + buf = &tx_send_buffer[prev_buf_size]; + + if ((prev_buf_size + tx_buf_len) > sizeof(buf)) { + tx_buffer_size = prev_buf_size + tx_buf_len; + tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size); + buf = &tx_send_buffer[prev_buf_size]; + } + + if (dma_memory_read(&address_space_memory, tx_buf_addr, buf, + tx_buf_len, MEMTXATTRS_UNSPECIFIED)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Failed to read packet @ 0x%x\n", + __func__, tx_buf_addr); + return; + } + length += tx_buf_len; + prev_buf_size += tx_buf_len; + } + if (tx_desc.tdes1 & TX_DESC_TDES1_LAST_SEG_MASK) { + net_checksum_calculate(tx_send_buffer, length, csum); + qemu_send_packet(qemu_get_queue(gmac->nic), tx_send_buffer, length); + trace_npcm_gmac_packet_sent(DEVICE(gmac)->canonical_path, length); + buf = tx_send_buffer; + length = 0; + } + + /* step 6 */ + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_TX_RUNNING_CLOSING_STATE); + gmac_write_tx_desc(desc_addr, &tx_desc); + if (tx_desc.tdes1 & TX_DESC_TDES1_TX_END_RING_MASK) { + desc_addr = gmac->regs[R_NPCM_DMA_TX_BASE_ADDR]; + } else if (tx_desc.tdes1 & TX_DESC_TDES1_SEC_ADDR_CHND_MASK) { + desc_addr = tx_desc.tdes3; + } else { + desc_addr += sizeof(tx_desc); + } + gmac->regs[R_NPCM_DMA_HOST_TX_DESC] = desc_addr; + + /* step 7 */ + if (tx_desc.tdes1 & TX_DESC_TDES1_INTERR_COMP_MASK) { + gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TI; + gmac_update_irq(gmac); + } + } +} + +static void gmac_cleanup(NetClientState *nc) +{ + /* Nothing to do yet. */ +} + +static void gmac_set_link(NetClientState *nc) +{ + NPCMGMACState *gmac = qemu_get_nic_opaque(nc); + + trace_npcm_gmac_set_link(!nc->link_down); + gmac_phy_set_link(gmac, !nc->link_down); +} + +static void npcm_gmac_mdio_access(NPCMGMACState *gmac, uint16_t v) +{ + bool busy = v & NPCM_GMAC_MII_ADDR_BUSY; + uint8_t is_write; + uint8_t pa, gr; + uint16_t data; + + if (busy) { + is_write = v & NPCM_GMAC_MII_ADDR_WRITE; + pa = NPCM_GMAC_MII_ADDR_PA(v); + gr = NPCM_GMAC_MII_ADDR_GR(v); + /* Both pa and gr are 5 bits, so they are less than 32. 
*/ + g_assert(pa < NPCM_GMAC_MAX_PHYS); + g_assert(gr < NPCM_GMAC_MAX_PHY_REGS); + + + if (v & NPCM_GMAC_MII_ADDR_WRITE) { + data = gmac->regs[R_NPCM_GMAC_MII_DATA]; + /* Clear reset bit for BMCR register */ + switch (gr) { + case MII_BMCR: + data &= ~MII_BMCR_RESET; + /* Autonegotiation is a W1C bit*/ + if (data & MII_BMCR_ANRESTART) { + /* Tells autonegotiation to not restart again */ + data &= ~MII_BMCR_ANRESTART; + } + if ((data & MII_BMCR_AUTOEN) && + !(gmac->phy_regs[pa][MII_BMSR] & MII_BMSR_AN_COMP)) { + /* sets autonegotiation as complete */ + gmac->phy_regs[pa][MII_BMSR] |= MII_BMSR_AN_COMP; + /* Resolve AN automatically->need to set this */ + gmac->phy_regs[0][MII_ANLPAR] = 0x0000; + } + } + gmac->phy_regs[pa][gr] = data; + } else { + data = gmac->phy_regs[pa][gr]; + gmac->regs[R_NPCM_GMAC_MII_DATA] = data; + } + trace_npcm_gmac_mdio_access(DEVICE(gmac)->canonical_path, is_write, pa, + gr, data); + } + gmac->regs[R_NPCM_GMAC_MII_ADDR] = v & ~NPCM_GMAC_MII_ADDR_BUSY; +} + +static uint64_t npcm_gmac_read(void *opaque, hwaddr offset, unsigned size) +{ + NPCMGMACState *gmac = opaque; + uint32_t v = 0; + + switch (offset) { + /* Write only registers */ + case A_NPCM_DMA_XMT_POLL_DEMAND: + case A_NPCM_DMA_RCV_POLL_DEMAND: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Read of write-only reg: offset: 0x%04" HWADDR_PRIx + "\n", DEVICE(gmac)->canonical_path, offset); + break; + + default: + v = gmac->regs[offset / sizeof(uint32_t)]; + } + + trace_npcm_gmac_reg_read(DEVICE(gmac)->canonical_path, offset, v); + return v; +} + +static void npcm_gmac_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + NPCMGMACState *gmac = opaque; + + trace_npcm_gmac_reg_write(DEVICE(gmac)->canonical_path, offset, v); + + switch (offset) { + /* Read only registers */ + case A_NPCM_GMAC_VERSION: + case A_NPCM_GMAC_INT_STATUS: + case A_NPCM_GMAC_RGMII_STATUS: + case A_NPCM_GMAC_PTP_STSR: + case A_NPCM_GMAC_PTP_STNSR: + case A_NPCM_DMA_MISSED_FRAME_CTR: + case A_NPCM_DMA_HOST_TX_DESC: + case A_NPCM_DMA_HOST_RX_DESC: + case A_NPCM_DMA_CUR_TX_BUF_ADDR: + case A_NPCM_DMA_CUR_RX_BUF_ADDR: + case A_NPCM_DMA_HW_FEATURE: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write of read-only reg: offset: 0x%04" HWADDR_PRIx + ", value: 0x%04" PRIx64 "\n", + DEVICE(gmac)->canonical_path, offset, v); + break; + + case A_NPCM_GMAC_MAC_CONFIG: + gmac->regs[offset / sizeof(uint32_t)] = v; + break; + + case A_NPCM_GMAC_MII_ADDR: + npcm_gmac_mdio_access(gmac, v); + break; + + case A_NPCM_GMAC_MAC0_ADDR_HI: + gmac->regs[offset / sizeof(uint32_t)] = v; + gmac->conf.macaddr.a[0] = v >> 8; + gmac->conf.macaddr.a[1] = v >> 0; + break; + + case A_NPCM_GMAC_MAC0_ADDR_LO: + gmac->regs[offset / sizeof(uint32_t)] = v; + gmac->conf.macaddr.a[2] = v >> 24; + gmac->conf.macaddr.a[3] = v >> 16; + gmac->conf.macaddr.a[4] = v >> 8; + gmac->conf.macaddr.a[5] = v >> 0; + break; + + case A_NPCM_GMAC_MAC1_ADDR_HI: + case A_NPCM_GMAC_MAC1_ADDR_LO: + case A_NPCM_GMAC_MAC2_ADDR_HI: + case A_NPCM_GMAC_MAC2_ADDR_LO: + case A_NPCM_GMAC_MAC3_ADDR_HI: + case A_NPCM_GMAC_MAC3_ADDR_LO: + gmac->regs[offset / sizeof(uint32_t)] = v; + qemu_log_mask(LOG_UNIMP, + "%s: Only MAC Address 0 is supported. 
This request " + "is ignored.\n", DEVICE(gmac)->canonical_path); + break; + + case A_NPCM_DMA_BUS_MODE: + gmac->regs[offset / sizeof(uint32_t)] = v; + if (v & NPCM_DMA_BUS_MODE_SWR) { + npcm_gmac_soft_reset(gmac); + } + break; + + case A_NPCM_DMA_RCV_POLL_DEMAND: + /* We dont actually care about the value */ + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE); + break; + + case A_NPCM_DMA_XMT_POLL_DEMAND: + /* We dont actually care about the value */ + gmac_try_send_next_packet(gmac); + break; + + case A_NPCM_DMA_CONTROL: + gmac->regs[offset / sizeof(uint32_t)] = v; + if (v & NPCM_DMA_CONTROL_START_STOP_TX) { + gmac_try_send_next_packet(gmac); + } else { + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_TX_STOPPED_STATE); + } + if (v & NPCM_DMA_CONTROL_START_STOP_RX) { + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE); + qemu_flush_queued_packets(qemu_get_queue(gmac->nic)); + } else { + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_RX_STOPPED_STATE); + } + break; + + case A_NPCM_DMA_STATUS: + /* Check that RO bits are not written to */ + if (NPCM_DMA_STATUS_RO_MASK(v)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write of read-only bits of reg: offset: 0x%04" + HWADDR_PRIx ", value: 0x%04" PRIx64 "\n", + DEVICE(gmac)->canonical_path, offset, v); + } + /* for W1C bits, implement W1C */ + gmac->regs[offset / sizeof(uint32_t)] &= ~NPCM_DMA_STATUS_W1C_MASK(v); + if (v & NPCM_DMA_STATUS_RU) { + /* Clearing RU bit indicates descriptor is owned by DMA again. */ + gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT, + NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE); + qemu_flush_queued_packets(qemu_get_queue(gmac->nic)); + } + break; + + default: + gmac->regs[offset / sizeof(uint32_t)] = v; + break; + } + + gmac_update_irq(gmac); +} + +static void npcm_gmac_reset(DeviceState *dev) +{ + NPCMGMACState *gmac = NPCM_GMAC(dev); + + npcm_gmac_soft_reset(gmac); + memcpy(gmac->phy_regs[0], phy_reg_init, sizeof(phy_reg_init)); + + trace_npcm_gmac_reset(DEVICE(gmac)->canonical_path, + gmac->phy_regs[0][MII_BMSR]); +} + +static NetClientInfo net_npcm_gmac_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = gmac_can_receive, + .receive = gmac_receive, + .cleanup = gmac_cleanup, + .link_status_changed = gmac_set_link, +}; + +static const struct MemoryRegionOps npcm_gmac_ops = { + .read = npcm_gmac_read, + .write = npcm_gmac_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void npcm_gmac_realize(DeviceState *dev, Error **errp) +{ + NPCMGMACState *gmac = NPCM_GMAC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&gmac->iomem, OBJECT(gmac), &npcm_gmac_ops, gmac, + TYPE_NPCM_GMAC, 8 * KiB); + sysbus_init_mmio(sbd, &gmac->iomem); + sysbus_init_irq(sbd, &gmac->irq); + + qemu_macaddr_default_if_unset(&gmac->conf.macaddr); + + gmac->nic = qemu_new_nic(&net_npcm_gmac_info, &gmac->conf, TYPE_NPCM_GMAC, + dev->id, &dev->mem_reentrancy_guard, gmac); + qemu_format_nic_info_str(qemu_get_queue(gmac->nic), gmac->conf.macaddr.a); + gmac->regs[R_NPCM_GMAC_MAC0_ADDR_HI] = (gmac->conf.macaddr.a[0] << 8) + \ + gmac->conf.macaddr.a[1]; + gmac->regs[R_NPCM_GMAC_MAC0_ADDR_LO] = (gmac->conf.macaddr.a[2] << 24) + \ + (gmac->conf.macaddr.a[3] << 16) + \ + (gmac->conf.macaddr.a[4] << 8) + \ 
+ gmac->conf.macaddr.a[5]; +} + +static void npcm_gmac_unrealize(DeviceState *dev) +{ + NPCMGMACState *gmac = NPCM_GMAC(dev); + + qemu_del_nic(gmac->nic); +} + +static const VMStateDescription vmstate_npcm_gmac = { + .name = TYPE_NPCM_GMAC, + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, NPCMGMACState, NPCM_GMAC_NR_REGS), + VMSTATE_END_OF_LIST(), + }, +}; + +static Property npcm_gmac_properties[] = { + DEFINE_NIC_PROPERTIES(NPCMGMACState, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void npcm_gmac_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->desc = "NPCM GMAC Controller"; + dc->realize = npcm_gmac_realize; + dc->unrealize = npcm_gmac_unrealize; + dc->reset = npcm_gmac_reset; + dc->vmsd = &vmstate_npcm_gmac; + device_class_set_props(dc, npcm_gmac_properties); +} + +static const TypeInfo npcm_gmac_types[] = { + { + .name = TYPE_NPCM_GMAC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCMGMACState), + .class_init = npcm_gmac_class_init, + }, +}; +DEFINE_TYPES(npcm_gmac_types) diff --git a/hw/net/pcnet-pci.c b/hw/net/pcnet-pci.c index 96a302c141a..fe1a845b2b0 100644 --- a/hw/net/pcnet-pci.c +++ b/hw/net/pcnet-pci.c @@ -147,7 +147,7 @@ static const VMStateDescription vmstate_pci_pcnet = { .name = "pcnet", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIPCNetState), VMSTATE_STRUCT(state, PCIPCNetState, 0, vmstate_pcnet, PCNetState), VMSTATE_END_OF_LIST() diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c index a7e123e60db..ad675ab29d4 100644 --- a/hw/net/pcnet.c +++ b/hw/net/pcnet.c @@ -632,7 +632,7 @@ static inline int ladr_match(PCNetState *s, const uint8_t *buf, int size) { struct qemu_ether_header *hdr = (void *)buf; if ((*(hdr->ether_dhost)&0x01) && - ((uint64_t *)&s->csr[8])[0] != 0LL) { + (s->csr[8] | s->csr[9] | s->csr[10] | s->csr[11]) != 0) { uint8_t ladr[8] = { s->csr[8] & 0xff, s->csr[8] >> 8, s->csr[9] & 0xff, s->csr[9] >> 8, @@ -1682,7 +1682,7 @@ const VMStateDescription vmstate_pcnet = { .name = "pcnet", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(rap, PCNetState), VMSTATE_INT32(isr, PCNetState), VMSTATE_INT32(lnkst, PCNetState), diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index 4af8c66266b..897c86ec41e 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -3150,7 +3150,7 @@ static const VMStateDescription vmstate_rtl8139_hotplug_ready ={ .version_id = 1, .minimum_version_id = 1, .needed = rtl8139_hotplug_ready_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() } }; @@ -3173,7 +3173,7 @@ static const VMStateDescription vmstate_rtl8139 = { .minimum_version_id = 3, .post_load = rtl8139_post_load, .pre_save = rtl8139_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, RTL8139State), VMSTATE_PARTIAL_BUFFER(phys, RTL8139State, 6), VMSTATE_BUFFER(mult, RTL8139State), @@ -3257,7 +3257,7 @@ static const VMStateDescription vmstate_rtl8139 = { VMSTATE_UINT32_V(cplus_enabled, RTL8139State, 4), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_rtl8139_hotplug_ready, NULL } diff --git a/hw/net/smc91c111.c b/hw/net/smc91c111.c index 
876a78456ac..702d0e8e837 100644 --- a/hw/net/smc91c111.c +++ b/hw/net/smc91c111.c @@ -62,7 +62,7 @@ static const VMStateDescription vmstate_smc91c111 = { .name = "smc91c111", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(tcr, smc91c111_state), VMSTATE_UINT16(rcr, smc91c111_state), VMSTATE_UINT16(cr, smc91c111_state), @@ -818,14 +818,13 @@ static void smc91c111_register_types(void) /* Legacy helper function. Should go away when machine config files are implemented. */ -void smc91c111_init(NICInfo *nd, uint32_t base, qemu_irq irq) +void smc91c111_init(uint32_t base, qemu_irq irq) { DeviceState *dev; SysBusDevice *s; - qemu_check_nic_model(nd, "smc91c111"); dev = qdev_new(TYPE_SMC91C111); - qdev_set_nic_properties(dev, nd); + qemu_configure_nic_device(dev, true, NULL); s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); sysbus_mmio_map(s, 0, base); diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c index 475d5f3a348..ecb30b7c76b 100644 --- a/hw/net/spapr_llan.c +++ b/hw/net/spapr_llan.c @@ -800,7 +800,7 @@ static const VMStateDescription vmstate_rx_buffer_pool = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_vlan_rx_buffer_pools_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(bufsize, RxBufPool), VMSTATE_INT32(count, RxBufPool), VMSTATE_UINT64_ARRAY(bds, RxBufPool, RX_POOL_MAX_BDS), @@ -813,7 +813,7 @@ static const VMStateDescription vmstate_rx_pools = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_vlan_rx_buffer_pools_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(rx_pool, SpaprVioVlan, RX_MAX_POOLS, 1, vmstate_rx_buffer_pool, RxBufPool), @@ -825,7 +825,7 @@ static const VMStateDescription vmstate_spapr_llan = { .name = "spapr_llan", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SPAPR_VIO(sdev, SpaprVioVlan), /* LLAN state */ VMSTATE_BOOL(isopen, SpaprVioVlan), @@ -837,7 +837,7 @@ static const VMStateDescription vmstate_spapr_llan = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_rx_pools, NULL } diff --git a/hw/net/stellaris_enet.c b/hw/net/stellaris_enet.c index 6768a6912f0..db95766e294 100644 --- a/hw/net/stellaris_enet.c +++ b/hw/net/stellaris_enet.c @@ -88,7 +88,7 @@ static const VMStateDescription vmstate_rx_frame = { .name = "stellaris_enet/rx_frame", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(data, StellarisEnetRxFrame, 2048), VMSTATE_UINT32(len, StellarisEnetRxFrame), VMSTATE_END_OF_LIST() @@ -133,7 +133,7 @@ static const VMStateDescription vmstate_stellaris_enet = { .version_id = 2, .minimum_version_id = 2, .post_load = stellaris_enet_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ris, stellaris_enet_state), VMSTATE_UINT32(im, stellaris_enet_state), VMSTATE_UINT32(rctl, stellaris_enet_state), diff --git a/hw/net/sungem.c b/hw/net/sungem.c index 013cfc27361..dd1b4a13446 100644 --- a/hw/net/sungem.c +++ b/hw/net/sungem.c @@ -1434,7 +1434,7 @@ static const VMStateDescription vmstate_sungem = { .name = "sungem", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pdev, SunGEMState), 
VMSTATE_MACADDR(conf.macaddr, SunGEMState), VMSTATE_UINT32(phy_addr, SunGEMState), diff --git a/hw/net/sunhme.c b/hw/net/sunhme.c index ddc83a64bd1..ae8452e5f9f 100644 --- a/hw/net/sunhme.c +++ b/hw/net/sunhme.c @@ -925,7 +925,7 @@ static const VMStateDescription vmstate_hme = { .name = "sunhme", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, SunHMEState), VMSTATE_MACADDR(conf.macaddr, SunHMEState), VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)), diff --git a/hw/net/trace-events b/hw/net/trace-events index 387e32e1533..78efa2ec2cc 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -467,6 +467,25 @@ npcm7xx_emc_rx_done(uint32_t crxdsa) "RX done, CRXDSA=0x%x" npcm7xx_emc_reg_read(int emc_num, uint32_t result, const char *name, int regno) "emc%d: 0x%x = reg[%s/%d]" npcm7xx_emc_reg_write(int emc_num, const char *name, int regno, uint32_t value) "emc%d: reg[%s/%d] = 0x%x" +# npcm_gmac.c +npcm_gmac_reg_read(const char *name, uint64_t offset, uint32_t value) "%s: offset: 0x%04" PRIx64 " value: 0x%04" PRIx32 +npcm_gmac_reg_write(const char *name, uint64_t offset, uint32_t value) "%s: offset: 0x%04" PRIx64 " value: 0x%04" PRIx32 +npcm_gmac_mdio_access(const char *name, uint8_t is_write, uint8_t pa, uint8_t gr, uint16_t val) "%s: is_write: %" PRIu8 " pa: %" PRIu8 " gr: %" PRIu8 " val: 0x%04" PRIx16 +npcm_gmac_reset(const char *name, uint16_t value) "%s: phy_regs[0][1]: 0x%04" PRIx16 +npcm_gmac_set_link(bool active) "Set link: active=%u" +npcm_gmac_update_irq(const char *name, uint32_t status, uint32_t intr_en, int level) "%s: Status Reg: 0x%04" PRIX32 " Interrupt Enable Reg: 0x%04" PRIX32 " IRQ Set: %d" +npcm_gmac_packet_desc_read(const char* name, uint32_t desc_addr) "%s: attempting to read descriptor @0x%04" PRIX32 +npcm_gmac_packet_receive(const char* name, uint32_t len) "%s: RX packet length: 0x%04" PRIX32 +npcm_gmac_packet_receiving_buffer(const char* name, uint32_t buf_len, uint32_t rx_buf_addr) "%s: Receiving into Buffer size: 0x%04" PRIX32 " at address 0x%04" PRIX32 +npcm_gmac_packet_received(const char* name, uint32_t len) "%s: Reception finished, packet left: 0x%04" PRIX32 +npcm_gmac_packet_sent(const char* name, uint16_t len) "%s: TX packet sent!, length: 0x%04" PRIX16 +npcm_gmac_debug_desc_data(const char* name, void* addr, uint32_t des0, uint32_t des1, uint32_t des2, uint32_t des3)"%s: Address: %p Descriptor 0: 0x%04" PRIX32 " Descriptor 1: 0x%04" PRIX32 "Descriptor 2: 0x%04" PRIX32 " Descriptor 3: 0x%04" PRIX32 +npcm_gmac_packet_tx_desc_data(const char* name, uint32_t tdes0, uint32_t tdes1) "%s: Tdes0: 0x%04" PRIX32 " Tdes1: 0x%04" PRIX32 + +# npcm_pcs.c +npcm_pcs_reg_read(const char *name, uint16_t indirect_access_baes, uint64_t offset, uint16_t value) "%s: IND: 0x%02" PRIx16 " offset: 0x%04" PRIx64 " value: 0x%04" PRIx16 +npcm_pcs_reg_write(const char *name, uint16_t indirect_access_baes, uint64_t offset, uint16_t value) "%s: IND: 0x%02" PRIx16 " offset: 0x%04" PRIx64 " value: 0x%04" PRIx16 + # dp8398x.c dp8393x_raise_irq(int isr) "raise irq, isr is 0x%04x" dp8393x_lower_irq(void) "lower irq" diff --git a/hw/net/tulip.c b/hw/net/tulip.c index f21b8ca62b0..1f2ef209775 100644 --- a/hw/net/tulip.c +++ b/hw/net/tulip.c @@ -48,7 +48,7 @@ struct TULIPState { static const VMStateDescription vmstate_pci_tulip = { .name = "tulip", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, TULIPState), VMSTATE_UINT32_ARRAY(csr, 
TULIPState, 16), VMSTATE_UINT32(old_csr9, TULIPState), diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c index e8e16616462..fd1a93701ad 100644 --- a/hw/net/vhost_net.c +++ b/hw/net/vhost_net.c @@ -541,6 +541,16 @@ int vhost_set_vring_enable(NetClientState *nc, int enable) VHostNetState *net = get_vhost_net(nc); const VhostOps *vhost_ops = net->dev.vhost_ops; + /* + * vhost-vdpa network devices need to enable dataplane virtqueues after + * DRIVER_OK, so they can recover device state before starting dataplane. + * Because of that, we don't enable virtqueues here and leave it to + * net/vhost-vdpa.c. + */ + if (nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + return 0; + } + nc->vring_enable = enable; if (vhost_ops && vhost_ops->vhost_set_vring_enable) { diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 73024babd45..24e5e7d347c 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -42,6 +42,7 @@ #include "sysemu/sysemu.h" #include "trace.h" #include "monitor/qdev.h" +#include "monitor/monitor.h" #include "hw/pci/pci_device.h" #include "net_rx_pkt.h" #include "hw/virtio/vhost.h" @@ -1328,14 +1329,53 @@ static void virtio_net_detach_epbf_rss(VirtIONet *n) virtio_net_attach_ebpf_to_backend(n->nic, -1); } -static bool virtio_net_load_ebpf(VirtIONet *n) +static bool virtio_net_load_ebpf_fds(VirtIONet *n, Error **errp) { - if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) { - /* backend doesn't support steering ebpf */ - return false; + int fds[EBPF_RSS_MAX_FDS] = { [0 ... EBPF_RSS_MAX_FDS - 1] = -1}; + int ret = true; + int i = 0; + + ERRP_GUARD(); + + if (n->nr_ebpf_rss_fds != EBPF_RSS_MAX_FDS) { + error_setg(errp, + "Expected %d file descriptors but got %d", + EBPF_RSS_MAX_FDS, n->nr_ebpf_rss_fds); + return false; + } + + for (i = 0; i < n->nr_ebpf_rss_fds; i++) { + fds[i] = monitor_fd_param(monitor_cur(), n->ebpf_rss_fds[i], errp); + if (*errp) { + ret = false; + goto exit; + } + } + + ret = ebpf_rss_load_fds(&n->ebpf_rss, fds[0], fds[1], fds[2], fds[3]); + +exit: + if (!ret || *errp) { + for (i = 0; i < n->nr_ebpf_rss_fds && fds[i] != -1; i++) { + close(fds[i]); + } + } + + return ret; +} + +static bool virtio_net_load_ebpf(VirtIONet *n, Error **errp) +{ + bool ret = false; + + if (virtio_net_attach_ebpf_to_backend(n->nic, -1)) { + if (!(n->ebpf_rss_fds + && virtio_net_load_ebpf_fds(n, errp))) { + ret = ebpf_rss_load(&n->ebpf_rss); + } } - return ebpf_rss_load(&n->ebpf_rss); + return ret; } static void virtio_net_unload_ebpf(VirtIONet *n) @@ -2809,6 +2849,10 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) VirtIONet *n = VIRTIO_NET(vdev); VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; + if (unlikely(n->vhost_started)) { + return; + } + if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) { virtio_net_drop_tx_queue_data(vdev, vq); return; @@ -3118,7 +3162,7 @@ static int virtio_net_post_load_virtio(VirtIODevice *vdev) /* tx_waiting field of a VirtIONetQueue */ static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = { .name = "virtio-net-queue-tx_waiting", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tx_waiting, VirtIONetQueue), VMSTATE_END_OF_LIST() }, @@ -3196,7 +3240,7 @@ static const VMStateDescription vmstate_virtio_net_tx_waiting = { .name = "virtio-net-tx_waiting", .pre_load = virtio_net_tx_waiting_pre_load, .pre_save = virtio_net_tx_waiting_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct 
VirtIONetMigTmp, curr_queue_pairs_1, vmstate_virtio_net_queue_tx_waiting, @@ -3233,7 +3277,7 @@ static const VMStateDescription vmstate_virtio_net_has_ufo = { .name = "virtio-net-ufo", .post_load = virtio_net_ufo_post_load, .pre_save = virtio_net_ufo_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp), VMSTATE_END_OF_LIST() }, @@ -3267,7 +3311,7 @@ static const VMStateDescription vmstate_virtio_net_has_vnet = { .name = "virtio-net-vnet", .post_load = virtio_net_vnet_post_load, .pre_save = virtio_net_vnet_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp), VMSTATE_END_OF_LIST() }, @@ -3283,7 +3327,7 @@ static const VMStateDescription vmstate_virtio_net_rss = { .version_id = 1, .minimum_version_id = 1, .needed = virtio_net_rss_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(rss_data.enabled, VirtIONet), VMSTATE_BOOL(rss_data.redirect, VirtIONet), VMSTATE_BOOL(rss_data.populate_hash, VirtIONet), @@ -3304,7 +3348,7 @@ static const VMStateDescription vmstate_virtio_net_device = { .version_id = VIRTIO_NET_VM_VERSION, .minimum_version_id = VIRTIO_NET_VM_VERSION, .post_load = virtio_net_post_load_device, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN), VMSTATE_STRUCT_POINTER(vqs, VirtIONet, vmstate_virtio_net_queue_tx_waiting, @@ -3348,8 +3392,8 @@ static const VMStateDescription vmstate_virtio_net_device = { VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet, has_ctrl_guest_offloads), VMSTATE_END_OF_LIST() - }, - .subsections = (const VMStateDescription * []) { + }, + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_net_rss, NULL } @@ -3370,7 +3414,7 @@ static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx) VirtIONet *n = VIRTIO_NET(vdev); NetClientState *nc; assert(n->vhost_started); - if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) { + if (!n->multiqueue && idx == 2) { /* Must guard against invalid features and bogus queue index * from being set by malicious guest, or penetrated through * buggy migration stream. @@ -3402,7 +3446,7 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx, VirtIONet *n = VIRTIO_NET(vdev); NetClientState *nc; assert(n->vhost_started); - if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) { + if (!n->multiqueue && idx == 2) { /* Must guard against invalid features and bogus queue index * from being set by malicious guest, or penetrated through * buggy migration stream. 
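The two virtio-net hunks above tighten the guest-notifier guard by testing the device's own n->multiqueue flag instead of the negotiated VIRTIO_NET_F_MQ feature bit before treating virtqueue index 2 as a data queue. The reasoning rests on the virtio-net queue layout: with a single queue pair the device exposes rx (0), tx (1) and ctrl (2), and the control virtqueue has no vhost counterpart, so a notifier request for it must never be translated into a vhost virtqueue. The standalone sketch below only illustrates that indexing assumption; the helper names are hypothetical and are not QEMU APIs.

    /*
     * Illustrative sketch of the virtio-net virtqueue layout behind the
     * "idx == 2" guard.  Not part of the patch; names are hypothetical.
     */
    #include <stdbool.h>
    #include <stdio.h>

    /* Data queues come in rx/tx pairs: indices 2i and 2i+1 share pair i. */
    static int sketch_vq_to_queue_pair(int idx)
    {
        return idx / 2;
    }

    /* True if idx names a data queue that a vhost backend would track. */
    static bool sketch_is_data_queue(bool multiqueue, int num_queue_pairs,
                                     int idx)
    {
        /* Ctrl vq sits after the data queues: index 2 without MQ,
         * index 2 * num_queue_pairs with MQ negotiated. */
        int ctrl_idx = multiqueue ? 2 * num_queue_pairs : 2;

        return idx != ctrl_idx &&
               sketch_vq_to_queue_pair(idx) < num_queue_pairs;
    }

    int main(void)
    {
        /* Single queue pair, multiqueue off: index 2 is the control queue. */
        printf("idx 2 is data queue: %d\n", sketch_is_data_queue(false, 1, 2));
        /* Four queue pairs, multiqueue on: index 2 is rx of pair 1. */
        printf("idx 2 is data queue: %d\n", sketch_is_data_queue(true, 4, 2));
        return 0;
    }

Under this layout, rejecting index 2 whenever multiqueue is off keeps a bogus index, whether set by a malicious guest or carried in through a buggy migration stream, from being translated into an out-of-range vhost virtqueue, which is the situation the in-code comment in the hunk describes.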
@@ -3504,7 +3548,7 @@ static bool failover_replug_primary(VirtIONet *n, DeviceState *dev, return !err; } -static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s) +static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationEvent *e) { bool should_be_hidden; Error *err = NULL; @@ -3516,7 +3560,7 @@ static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s) should_be_hidden = qatomic_read(&n->failover_primary_hidden); - if (migration_in_setup(s) && !should_be_hidden) { + if (e->type == MIG_EVENT_PRECOPY_SETUP && !should_be_hidden) { if (failover_unplug_primary(n, dev)) { vmstate_unregister(VMSTATE_IF(dev), qdev_get_vmsd(dev), dev); qapi_event_send_unplug_primary(dev->id); @@ -3524,7 +3568,7 @@ static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s) } else { warn_report("couldn't unplug primary device"); } - } else if (migration_has_failed(s)) { + } else if (e->type == MIG_EVENT_PRECOPY_FAILED) { /* We already unplugged the device let's plug it back */ if (!failover_replug_primary(n, dev, &err)) { if (err) { @@ -3534,11 +3578,12 @@ static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s) } } -static void virtio_net_migration_state_notifier(Notifier *notifier, void *data) +static int virtio_net_migration_state_notifier(NotifierWithReturn *notifier, + MigrationEvent *e, Error **errp) { - MigrationState *s = data; VirtIONet *n = container_of(notifier, VirtIONet, migration_state); - virtio_net_handle_migration_primary(n, s); + virtio_net_handle_migration_primary(n, e); + return 0; } static bool failover_hide_primary_device(DeviceListener *listener, @@ -3767,7 +3812,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) net_rx_pkt_init(&n->rx_pkt); if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) { - virtio_net_load_ebpf(n); + virtio_net_load_ebpf(n, errp); } } @@ -3876,7 +3921,7 @@ static const VMStateDescription vmstate_virtio_net = { .name = "virtio-net", .minimum_version_id = VIRTIO_NET_VM_VERSION, .version_id = VIRTIO_NET_VM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, @@ -3929,6 +3974,8 @@ static Property virtio_net_properties[] = { VIRTIO_NET_F_RSS, false), DEFINE_PROP_BIT64("hash", VirtIONet, host_features, VIRTIO_NET_F_HASH_REPORT, false), + DEFINE_PROP_ARRAY("ebpf-rss-fds", VirtIONet, nr_ebpf_rss_fds, + ebpf_rss_fds, qdev_prop_string, char*), DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features, VIRTIO_NET_F_RSC_EXT, false), DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout, diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index 6fb4102d033..707487c6366 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -2307,7 +2307,7 @@ static const VMStateDescription vmxstate_vmxnet3_mcast_list = { .minimum_version_id = 1, .pre_load = vmxnet3_mcast_list_pre_load, .needed = vmxnet3_mc_list_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL, mcast_list_buff_size), VMSTATE_END_OF_LIST() @@ -2317,7 +2317,7 @@ static const VMStateDescription vmxstate_vmxnet3_mcast_list = { static const VMStateDescription vmstate_vmxnet3_ring = { .name = "vmxnet3-ring", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(pa, Vmxnet3Ring), VMSTATE_UINT32(size, Vmxnet3Ring), VMSTATE_UINT32(cell_size, Vmxnet3Ring), @@ -2330,7 +2330,7 @@ static const 
VMStateDescription vmstate_vmxnet3_ring = { static const VMStateDescription vmstate_vmxnet3_tx_stats = { .name = "vmxnet3-tx-stats", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(TSOPktsTxOK, struct UPT1_TxStats), VMSTATE_UINT64(TSOBytesTxOK, struct UPT1_TxStats), VMSTATE_UINT64(ucastPktsTxOK, struct UPT1_TxStats), @@ -2348,7 +2348,7 @@ static const VMStateDescription vmstate_vmxnet3_tx_stats = { static const VMStateDescription vmstate_vmxnet3_txq_descr = { .name = "vmxnet3-txq-descr", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(tx_ring, Vmxnet3TxqDescr, 0, vmstate_vmxnet3_ring, Vmxnet3Ring), VMSTATE_STRUCT(comp_ring, Vmxnet3TxqDescr, 0, vmstate_vmxnet3_ring, @@ -2364,7 +2364,7 @@ static const VMStateDescription vmstate_vmxnet3_txq_descr = { static const VMStateDescription vmstate_vmxnet3_rx_stats = { .name = "vmxnet3-rx-stats", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(LROPktsRxOK, struct UPT1_RxStats), VMSTATE_UINT64(LROBytesRxOK, struct UPT1_RxStats), VMSTATE_UINT64(ucastPktsRxOK, struct UPT1_RxStats), @@ -2382,7 +2382,7 @@ static const VMStateDescription vmstate_vmxnet3_rx_stats = { static const VMStateDescription vmstate_vmxnet3_rxq_descr = { .name = "vmxnet3-rxq-descr", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(rx_ring, Vmxnet3RxqDescr, VMXNET3_RX_RINGS_PER_QUEUE, 0, vmstate_vmxnet3_ring, Vmxnet3Ring), @@ -2418,7 +2418,7 @@ static int vmxnet3_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_vmxnet3_int_state = { .name = "vmxnet3-int-state", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(is_masked, Vmxnet3IntState), VMSTATE_BOOL(is_pending, Vmxnet3IntState), VMSTATE_BOOL(is_asserted, Vmxnet3IntState), @@ -2432,7 +2432,7 @@ static const VMStateDescription vmstate_vmxnet3 = { .minimum_version_id = 1, .pre_save = vmxnet3_pre_save, .post_load = vmxnet3_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, VMXNET3State), VMSTATE_MSIX(parent_obj, VMXNET3State), VMSTATE_BOOL(rx_packets_compound, VMXNET3State), @@ -2468,7 +2468,7 @@ static const VMStateDescription vmstate_vmxnet3 = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmxstate_vmxnet3_mcast_list, NULL } diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c index 1e2b3baeb1a..89487b49baf 100644 --- a/hw/net/xen_nic.c +++ b/hw/net/xen_nic.c @@ -133,7 +133,7 @@ static bool net_tx_packets(struct XenNetDev *netdev) void *page; void *tmpbuf = NULL; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); for (;;) { rc = netdev->tx_ring.req_cons; @@ -260,7 +260,7 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size RING_IDX rc, rp; void *page; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (xen_device_backend_get_state(&netdev->xendev) != XenbusStateConnected) { return -1; @@ -351,10 +351,11 @@ static bool net_event(void *_xendev) static bool xen_netdev_connect(XenDevice *xendev, Error **errp) { + ERRP_GUARD(); XenNetDev *netdev = XEN_NET_DEVICE(xendev); unsigned int port, rx_copy; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (xen_device_frontend_scanf(xendev, "tx-ring-ref", "%u", &netdev->tx_ring_ref) != 1) { @@ 
-425,7 +426,7 @@ static void xen_netdev_disconnect(XenDevice *xendev, Error **errp) trace_xen_netdev_disconnect(netdev->dev); - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); netdev->tx_ring.sring = NULL; netdev->rx_ring.sring = NULL; diff --git a/hw/net/xgmac.c b/hw/net/xgmac.c index 1f4f277d840..ffe3fc8dbef 100644 --- a/hw/net/xgmac.c +++ b/hw/net/xgmac.c @@ -159,7 +159,7 @@ static const VMStateDescription vmstate_rxtx_stats = { .name = "xgmac_stats", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(rx_bytes, RxTxStats), VMSTATE_UINT64(tx_bytes, RxTxStats), VMSTATE_UINT64(rx, RxTxStats), @@ -173,7 +173,7 @@ static const VMStateDescription vmstate_xgmac = { .name = "xgmac", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(stats, XgmacState, 0, vmstate_rxtx_stats, RxTxStats), VMSTATE_UINT32_ARRAY(regs, XgmacState, R_MAX), VMSTATE_END_OF_LIST() diff --git a/hw/nubus/meson.build b/hw/nubus/meson.build index e7ebda89935..9a7a12ea683 100644 --- a/hw/nubus/meson.build +++ b/hw/nubus/meson.build @@ -2,6 +2,7 @@ nubus_ss = ss.source_set() nubus_ss.add(files('nubus-device.c')) nubus_ss.add(files('nubus-bus.c')) nubus_ss.add(files('nubus-bridge.c')) +nubus_ss.add(files('nubus-virtio-mmio.c')) nubus_ss.add(when: 'CONFIG_Q800', if_true: files('mac-nubus-bridge.c')) system_ss.add_all(when: 'CONFIG_NUBUS', if_true: nubus_ss) diff --git a/hw/nubus/nubus-device.c b/hw/nubus/nubus-device.c index 49008e49385..be4cb246966 100644 --- a/hw/nubus/nubus-device.c +++ b/hw/nubus/nubus-device.c @@ -10,6 +10,7 @@ #include "qemu/osdep.h" #include "qemu/datadir.h" +#include "exec/target_page.h" #include "hw/irq.h" #include "hw/loader.h" #include "hw/nubus/nubus.h" @@ -30,7 +31,8 @@ static void nubus_device_realize(DeviceState *dev, Error **errp) NubusDevice *nd = NUBUS_DEVICE(dev); char *name, *path; hwaddr slot_offset; - int64_t size; + int64_t size, align_size; + uint8_t *rom_ptr; int ret; /* Super */ @@ -76,16 +78,24 @@ static void nubus_device_realize(DeviceState *dev, Error **errp) } name = g_strdup_printf("nubus-slot-%x-declaration-rom", nd->slot); - memory_region_init_rom(&nd->decl_rom, OBJECT(dev), name, size, + + /* + * Ensure ROM memory region is aligned to target page size regardless + * of the size of the Declaration ROM image + */ + align_size = ROUND_UP(size, qemu_target_page_size()); + memory_region_init_rom(&nd->decl_rom, OBJECT(dev), name, align_size, &error_abort); - ret = load_image_mr(path, &nd->decl_rom); + rom_ptr = memory_region_get_ram_ptr(&nd->decl_rom); + ret = load_image_size(path, rom_ptr + (uintptr_t)(align_size - size), + size); g_free(path); g_free(name); if (ret < 0) { error_setg(errp, "could not load romfile \"%s\"", nd->romfile); return; } - memory_region_add_subregion(&nd->slot_mem, NUBUS_SLOT_SIZE - size, + memory_region_add_subregion(&nd->slot_mem, NUBUS_SLOT_SIZE - align_size, &nd->decl_rom); } } diff --git a/hw/nubus/nubus-virtio-mmio.c b/hw/nubus/nubus-virtio-mmio.c new file mode 100644 index 00000000000..58a63c84d0b --- /dev/null +++ b/hw/nubus/nubus-virtio-mmio.c @@ -0,0 +1,102 @@ +/* + * QEMU Macintosh Nubus Virtio MMIO card + * + * Copyright (c) 2024 Mark Cave-Ayland + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/nubus/nubus-virtio-mmio.h" + + +#define NUBUS_VIRTIO_MMIO_PIC_OFFSET 0 +#define NUBUS_VIRTIO_MMIO_DEV_OFFSET 0x200 + + +static void 
nubus_virtio_mmio_set_input_irq(void *opaque, int n, int level) +{ + NubusDevice *nd = NUBUS_DEVICE(opaque); + + nubus_set_irq(nd, level); +} + +static void nubus_virtio_mmio_realize(DeviceState *dev, Error **errp) +{ + NubusVirtioMMIODeviceClass *nvmdc = NUBUS_VIRTIO_MMIO_GET_CLASS(dev); + NubusVirtioMMIO *s = NUBUS_VIRTIO_MMIO(dev); + NubusDevice *nd = NUBUS_DEVICE(dev); + SysBusDevice *sbd; + int i, offset; + + nvmdc->parent_realize(dev, errp); + if (*errp) { + return; + } + + /* Goldfish PIC */ + sbd = SYS_BUS_DEVICE(&s->pic); + if (!sysbus_realize(sbd, errp)) { + return; + } + memory_region_add_subregion(&nd->slot_mem, NUBUS_VIRTIO_MMIO_PIC_OFFSET, + sysbus_mmio_get_region(sbd, 0)); + sysbus_connect_irq(sbd, 0, + qdev_get_gpio_in_named(dev, "pic-input-irq", 0)); + + /* virtio-mmio devices */ + offset = NUBUS_VIRTIO_MMIO_DEV_OFFSET; + for (i = 0; i < NUBUS_VIRTIO_MMIO_NUM_DEVICES; i++) { + sbd = SYS_BUS_DEVICE(&s->virtio_mmio[i]); + qdev_prop_set_bit(DEVICE(sbd), "force-legacy", false); + if (!sysbus_realize_and_unref(sbd, errp)) { + return; + } + + memory_region_add_subregion(&nd->slot_mem, offset, + sysbus_mmio_get_region(sbd, 0)); + offset += 0x200; + + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(DEVICE(&s->pic), i)); + } +} + +static void nubus_virtio_mmio_init(Object *obj) +{ + NubusVirtioMMIO *s = NUBUS_VIRTIO_MMIO(obj); + int i; + + object_initialize_child(obj, "pic", &s->pic, TYPE_GOLDFISH_PIC); + for (i = 0; i < NUBUS_VIRTIO_MMIO_NUM_DEVICES; i++) { + char *name = g_strdup_printf("virtio-mmio[%d]", i); + object_initialize_child(obj, name, &s->virtio_mmio[i], + TYPE_VIRTIO_MMIO); + g_free(name); + } + + /* Input from goldfish PIC */ + qdev_init_gpio_in_named(DEVICE(obj), nubus_virtio_mmio_set_input_irq, + "pic-input-irq", 1); +} + +static void nubus_virtio_mmio_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + NubusVirtioMMIODeviceClass *nvmdc = NUBUS_VIRTIO_MMIO_CLASS(oc); + + device_class_set_parent_realize(dc, nubus_virtio_mmio_realize, + &nvmdc->parent_realize); +} + +static const TypeInfo nubus_virtio_mmio_types[] = { + { + .name = TYPE_NUBUS_VIRTIO_MMIO, + .parent = TYPE_NUBUS_DEVICE, + .instance_init = nubus_virtio_mmio_init, + .instance_size = sizeof(NubusVirtioMMIO), + .class_init = nubus_virtio_mmio_class_init, + .class_size = sizeof(NubusVirtioMMIODeviceClass), + }, +}; + +DEFINE_TYPES(nubus_virtio_mmio_types) diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index 76fe0397045..127c3d23834 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -2855,7 +2855,7 @@ static inline uint16_t nvme_check_copy_mcl(NvmeNamespace *ns, uint32_t nlb; nvme_copy_source_range_parse(iocb->ranges, idx, iocb->format, NULL, &nlb, NULL, NULL, NULL); - copy_len += nlb + 1; + copy_len += nlb; } if (copy_len > ns->id_ns.mcl) { @@ -5640,6 +5640,10 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req) NvmeIdNsDescr hdr; uint8_t v[NVME_NIDL_UUID]; } QEMU_PACKED uuid = {}; + struct { + NvmeIdNsDescr hdr; + uint8_t v[NVME_NIDL_NGUID]; + } QEMU_PACKED nguid = {}; struct { NvmeIdNsDescr hdr; uint64_t v; @@ -5668,6 +5672,14 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req) pos += sizeof(uuid); } + if (!nvme_nguid_is_null(&ns->params.nguid)) { + nguid.hdr.nidt = NVME_NIDT_NGUID; + nguid.hdr.nidl = NVME_NIDL_NGUID; + memcpy(nguid.v, ns->params.nguid.data, NVME_NIDL_NGUID); + memcpy(pos, &nguid, sizeof(nguid)); + pos += sizeof(nguid); + } + if (ns->params.eui64) { eui64.hdr.nidt = NVME_NIDT_EUI64; eui64.hdr.nidl = 
NVME_NIDL_EUI64; @@ -5882,7 +5894,7 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) uint32_t dw10 = le32_to_cpu(cmd->cdw10); uint32_t dw11 = le32_to_cpu(cmd->cdw11); uint32_t nsid = le32_to_cpu(cmd->nsid); - uint32_t result; + uint32_t result = 0; uint8_t fid = NVME_GETSETFEAT_FID(dw10); NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10); uint16_t iv; @@ -7114,10 +7126,6 @@ static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst) sctrl = &n->sec_ctrl_list.sec[i]; nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); } - - if (rst != NVME_RESET_CONTROLLER) { - pcie_sriov_pf_disable_vfs(pci_dev); - } } if (rst != NVME_RESET_CONTROLLER) { @@ -7798,6 +7806,11 @@ static bool nvme_check_params(NvmeCtrl *n, Error **errp) } if (n->pmr.dev) { + if (params->msix_exclusive_bar) { + error_setg(errp, "not enough BARs available to enable PMR"); + return false; + } + if (host_memory_backend_is_mapped(n->pmr.dev)) { error_setg(errp, "can't use already busy memdev: %s", object_get_canonical_path_component(OBJECT(n->pmr.dev))); @@ -8003,13 +8016,18 @@ static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev) memory_region_set_enabled(&n->pmr.dev->mr, false); } -static uint64_t nvme_bar_size(unsigned total_queues, unsigned total_irqs, - unsigned *msix_table_offset, - unsigned *msix_pba_offset) +static uint64_t nvme_mbar_size(unsigned total_queues, unsigned total_irqs, + unsigned *msix_table_offset, + unsigned *msix_pba_offset) { - uint64_t bar_size, msix_table_size, msix_pba_size; + uint64_t bar_size, msix_table_size; bar_size = sizeof(NvmeBar) + 2 * total_queues * NVME_DB_SIZE; + + if (total_irqs == 0) { + goto out; + } + bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB); if (msix_table_offset) { @@ -8024,11 +8042,10 @@ static uint64_t nvme_bar_size(unsigned total_queues, unsigned total_irqs, *msix_pba_offset = bar_size; } - msix_pba_size = QEMU_ALIGN_UP(total_irqs, 64) / 8; - bar_size += msix_pba_size; + bar_size += QEMU_ALIGN_UP(total_irqs, 64) / 8; - bar_size = pow2ceil(bar_size); - return bar_size; +out: + return pow2ceil(bar_size); } static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset) @@ -8036,7 +8053,7 @@ static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset) uint16_t vf_dev_id = n->params.use_intel_id ? 
PCI_DEVICE_ID_INTEL_NVME : PCI_DEVICE_ID_REDHAT_NVME; NvmePriCtrlCap *cap = &n->pri_ctrl_cap; - uint64_t bar_size = nvme_bar_size(le16_to_cpu(cap->vqfrsm), + uint64_t bar_size = nvme_mbar_size(le16_to_cpu(cap->vqfrsm), le16_to_cpu(cap->vifrsm), NULL, NULL); @@ -8075,7 +8092,7 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) ERRP_GUARD(); uint8_t *pci_conf = pci_dev->config; uint64_t bar_size; - unsigned msix_table_offset, msix_pba_offset; + unsigned msix_table_offset = 0, msix_pba_offset = 0; int ret; pci_conf[PCI_INTERRUPT_PIN] = 1; @@ -8097,24 +8114,38 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) pcie_ari_init(pci_dev, 0x100); } - /* add one to max_ioqpairs to account for the admin queue pair */ - bar_size = nvme_bar_size(n->params.max_ioqpairs + 1, n->params.msix_qsize, - &msix_table_offset, &msix_pba_offset); + if (n->params.msix_exclusive_bar && !pci_is_vf(pci_dev)) { + bar_size = nvme_mbar_size(n->params.max_ioqpairs + 1, 0, NULL, NULL); + memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", + bar_size); + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem); + ret = msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp); + } else { + assert(n->params.msix_qsize >= 1); - memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size); - memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", - msix_table_offset); - memory_region_add_subregion(&n->bar0, 0, &n->iomem); + /* add one to max_ioqpairs to account for the admin queue pair */ + bar_size = nvme_mbar_size(n->params.max_ioqpairs + 1, + n->params.msix_qsize, &msix_table_offset, + &msix_pba_offset); - if (pci_is_vf(pci_dev)) { - pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0); - } else { - pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | - PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); + memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size); + memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", + msix_table_offset); + memory_region_add_subregion(&n->bar0, 0, &n->iomem); + + if (pci_is_vf(pci_dev)) { + pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0); + } else { + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); + } + + ret = msix_init(pci_dev, n->params.msix_qsize, + &n->bar0, 0, msix_table_offset, + &n->bar0, 0, msix_pba_offset, 0, errp); } - ret = msix_init(pci_dev, n->params.msix_qsize, - &n->bar0, 0, msix_table_offset, - &n->bar0, 0, msix_pba_offset, 0, errp); + if (ret == -ENOTSUP) { /* report that msix is not supported, but do not error out */ warn_report_err(*errp); @@ -8309,9 +8340,15 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp) if (pci_is_vf(pci_dev)) { /* * VFs derive settings from the parent. PF's lifespan exceeds - * that of VF's, so it's safe to share params.serial. + * that of VF's. */ memcpy(&n->params, &pn->params, sizeof(NvmeParams)); + + /* + * Set PF's serial value to a new string memory to prevent 'serial' + * property object release of PF when a VF is removed from the system. 
+ */ + n->params.serial = g_strdup(pn->params.serial); n->subsys = pn->subsys; } @@ -8412,6 +8449,8 @@ static Property nvme_props[] = { params.sriov_max_vi_per_vf, 0), DEFINE_PROP_UINT8("sriov_max_vq_per_vf", NvmeCtrl, params.sriov_max_vq_per_vf, 0), + DEFINE_PROP_BOOL("msix-exclusive-bar", NvmeCtrl, params.msix_exclusive_bar, + false), DEFINE_PROP_END_OF_LIST(), }; @@ -8466,36 +8505,26 @@ static void nvme_pci_reset(DeviceState *qdev) nvme_ctrl_reset(n, NVME_RESET_FUNCTION); } -static void nvme_sriov_pre_write_ctrl(PCIDevice *dev, uint32_t address, - uint32_t val, int len) +static void nvme_sriov_post_write_config(PCIDevice *dev, uint16_t old_num_vfs) { NvmeCtrl *n = NVME(dev); NvmeSecCtrlEntry *sctrl; - uint16_t sriov_cap = dev->exp.sriov_cap; - uint32_t off = address - sriov_cap; - int i, num_vfs; + int i; - if (!sriov_cap) { - return; - } - - if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) { - if (!(val & PCI_SRIOV_CTRL_VFE)) { - num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF); - for (i = 0; i < num_vfs; i++) { - sctrl = &n->sec_ctrl_list.sec[i]; - nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); - } - } + for (i = pcie_sriov_num_vfs(dev); i < old_num_vfs; i++) { + sctrl = &n->sec_ctrl_list.sec[i]; + nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); } } static void nvme_pci_write_config(PCIDevice *dev, uint32_t address, uint32_t val, int len) { - nvme_sriov_pre_write_ctrl(dev, address, val, len); + uint16_t old_num_vfs = pcie_sriov_num_vfs(dev); + pci_default_write_config(dev, address, val, len); pcie_cap_flr_write_config(dev, address, val, len); + nvme_sriov_post_write_config(dev, old_num_vfs); } static const VMStateDescription nvme_vmstate = { diff --git a/hw/nvme/meson.build b/hw/nvme/meson.build index 1a6a2ca2f30..7d5caa53c28 100644 --- a/hw/nvme/meson.build +++ b/hw/nvme/meson.build @@ -1 +1 @@ -system_ss.add(when: 'CONFIG_NVME_PCI', if_true: files('ctrl.c', 'dif.c', 'ns.c', 'subsys.c')) +system_ss.add(when: 'CONFIG_NVME_PCI', if_true: files('ctrl.c', 'dif.c', 'ns.c', 'subsys.c', 'nguid.c')) \ No newline at end of file diff --git a/hw/nvme/nguid.c b/hw/nvme/nguid.c new file mode 100644 index 00000000000..829832bd9f4 --- /dev/null +++ b/hw/nvme/nguid.c @@ -0,0 +1,187 @@ +/* + * QEMU NVMe NGUID functions + * + * Copyright 2024 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/visitor.h" +#include "qemu/ctype.h" +#include "nvme.h" + +#define NGUID_SEPARATOR '-' + +#define NGUID_VALUE_AUTO "auto" + +#define NGUID_FMT \ + "%02hhx%02hhx%02hhx%02hhx" \ + "%02hhx%02hhx%02hhx%02hhx" \ + "%02hhx%02hhx%02hhx%02hhx" \ + "%02hhx%02hhx%02hhx%02hhx" + +#define NGUID_STR_LEN (2 * NGUID_LEN + 1) + +bool nvme_nguid_is_null(const NvmeNGUID *nguid) +{ + static NvmeNGUID null_nguid; + return memcmp(nguid, &null_nguid, sizeof(NvmeNGUID)) == 0; +} + +static void nvme_nguid_generate(NvmeNGUID *out) +{ + int i; + uint32_t x; + + QEMU_BUILD_BUG_ON((NGUID_LEN % sizeof(x)) != 0); + + for (i = 0; i < NGUID_LEN; i += sizeof(x)) { + x = g_random_int(); + memcpy(&out->data[i], &x, sizeof(x)); + } +} + +/* + * The Linux Kernel typically prints the NGUID of an NVMe namespace using the + * same format as the UUID. For instance: + * + * $ cat /sys/class/block/nvme0n1/nguid + * e9accd3b-8390-4e13-167c-f0593437f57d + * + * When there is no UUID but there is NGUID the Kernel will print the NGUID as + * wwid and it won't use the UUID format: + * + * $ cat /sys/class/block/nvme0n1/wwid + * eui.e9accd3b83904e13167cf0593437f57d + * + * The NGUID has different fields compared to the UUID, so the grouping used in + * the UUID format has no relation with the 3 fields of the NGUID. + * + * This implementation won't expect a strict format as the UUID one and instead + * it will admit any string of hexadecimal digits. Byte groups could be created + * using the '-' separator. The number of bytes needs to be exactly 16 and the + * separator '-' has to be exactly in a byte boundary. The following are + * examples of accepted formats for the NGUID string: + * + * nguid="e9accd3b-8390-4e13-167c-f0593437f57d" + * nguid="e9accd3b83904e13167cf0593437f57d" + * nguid="FEDCBA9876543210-ABCDEF-0123456789" + */ +static bool nvme_nguid_is_valid(const char *str) +{ + int i; + int digit_count = 0; + + for (i = 0; i < strlen(str); i++) { + const char c = str[i]; + if (qemu_isxdigit(c)) { + digit_count++; + continue; + } + if (c == NGUID_SEPARATOR) { + /* + * We need to make sure the separator is in a byte boundary, the + * string does not start with the separator and they are not back to + * back "--". 
+ */ + if ((i > 0) && (str[i - 1] != NGUID_SEPARATOR) && + (digit_count % 2) == 0) { + continue; + } + } + return false; + } + /* + * The string should have the correct byte length and not finish with the + * separator + */ + return (digit_count == (2 * NGUID_LEN)) && (str[i - 1] != NGUID_SEPARATOR); +} + +static int nvme_nguid_parse(const char *str, NvmeNGUID *nguid) +{ + uint8_t *id = &nguid->data[0]; + int ret = 0; + int i; + const char *ptr = str; + + if (!nvme_nguid_is_valid(str)) { + return -1; + } + + for (i = 0; i < NGUID_LEN; i++) { + ret = sscanf(ptr, "%02hhx", &id[i]); + if (ret != 1) { + return -1; + } + ptr += 2; + if (*ptr == NGUID_SEPARATOR) { + ptr++; + } + } + + return 0; +} + +/* + * When converted back to string this implementation will use a raw hex number + * with no separators, for instance: + * + * "e9accd3b83904e13167cf0593437f57d" + */ +static void nvme_nguid_stringify(const NvmeNGUID *nguid, char *out) +{ + const uint8_t *id = &nguid->data[0]; + snprintf(out, NGUID_STR_LEN, NGUID_FMT, + id[0], id[1], id[2], id[3], id[4], id[5], id[6], id[7], + id[8], id[9], id[10], id[11], id[12], id[13], id[14], id[15]); +} + +static void get_nguid(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + NvmeNGUID *nguid = object_field_prop_ptr(obj, prop); + char buffer[NGUID_STR_LEN]; + char *p = buffer; + + nvme_nguid_stringify(nguid, buffer); + + visit_type_str(v, name, &p, errp); +} + +static void set_nguid(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + NvmeNGUID *nguid = object_field_prop_ptr(obj, prop); + char *str; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + if (!strcmp(str, NGUID_VALUE_AUTO)) { + nvme_nguid_generate(nguid); + } else if (nvme_nguid_parse(str, nguid) < 0) { + error_set_from_qdev_prop_error(errp, EINVAL, obj, name, str); + } + g_free(str); +} + +const PropertyInfo qdev_prop_nguid = { + .name = "str", + .description = + "NGUID or \"" NGUID_VALUE_AUTO "\" for random value", + .get = get_nguid, + .set = set_nguid, +}; diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c index 0eabcf5cf50..ea8db175dbd 100644 --- a/hw/nvme/ns.c +++ b/hw/nvme/ns.c @@ -89,6 +89,7 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp) id_ns->mcl = cpu_to_le32(ns->params.mcl); id_ns->msrc = ns->params.msrc; id_ns->eui64 = cpu_to_be64(ns->params.eui64); + memcpy(&id_ns->nguid, &ns->params.nguid.data, sizeof(id_ns->nguid)); ds = 31 - clz32(ns->blkconf.logical_block_size); ms = ns->params.ms; @@ -797,6 +798,7 @@ static Property nvme_ns_props[] = { DEFINE_PROP_BOOL("shared", NvmeNamespace, params.shared, true), DEFINE_PROP_UINT32("nsid", NvmeNamespace, params.nsid, 0), DEFINE_PROP_UUID_NODEFAULT("uuid", NvmeNamespace, params.uuid), + DEFINE_PROP_NGUID_NODEFAULT("nguid", NvmeNamespace, params.nguid), DEFINE_PROP_UINT64("eui64", NvmeNamespace, params.eui64, 0), DEFINE_PROP_UINT16("ms", NvmeNamespace, params.ms, 0), DEFINE_PROP_UINT8("mset", NvmeNamespace, params.mset, 0), diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h index 5f2ae7b28b9..bed8191bd5f 100644 --- a/hw/nvme/nvme.h +++ b/hw/nvme/nvme.h @@ -171,13 +171,27 @@ static const uint8_t nvme_fdp_evf_shifts[FDP_EVT_MAX] = { [FDP_EVT_RUH_IMPLICIT_RU_CHANGE] = 33, }; +#define NGUID_LEN 16 + +typedef struct { + uint8_t data[NGUID_LEN]; +} NvmeNGUID; + +bool nvme_nguid_is_null(const NvmeNGUID *nguid); + +extern const PropertyInfo qdev_prop_nguid; + +#define DEFINE_PROP_NGUID_NODEFAULT(_name, _state, _field) \ + 
DEFINE_PROP(_name, _state, _field, qdev_prop_nguid, NvmeNGUID) + typedef struct NvmeNamespaceParams { - bool detached; - bool shared; - uint32_t nsid; - QemuUUID uuid; - uint64_t eui64; - bool eui64_default; + bool detached; + bool shared; + uint32_t nsid; + QemuUUID uuid; + NvmeNGUID nguid; + uint64_t eui64; + bool eui64_default; uint16_t ms; uint8_t mset; @@ -522,6 +536,7 @@ typedef struct NvmeParams { uint16_t sriov_vi_flexible; uint8_t sriov_max_vq_per_vf; uint8_t sriov_max_vi_per_vf; + bool msix_exclusive_bar; } NvmeParams; typedef struct NvmeCtrl { diff --git a/hw/nvram/ds1225y.c b/hw/nvram/ds1225y.c index 3660a47c51c..6d510dcc686 100644 --- a/hw/nvram/ds1225y.c +++ b/hw/nvram/ds1225y.c @@ -102,7 +102,7 @@ static const VMStateDescription vmstate_nvram = { .version_id = 0, .minimum_version_id = 0, .post_load = nvram_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(contents, NvRamState, chip_size, 0, vmstate_info_uint8, uint8_t), VMSTATE_END_OF_LIST() diff --git a/hw/nvram/eeprom93xx.c b/hw/nvram/eeprom93xx.c index 57d63638d77..a8fd60a8fb5 100644 --- a/hw/nvram/eeprom93xx.c +++ b/hw/nvram/eeprom93xx.c @@ -131,7 +131,7 @@ static const VMStateDescription vmstate_eeprom = { .name = "eeprom", .version_id = EEPROM_VERSION, .minimum_version_id = OLD_EEPROM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(tick, eeprom_t), VMSTATE_UINT8(address, eeprom_t), VMSTATE_UINT8(command, eeprom_t), diff --git a/hw/nvram/fw_cfg-acpi.c b/hw/nvram/fw_cfg-acpi.c new file mode 100644 index 00000000000..58cdcd3121c --- /dev/null +++ b/hw/nvram/fw_cfg-acpi.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Add fw_cfg device in DSDT + * + */ + +#include "qemu/osdep.h" +#include "hw/nvram/fw_cfg_acpi.h" +#include "hw/acpi/aml-build.h" + +void fw_cfg_acpi_dsdt_add(Aml *scope, const MemMapEntry *fw_cfg_memmap) +{ + Aml *dev = aml_device("FWCF"); + aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base, + fw_cfg_memmap->size, AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 4e4524673a3..fc0263f3491 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -27,6 +27,7 @@ #include "sysemu/sysemu.h" #include "sysemu/dma.h" #include "sysemu/reset.h" +#include "exec/address-spaces.h" #include "hw/boards.h" #include "hw/nvram/fw_cfg.h" #include "hw/qdev-properties.h" @@ -656,7 +657,7 @@ static int fw_cfg_acpi_mr_restore_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_fw_cfg_dma = { .name = "fw_cfg/dma", .needed = fw_cfg_dma_enabled, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(dma_addr, FWCfgState), VMSTATE_END_OF_LIST() }, @@ -668,7 +669,7 @@ static const VMStateDescription vmstate_fw_cfg_acpi_mr = { .minimum_version_id = 1, .needed = fw_cfg_acpi_mr_restore, .post_load = fw_cfg_acpi_mr_restore_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(table_mr_size, FWCfgState), VMSTATE_UINT64(linker_mr_size, FWCfgState), VMSTATE_UINT64(rsdp_mr_size, FWCfgState), @@ -680,13 +681,13 @@ static const VMStateDescription vmstate_fw_cfg = { 
.name = "fw_cfg", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(cur_entry, FWCfgState), VMSTATE_UINT16_HACK(cur_offset, FWCfgState, is_version_1), VMSTATE_UINT32_V(cur_offset, FWCfgState, 2), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fw_cfg_dma, &vmstate_fw_cfg_acpi_mr, NULL, @@ -1142,6 +1143,7 @@ FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase, SysBusDevice *sbd; FWCfgIoState *ios; FWCfgState *s; + MemoryRegion *iomem = get_system_io(); bool dma_requested = dma_iobase && dma_as; dev = qdev_new(TYPE_FW_CFG_IO); @@ -1155,7 +1157,7 @@ FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase, sbd = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sbd, &error_fatal); ios = FW_CFG_IO(dev); - sysbus_add_io(sbd, iobase, &ios->comb_iomem); + memory_region_add_subregion(iomem, iobase, &ios->comb_iomem); s = FW_CFG(dev); @@ -1163,7 +1165,7 @@ FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase, /* 64 bits for the address field */ s->dma_as = dma_as; s->dma_addr = 0; - sysbus_add_io(sbd, dma_iobase, &s->dma_iomem); + memory_region_add_subregion(iomem, dma_iobase, &s->dma_iomem); } return s; diff --git a/hw/nvram/mac_nvram.c b/hw/nvram/mac_nvram.c index 810e84f07e4..fe9df9fa35b 100644 --- a/hw/nvram/mac_nvram.c +++ b/hw/nvram/mac_nvram.c @@ -33,6 +33,7 @@ #include "migration/vmstate.h" #include "qemu/cutils.h" #include "qemu/module.h" +#include "qemu/error-report.h" #include "trace.h" #include @@ -48,7 +49,10 @@ static void macio_nvram_writeb(void *opaque, hwaddr addr, trace_macio_nvram_write(addr, value); s->data[addr] = value; if (s->blk) { - blk_pwrite(s->blk, addr, 1, &s->data[addr], 0); + if (blk_pwrite(s->blk, addr, 1, &s->data[addr], 0) < 0) { + error_report("%s: write of NVRAM data to backing store failed", + blk_name(s->blk)); + } } } @@ -79,7 +83,7 @@ static const VMStateDescription vmstate_macio_nvram = { .name = "macio_nvram", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VBUFFER_UINT32(data, MacIONVRAMState, 0, NULL, size), VMSTATE_END_OF_LIST() } diff --git a/hw/nvram/meson.build b/hw/nvram/meson.build index 75e415b1a01..4996c72456f 100644 --- a/hw/nvram/meson.build +++ b/hw/nvram/meson.build @@ -17,3 +17,4 @@ system_ss.add(when: 'CONFIG_XLNX_EFUSE_ZYNQMP', if_true: files( system_ss.add(when: 'CONFIG_XLNX_BBRAM', if_true: files('xlnx-bbram.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_nvram.c')) +specific_ss.add(when: 'CONFIG_ACPI', if_true: files('fw_cfg-acpi.c')) diff --git a/hw/nvram/npcm7xx_otp.c b/hw/nvram/npcm7xx_otp.c index c61f2fc1aa2..f00ebfa931e 100644 --- a/hw/nvram/npcm7xx_otp.c +++ b/hw/nvram/npcm7xx_otp.c @@ -384,7 +384,7 @@ static const VMStateDescription vmstate_npcm7xx_otp = { .name = "npcm7xx-otp", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, NPCM7xxOTPState, NPCM7XX_OTP_NR_REGS), VMSTATE_UINT8_ARRAY(array, NPCM7xxOTPState, NPCM7XX_OTP_ARRAY_BYTES), VMSTATE_END_OF_LIST(), diff --git a/hw/nvram/nrf51_nvm.c b/hw/nvram/nrf51_nvm.c index 7f1db8c4239..73564f7e6ea 100644 --- a/hw/nvram/nrf51_nvm.c +++ b/hw/nvram/nrf51_nvm.c @@ -336,12 +336,9 @@ static void nrf51_nvm_init(Object *obj) static void nrf51_nvm_realize(DeviceState *dev, Error **errp) { NRF51NVMState *s = 
NRF51_NVM(dev); - Error *err = NULL; - memory_region_init_rom_device(&s->flash, OBJECT(dev), &flash_ops, s, - "nrf51_soc.flash", s->flash_size, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom_device(&s->flash, OBJECT(dev), &flash_ops, s, + "nrf51_soc.flash", s->flash_size, errp)) { return; } @@ -366,7 +363,7 @@ static const VMStateDescription vmstate_nvm = { .name = "nrf51_soc.nvm", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(uicr_content, NRF51NVMState, NRF51_UICR_FIXTURE_SIZE), VMSTATE_UINT32(config, NRF51NVMState), diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c index 2d72f304422..bfd8aa367e1 100644 --- a/hw/nvram/spapr_nvram.c +++ b/hw/nvram/spapr_nvram.c @@ -245,7 +245,7 @@ static const VMStateDescription vmstate_spapr_nvram = { .minimum_version_id = 1, .pre_load = spapr_nvram_pre_load, .post_load = spapr_nvram_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(size, SpaprNvram), VMSTATE_VBUFFER_ALLOC_UINT32(buf, SpaprNvram, 1, NULL, size), VMSTATE_END_OF_LIST() diff --git a/hw/nvram/xlnx-bbram.c b/hw/nvram/xlnx-bbram.c index e18e7770e1e..0a71a005c69 100644 --- a/hw/nvram/xlnx-bbram.c +++ b/hw/nvram/xlnx-bbram.c @@ -508,7 +508,7 @@ static const VMStateDescription vmstate_bbram_ctrl = { .name = TYPE_XLNX_BBRAM, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxBBRam, R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/nvram/xlnx-versal-efuse-ctrl.c b/hw/nvram/xlnx-versal-efuse-ctrl.c index 2480af35e1b..e4b9e11a3dd 100644 --- a/hw/nvram/xlnx-versal-efuse-ctrl.c +++ b/hw/nvram/xlnx-versal-efuse-ctrl.c @@ -737,7 +737,7 @@ static const VMStateDescription vmstate_efuse_ctrl = { .name = TYPE_XLNX_VERSAL_EFUSE_CTRL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxVersalEFuseCtrl, R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/nvram/xlnx-zynqmp-efuse.c b/hw/nvram/xlnx-zynqmp-efuse.c index 3db5f98ec1a..ec98456e5d1 100644 --- a/hw/nvram/xlnx-zynqmp-efuse.c +++ b/hw/nvram/xlnx-zynqmp-efuse.c @@ -821,7 +821,7 @@ static const VMStateDescription vmstate_efuse = { .name = TYPE_XLNX_ZYNQMP_EFUSE, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPEFuse, R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/openrisc/cputimer.c b/hw/openrisc/cputimer.c index 10163b391b2..835986c4dbe 100644 --- a/hw/openrisc/cputimer.c +++ b/hw/openrisc/cputimer.c @@ -145,7 +145,7 @@ static const VMStateDescription vmstate_or1k_timer = { .name = "or1k_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ttcr, OR1KTimerState), VMSTATE_UINT64(last_clk, OR1KTimerState), VMSTATE_END_OF_LIST() diff --git a/hw/openrisc/openrisc_sim.c b/hw/openrisc/openrisc_sim.c index 35da123aef4..bffd6f721f7 100644 --- a/hw/openrisc/openrisc_sim.c +++ b/hw/openrisc/openrisc_sim.c @@ -170,7 +170,7 @@ static void openrisc_create_fdt(Or1ksimState *state, static void openrisc_sim_net_init(Or1ksimState *state, hwaddr base, hwaddr size, int num_cpus, OpenRISCCPU *cpus[], - int irq_pin, NICInfo *nd) + int irq_pin) { void *fdt = state->fdt; DeviceState *dev; @@ -178,8 +178,10 @@ static void openrisc_sim_net_init(Or1ksimState 
*state, hwaddr base, hwaddr size, char *nodename; int i; - dev = qdev_new("open_eth"); - qdev_set_nic_properties(dev, nd); + dev = qemu_create_nic_device("open_eth", true, NULL); + if (!dev) { + return; + } s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); @@ -313,12 +315,10 @@ static void openrisc_sim_init(MachineState *machine) openrisc_create_fdt(state, or1ksim_memmap, smp_cpus, machine->ram_size, machine->kernel_cmdline); - if (nd_table[0].used) { - openrisc_sim_net_init(state, or1ksim_memmap[OR1KSIM_ETHOC].base, - or1ksim_memmap[OR1KSIM_ETHOC].size, - smp_cpus, cpus, - OR1KSIM_ETHOC_IRQ, nd_table); - } + openrisc_sim_net_init(state, or1ksim_memmap[OR1KSIM_ETHOC].base, + or1ksim_memmap[OR1KSIM_ETHOC].size, + smp_cpus, cpus, + OR1KSIM_ETHOC_IRQ); if (smp_cpus > 1) { openrisc_sim_ompic_init(state, or1ksim_memmap[OR1KSIM_OMPIC].base, diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c index 405a133eef6..742da07a015 100644 --- a/hw/pci-bridge/cxl_downstream.c +++ b/hw/pci-bridge/cxl_downstream.c @@ -109,9 +109,9 @@ static void build_dvsecs(CXLComponentState *cxl) .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */ }; cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT, - PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0, + PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH, PCIE_FLEXBUS_PORT_DVSEC, - PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec); + PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec); dvsec = (uint8_t *)&(CXLDVSECPortGPF){ .rsvd = 0, diff --git a/hw/pci-bridge/cxl_root_port.c b/hw/pci-bridge/cxl_root_port.c index 8f976976318..8a30da602cc 100644 --- a/hw/pci-bridge/cxl_root_port.c +++ b/hw/pci-bridge/cxl_root_port.c @@ -129,9 +129,9 @@ static void build_dvsecs(CXLComponentState *cxl) .rcvd_mod_ts_data_phase1 = 0xef, }; cxl_component_create_dvsec(cxl, CXL2_ROOT_PORT, - PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0, + PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH, PCIE_FLEXBUS_PORT_DVSEC, - PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec); + PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec); dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){ .rsvd = 0, @@ -175,7 +175,7 @@ static void cxl_rp_realize(DeviceState *dev, Error **errp) cxl_cstate->dvsec_offset = CXL_ROOT_PORT_DVSEC_OFFSET; cxl_cstate->pdev = pci_dev; - build_dvsecs(&crp->cxl_cstate); + build_dvsecs(cxl_cstate); cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate, TYPE_CXL_ROOT_PORT); diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c index 36737189c62..783fa6adac1 100644 --- a/hw/pci-bridge/cxl_upstream.c +++ b/hw/pci-bridge/cxl_upstream.c @@ -121,9 +121,9 @@ static void build_dvsecs(CXLComponentState *cxl) .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? 
*/ }; cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT, - PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0, + PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH, PCIE_FLEXBUS_PORT_DVSEC, - PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec); + PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec); dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){ .rsvd = 0, @@ -192,8 +192,8 @@ enum { static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv) { - g_autofree CDATSslbis *sslbis_latency = NULL; - g_autofree CDATSslbis *sslbis_bandwidth = NULL; + CDATSslbis *sslbis_latency; + CDATSslbis *sslbis_bandwidth; CXLUpstreamPort *us = CXL_USP(priv); PCIBus *bus = &PCI_BRIDGE(us)->sec_bus; int devfn, sslbis_size, i; @@ -228,9 +228,6 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv) sslbis_size = sizeof(CDATSslbis) + sizeof(*sslbis_latency->sslbe) * count; sslbis_latency = g_malloc(sslbis_size); - if (!sslbis_latency) { - return -ENOMEM; - } *sslbis_latency = (CDATSslbis) { .sslbis_header = { .header = { @@ -251,9 +248,6 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv) } sslbis_bandwidth = g_malloc(sslbis_size); - if (!sslbis_bandwidth) { - return 0; - } *sslbis_bandwidth = (CDATSslbis) { .sslbis_header = { .header = { @@ -276,8 +270,8 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv) *cdat_table = g_new0(CDATSubHeader *, CXL_USP_CDAT_NUM_ENTRIES); /* Header always at start of structure */ - (*cdat_table)[CXL_USP_CDAT_SSLBIS_LAT] = g_steal_pointer(&sslbis_latency); - (*cdat_table)[CXL_USP_CDAT_SSLBIS_BW] = g_steal_pointer(&sslbis_bandwidth); + (*cdat_table)[CXL_USP_CDAT_SSLBIS_LAT] = (CDATSubHeader *)sslbis_latency; + (*cdat_table)[CXL_USP_CDAT_SSLBIS_BW] = (CDATSubHeader *)sslbis_bandwidth; return CXL_USP_CDAT_NUM_ENTRIES; } @@ -295,6 +289,7 @@ static void free_default_cdat_table(CDATSubHeader **cdat_table, int num, static void cxl_usp_realize(PCIDevice *d, Error **errp) { + ERRP_GUARD(); PCIEPort *p = PCIE_PORT(d); CXLUpstreamPort *usp = CXL_USP(d); CXLComponentState *cxl_cstate = &usp->cxl_cstate; diff --git a/hw/pci-bridge/gen_pcie_root_port.c b/hw/pci-bridge/gen_pcie_root_port.c index 1ce4e7bebae..784507c826b 100644 --- a/hw/pci-bridge/gen_pcie_root_port.c +++ b/hw/pci-bridge/gen_pcie_root_port.c @@ -117,7 +117,7 @@ static const VMStateDescription vmstate_rp_dev = { .version_id = 1, .minimum_version_id = 1, .post_load = pcie_cap_slot_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log, PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog), diff --git a/hw/pci-bridge/i82801b11.c b/hw/pci-bridge/i82801b11.c index 0e83cd11b2c..c140919cbc5 100644 --- a/hw/pci-bridge/i82801b11.c +++ b/hw/pci-bridge/i82801b11.c @@ -81,7 +81,7 @@ static void i82801b11_bridge_realize(PCIDevice *d, Error **errp) static const VMStateDescription i82801b11_bridge_dev_vmstate = { .name = "i82801b11_bridge", .priority = MIG_PRI_PCI_BUS, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), VMSTATE_END_OF_LIST() } diff --git a/hw/pci-bridge/ioh3420.c b/hw/pci-bridge/ioh3420.c index f1e16135a32..be752a4bda5 100644 --- a/hw/pci-bridge/ioh3420.c +++ b/hw/pci-bridge/ioh3420.c @@ -88,7 +88,7 @@ static const VMStateDescription vmstate_ioh3420 = { .version_id = 1, .minimum_version_id = 1, .post_load = pcie_cap_slot_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log, PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog), diff --git a/hw/pci-bridge/meson.build b/hw/pci-bridge/meson.build index 6d5ad9f37b2..f2a60434dda 100644 --- a/hw/pci-bridge/meson.build +++ b/hw/pci-bridge/meson.build @@ -13,5 +13,3 @@ pci_ss.add(when: 'CONFIG_CXL', if_true: files('cxl_root_port.c', 'cxl_upstream.c pci_ss.add(when: 'CONFIG_SIMBA', if_true: files('simba.c')) system_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) - -system_ss.add(when: 'CONFIG_ALL', if_true: files('pci_expander_bridge_stubs.c')) diff --git a/hw/pci-bridge/pci_bridge_dev.c b/hw/pci-bridge/pci_bridge_dev.c index 4b2696ea7ff..089f91efed4 100644 --- a/hw/pci-bridge/pci_bridge_dev.c +++ b/hw/pci-bridge/pci_bridge_dev.c @@ -199,7 +199,7 @@ static bool pci_device_shpc_present(void *opaque, int version_id) static const VMStateDescription pci_bridge_dev_vmstate = { .name = "pci_bridge", .priority = MIG_PRI_PCI_BUS, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), SHPC_VMSTATE(shpc, PCIDevice, pci_device_shpc_present), VMSTATE_END_OF_LIST() diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index 535889f7c23..0411ad31ea4 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -290,7 +290,7 @@ static void pxb_cxl_dev_reset(DeviceState *dev) uint32_t *write_msk = cxl_cstate->crb.cache_mem_regs_write_mask; int dsp_count = 0; - cxl_component_register_init_common(reg_state, write_msk, CXL2_ROOT_PORT); + cxl_component_register_init_common(reg_state, write_msk, CXL2_RC); /* * The CXL specification allows for host bridges with no HDM decoders * if they only have a single root port. 
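A recurring mechanical change in the hunks above (and throughout this series) is constifying the compound literals used for VMState descriptors: ".fields" arrays become "(const VMStateField[])" and ".subsections" lists become "(const VMStateDescription * const [])", which allows these tables to be emitted as read-only data. A minimal sketch of the resulting shape, using hypothetical vmstate_foo/FooState/reg names that are not part of this series:

static const VMStateDescription vmstate_foo = {
    .name = "foo",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        /* "reg" inside a hypothetical FooState, purely for illustration */
        VMSTATE_UINT32(reg, FooState),
        VMSTATE_END_OF_LIST()
    },
};

A device that migrates optional state would additionally list its subsection descriptors in a const ".subsections" array, as vmstate_fw_cfg does in the hunk above.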
diff --git a/hw/pci-bridge/pcie_pci_bridge.c b/hw/pci-bridge/pcie_pci_bridge.c index 2301b2ca0b0..7646ac23975 100644 --- a/hw/pci-bridge/pcie_pci_bridge.c +++ b/hw/pci-bridge/pcie_pci_bridge.c @@ -132,7 +132,7 @@ static Property pcie_pci_bridge_dev_properties[] = { static const VMStateDescription pcie_pci_bridge_dev_vmstate = { .name = TYPE_PCIE_PCI_BRIDGE_DEV, .priority = MIG_PRI_PCI_BUS, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), SHPC_VMSTATE(shpc, PCIDevice, NULL), VMSTATE_END_OF_LIST() diff --git a/hw/pci-bridge/xio3130_downstream.c b/hw/pci-bridge/xio3130_downstream.c index 38a2361fa2a..907d5105b01 100644 --- a/hw/pci-bridge/xio3130_downstream.c +++ b/hw/pci-bridge/xio3130_downstream.c @@ -146,7 +146,7 @@ static const VMStateDescription vmstate_xio3130_downstream = { .version_id = 1, .minimum_version_id = 1, .post_load = pcie_cap_slot_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log, PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog), diff --git a/hw/pci-bridge/xio3130_upstream.c b/hw/pci-bridge/xio3130_upstream.c index a48bfe3bc54..2a6cff6e033 100644 --- a/hw/pci-bridge/xio3130_upstream.c +++ b/hw/pci-bridge/xio3130_upstream.c @@ -115,7 +115,7 @@ static const VMStateDescription vmstate_xio3130_upstream = { .priority = MIG_PRI_PCI_BUS, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj.parent_obj, PCIEPort), VMSTATE_STRUCT(parent_obj.parent_obj.exp.aer_log, PCIEPort, 0, vmstate_pcie_aer_log, PCIEAERLog), diff --git a/hw/pci-host/Kconfig b/hw/pci-host/Kconfig index f046d76a687..c91880b2370 100644 --- a/hw/pci-host/Kconfig +++ b/hw/pci-host/Kconfig @@ -6,6 +6,14 @@ config XEN_IGD_PASSTHROUGH default y depends on XEN && PCI_I440FX +config PPC4XX_PCI + bool + select PCI + +config PPC440_PCIX + bool + select PCI + config RAVEN_PCI bool select PCI diff --git a/hw/pci-host/astro.c b/hw/pci-host/astro.c index cb2c8a828de..e3e589ceacc 100644 --- a/hw/pci-host/astro.c +++ b/hw/pci-host/astro.c @@ -122,10 +122,6 @@ static MemTxResult elroy_chip_read_with_attrs(void *opaque, hwaddr addr, case 0x0800: /* IOSAPIC_REG_SELECT */ val = s->iosapic_reg_select; break; - case 0x0808: - val = UINT64_MAX; /* XXX: tbc. 
*/ - g_assert_not_reached(); - break; case 0x0810: /* IOSAPIC_REG_WINDOW */ switch (s->iosapic_reg_select) { case 0x01: /* IOSAPIC_REG_VERSION */ @@ -135,15 +131,21 @@ static MemTxResult elroy_chip_read_with_attrs(void *opaque, hwaddr addr, if (s->iosapic_reg_select < ARRAY_SIZE(s->iosapic_reg)) { val = s->iosapic_reg[s->iosapic_reg_select]; } else { - trace_iosapic_reg_read(s->iosapic_reg_select, size, val); - g_assert_not_reached(); + goto check_hf; } } trace_iosapic_reg_read(s->iosapic_reg_select, size, val); break; default: - trace_elroy_read(addr, size, val); - g_assert_not_reached(); + check_hf: + if (s->status_control & HF_ENABLE) { + val = 0; + ret = MEMTX_DECODE_ERROR; + } else { + /* return -1ULL if HardFail is disabled */ + val = ~0; + ret = MEMTX_OK; + } } trace_elroy_read(addr, size, val); @@ -191,7 +193,7 @@ static MemTxResult elroy_chip_write_with_attrs(void *opaque, hwaddr addr, if (s->iosapic_reg_select < ARRAY_SIZE(s->iosapic_reg)) { s->iosapic_reg[s->iosapic_reg_select] = val; } else { - g_assert_not_reached(); + goto check_hf; } break; case 0x0840: /* IOSAPIC_REG_EOI */ @@ -204,7 +206,10 @@ static MemTxResult elroy_chip_write_with_attrs(void *opaque, hwaddr addr, } break; default: - g_assert_not_reached(); + check_hf: + if (s->status_control & HF_ENABLE) { + return MEMTX_DECODE_ERROR; + } } return MEMTX_OK; } @@ -464,7 +469,7 @@ static const VMStateDescription vmstate_elroy = { .name = "Elroy", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(hpa, ElroyState), VMSTATE_UINT32(pci_bus_num, ElroyState), VMSTATE_UINT64(config_address, ElroyState), @@ -594,8 +599,8 @@ static MemTxResult astro_chip_read_with_attrs(void *opaque, hwaddr addr, #undef EMPTY_PORT default: - trace_astro_chip_read(addr, size, val); - g_assert_not_reached(); + val = 0; + ret = MEMTX_DECODE_ERROR; } /* for 32-bit accesses mask return value */ @@ -610,6 +615,7 @@ static MemTxResult astro_chip_write_with_attrs(void *opaque, hwaddr addr, uint64_t val, unsigned size, MemTxAttrs attrs) { + MemTxResult ret = MEMTX_OK; AstroState *s = opaque; trace_astro_chip_write(addr, size, val); @@ -686,11 +692,9 @@ static MemTxResult astro_chip_write_with_attrs(void *opaque, hwaddr addr, #undef EMPTY_PORT default: - /* Controlled by astro_chip_mem_valid above. 
*/ - trace_astro_chip_write(addr, size, val); - g_assert_not_reached(); + ret = MEMTX_DECODE_ERROR; } - return MEMTX_OK; + return ret; } static const MemoryRegionOps astro_chip_ops = { @@ -711,7 +715,7 @@ static const VMStateDescription vmstate_astro = { .name = "Astro", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ioc_ctrl, AstroState), VMSTATE_UINT64(ioc_status_ctrl, AstroState), VMSTATE_UINT64_ARRAY(ioc_ranges, AstroState, (0x03d8 - 0x300) / 8), diff --git a/hw/pci-host/bonito.c b/hw/pci-host/bonito.c index bab661f3ce1..1f0c4353484 100644 --- a/hw/pci-host/bonito.c +++ b/hw/pci-host/bonito.c @@ -619,7 +619,7 @@ static const VMStateDescription vmstate_bonito = { .name = "Bonito", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCIBonitoState), VMSTATE_END_OF_LIST() } diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c index f016f021095..c25d50f1c6b 100644 --- a/hw/pci-host/designware.c +++ b/hw/pci-host/designware.c @@ -531,7 +531,7 @@ static const VMStateDescription vmstate_designware_pcie_msi_bank = { .name = "designware-pcie-msi-bank", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(enable, DesignwarePCIEMSIBank), VMSTATE_UINT32(mask, DesignwarePCIEMSIBank), VMSTATE_UINT32(status, DesignwarePCIEMSIBank), @@ -543,7 +543,7 @@ static const VMStateDescription vmstate_designware_pcie_msi = { .name = "designware-pcie-msi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(base, DesignwarePCIEMSI), VMSTATE_STRUCT_ARRAY(intr, DesignwarePCIEMSI, @@ -559,7 +559,7 @@ static const VMStateDescription vmstate_designware_pcie_viewport = { .name = "designware-pcie-viewport", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(base, DesignwarePCIEViewport), VMSTATE_UINT64(target, DesignwarePCIEViewport), VMSTATE_UINT32(limit, DesignwarePCIEViewport), @@ -572,7 +572,7 @@ static const VMStateDescription vmstate_designware_pcie_root = { .name = "designware-pcie-root", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), VMSTATE_UINT32(atu_viewport, DesignwarePCIERoot), VMSTATE_STRUCT_2DARRAY(viewports, @@ -720,7 +720,7 @@ static const VMStateDescription vmstate_designware_pcie_host = { .name = "designware-pcie-host", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(root, DesignwarePCIEHost, 1, diff --git a/hw/pci-host/dino.c b/hw/pci-host/dino.c index 5b0947a16c9..d992c4bb69d 100644 --- a/hw/pci-host/dino.c +++ b/hw/pci-host/dino.c @@ -287,7 +287,7 @@ static const VMStateDescription vmstate_dino = { .name = "Dino", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(iar0, DinoState), VMSTATE_UINT32(iar1, DinoState), VMSTATE_UINT32(imr, DinoState), diff --git a/hw/pci-host/gpex-acpi.c b/hw/pci-host/gpex-acpi.c index 1092dc3b708..f69413ea2c3 100644 --- a/hw/pci-host/gpex-acpi.c +++ b/hw/pci-host/gpex-acpi.c @@ -281,3 +281,16 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) crs_range_set_free(&crs_range_set); } + +void acpi_dsdt_add_gpex_host(Aml 
*scope, uint32_t irq) +{ + bool ambig; + Object *obj = object_resolve_path_type("", TYPE_GPEX_HOST, &ambig); + + if (!obj || ambig) { + return; + } + + GPEX_HOST(obj)->gpex_cfg.irq = irq; + acpi_dsdt_add_gpex(scope, &GPEX_HOST(obj)->gpex_cfg); +} diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c index a6752fac5e8..e9cf455bf52 100644 --- a/hw/pci-host/gpex.c +++ b/hw/pci-host/gpex.c @@ -154,6 +154,18 @@ static Property gpex_host_properties[] = { */ DEFINE_PROP_BOOL("allow-unmapped-accesses", GPEXHost, allow_unmapped_accesses, true), + DEFINE_PROP_UINT64(PCI_HOST_ECAM_BASE, GPEXHost, gpex_cfg.ecam.base, 0), + DEFINE_PROP_SIZE(PCI_HOST_ECAM_SIZE, GPEXHost, gpex_cfg.ecam.size, 0), + DEFINE_PROP_UINT64(PCI_HOST_PIO_BASE, GPEXHost, gpex_cfg.pio.base, 0), + DEFINE_PROP_SIZE(PCI_HOST_PIO_SIZE, GPEXHost, gpex_cfg.pio.size, 0), + DEFINE_PROP_UINT64(PCI_HOST_BELOW_4G_MMIO_BASE, GPEXHost, + gpex_cfg.mmio32.base, 0), + DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MMIO_SIZE, GPEXHost, + gpex_cfg.mmio32.size, 0), + DEFINE_PROP_UINT64(PCI_HOST_ABOVE_4G_MMIO_BASE, GPEXHost, + gpex_cfg.mmio64.base, 0), + DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MMIO_SIZE, GPEXHost, + gpex_cfg.mmio64.size, 0), DEFINE_PROP_END_OF_LIST(), }; @@ -195,7 +207,7 @@ static const VMStateDescription vmstate_gpex_root = { .name = "gpex_root", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, GPEXRootState), VMSTATE_END_OF_LIST() } diff --git a/hw/pci-host/gt64120.c b/hw/pci-host/gt64120.c index 143bf053d71..e02efc9e2ea 100644 --- a/hw/pci-host/gt64120.c +++ b/hw/pci-host/gt64120.c @@ -431,7 +431,7 @@ static const VMStateDescription vmstate_gt64120 = { .version_id = 1, .minimum_version_id = 1, .post_load = gt64120_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, GT64120State, GT_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/pci-host/i440fx.c b/hw/pci-host/i440fx.c index 653cc3f1495..4f0a0438d77 100644 --- a/hw/pci-host/i440fx.c +++ b/hw/pci-host/i440fx.c @@ -125,7 +125,7 @@ static const VMStateDescription vmstate_i440fx = { .version_id = 3, .minimum_version_id = 3, .post_load = i440fx_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCII440FXState), /* Used to be smm_enabled, which was basically always zero because * SeaBIOS hardly uses SMM. SMRAM is now handled by CPU code. 
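The Astro/Elroy changes above move away from g_assert_not_reached() for guest accesses to unimplemented registers: the MMIO callbacks now return a MemTxResult, reporting MEMTX_DECODE_ERROR when HardFail is enabled and otherwise completing the access with an all-ones value. A stripped-down read handler in that style is sketched below; FooState, FOO_HF_ENABLE and the register offset are placeholders, not code from this series:

#include "qemu/osdep.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"

#define FOO_HF_ENABLE (1u << 8)        /* hypothetical HardFail-enable bit */

typedef struct FooState {
    uint64_t status_control;
    uint64_t reg_select;
} FooState;

static MemTxResult foo_chip_read_with_attrs(void *opaque, hwaddr addr,
                                            uint64_t *data, unsigned size,
                                            MemTxAttrs attrs)
{
    FooState *s = opaque;
    MemTxResult ret = MEMTX_OK;
    uint64_t val;

    switch (addr) {
    case 0x0800:                       /* an implemented register */
        val = s->reg_select;
        break;
    default:                           /* unimplemented register: no assert */
        if (s->status_control & FOO_HF_ENABLE) {
            val = 0;
            ret = MEMTX_DECODE_ERROR;  /* HardFail: surface the bad access */
        } else {
            val = ~0;                  /* HardFail off: read back as -1 */
        }
    }

    *data = val;
    return ret;
}

Wiring this up only requires pointing MemoryRegionOps.read_with_attrs at such a handler instead of using the plain .read callback, which is what the existing astro/elroy MemoryRegionOps in the hunks above already do.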
diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build index 36d5ab756fa..3001e93a437 100644 --- a/hw/pci-host/meson.build +++ b/hw/pci-host/meson.build @@ -14,6 +14,8 @@ pci_ss.add(when: 'CONFIG_REMOTE_PCIHOST', if_true: files('remote.c')) pci_ss.add(when: 'CONFIG_SH_PCI', if_true: files('sh_pci.c')) # PPC devices +pci_ss.add(when: 'CONFIG_PPC4XX_PCI', if_true: files('ppc4xx_pci.c')) +pci_ss.add(when: 'CONFIG_PPC440_PCIX', if_true: files('ppc440_pcix.c')) pci_ss.add(when: 'CONFIG_RAVEN_PCI', if_true: files('raven.c')) pci_ss.add(when: 'CONFIG_GRACKLE_PCI', if_true: files('grackle.c')) # NewWorld PowerMac diff --git a/hw/ppc/ppc440_pcix.c b/hw/pci-host/ppc440_pcix.c similarity index 97% rename from hw/ppc/ppc440_pcix.c rename to hw/pci-host/ppc440_pcix.c index df4ee374d04..ef212d99aaf 100644 --- a/hw/ppc/ppc440_pcix.c +++ b/hw/pci-host/ppc440_pcix.c @@ -25,8 +25,7 @@ #include "qemu/module.h" #include "qemu/units.h" #include "hw/irq.h" -#include "hw/ppc/ppc.h" -#include "hw/ppc/ppc4xx.h" +#include "hw/pci-host/ppc4xx.h" #include "hw/pci/pci_device.h" #include "hw/pci/pci_host.h" #include "trace.h" @@ -53,7 +52,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(PPC440PCIXState, PPC440_PCIX_HOST) struct PPC440PCIXState { PCIHostState parent_obj; - PCIDevice *dev; + uint8_t config[PCI_CONFIG_SPACE_SIZE]; struct PLBOutMap pom[PPC440_PCIX_NR_POMS]; struct PLBInMap pim[PPC440_PCIX_NR_PIMS]; uint32_t sts; @@ -172,7 +171,7 @@ static void ppc440_pcix_reg_write4(void *opaque, hwaddr addr, trace_ppc440_pcix_reg_write(addr, val, size); switch (addr) { case PCI_VENDOR_ID ... PCI_MAX_LAT: - stl_le_p(s->dev->config + addr, val); + stl_le_p(s->config + addr, val); break; case PCIX0_POM0LAL: @@ -303,7 +302,7 @@ static uint64_t ppc440_pcix_reg_read4(void *opaque, hwaddr addr, switch (addr) { case PCI_VENDOR_ID ... 
PCI_MAX_LAT: - val = ldl_le_p(s->dev->config + addr); + val = ldl_le_p(s->config + addr); break; case PCIX0_POM0LAL: @@ -499,10 +498,7 @@ static void ppc440_pcix_realize(DeviceState *dev, Error **errp) memory_region_init(&s->iomem, OBJECT(dev), "pci-io", 64 * KiB); h->bus = pci_register_root_bus(dev, NULL, ppc440_pcix_set_irq, ppc440_pcix_map_irq, &s->irq, &s->busmem, &s->iomem, - PCI_DEVFN(0, 0), 1, TYPE_PCI_BUS); - - s->dev = pci_create_simple(h->bus, PCI_DEVFN(0, 0), - TYPE_PPC4xx_HOST_BRIDGE); + PCI_DEVFN(1, 0), 1, TYPE_PCI_BUS); memory_region_init(&s->bm, OBJECT(s), "bm-ppc440-pcix", UINT64_MAX); memory_region_add_subregion(&s->bm, 0x0, &s->busmem); diff --git a/hw/ppc/ppc4xx_pci.c b/hw/pci-host/ppc4xx_pci.c similarity index 98% rename from hw/ppc/ppc4xx_pci.c rename to hw/pci-host/ppc4xx_pci.c index 66521190086..b6c6c8993c4 100644 --- a/hw/ppc/ppc4xx_pci.c +++ b/hw/pci-host/ppc4xx_pci.c @@ -24,8 +24,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "hw/irq.h" -#include "hw/ppc/ppc.h" -#include "hw/ppc/ppc4xx.h" +#include "hw/pci-host/ppc4xx.h" #include "migration/vmstate.h" #include "qemu/module.h" #include "sysemu/reset.h" @@ -276,7 +275,7 @@ static const VMStateDescription vmstate_pci_master_map = { .name = "pci_master_map", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(la, struct PCIMasterMap), VMSTATE_UINT32(ma, struct PCIMasterMap), VMSTATE_UINT32(pcila, struct PCIMasterMap), @@ -289,7 +288,7 @@ static const VMStateDescription vmstate_pci_target_map = { .name = "pci_target_map", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ms, struct PCITargetMap), VMSTATE_UINT32(la, struct PCITargetMap), VMSTATE_END_OF_LIST() @@ -300,7 +299,7 @@ static const VMStateDescription vmstate_ppc4xx_pci = { .name = "ppc4xx_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(pmm, PPC4xxPCIState, PPC4xx_PCI_NR_PMMS, 1, vmstate_pci_master_map, struct PCIMasterMap), diff --git a/hw/pci-host/ppce500.c b/hw/pci-host/ppce500.c index 453a4e6ed3b..95b983b2b3c 100644 --- a/hw/pci-host/ppce500.c +++ b/hw/pci-host/ppce500.c @@ -5,7 +5,7 @@ * * Author: Yu Liu, * - * This file is derived from hw/ppc4xx_pci.c, + * This file is derived from ppc4xx_pci.c, * the copyright for that material belongs to the original owners. 
* * This is free software; you can redistribute it and/or modify @@ -379,7 +379,7 @@ static const VMStateDescription vmstate_pci_outbound = { .name = "pci_outbound", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(potar, struct pci_outbound), VMSTATE_UINT32(potear, struct pci_outbound), VMSTATE_UINT32(powbar, struct pci_outbound), @@ -392,7 +392,7 @@ static const VMStateDescription vmstate_pci_inbound = { .name = "pci_inbound", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(pitar, struct pci_inbound), VMSTATE_UINT32(piwbar, struct pci_inbound), VMSTATE_UINT32(piwbear, struct pci_inbound), @@ -405,7 +405,7 @@ static const VMStateDescription vmstate_ppce500_pci = { .name = "ppce500_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(pob, PPCE500PCIState, PPCE500_PCI_NR_POBS, 1, vmstate_pci_outbound, struct pci_outbound), VMSTATE_STRUCT_ARRAY(pib, PPCE500PCIState, PPCE500_PCI_NR_PIBS, 1, diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c index 08534bc7cc0..0d7d4e3f086 100644 --- a/hw/pci-host/q35.c +++ b/hw/pci-host/q35.c @@ -520,7 +520,7 @@ static const VMStateDescription vmstate_mch = { .version_id = 1, .minimum_version_id = 1, .post_load = mch_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, MCHPCIState), /* Used to be smm_enabled, which was basically always zero because * SeaBIOS hardly uses SMM. SMRAM is now handled by CPU code. diff --git a/hw/pci-host/raven.c b/hw/pci-host/raven.c index 86c3a490871..a7dfddd69ea 100644 --- a/hw/pci-host/raven.c +++ b/hw/pci-host/raven.c @@ -200,6 +200,7 @@ static const MemoryRegionOps raven_io_ops = { .write = raven_io_write, .endianness = DEVICE_LITTLE_ENDIAN, .impl.max_access_size = 4, + .impl.unaligned = true, .valid.unaligned = true, }; @@ -345,8 +346,10 @@ static void raven_realize(PCIDevice *d, Error **errp) d->config[PCI_LATENCY_TIMER] = 0x10; d->config[PCI_CAPABILITY_LIST] = 0x00; - memory_region_init_rom_nomigrate(&s->bios, OBJECT(s), "bios", BIOS_SIZE, - &error_fatal); + if (!memory_region_init_rom_nomigrate(&s->bios, OBJECT(s), "bios", + BIOS_SIZE, errp)) { + return; + } memory_region_add_subregion(get_system_memory(), (uint32_t)(-BIOS_SIZE), &s->bios); if (s->bios_name) { @@ -383,7 +386,7 @@ static const VMStateDescription vmstate_raven = { .name = "raven", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, RavenPCIState), VMSTATE_END_OF_LIST() }, diff --git a/hw/pci-host/trace-events b/hw/pci-host/trace-events index b2f47e6335c..0a816b9aa12 100644 --- a/hw/pci-host/trace-events +++ b/hw/pci-host/trace-events @@ -37,6 +37,18 @@ unin_data_read(uint64_t addr, unsigned len, uint64_t val) "read addr 0x%"PRIx64 unin_write(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64 unin_read(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64 +# ppc4xx_pci.c +ppc4xx_pci_map_irq(int32_t devfn, int irq_num, int slot) "devfn 0x%x irq %d -> %d" +ppc4xx_pci_set_irq(int irq_num) "PCI irq %d" + +# ppc440_pcix.c +ppc440_pcix_map_irq(int32_t devfn, int irq_num, int slot) "devfn 0x%x irq %d -> %d" +ppc440_pcix_set_irq(int irq_num) "PCI irq %d" +ppc440_pcix_update_pim(int idx, uint64_t size, uint64_t la) "Added window %d of size=0x%" PRIx64 " to CPU=0x%" PRIx64 
+ppc440_pcix_update_pom(int idx, uint32_t size, uint64_t la, uint64_t pcia) "Added window %d of size=0x%x from CPU=0x%" PRIx64 " to PCI=0x%" PRIx64 +ppc440_pcix_reg_read(uint64_t addr, uint32_t val) "addr 0x%" PRIx64 " = 0x%" PRIx32 +ppc440_pcix_reg_write(uint64_t addr, uint32_t val, uint32_t size) "addr 0x%" PRIx64 " = 0x%" PRIx32 " size 0x%" PRIx32 + # pnv_phb4.c pnv_phb4_xive_notify(uint64_t notif_port, uint64_t data) "notif=@0x%"PRIx64" data=0x%"PRIx64 pnv_phb4_xive_notify_ic(uint64_t addr, uint64_t data) "addr=@0x%"PRIx64" data=0x%"PRIx64 diff --git a/hw/pci-host/versatile.c b/hw/pci-host/versatile.c index 60d4e7cd923..0e65deb3f97 100644 --- a/hw/pci-host/versatile.c +++ b/hw/pci-host/versatile.c @@ -147,7 +147,7 @@ static const VMStateDescription pci_vpb_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = pci_vpb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(imap, PCIVPBState, 3), VMSTATE_UINT32_ARRAY(smap, PCIVPBState, 3), VMSTATE_UINT32(selfid, PCIVPBState), diff --git a/hw/pci/meson.build b/hw/pci/meson.build index b1855452f5b..b9c34b2acfe 100644 --- a/hw/pci/meson.build +++ b/hw/pci/meson.build @@ -20,4 +20,3 @@ system_ss.add(when: 'CONFIG_PCI_EXPRESS', if_true: files('pcie_port.c', 'pcie_ho system_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) system_ss.add(when: 'CONFIG_PCI', if_false: files('pci-stub.c')) -system_ss.add(when: 'CONFIG_ALL', if_true: files('pci-stub.c')) diff --git a/hw/pci/msi.c b/hw/pci/msi.c index 041b0bdbec4..8104ac1d91a 100644 --- a/hw/pci/msi.c +++ b/hw/pci/msi.c @@ -23,6 +23,7 @@ #include "hw/xen/xen.h" #include "qemu/range.h" #include "qapi/error.h" +#include "sysemu/xen.h" #include "hw/i386/kvm/xen_evtchn.h" @@ -308,7 +309,7 @@ bool msi_is_masked(const PCIDevice *dev, unsigned int vector) } data = pci_get_word(dev->config + msi_data_off(dev, msi64bit)); - if (xen_is_pirq_msi(data)) { + if (xen_enabled() && xen_is_pirq_msi(data)) { return false; } diff --git a/hw/pci/msix.c b/hw/pci/msix.c index cd817f4ca8e..487e49834ee 100644 --- a/hw/pci/msix.c +++ b/hw/pci/msix.c @@ -685,7 +685,7 @@ static int get_msix_state(QEMUFile *f, void *pv, size_t size, return 0; } -static VMStateInfo vmstate_info_msix = { +static const VMStateInfo vmstate_info_msix = { .name = "msix state", .get = get_msix_state, .put = put_msix_state, @@ -693,7 +693,7 @@ static VMStateInfo vmstate_info_msix = { const VMStateDescription vmstate_msix = { .name = "msix", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { { .name = "msix", .version_id = 0, diff --git a/hw/pci/pci.c b/hw/pci/pci.c index c49417abb2d..e7a39cb203a 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -64,7 +64,7 @@ bool pci_available = true; static char *pcibus_get_dev_path(DeviceState *dev); static char *pcibus_get_fw_dev_path(DeviceState *dev); -static void pcibus_reset(BusState *qbus); +static void pcibus_reset_hold(Object *obj); static bool pcie_has_upstream_port(PCIDevice *dev); static Property pci_props[] = { @@ -92,7 +92,7 @@ static const VMStateDescription vmstate_pcibus = { .name = "PCIBUS", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL), VMSTATE_VARRAY_INT32(irq_count, PCIBus, nirq, 0, vmstate_info_int32, @@ -202,13 +202,15 @@ static void pci_bus_class_init(ObjectClass *klass, void *data) { BusClass *k = BUS_CLASS(klass); PCIBusClass *pbc = PCI_BUS_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); k->print_dev = 
pcibus_dev_print; k->get_dev_path = pcibus_get_dev_path; k->get_fw_dev_path = pcibus_get_fw_dev_path; k->realize = pci_bus_realize; k->unrealize = pci_bus_unrealize; - k->reset = pcibus_reset; + + rc->phases.hold = pcibus_reset_hold; pbc->bus_num = pcibus_num; pbc->numa_node = pcibus_numa_node; @@ -407,6 +409,7 @@ static void pci_do_device_reset(PCIDevice *dev) msi_reset(dev); msix_reset(dev); + pcie_sriov_pf_reset(dev); } /* @@ -424,9 +427,9 @@ void pci_device_reset(PCIDevice *dev) * Called via bus_cold_reset on RST# assert, after the devices * have been reset device_cold_reset-ed already. */ -static void pcibus_reset(BusState *qbus) +static void pcibus_reset_hold(Object *obj) { - PCIBus *bus = DO_UPCAST(PCIBus, qbus, qbus); + PCIBus *bus = PCI_BUS(obj); int i; for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { @@ -673,7 +676,7 @@ static int put_pci_config_device(QEMUFile *f, void *pv, size_t size, return 0; } -static VMStateInfo vmstate_info_pci_config = { +static const VMStateInfo vmstate_info_pci_config = { .name = "pci config", .get = get_pci_config_device, .put = put_pci_config_device, @@ -714,7 +717,7 @@ static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size, return 0; } -static VMStateInfo vmstate_info_pci_irq_state = { +static const VMStateInfo vmstate_info_pci_irq_state = { .name = "pci irq state", .get = get_pci_irq_state, .put = put_pci_irq_state, @@ -734,7 +737,7 @@ const VMStateDescription vmstate_pci_device = { .name = "PCIDevice", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice), VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, migrate_is_not_pcie, @@ -1853,76 +1856,49 @@ const pci_class_desc *get_class_desc(int class) return desc; } -/* Initialize a PCI NIC. */ -PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus, - const char *default_model, - const char *default_devaddr) +void pci_init_nic_devices(PCIBus *bus, const char *default_model) { - const char *devaddr = nd->devaddr ? 
nd->devaddr : default_devaddr; - GPtrArray *pci_nic_models; - PCIBus *bus; + qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model, + "virtio", "virtio-net-pci"); +} + +bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model, + const char *alias, const char *devaddr) +{ + NICInfo *nd = qemu_find_nic_info(model, true, alias); + int dom, busnr, devfn; PCIDevice *pci_dev; - DeviceState *dev; - int devfn; - int i; - int dom, busnr; unsigned slot; + PCIBus *bus; - if (nd->model && !strcmp(nd->model, "virtio")) { - g_free(nd->model); - nd->model = g_strdup("virtio-net-pci"); - } - - pci_nic_models = qemu_get_nic_models(TYPE_PCI_DEVICE); - - if (qemu_show_nic_models(nd->model, (const char **)pci_nic_models->pdata)) { - exit(0); + if (!nd) { + return false; } - i = qemu_find_nic_model(nd, (const char **)pci_nic_models->pdata, - default_model); - if (i < 0) { + if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) { + error_report("Invalid PCI device address %s for device %s", + devaddr, model); exit(1); } - if (!rootbus) { - error_report("No primary PCI bus"); + if (dom != 0) { + error_report("No support for non-zero PCI domains"); exit(1); } - assert(!rootbus->parent_dev); - - if (!devaddr) { - devfn = -1; - busnr = 0; - } else { - if (pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) { - error_report("Invalid PCI device address %s for device %s", - devaddr, nd->model); - exit(1); - } - - if (dom != 0) { - error_report("No support for non-zero PCI domains"); - exit(1); - } - - devfn = PCI_DEVFN(slot, 0); - } + devfn = PCI_DEVFN(slot, 0); bus = pci_find_bus_nr(rootbus, busnr); if (!bus) { error_report("Invalid PCI device address %s for device %s", - devaddr, nd->model); + devaddr, model); exit(1); } - pci_dev = pci_new(devfn, nd->model); - dev = &pci_dev->qdev; - qdev_set_nic_properties(dev, nd); + pci_dev = pci_new(devfn, model); + qdev_set_nic_properties(&pci_dev->qdev, nd); pci_realize_and_unref(pci_dev, bus, &error_fatal); - g_ptr_array_free(pci_nic_models, true); - return pci_dev; + return true; } PCIDevice *pci_vga_init(PCIBus *bus) diff --git a/hw/pci/pci_host.c b/hw/pci/pci_host.c index a18aa0a8d4c..dfe6fe61840 100644 --- a/hw/pci/pci_host.c +++ b/hw/pci/pci_host.c @@ -234,7 +234,7 @@ const VMStateDescription vmstate_pcihost = { .needed = pci_host_needed, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(config_reg, PCIHostState), VMSTATE_END_OF_LIST() } diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index 6db0cf69cd8..4b2f0805c6e 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -28,6 +28,7 @@ #include "hw/pci/pcie_regs.h" #include "hw/pci/pcie_port.h" #include "qemu/range.h" +#include "trace.h" //#define DEBUG_PCIE #ifdef DEBUG_PCIE @@ -45,6 +46,23 @@ static bool pcie_sltctl_powered_off(uint16_t sltctl) && (sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF; } +static const char *pcie_led_state_to_str(uint16_t value) +{ + switch (value) { + case PCI_EXP_SLTCTL_PWR_IND_ON: + case PCI_EXP_SLTCTL_ATTN_IND_ON: + return "on"; + case PCI_EXP_SLTCTL_PWR_IND_BLINK: + case PCI_EXP_SLTCTL_ATTN_IND_BLINK: + return "blink"; + case PCI_EXP_SLTCTL_PWR_IND_OFF: + case PCI_EXP_SLTCTL_ATTN_IND_OFF: + return "off"; + default: + return "invalid"; + } +} + /*************************************************************************** * pci express capability helper functions */ @@ -153,6 +171,14 @@ static void pcie_cap_fill_slot_lnk(PCIDevice *dev) 
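The hw/pci/pci.c hunk above replaces the legacy BusClass::reset hook with the Resettable interface's hold phase. Converting any other bus (or device) that still uses the legacy hook follows the same shape; FooBus, FOO_BUS and foo_bus_clear_irqs below are hypothetical stand-ins for whatever the old callback operated on:

    #include "qemu/osdep.h"
    #include "hw/qdev-core.h"
    #include "hw/resettable.h"

    static void foo_bus_reset_hold(Object *obj)
    {
        FooBus *bus = FOO_BUS(obj);         /* hypothetical bus type */

        /* ...whatever the old BusClass::reset callback used to do... */
        foo_bus_clear_irqs(bus);
    }

    static void foo_bus_class_init(ObjectClass *klass, void *data)
    {
        ResettableClass *rc = RESETTABLE_CLASS(klass);

        /* was: BUS_CLASS(klass)->reset = foo_bus_reset; */
        rc->phases.hold = foo_bus_reset_hold;
    }

Callers keep using bus_cold_reset()/device_cold_reset(); only the hook registration changes.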
pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, PCI_EXP_LNKCAP2_SLS_16_0GB); } + if (s->speed > QEMU_PCI_EXP_LNK_16GT) { + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, + PCI_EXP_LNKCAP2_SLS_32_0GB); + } + if (s->speed > QEMU_PCI_EXP_LNK_32GT) { + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, + PCI_EXP_LNKCAP2_SLS_64_0GB); + } } } @@ -735,6 +761,28 @@ void pcie_cap_slot_get(PCIDevice *dev, uint16_t *slt_ctl, uint16_t *slt_sta) *slt_sta = pci_get_word(exp_cap + PCI_EXP_SLTSTA); } +static void find_child_fn(PCIBus *bus, PCIDevice *dev, void *opaque) +{ + PCIDevice **child = opaque; + + if (!*child) { + *child = dev; + } +} + +/* + * Returns the plugged device or first function of multifunction plugged device + */ +static PCIDevice *pcie_cap_slot_find_child(PCIDevice *dev) +{ + PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev)); + PCIDevice *child = NULL; + + pci_for_each_device(sec_bus, pci_bus_num(sec_bus), find_child_fn, &child); + + return child; +} + void pcie_cap_slot_write_config(PCIDevice *dev, uint16_t old_slt_ctl, uint16_t old_slt_sta, uint32_t addr, uint32_t val, int len) @@ -779,6 +827,22 @@ void pcie_cap_slot_write_config(PCIDevice *dev, sltsta); } + if (trace_event_get_state_backends(TRACE_PCIE_CAP_SLOT_WRITE_CONFIG)) { + DeviceState *parent = DEVICE(dev); + DeviceState *child = DEVICE(pcie_cap_slot_find_child(dev)); + + trace_pcie_cap_slot_write_config( + parent->canonical_path, + child ? child->canonical_path : "no-child", + (sltsta & PCI_EXP_SLTSTA_PDS) ? "present" : "not present", + pcie_led_state_to_str(old_slt_ctl & PCI_EXP_SLTCTL_PIC), + pcie_led_state_to_str(val & PCI_EXP_SLTCTL_PIC), + pcie_led_state_to_str(old_slt_ctl & PCI_EXP_SLTCTL_AIC), + pcie_led_state_to_str(val & PCI_EXP_SLTCTL_AIC), + (old_slt_ctl & PCI_EXP_SLTCTL_PWR_OFF) ? "off" : "on", + (val & PCI_EXP_SLTCTL_PWR_OFF) ? "off" : "on"); + } + /* * If the slot is populated, power indicator is off and power * controller is off, it is safe to detach the devices. 
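With pci_nic_init_nofail() gone, boards create their NICs through the two helpers added to hw/pci/pci.c above, which push the model/alias matching into the common NIC code (qemu_find_nic_info(), qemu_create_nic_bus_devices()). A rough sketch of board-side usage, where foo_board_create_nics, the "e1000" model and the slot address "3" are purely illustrative:

    #include "qemu/osdep.h"
    #include "hw/pci/pci.h"

    static void foo_board_create_nics(PCIBus *pci_bus, const char *default_model)
    {
        /* create a PCI NIC for each user-configured -nic/-net nic;
         * the "virtio" alias is mapped to "virtio-net-pci" */
        pci_init_nic_devices(pci_bus, default_model);

        /* optionally pin one specific model into a fixed slot; returns
         * true when such a NIC was requested and has been created */
        pci_init_nic_in_slot(pci_bus, "e1000", NULL, "3");
    }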
diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c index b68c7ecb49c..2c85a78fcde 100644 --- a/hw/pci/pcie_aer.c +++ b/hw/pci/pcie_aer.c @@ -797,7 +797,7 @@ static const VMStateDescription vmstate_pcie_aer_err = { .name = "PCIE_AER_ERROR", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(status, PCIEAERErr), VMSTATE_UINT16(source_id, PCIEAERErr), VMSTATE_UINT16(flags, PCIEAERErr), @@ -818,7 +818,7 @@ const VMStateDescription vmstate_pcie_aer_log = { .name = "PCIE_AER_ERROR_LOG", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(log_num, PCIEAERLog), VMSTATE_UINT16_EQUAL(log_max, PCIEAERLog, NULL), VMSTATE_VALIDATE("log_num <= log_max", pcie_aer_state_log_num_valid), diff --git a/hw/pci/pcie_sriov.c b/hw/pci/pcie_sriov.c index a1fe65f5d80..e9b23221d71 100644 --- a/hw/pci/pcie_sriov.c +++ b/hw/pci/pcie_sriov.c @@ -176,6 +176,9 @@ static void register_vfs(PCIDevice *dev) assert(sriov_cap > 0); num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF); + if (num_vfs > pci_get_word(dev->config + sriov_cap + PCI_SRIOV_TOTAL_VF)) { + return; + } dev->exp.sriov_pf.vf = g_new(PCIDevice *, num_vfs); @@ -212,7 +215,6 @@ static void unregister_vfs(PCIDevice *dev) g_free(dev->exp.sriov_pf.vf); dev->exp.sriov_pf.vf = NULL; dev->exp.sriov_pf.num_vfs = 0; - pci_set_word(dev->config + dev->exp.sriov_cap + PCI_SRIOV_NUM_VF, 0); } void pcie_sriov_config_write(PCIDevice *dev, uint32_t address, @@ -246,16 +248,28 @@ void pcie_sriov_config_write(PCIDevice *dev, uint32_t address, } -/* Reset SR/IOV VF Enable bit to trigger an unregister of all VFs */ -void pcie_sriov_pf_disable_vfs(PCIDevice *dev) +/* Reset SR/IOV */ +void pcie_sriov_pf_reset(PCIDevice *dev) { uint16_t sriov_cap = dev->exp.sriov_cap; - if (sriov_cap) { - uint32_t val = pci_get_byte(dev->config + sriov_cap + PCI_SRIOV_CTRL); - if (val & PCI_SRIOV_CTRL_VFE) { - val &= ~PCI_SRIOV_CTRL_VFE; - pcie_sriov_config_write(dev, sriov_cap + PCI_SRIOV_CTRL, val, 1); - } + if (!sriov_cap) { + return; + } + + pci_set_word(dev->config + sriov_cap + PCI_SRIOV_CTRL, 0); + unregister_vfs(dev); + + pci_set_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF, 0); + + /* + * Default is to use 4K pages, software can modify it + * to any of the supported bits + */ + pci_set_word(dev->config + sriov_cap + PCI_SRIOV_SYS_PGSIZE, 0x1); + + for (uint16_t i = 0; i < PCI_NUM_REGIONS; i++) { + pci_set_quad(dev->config + sriov_cap + PCI_SRIOV_BAR + i * 4, + dev->exp.sriov_pf.vf_bar_type[i]); } } diff --git a/hw/pci/shpc.c b/hw/pci/shpc.c index df7f3701119..aac6f2d0345 100644 --- a/hw/pci/shpc.c +++ b/hw/pci/shpc.c @@ -8,6 +8,7 @@ #include "hw/pci/pci.h" #include "hw/pci/pci_bus.h" #include "hw/pci/msi.h" +#include "trace.h" /* TODO: model power only and disabled slot states. 
*/ /* TODO: handle SERR and wakeups */ @@ -123,6 +124,34 @@ #define SHPC_PCI_TO_IDX(pci_slot) ((pci_slot) - 1) #define SHPC_IDX_TO_PHYSICAL(slot) ((slot) + 1) +static const char *shpc_led_state_to_str(uint8_t value) +{ + switch (value) { + case SHPC_LED_ON: + return "on"; + case SHPC_LED_BLINK: + return "blink"; + case SHPC_LED_OFF: + return "off"; + default: + return "invalid"; + } +} + +static const char *shpc_slot_state_to_str(uint8_t value) +{ + switch (value) { + case SHPC_STATE_PWRONLY: + return "power-only"; + case SHPC_STATE_ENABLED: + return "enabled"; + case SHPC_STATE_DISABLED: + return "disabled"; + default: + return "invalid"; + } +} + static uint8_t shpc_get_status(SHPCDevice *shpc, int slot, uint16_t msk) { uint8_t *status = shpc->config + SHPC_SLOT_STATUS(slot); @@ -302,6 +331,23 @@ static void shpc_slot_command(PCIDevice *d, uint8_t target, shpc_set_status(shpc, slot, state, SHPC_SLOT_STATE_MASK); } + if (trace_event_get_state_backends(TRACE_SHPC_SLOT_COMMAND)) { + DeviceState *parent = DEVICE(d); + int pci_slot = SHPC_IDX_TO_PCI(slot); + DeviceState *child = + DEVICE(shpc->sec_bus->devices[PCI_DEVFN(pci_slot, 0)]); + + trace_shpc_slot_command( + parent->canonical_path, pci_slot, + child ? child->canonical_path : "no-child", + shpc_led_state_to_str(old_power), + shpc_led_state_to_str(power), + shpc_led_state_to_str(old_attn), + shpc_led_state_to_str(attn), + shpc_slot_state_to_str(old_state), + shpc_slot_state_to_str(state)); + } + if (!shpc_slot_is_off(old_state, old_power, old_attn) && shpc_slot_is_off(state, power, attn)) { @@ -736,7 +782,7 @@ static int shpc_load(QEMUFile *f, void *pv, size_t size, return 0; } -VMStateInfo shpc_vmstate_info = { +const VMStateInfo shpc_vmstate_info = { .name = "shpc", .get = shpc_load, .put = shpc_save, diff --git a/hw/pci/trace-events b/hw/pci/trace-events index 42430869ce0..19643aa8c6b 100644 --- a/hw/pci/trace-events +++ b/hw/pci/trace-events @@ -16,3 +16,9 @@ msix_write_config(char *name, bool enabled, bool masked) "dev %s enabled %d mask sriov_register_vfs(const char *name, int slot, int function, int num_vfs) "%s %02x:%x: creating %d vf devs" sriov_unregister_vfs(const char *name, int slot, int function, int num_vfs) "%s %02x:%x: Unregistering %d vf devs" sriov_config_write(const char *name, int slot, int fun, uint32_t offset, uint32_t val, uint32_t len) "%s %02x:%x: sriov offset 0x%x val 0x%x len %d" + +# pcie.c +pcie_cap_slot_write_config(const char *parent, const char *child, const char *pds, const char *old_pic, const char *new_pic, const char *old_aic, const char *new_aic, const char *old_power, const char *new_power) "%s > %s: pds: %s, pic: %s->%s, aic: %s->%s, power: %s->%s" + +# shpc.c +shpc_slot_command(const char *parent, int pci_slot, const char *child, const char *old_pic, const char *new_pic, const char *old_aic, const char *new_aic, const char *old_state, const char *new_state) "%s[%d] > %s: pic: %s->%s, aic: %s->%s, state: %s->%s" diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig index 56f0475a8e2..37ccf9cdcaf 100644 --- a/hw/ppc/Kconfig +++ b/hw/ppc/Kconfig @@ -1,13 +1,14 @@ config PSERIES bool + imply USB_OHCI_PCI imply PCI_DEVICES imply TEST_DEVICES imply VIRTIO_VGA + imply VFIO_PCI if LINUX # needed by spapr_pci_vfio.c select NVDIMM select DIMM select PCI select SPAPR_VSCSI - select VFIO if LINUX # needed by spapr_pci_vfio.c select XICS select XIVE select MSI_NONBROKEN @@ -32,6 +33,8 @@ config POWERNV select XIVE select FDT_PPC select PCI_POWERNV + select PCA9552 + select PCA9554 config PPC405 bool @@ -46,6 +49,7 @@ 
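Both new trace points (pcie_cap_slot_write_config and the shpc_slot_command hunk above) only assemble their string arguments once trace_event_get_state_backends() confirms the event is enabled for at least one backend. The same guard for any other event declared in a trace-events file would look roughly like this, with trace_foo_event/TRACE_FOO_EVENT standing in for a generated event name:

    #include "qemu/osdep.h"
    #include "hw/qdev-core.h"
    #include "trace.h"

    static void foo_trace_slot_change(DeviceState *dev, uint64_t val)
    {
        if (trace_event_get_state_backends(TRACE_FOO_EVENT)) {
            /* canonical_path is cached on the DeviceState at realize time */
            trace_foo_event(dev->canonical_path, val);
        }
    }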
config PPC440 imply TEST_DEVICES imply E1000_PCI select PCI_EXPRESS + select PPC440_PCIX select PPC4XX select SERIAL select FDT_PPC @@ -53,7 +57,7 @@ config PPC440 config PPC4XX bool select BITBANG_I2C - select PCI + select PPC4XX_PCI select PPC_UIC config SAM460EX @@ -66,7 +70,7 @@ config SAM460EX select SM501 select SMBUS_EEPROM select USB_EHCI_SYSBUS - select USB_OHCI + select USB_OHCI_SYSBUS select FDT_PPC config AMIGAONE @@ -116,13 +120,13 @@ config MAC_NEWWORLD imply PCI_DEVICES imply SUNGEM imply TEST_DEVICES + imply USB_OHCI_PCI select ADB select MACIO select MACIO_GPIO select MAC_PMU select UNIN_PCI select FW_CFG_PPC - select USB_OHCI_PCI config E500 bool diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 384226296bf..3bd12b54ab9 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -955,7 +955,7 @@ void ppce500_init(MachineState *machine) * when implementing non-kernel boot. */ object_property_set_bool(OBJECT(cs), "start-powered-off", i != 0, - &error_fatal); + &error_abort); qdev_realize_and_unref(DEVICE(cs), NULL, &error_fatal); if (!firstenv) { @@ -1079,9 +1079,7 @@ void ppce500_init(MachineState *machine) if (pci_bus) { /* Register network interfaces. */ - for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); - } + pci_init_nic_devices(pci_bus, mc->default_nic); } /* Register spinning region */ diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index 535710314a5..ff9e490c4e4 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -77,7 +77,7 @@ #define MAX_IDE_BUS 2 #define CFG_ADDR 0xf0000510 -#define TBFREQ (100UL * 1000UL * 1000UL) +#define TBFREQ (25UL * 1000UL * 1000UL) #define CLOCKFREQ (900UL * 1000UL * 1000UL) #define BUSFREQ (100UL * 1000UL * 1000UL) @@ -431,8 +431,10 @@ static void ppc_core99_init(MachineState *machine) /* U3 needs to use USB for input because Linux doesn't support via-cuda on PPC64 */ if (!has_adb || machine_arch == ARCH_MAC99_U3) { - USBBus *usb_bus = usb_bus_find(-1); + USBBus *usb_bus; + usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS, + &error_abort)); usb_create_simple(usb_bus, "usb-kbd"); usb_create_simple(usb_bus, "usb-mouse"); } @@ -444,9 +446,7 @@ static void ppc_core99_init(MachineState *machine) graphic_depth = 15; } - for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); - } + pci_init_nic_devices(pci_bus, mc->default_nic); /* The NewWorld NVRAM is not located in the MacIO device */ if (kvm_enabled() && qemu_real_host_page_size() > 4096) { diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c index 9acc7adfc92..1981d3d8f6e 100644 --- a/hw/ppc/mac_oldworld.c +++ b/hw/ppc/mac_oldworld.c @@ -277,9 +277,7 @@ static void ppc_heathrow_init(MachineState *machine) pci_vga_init(pci_bus); - for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); - } + pci_init_nic_devices(pci_bus, mc->default_nic); /* MacIO IDE */ ide_drive_get(hd, ARRAY_SIZE(hd)); diff --git a/hw/ppc/meson.build b/hw/ppc/meson.build index ea44856d43b..d096636ee7f 100644 --- a/hw/ppc/meson.build +++ b/hw/ppc/meson.build @@ -31,12 +31,14 @@ ppc_ss.add(when: 'CONFIG_PSERIES', if_true: files( 'pef.c', )) ppc_ss.add(when: ['CONFIG_PSERIES', 'CONFIG_TCG'], if_true: files( - 'spapr_softmmu.c', + 'spapr_vhyp_mmu.c', )) ppc_ss.add(when: 'CONFIG_SPAPR_RNG', if_true: files('spapr_rng.c')) -ppc_ss.add(when: ['CONFIG_PSERIES', 'CONFIG_LINUX'], if_true: files( - 'spapr_pci_vfio.c', -)) +if host_os == 'linux' + 
ppc_ss.add(when: 'CONFIG_PSERIES', if_true: files( + 'spapr_pci_vfio.c', + )) +endif # IBM PowerNV ppc_ss.add(when: 'CONFIG_POWERNV', if_true: files( @@ -46,11 +48,14 @@ ppc_ss.add(when: 'CONFIG_POWERNV', if_true: files( 'pnv_i2c.c', 'pnv_lpc.c', 'pnv_psi.c', + 'pnv_chiptod.c', 'pnv_occ.c', 'pnv_sbe.c', 'pnv_bmc.c', 'pnv_homer.c', 'pnv_pnor.c', + 'pnv_nest_pervasive.c', + 'pnv_n1_chiplet.c', )) # PowerPC 4xx boards ppc_ss.add(when: 'CONFIG_PPC405', if_true: files( @@ -58,10 +63,9 @@ ppc_ss.add(when: 'CONFIG_PPC405', if_true: files( 'ppc405_uc.c')) ppc_ss.add(when: 'CONFIG_PPC440', if_true: files( 'ppc440_bamboo.c', - 'ppc440_pcix.c', 'ppc440_uc.c')) + 'ppc440_uc.c')) ppc_ss.add(when: 'CONFIG_PPC4XX', if_true: files( 'ppc4xx_devs.c', - 'ppc4xx_pci.c', 'ppc4xx_sdram.c')) ppc_ss.add(when: 'CONFIG_SAM460EX', if_true: files('sam460ex.c')) # PReP diff --git a/hw/ppc/mpc8544_guts.c b/hw/ppc/mpc8544_guts.c index a26e83d0484..e3540b02816 100644 --- a/hw/ppc/mpc8544_guts.c +++ b/hw/ppc/mpc8544_guts.c @@ -71,8 +71,7 @@ static uint64_t mpc8544_guts_read(void *opaque, hwaddr addr, unsigned size) { uint32_t value = 0; - PowerPCCPU *cpu = POWERPC_CPU(current_cpu); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(current_cpu); addr &= MPC8544_GUTS_MMIO_SIZE - 1; switch (addr) { diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index 3203a4a7289..04d6decb2b0 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -285,6 +285,12 @@ static void pegasos2_pci_config_write(Pegasos2MachineState *pm, int bus, pegasos2_mv_reg_write(pm, pcicfg + 4, len, val); } +static void pegasos2_superio_write(uint8_t addr, uint8_t val) +{ + cpu_physical_memory_write(PCI1_IO_BASE + 0x3f0, &addr, 1); + cpu_physical_memory_write(PCI1_IO_BASE + 0x3f1, &val, 1); +} + static void pegasos2_machine_reset(MachineState *machine, ShutdownCause reason) { Pegasos2MachineState *pm = PEGASOS2_MACHINE(machine); @@ -310,6 +316,12 @@ static void pegasos2_machine_reset(MachineState *machine, ShutdownCause reason) pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | PCI_INTERRUPT_LINE, 2, 0x9); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | + 0x50, 1, 0x6); + pegasos2_superio_write(0xf4, 0xbe); + pegasos2_superio_write(0xf6, 0xef); + pegasos2_superio_write(0xf7, 0xfc); + pegasos2_superio_write(0xf2, 0x14); pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | 0x50, 1, 0x2); pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | @@ -515,7 +527,7 @@ static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) CPUPPCState *env = &cpu->env; /* The TCG path should also be holding the BQL at this point */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (FIELD_EX64(env->msr, MSR, PR)) { qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n"); diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 0297871bdd5..6e3a5ccdec7 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -133,7 +133,7 @@ static int get_cpus_node(void *fdt) * device tree, used in XSCOM to address cores and in interrupt * servers. 
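 *
 * With this series the chip no longer sets a per-core "pir" property:
 * the device tree "reg" and "ibm,pir" values, the interrupt server list
 * and the ICP MMIO layout are all derived from the core's hwid through
 * the new PnvChipClass::chip_pir(chip, core_id, thread_id) hook.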
*/ -static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt) +static int pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt) { PowerPCCPU *cpu = pc->threads[0]; CPUState *cs = CPU(cpu); @@ -141,32 +141,31 @@ static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt) int smt_threads = CPU_CORE(pc)->nr_threads; CPUPPCState *env = &cpu->env; PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); + PnvChipClass *pnv_cc = PNV_CHIP_GET_CLASS(chip); g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads); int i; + uint32_t pir; uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 0xffffffff, 0xffffffff}; uint32_t tbfreq = PNV_TIMEBASE_FREQ; uint32_t cpufreq = 1000000000; uint32_t page_sizes_prop[64]; size_t page_sizes_prop_size; - const uint8_t pa_features[] = { 24, 0, - 0xf6, 0x3f, 0xc7, 0xc0, 0x80, 0xf0, - 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, - 0x80, 0x00, 0x80, 0x00, 0x80, 0x00 }; int offset; char *nodename; int cpus_offset = get_cpus_node(fdt); - nodename = g_strdup_printf("%s@%x", dc->fw_name, pc->pir); + pir = pnv_cc->chip_pir(chip, pc->hwid, 0); + + nodename = g_strdup_printf("%s@%x", dc->fw_name, pir); offset = fdt_add_subnode(fdt, cpus_offset, nodename); _FDT(offset); g_free(nodename); _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", chip->chip_id))); - _FDT((fdt_setprop_cell(fdt, offset, "reg", pc->pir))); - _FDT((fdt_setprop_cell(fdt, offset, "ibm,pir", pc->pir))); + _FDT((fdt_setprop_cell(fdt, offset, "reg", pir))); + _FDT((fdt_setprop_cell(fdt, offset, "ibm,pir", pir))); _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu"))); _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR]))); @@ -236,20 +235,21 @@ static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt) page_sizes_prop, page_sizes_prop_size))); } - _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", - pa_features, sizeof(pa_features)))); - /* Build interrupt servers properties */ for (i = 0; i < smt_threads; i++) { - servers_prop[i] = cpu_to_be32(pc->pir + i); + servers_prop[i] = cpu_to_be32(pnv_cc->chip_pir(chip, pc->hwid, i)); } _FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", servers_prop, sizeof(*servers_prop) * smt_threads))); + + return offset; } -static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t pir, +static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t hwid, uint32_t nr_threads) { + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip); + uint32_t pir = pcc->chip_pir(chip, hwid, 0); uint64_t addr = PNV_ICP_BASE(chip) | (pir << 12); char *name; const char compat[] = "IBM,power8-icp\0IBM,ppc-xicp"; @@ -263,6 +263,7 @@ static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t pir, rsize = sizeof(uint64_t) * 2 * nr_threads; reg = g_malloc(rsize); for (i = 0; i < nr_threads; i++) { + /* We know P8 PIR is linear with thread id */ reg[i * 2] = cpu_to_be64(addr | ((pir + i) * 0x1000)); reg[i * 2 + 1] = cpu_to_be64(0x1000); } @@ -299,6 +300,17 @@ PnvChip *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb) return chip; } +/* + * Same as spapr pa_features_207 except pnv always enables CI largepages bit. + * HTM is always enabled because TCG does implement HTM, it's just a + * degenerate implementation. 
+ */ +static const uint8_t pa_features_207[] = { 24, 0, + 0xf6, 0x3f, 0xc7, 0xc0, 0x00, 0xf0, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00 }; + static void pnv_chip_power8_dt_populate(PnvChip *chip, void *fdt) { static const char compat[] = "ibm,power8-xscom\0ibm,xscom"; @@ -311,11 +323,15 @@ static void pnv_chip_power8_dt_populate(PnvChip *chip, void *fdt) for (i = 0; i < chip->nr_cores; i++) { PnvCore *pnv_core = chip->cores[i]; + int offset; - pnv_dt_core(chip, pnv_core, fdt); + offset = pnv_dt_core(chip, pnv_core, fdt); + + _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", + pa_features_207, sizeof(pa_features_207)))); /* Interrupt Control Presenters (ICP). One per core. */ - pnv_dt_icp(chip, fdt, pnv_core->pir, CPU_CORE(pnv_core)->nr_threads); + pnv_dt_icp(chip, fdt, pnv_core->hwid, CPU_CORE(pnv_core)->nr_threads); } if (chip->ram_size) { @@ -323,6 +339,35 @@ static void pnv_chip_power8_dt_populate(PnvChip *chip, void *fdt) } } +/* + * Same as spapr pa_features_300 except pnv always enables CI largepages bit. + */ +static const uint8_t pa_features_300[] = { 66, 0, + /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: CILRG|fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ + /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */ + 0xf6, 0x3f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */ + /* 6: DS207 */ + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ + /* 16: Vector */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ + /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 18 - 23 */ + /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ + /* 32: LE atomic, 34: EBB + ext EBB */ + 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ + /* 40: Radix MMU */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */ + /* 42: PM, 44: PC RA, 46: SC vec'd */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ + /* 48: SIMD, 50: QP BFP, 52: String */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ + /* 54: DecFP, 56: DecI, 58: SHA */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ + /* 60: NM atomic, 62: RNG */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ +}; + static void pnv_chip_power9_dt_populate(PnvChip *chip, void *fdt) { static const char compat[] = "ibm,power9-xscom\0ibm,xscom"; @@ -335,8 +380,12 @@ static void pnv_chip_power9_dt_populate(PnvChip *chip, void *fdt) for (i = 0; i < chip->nr_cores; i++) { PnvCore *pnv_core = chip->cores[i]; + int offset; + + offset = pnv_dt_core(chip, pnv_core, fdt); - pnv_dt_core(chip, pnv_core, fdt); + _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", + pa_features_300, sizeof(pa_features_300)))); } if (chip->ram_size) { @@ -346,6 +395,40 @@ static void pnv_chip_power9_dt_populate(PnvChip *chip, void *fdt) pnv_dt_lpc(chip, fdt, 0, PNV9_LPCM_BASE(chip), PNV9_LPCM_SIZE); } +/* + * Same as spapr pa_features_31 except pnv always enables CI largepages bit, + * always disables copy/paste. + */ +static const uint8_t pa_features_31[] = { 74, 0, + /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: CILRG|fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ + /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */ + 0xf6, 0x3f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */ + /* 6: DS207 */ + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ + /* 16: Vector */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ + /* 18: Vec. Scalar, 20: Vec. XOR */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ + /* 24: Ext. 
Dec, 26: 64 bit ftrs, 28: PM ftrs */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ + /* 32: LE atomic, 34: EBB + ext EBB */ + 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ + /* 40: Radix MMU */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */ + /* 42: PM, 44: PC RA, 46: SC vec'd */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ + /* 48: SIMD, 50: QP BFP, 52: String */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ + /* 54: DecFP, 56: DecI, 58: SHA */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ + /* 60: NM atomic, 62: RNG */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ + /* 68: DEXCR[SBHE|IBRTPDUS|SRAPD|NPHIE|PHIE] */ + 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 66 - 71 */ + /* 72: [P]HASHST/[P]HASHCHK */ + 0x80, 0x00, /* 72 - 73 */ +}; + static void pnv_chip_power10_dt_populate(PnvChip *chip, void *fdt) { static const char compat[] = "ibm,power10-xscom\0ibm,xscom"; @@ -358,8 +441,12 @@ static void pnv_chip_power10_dt_populate(PnvChip *chip, void *fdt) for (i = 0; i < chip->nr_cores; i++) { PnvCore *pnv_core = chip->cores[i]; + int offset; - pnv_dt_core(chip, pnv_core, fdt); + offset = pnv_dt_core(chip, pnv_core, fdt); + + _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", + pa_features_31, sizeof(pa_features_31)))); } if (chip->ram_size) { @@ -790,6 +877,7 @@ static void pnv_init(MachineState *machine) const char *bios_name = machine->firmware ?: FW_FILE_NAME; PnvMachineState *pnv = PNV_MACHINE(machine); MachineClass *mc = MACHINE_GET_CLASS(machine); + PnvMachineClass *pmc = PNV_MACHINE_GET_CLASS(machine); char *fw_filename; long fw_size; uint64_t chip_ram_start = 0; @@ -979,6 +1067,13 @@ static void pnv_init(MachineState *machine) */ pnv->powerdown_notifier.notify = pnv_powerdown_notify; qemu_register_powerdown_notifier(&pnv->powerdown_notifier); + + /* + * Create/Connect any machine-specific I2C devices + */ + if (pmc->i2c_init) { + pmc->i2c_init(pnv); + } } /* @@ -987,9 +1082,10 @@ static void pnv_init(MachineState *machine) * 25:28 Core number * 29:31 Thread ID */ -static uint32_t pnv_chip_core_pir_p8(PnvChip *chip, uint32_t core_id) +static uint32_t pnv_chip_pir_p8(PnvChip *chip, uint32_t core_id, + uint32_t thread_id) { - return (chip->chip_id << 7) | (core_id << 3); + return (chip->chip_id << 7) | (core_id << 3) | thread_id; } static void pnv_chip_power8_intc_create(PnvChip *chip, PowerPCCPU *cpu, @@ -1041,14 +1137,37 @@ static void pnv_chip_power8_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, * * We only care about the lower bits. uint32_t is fine for the moment. */ -static uint32_t pnv_chip_core_pir_p9(PnvChip *chip, uint32_t core_id) +static uint32_t pnv_chip_pir_p9(PnvChip *chip, uint32_t core_id, + uint32_t thread_id) { - return (chip->chip_id << 8) | (core_id << 2); + if (chip->nr_threads == 8) { + return (chip->chip_id << 8) | ((thread_id & 1) << 2) | (core_id << 3) | + (thread_id >> 1); + } else { + return (chip->chip_id << 8) | (core_id << 2) | thread_id; + } } -static uint32_t pnv_chip_core_pir_p10(PnvChip *chip, uint32_t core_id) +/* + * 0:48 Reserved - Read as zeroes + * 49:52 Node ID + * 53:55 Chip ID + * 56 Reserved - Read as zero + * 57:59 Quad ID + * 60 Core Chiplet Pair ID + * 61:63 Thread/Core Chiplet ID t0-t2 + * + * We only care about the lower bits. uint32_t is fine for the moment. 
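 *
 * For concreteness, with chip_id 0 the helpers here give:
 *   P8:            core 4, thread 3 -> (4 << 3) | 3 = 0x23
 *   P9, 8-thread:  core 0, thread 5 -> ((5 & 1) << 2) | (0 << 3) | (5 >> 1) = 0x06
 *   P10, 8-thread: core 5, thread 2 -> ((5 / 4) << 4) | ((5 % 2) << 3) | 2 = 0x1a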
+ */ +static uint32_t pnv_chip_pir_p10(PnvChip *chip, uint32_t core_id, + uint32_t thread_id) { - return (chip->chip_id << 8) | (core_id << 2); + if (chip->nr_threads == 8) { + return (chip->chip_id << 8) | ((core_id / 4) << 4) | + ((core_id % 2) << 3) | thread_id; + } else { + return (chip->chip_id << 8) | (core_id << 2) | thread_id; + } } static void pnv_chip_power9_intc_create(PnvChip *chip, PowerPCCPU *cpu, @@ -1227,7 +1346,7 @@ static void pnv_chip_icp_realize(Pnv8Chip *chip8, Error **errp) int core_hwid = CPU_CORE(pnv_core)->core_id; for (j = 0; j < CPU_CORE(pnv_core)->nr_threads; j++) { - uint32_t pir = pcc->core_pir(chip, core_hwid) + j; + uint32_t pir = pcc->chip_pir(chip, core_hwid, j); PnvICPState *icp = PNV_ICP(xics_icp_get(chip8->xics, pir)); memory_region_add_subregion(&chip8->icp_mmio, pir << 12, @@ -1257,11 +1376,11 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp) } /* Processor Service Interface (PSI) Host Bridge */ - object_property_set_int(OBJECT(&chip8->psi), "bar", PNV_PSIHB_BASE(chip), + object_property_set_int(OBJECT(psi8), "bar", PNV_PSIHB_BASE(chip), &error_fatal); - object_property_set_link(OBJECT(&chip8->psi), ICS_PROP_XICS, + object_property_set_link(OBJECT(psi8), ICS_PROP_XICS, OBJECT(chip8->xics), &error_abort); - if (!qdev_realize(DEVICE(&chip8->psi), NULL, errp)) { + if (!qdev_realize(DEVICE(psi8), NULL, errp)) { return; } pnv_xscom_add_subregion(chip, PNV_XSCOM_PSIHB_BASE, @@ -1292,7 +1411,7 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp) } pnv_xscom_add_subregion(chip, PNV_XSCOM_OCC_BASE, &chip8->occ.xscom_regs); qdev_connect_gpio_out(DEVICE(&chip8->occ), 0, - qdev_get_gpio_in(DEVICE(&chip8->psi), PSIHB_IRQ_OCC)); + qdev_get_gpio_in(DEVICE(psi8), PSIHB_IRQ_OCC)); /* OCC SRAM model */ memory_region_add_subregion(get_system_memory(), PNV_OCC_SENSOR_BASE(chip), @@ -1340,7 +1459,7 @@ static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x221ef04980000000ull; /* P8 Murano DD2.1 */ k->cores_mask = POWER8E_CORE_MASK; k->num_phbs = 3; - k->core_pir = pnv_chip_core_pir_p8; + k->chip_pir = pnv_chip_pir_p8; k->intc_create = pnv_chip_power8_intc_create; k->intc_reset = pnv_chip_power8_intc_reset; k->intc_destroy = pnv_chip_power8_intc_destroy; @@ -1364,7 +1483,7 @@ static void pnv_chip_power8_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x220ea04980000000ull; /* P8 Venice DD2.0 */ k->cores_mask = POWER8_CORE_MASK; k->num_phbs = 3; - k->core_pir = pnv_chip_core_pir_p8; + k->chip_pir = pnv_chip_pir_p8; k->intc_create = pnv_chip_power8_intc_create; k->intc_reset = pnv_chip_power8_intc_reset; k->intc_destroy = pnv_chip_power8_intc_destroy; @@ -1388,7 +1507,7 @@ static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x120d304980000000ull; /* P8 Naples DD1.0 */ k->cores_mask = POWER8_CORE_MASK; k->num_phbs = 4; - k->core_pir = pnv_chip_core_pir_p8; + k->chip_pir = pnv_chip_pir_p8; k->intc_create = pnv_chip_power8_intc_create; k->intc_reset = pnv_chip_power8_intc_reset; k->intc_destroy = pnv_chip_power8_intc_destroy; @@ -1419,6 +1538,8 @@ static void pnv_chip_power9_instance_init(Object *obj) object_initialize_child(obj, "lpc", &chip9->lpc, TYPE_PNV9_LPC); + object_initialize_child(obj, "chiptod", &chip9->chiptod, TYPE_PNV9_CHIPTOD); + object_initialize_child(obj, "occ", &chip9->occ, TYPE_PNV9_OCC); object_initialize_child(obj, "sbe", &chip9->sbe, TYPE_PNV9_SBE); @@ -1543,12 +1664,12 @@ static void pnv_chip_power9_realize(DeviceState *dev, 
Error **errp) &chip9->xive.xscom_regs); /* Processor Service Interface (PSI) Host Bridge */ - object_property_set_int(OBJECT(&chip9->psi), "bar", PNV9_PSIHB_BASE(chip), + object_property_set_int(OBJECT(psi9), "bar", PNV9_PSIHB_BASE(chip), &error_fatal); /* This is the only device with 4k ESB pages */ - object_property_set_int(OBJECT(&chip9->psi), "shift", XIVE_ESB_4K, + object_property_set_int(OBJECT(psi9), "shift", XIVE_ESB_4K, &error_fatal); - if (!qdev_realize(DEVICE(&chip9->psi), NULL, errp)) { + if (!qdev_realize(DEVICE(psi9), NULL, errp)) { return; } pnv_xscom_add_subregion(chip, PNV9_XSCOM_PSIHB_BASE, @@ -1565,13 +1686,26 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) chip->dt_isa_nodename = g_strdup_printf("/lpcm-opb@%" PRIx64 "/lpc@0", (uint64_t) PNV9_LPCM_BASE(chip)); + /* ChipTOD */ + object_property_set_bool(OBJECT(&chip9->chiptod), "primary", + chip->chip_id == 0, &error_abort); + object_property_set_bool(OBJECT(&chip9->chiptod), "secondary", + chip->chip_id == 1, &error_abort); + object_property_set_link(OBJECT(&chip9->chiptod), "chip", OBJECT(chip), + &error_abort); + if (!qdev_realize(DEVICE(&chip9->chiptod), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV9_XSCOM_CHIPTOD_BASE, + &chip9->chiptod.xscom_regs); + /* Create the simplified OCC model */ if (!qdev_realize(DEVICE(&chip9->occ), NULL, errp)) { return; } pnv_xscom_add_subregion(chip, PNV9_XSCOM_OCC_BASE, &chip9->occ.xscom_regs); qdev_connect_gpio_out(DEVICE(&chip9->occ), 0, qdev_get_gpio_in( - DEVICE(&chip9->psi), PSIHB9_IRQ_OCC)); + DEVICE(psi9), PSIHB9_IRQ_OCC)); /* OCC SRAM model */ memory_region_add_subregion(get_system_memory(), PNV9_OCC_SENSOR_BASE(chip), @@ -1586,7 +1720,7 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) pnv_xscom_add_subregion(chip, PNV9_XSCOM_SBE_MBOX_BASE, &chip9->sbe.xscom_mbox_regs); qdev_connect_gpio_out(DEVICE(&chip9->sbe), 0, qdev_get_gpio_in( - DEVICE(&chip9->psi), PSIHB9_IRQ_PSU)); + DEVICE(psi9), PSIHB9_IRQ_PSU)); /* HOMER */ object_property_set_link(OBJECT(&chip9->homer), "chip", OBJECT(chip), @@ -1627,7 +1761,7 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) PNV9_XSCOM_I2CM_SIZE, &chip9->i2c[i].xscom_regs); qdev_connect_gpio_out(DEVICE(&chip9->i2c[i]), 0, - qdev_get_gpio_in(DEVICE(&chip9->psi), + qdev_get_gpio_in(DEVICE(psi9), PSIHB9_IRQ_SBE_I2C)); } } @@ -1646,7 +1780,7 @@ static void pnv_chip_power9_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x220d104900008000ull; /* P9 Nimbus DD2.0 */ k->cores_mask = POWER9_CORE_MASK; - k->core_pir = pnv_chip_core_pir_p9; + k->chip_pir = pnv_chip_pir_p9; k->intc_create = pnv_chip_power9_intc_create; k->intc_reset = pnv_chip_power9_intc_reset; k->intc_destroy = pnv_chip_power9_intc_destroy; @@ -1677,9 +1811,13 @@ static void pnv_chip_power10_instance_init(Object *obj) "xive-fabric"); object_initialize_child(obj, "psi", &chip10->psi, TYPE_PNV10_PSI); object_initialize_child(obj, "lpc", &chip10->lpc, TYPE_PNV10_LPC); + object_initialize_child(obj, "chiptod", &chip10->chiptod, + TYPE_PNV10_CHIPTOD); object_initialize_child(obj, "occ", &chip10->occ, TYPE_PNV10_OCC); object_initialize_child(obj, "sbe", &chip10->sbe, TYPE_PNV10_SBE); object_initialize_child(obj, "homer", &chip10->homer, TYPE_PNV10_HOMER); + object_initialize_child(obj, "n1-chiplet", &chip10->n1_chiplet, + TYPE_PNV_N1_CHIPLET); chip->num_pecs = pcc->num_pecs; @@ -1810,6 +1948,19 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) chip->dt_isa_nodename = 
g_strdup_printf("/lpcm-opb@%" PRIx64 "/lpc@0", (uint64_t) PNV10_LPCM_BASE(chip)); + /* ChipTOD */ + object_property_set_bool(OBJECT(&chip10->chiptod), "primary", + chip->chip_id == 0, &error_abort); + object_property_set_bool(OBJECT(&chip10->chiptod), "secondary", + chip->chip_id == 1, &error_abort); + object_property_set_link(OBJECT(&chip10->chiptod), "chip", OBJECT(chip), + &error_abort); + if (!qdev_realize(DEVICE(&chip10->chiptod), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV10_XSCOM_CHIPTOD_BASE, + &chip10->chiptod.xscom_regs); + /* Create the simplified OCC model */ if (!qdev_realize(DEVICE(&chip10->occ), NULL, errp)) { return; @@ -1849,6 +2000,19 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(get_system_memory(), PNV10_HOMER_BASE(chip), &chip10->homer.regs); + /* N1 chiplet */ + if (!qdev_realize(DEVICE(&chip10->n1_chiplet), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV10_XSCOM_N1_CHIPLET_CTRL_REGS_BASE, + &chip10->n1_chiplet.nest_pervasive.xscom_ctrl_regs_mr); + + pnv_xscom_add_subregion(chip, PNV10_XSCOM_N1_PB_SCOM_EQ_BASE, + &chip10->n1_chiplet.xscom_pb_eq_mr); + + pnv_xscom_add_subregion(chip, PNV10_XSCOM_N1_PB_SCOM_ES_BASE, + &chip10->n1_chiplet.xscom_pb_es_mr); + /* PHBs */ pnv_chip_power10_phb_realize(chip, &local_err); if (local_err) { @@ -1879,6 +2043,39 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_SBE_I2C)); } + +} + +static void pnv_rainier_i2c_init(PnvMachineState *pnv) +{ + int i; + for (i = 0; i < pnv->num_chips; i++) { + Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); + + /* + * Add a PCA9552 I2C device for PCIe hotplug control + * to engine 2, bus 1, address 0x63 + */ + I2CSlave *dev = i2c_slave_create_simple(chip10->i2c[2].busses[1], + "pca9552", 0x63); + + /* + * Connect PCA9552 GPIO pins 0-4 (SLOTx_EN) outputs to GPIO pins 5-9 + * (SLOTx_PG) inputs in order to fake the pgood state of PCIe slots + * after hypervisor code sets a SLOTx_EN pin high. 
+ */ + qdev_connect_gpio_out(DEVICE(dev), 0, qdev_get_gpio_in(DEVICE(dev), 5)); + qdev_connect_gpio_out(DEVICE(dev), 1, qdev_get_gpio_in(DEVICE(dev), 6)); + qdev_connect_gpio_out(DEVICE(dev), 2, qdev_get_gpio_in(DEVICE(dev), 7)); + qdev_connect_gpio_out(DEVICE(dev), 3, qdev_get_gpio_in(DEVICE(dev), 8)); + qdev_connect_gpio_out(DEVICE(dev), 4, qdev_get_gpio_in(DEVICE(dev), 9)); + + /* + * Add a PCA9554 I2C device for cable card presence detection + * to engine 2, bus 1, address 0x25 + */ + i2c_slave_create_simple(chip10->i2c[2].busses[1], "pca9554", 0x25); + } } static uint32_t pnv_chip_power10_xscom_pcba(PnvChip *chip, uint64_t addr) @@ -1895,7 +2092,7 @@ static void pnv_chip_power10_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x120da04900008000ull; /* P10 DD1.0 (with NX) */ k->cores_mask = POWER10_CORE_MASK; - k->core_pir = pnv_chip_core_pir_p10; + k->chip_pir = pnv_chip_pir_p10; k->intc_create = pnv_chip_power10_intc_create; k->intc_reset = pnv_chip_power10_intc_reset; k->intc_destroy = pnv_chip_power10_intc_destroy; @@ -1985,8 +2182,8 @@ static void pnv_chip_core_realize(PnvChip *chip, Error **errp) chip->nr_threads, &error_fatal); object_property_set_int(OBJECT(pnv_core), CPU_CORE_PROP_CORE_ID, core_hwid, &error_fatal); - object_property_set_int(OBJECT(pnv_core), "pir", - pcc->core_pir(chip, core_hwid), &error_fatal); + object_property_set_int(OBJECT(pnv_core), "hwid", core_hwid, + &error_fatal); object_property_set_int(OBJECT(pnv_core), "hrmor", pnv->fw_load_addr, &error_fatal); object_property_set_link(OBJECT(pnv_core), "chip", OBJECT(chip), @@ -2035,6 +2232,21 @@ static void pnv_chip_class_init(ObjectClass *klass, void *data) dc->desc = "PowerNV Chip"; } +PnvCore *pnv_chip_find_core(PnvChip *chip, uint32_t core_id) +{ + int i; + + for (i = 0; i < chip->nr_cores; i++) { + PnvCore *pc = chip->cores[i]; + CPUCore *cc = CPU_CORE(pc); + + if (cc->core_id == core_id) { + return pc; + } + } + return NULL; +} + PowerPCCPU *pnv_chip_find_cpu(PnvChip *chip, uint32_t pir) { int i, j; @@ -2242,8 +2454,6 @@ static void pnv_machine_power9_class_init(ObjectClass *oc, void *data) xfc->match_nvt = pnv_match_nvt; - mc->alias = "powernv"; - pmc->compat = compat; pmc->compat_size = sizeof(compat); pmc->dt_power_mgt = pnv_dt_power_mgt; @@ -2251,7 +2461,7 @@ static void pnv_machine_power9_class_init(ObjectClass *oc, void *data) machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB); } -static void pnv_machine_power10_class_init(ObjectClass *oc, void *data) +static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc); @@ -2263,10 +2473,11 @@ static void pnv_machine_power10_class_init(ObjectClass *oc, void *data) { TYPE_PNV_PHB_ROOT_PORT, "version", "5" }, }; - mc->desc = "IBM PowerNV (Non-Virtualized) POWER10"; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0"); compat_props_add(mc->compat_props, phb_compat, G_N_ELEMENTS(phb_compat)); + mc->alias = "powernv"; + pmc->compat = compat; pmc->compat_size = sizeof(compat); pmc->dt_power_mgt = pnv_dt_power_mgt; @@ -2276,6 +2487,24 @@ static void pnv_machine_power10_class_init(ObjectClass *oc, void *data) machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB); } +static void pnv_machine_power10_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + pnv_machine_p10_common_class_init(oc, data); + mc->desc = "IBM PowerNV (Non-Virtualized) POWER10"; +} + +static void 
pnv_machine_p10_rainier_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc); + + pnv_machine_p10_common_class_init(oc, data); + mc->desc = "IBM PowerNV (Non-Virtualized) POWER10 Rainier"; + pmc->i2c_init = pnv_rainier_i2c_init; +} + static bool pnv_machine_get_hb(Object *obj, Error **errp) { PnvMachineState *pnv = PNV_MACHINE(obj); @@ -2294,8 +2523,7 @@ static void pnv_machine_set_hb(Object *obj, bool value, Error **errp) static void pnv_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); cpu_synchronize_state(cs); ppc_cpu_do_system_reset(cs); @@ -2381,6 +2609,11 @@ static void pnv_machine_class_init(ObjectClass *oc, void *data) } static const TypeInfo types[] = { + { + .name = MACHINE_TYPE_NAME("powernv10-rainier"), + .parent = MACHINE_TYPE_NAME("powernv10"), + .class_init = pnv_machine_p10_rainier_class_init, + }, { .name = MACHINE_TYPE_NAME("powernv10"), .parent = TYPE_PNV_MACHINE, diff --git a/hw/ppc/pnv_bmc.c b/hw/ppc/pnv_bmc.c index 99f1e8d7f9f..0c1274df21a 100644 --- a/hw/ppc/pnv_bmc.c +++ b/hw/ppc/pnv_bmc.c @@ -269,13 +269,13 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor) */ IPMIBmc *pnv_bmc_create(PnvPnor *pnor) { - Object *obj; + DeviceState *dev; - obj = object_new(TYPE_IPMI_BMC_SIMULATOR); - qdev_realize(DEVICE(obj), NULL, &error_fatal); - pnv_bmc_set_pnor(IPMI_BMC(obj), pnor); + dev = qdev_new(TYPE_IPMI_BMC_SIMULATOR); + qdev_realize(dev, NULL, &error_fatal); + pnv_bmc_set_pnor(IPMI_BMC(dev), pnor); - return IPMI_BMC(obj); + return IPMI_BMC(dev); } typedef struct ForeachArgs { diff --git a/hw/ppc/pnv_chiptod.c b/hw/ppc/pnv_chiptod.c new file mode 100644 index 00000000000..3831a72101e --- /dev/null +++ b/hw/ppc/pnv_chiptod.c @@ -0,0 +1,586 @@ +/* + * QEMU PowerPC PowerNV Emulation of some ChipTOD behaviour + * + * Copyright (c) 2022-2023, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * ChipTOD (aka TOD) is a facility implemented in the nest / pervasive. The + * purpose is to keep time-of-day across chips and cores. + * + * There is a master chip TOD, which sends signals to slave chip TODs to + * keep them synchronized. There are two sets of configuration registers + * called primary and secondary, which can be used fail over. + * + * The chip TOD also distributes synchronisation signals to the timebase + * facility in each of the cores on the chip. In particular there is a + * feature that can move the TOD value in the ChipTOD to and from the TB. + * + * Initialisation typically brings all ChipTOD into sync (see tod_state), + * and then brings each core TB into sync with the ChipTODs (see timebase + * state and TFMR). This model is a very basic simulation of the init sequence + * performed by skiboot. 
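 *
 * Roughly, the bring-up this model expects from firmware is:
 *   1. write TOD_LOAD_TOD_MOD_REG with bit 0 set    (error -> not_set)
 *   2. write TOD_LOAD_TOD_REG with bit 63 clear     (not_set -> running;
 *      with bit 63 set it parks in stopped until TOD_START_TOD_REG)
 *   3. per core: write TOD_TX_TTYPE_CTRL_REG to select the target PC TOD,
 *      then TOD_MOVE_TOD_TO_TB_REG to transfer the TOD value into that
 *      core's timebase (only thread 0 of each core is modelled).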
+ */ + +#include "qemu/osdep.h" +#include "sysemu/reset.h" +#include "target/ppc/cpu.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/ppc/fdt.h" +#include "hw/ppc/ppc.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_chip.h" +#include "hw/ppc/pnv_core.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/ppc/pnv_chiptod.h" +#include "trace.h" + +#include + +/* TOD chip XSCOM addresses */ +#define TOD_M_PATH_CTRL_REG 0x00000000 /* Master Path ctrl reg */ +#define TOD_PRI_PORT_0_CTRL_REG 0x00000001 /* Primary port0 ctrl reg */ +#define TOD_PRI_PORT_1_CTRL_REG 0x00000002 /* Primary port1 ctrl reg */ +#define TOD_SEC_PORT_0_CTRL_REG 0x00000003 /* Secondary p0 ctrl reg */ +#define TOD_SEC_PORT_1_CTRL_REG 0x00000004 /* Secondary p1 ctrl reg */ +#define TOD_S_PATH_CTRL_REG 0x00000005 /* Slave Path ctrl reg */ +#define TOD_I_PATH_CTRL_REG 0x00000006 /* Internal Path ctrl reg */ + +/* -- TOD primary/secondary master/slave control register -- */ +#define TOD_PSS_MSS_CTRL_REG 0x00000007 + +/* -- TOD primary/secondary master/slave status register -- */ +#define TOD_PSS_MSS_STATUS_REG 0x00000008 + +/* TOD chip XSCOM addresses */ +#define TOD_CHIP_CTRL_REG 0x00000010 /* Chip control reg */ + +#define TOD_TX_TTYPE_0_REG 0x00000011 +#define TOD_TX_TTYPE_1_REG 0x00000012 /* PSS switch reg */ +#define TOD_TX_TTYPE_2_REG 0x00000013 /* Enable step checkers */ +#define TOD_TX_TTYPE_3_REG 0x00000014 /* Request TOD reg */ +#define TOD_TX_TTYPE_4_REG 0x00000015 /* Send TOD reg */ +#define TOD_TX_TTYPE_5_REG 0x00000016 /* Invalidate TOD reg */ + +#define TOD_MOVE_TOD_TO_TB_REG 0x00000017 +#define TOD_LOAD_TOD_MOD_REG 0x00000018 +#define TOD_LOAD_TOD_REG 0x00000021 +#define TOD_START_TOD_REG 0x00000022 +#define TOD_FSM_REG 0x00000024 + +#define TOD_TX_TTYPE_CTRL_REG 0x00000027 /* TX TTYPE Control reg */ +#define TOD_TX_TTYPE_PIB_SLAVE_ADDR PPC_BITMASK(26, 31) + +/* -- TOD Error interrupt register -- */ +#define TOD_ERROR_REG 0x00000030 + +/* PC unit PIB address which recieves the timebase transfer from TOD */ +#define PC_TOD 0x4A3 + +/* + * The TOD FSM: + * - The reset state is 0 error. + * - A hardware error detected will transition to state 0 from any state. + * - LOAD_TOD_MOD and TTYPE5 will transition to state 7 from any state. + * + * | state | action | new | + * |------------+------------------------------+-----| + * | 0 error | LOAD_TOD_MOD | 7 | + * | 0 error | Recv TTYPE5 (invalidate TOD) | 7 | + * | 7 not_set | LOAD_TOD (bit-63 = 0) | 2 | + * | 7 not_set | LOAD_TOD (bit-63 = 1) | 1 | + * | 7 not_set | Recv TTYPE4 (send TOD) | 2 | + * | 2 running | | | + * | 1 stopped | START_TOD | 2 | + * + * Note the hardware has additional states but they relate to the sending + * and receiving and waiting on synchronisation signals between chips and + * are not described or modeled here. + */ + +static uint64_t pnv_chiptod_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvChipTOD *chiptod = PNV_CHIPTOD(opaque); + uint32_t offset = addr >> 3; + uint64_t val = 0; + + switch (offset) { + case TOD_PSS_MSS_STATUS_REG: + /* + * ChipTOD does not support configurations other than primary + * master, does not support errors, etc. 
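 * The status returned below therefore always reports: step checkers
 * valid, master path select, primary configuration in use, and the
 * running bit once tod_state reaches tod_running; only the active
 * master, backup master or slave role depends on the primary and
 * secondary properties.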
+ */ + val |= PPC_BITMASK(6, 10); /* STEP checker validity */ + val |= PPC_BIT(12); /* Primary config master path select */ + if (chiptod->tod_state == tod_running) { + val |= PPC_BIT(20); /* Is running */ + } + val |= PPC_BIT(21); /* Is using primary config */ + val |= PPC_BIT(26); /* Is using master path select */ + + if (chiptod->primary) { + val |= PPC_BIT(23); /* Is active master */ + } else if (chiptod->secondary) { + val |= PPC_BIT(24); /* Is backup master */ + } else { + val |= PPC_BIT(25); /* Is slave (should backup master set this?) */ + } + break; + case TOD_PSS_MSS_CTRL_REG: + val = chiptod->pss_mss_ctrl_reg; + break; + case TOD_TX_TTYPE_CTRL_REG: + val = 0; + break; + case TOD_ERROR_REG: + val = chiptod->tod_error; + break; + case TOD_FSM_REG: + if (chiptod->tod_state == tod_running) { + val |= PPC_BIT(4); + } + break; + default: + qemu_log_mask(LOG_UNIMP, "pnv_chiptod: unimplemented register: Ox%" + HWADDR_PRIx "\n", addr >> 3); + } + + trace_pnv_chiptod_xscom_read(addr >> 3, val); + + return val; +} + +static void chiptod_receive_ttype(PnvChipTOD *chiptod, uint32_t trigger) +{ + switch (trigger) { + case TOD_TX_TTYPE_4_REG: + if (chiptod->tod_state != tod_not_set) { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: received TTYPE4 in " + " state %d, should be in 7 (TOD_NOT_SET)\n", + chiptod->tod_state); + } else { + chiptod->tod_state = tod_running; + } + break; + case TOD_TX_TTYPE_5_REG: + /* Works from any state */ + chiptod->tod_state = tod_not_set; + break; + default: + qemu_log_mask(LOG_UNIMP, "pnv_chiptod: received unimplemented " + " TTYPE %u\n", trigger); + break; + } +} + +static void chiptod_power9_broadcast_ttype(PnvChipTOD *sender, + uint32_t trigger) +{ + PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); + int i; + + for (i = 0; i < pnv->num_chips; i++) { + Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); + PnvChipTOD *chiptod = &chip9->chiptod; + + if (chiptod != sender) { + chiptod_receive_ttype(chiptod, trigger); + } + } +} + +static void chiptod_power10_broadcast_ttype(PnvChipTOD *sender, + uint32_t trigger) +{ + PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); + int i; + + for (i = 0; i < pnv->num_chips; i++) { + Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); + PnvChipTOD *chiptod = &chip10->chiptod; + + if (chiptod != sender) { + chiptod_receive_ttype(chiptod, trigger); + } + } +} + +static PnvCore *pnv_chip_get_core_by_xscom_base(PnvChip *chip, + uint32_t xscom_base) +{ + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip); + int i; + + for (i = 0; i < chip->nr_cores; i++) { + PnvCore *pc = chip->cores[i]; + CPUCore *cc = CPU_CORE(pc); + int core_hwid = cc->core_id; + + if (pcc->xscom_core_base(chip, core_hwid) == xscom_base) { + return pc; + } + } + return NULL; +} + +static PnvCore *chiptod_power9_tx_ttype_target(PnvChipTOD *chiptod, + uint64_t val) +{ + /* + * skiboot uses Core ID for P9, though SCOM should work too. 
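 * With Core ID addressing the target core is taken from the
 * TOD_TX_TTYPE_PIB_SLAVE_ADDR field (bits 26:31) of the written value;
 * with SCOM addressing (bit 35 set) the upper 32 bits carry a full XSCOM
 * address whose low 12 bits must name the core's PC TOD register (0x4A3).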
+ */ + if (val & PPC_BIT(35)) { /* SCOM addressing */ + uint32_t addr = val >> 32; + uint32_t reg = addr & 0xfff; + + if (reg != PC_TOD) { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: SCOM addressing: " + "unimplemented slave register 0x%" PRIx32 "\n", reg); + return NULL; + } + + return pnv_chip_get_core_by_xscom_base(chiptod->chip, addr & ~0xfff); + + } else { /* Core ID addressing */ + uint32_t core_id = GETFIELD(TOD_TX_TTYPE_PIB_SLAVE_ADDR, val) & 0x1f; + return pnv_chip_find_core(chiptod->chip, core_id); + } +} + +static PnvCore *chiptod_power10_tx_ttype_target(PnvChipTOD *chiptod, + uint64_t val) +{ + /* + * skiboot uses SCOM for P10 because Core ID was unable to be made to + * work correctly. For this reason only SCOM addressing is implemented. + */ + if (val & PPC_BIT(35)) { /* SCOM addressing */ + uint32_t addr = val >> 32; + uint32_t reg = addr & 0xfff; + + if (reg != PC_TOD) { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: SCOM addressing: " + "unimplemented slave register 0x%" PRIx32 "\n", reg); + return NULL; + } + + /* + * This may not deal with P10 big-core addressing at the moment. + * The big-core code in skiboot syncs small cores, but it targets + * the even PIR (first small-core) when syncing second small-core. + */ + return pnv_chip_get_core_by_xscom_base(chiptod->chip, addr & ~0xfff); + + } else { /* Core ID addressing */ + qemu_log_mask(LOG_UNIMP, "pnv_chiptod: TX TTYPE Core ID " + "addressing is not implemented for POWER10\n"); + return NULL; + } +} + +static void pnv_chiptod_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvChipTOD *chiptod = PNV_CHIPTOD(opaque); + PnvChipTODClass *pctc = PNV_CHIPTOD_GET_CLASS(chiptod); + uint32_t offset = addr >> 3; + + trace_pnv_chiptod_xscom_write(addr >> 3, val); + + switch (offset) { + case TOD_PSS_MSS_CTRL_REG: + /* Is this correct? */ + if (chiptod->primary) { + val |= PPC_BIT(1); /* TOD is master */ + } else { + val &= ~PPC_BIT(1); + } + val |= PPC_BIT(2); /* Drawer is master (don't simulate multi-drawer) */ + chiptod->pss_mss_ctrl_reg = val & PPC_BITMASK(0, 31); + break; + + case TOD_TX_TTYPE_CTRL_REG: + /* + * This register sets the target of the TOD value transfer initiated + * by TOD_MOVE_TOD_TO_TB. The TOD is able to send the address to + * any target register, though in practice only the PC TOD register + * should be used. ChipTOD has a "SCOM addressing" mode which fully + * specifies the SCOM address, and a core-ID mode which uses the + * core ID to target the PC TOD for a given core. 
+ */ + chiptod->slave_pc_target = pctc->tx_ttype_target(chiptod, val); + if (!chiptod->slave_pc_target) { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: xscom write reg" + " TOD_TX_TTYPE_CTRL_REG val 0x%" PRIx64 + " invalid slave address\n", val); + } + break; + case TOD_ERROR_REG: + chiptod->tod_error &= ~val; + break; + case TOD_LOAD_TOD_MOD_REG: + if (!(val & PPC_BIT(0))) { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: xscom write reg" + " TOD_LOAD_TOD_MOD_REG with bad val 0x%" PRIx64"\n", + val); + } else { + chiptod->tod_state = tod_not_set; + } + break; + case TOD_LOAD_TOD_REG: + if (chiptod->tod_state != tod_not_set) { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: LOAD_TOG_REG in " + " state %d, should be in 7 (TOD_NOT_SET)\n", + chiptod->tod_state); + } else { + if (val & PPC_BIT(63)) { + chiptod->tod_state = tod_stopped; + } else { + chiptod->tod_state = tod_running; + } + } + break; + + case TOD_MOVE_TOD_TO_TB_REG: + /* + * XXX: it should be a cleaner model to have this drive a SCOM + * transaction to the target address, and implement the state machine + * in the PnvCore. For now, this hack makes things work. + */ + if (chiptod->tod_state != tod_running) { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: xscom write reg" + " TOD_MOVE_TOD_TO_TB_REG in bad state %d\n", + chiptod->tod_state); + } else if (!(val & PPC_BIT(0))) { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: xscom write reg" + " TOD_MOVE_TOD_TO_TB_REG with bad val 0x%" PRIx64"\n", + val); + } else if (chiptod->slave_pc_target == NULL) { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: xscom write reg" + " TOD_MOVE_TOD_TO_TB_REG with no slave target\n"); + } else { + PowerPCCPU *cpu = chiptod->slave_pc_target->threads[0]; + CPUPPCState *env = &cpu->env; + + /* + * Moving TOD to TB will set the TB of all threads in a + * core, so skiboot only does this once per thread0, so + * that is where we keep the timebase state machine. + * + * It is likely possible for TBST to be driven from other + * threads in the core, but for now we only implement it for + * thread 0. 
+ */ + + if (env->pnv_tod_tbst.tb_ready_for_tod) { + env->pnv_tod_tbst.tod_sent_to_tb = 1; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: xscom write reg" + " TOD_MOVE_TOD_TO_TB_REG with TB not ready to" + " receive TOD\n"); + } + } + break; + case TOD_START_TOD_REG: + if (chiptod->tod_state != tod_stopped) { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: LOAD_TOG_REG in " + " state %d, should be in 1 (TOD_STOPPED)\n", + chiptod->tod_state); + } else { + chiptod->tod_state = tod_running; + } + break; + case TOD_TX_TTYPE_4_REG: + case TOD_TX_TTYPE_5_REG: + pctc->broadcast_ttype(chiptod, offset); + break; + default: + qemu_log_mask(LOG_UNIMP, "pnv_chiptod: unimplemented register: Ox%" + HWADDR_PRIx "\n", addr >> 3); + } +} + +static const MemoryRegionOps pnv_chiptod_xscom_ops = { + .read = pnv_chiptod_xscom_read, + .write = pnv_chiptod_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static int pnv_chiptod_dt_xscom(PnvXScomInterface *dev, void *fdt, + int xscom_offset, + const char compat[], size_t compat_size) +{ + PnvChipTOD *chiptod = PNV_CHIPTOD(dev); + g_autofree char *name = NULL; + int offset; + uint32_t chiptod_pcba = PNV9_XSCOM_CHIPTOD_BASE; + uint32_t reg[] = { + cpu_to_be32(chiptod_pcba), + cpu_to_be32(PNV9_XSCOM_CHIPTOD_SIZE) + }; + + name = g_strdup_printf("chiptod@%x", chiptod_pcba); + offset = fdt_add_subnode(fdt, xscom_offset, name); + _FDT(offset); + + if (chiptod->primary) { + _FDT((fdt_setprop(fdt, offset, "primary", NULL, 0))); + } else if (chiptod->secondary) { + _FDT((fdt_setprop(fdt, offset, "secondary", NULL, 0))); + } + + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); + _FDT((fdt_setprop(fdt, offset, "compatible", compat, compat_size))); + return 0; +} + +static int pnv_chiptod_power9_dt_xscom(PnvXScomInterface *dev, void *fdt, + int xscom_offset) +{ + const char compat[] = "ibm,power-chiptod\0ibm,power9-chiptod"; + + return pnv_chiptod_dt_xscom(dev, fdt, xscom_offset, compat, sizeof(compat)); +} + +static Property pnv_chiptod_properties[] = { + DEFINE_PROP_BOOL("primary", PnvChipTOD, primary, false), + DEFINE_PROP_BOOL("secondary", PnvChipTOD, secondary, false), + DEFINE_PROP_LINK("chip", PnvChipTOD , chip, TYPE_PNV_CHIP, PnvChip *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_chiptod_power9_class_init(ObjectClass *klass, void *data) +{ + PnvChipTODClass *pctc = PNV_CHIPTOD_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); + + dc->desc = "PowerNV ChipTOD Controller (POWER9)"; + device_class_set_props(dc, pnv_chiptod_properties); + + xdc->dt_xscom = pnv_chiptod_power9_dt_xscom; + + pctc->broadcast_ttype = chiptod_power9_broadcast_ttype; + pctc->tx_ttype_target = chiptod_power9_tx_ttype_target; + + pctc->xscom_size = PNV_XSCOM_CHIPTOD_SIZE; +} + +static const TypeInfo pnv_chiptod_power9_type_info = { + .name = TYPE_PNV9_CHIPTOD, + .parent = TYPE_PNV_CHIPTOD, + .instance_size = sizeof(PnvChipTOD), + .class_init = pnv_chiptod_power9_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static int pnv_chiptod_power10_dt_xscom(PnvXScomInterface *dev, void *fdt, + int xscom_offset) +{ + const char compat[] = "ibm,power-chiptod\0ibm,power10-chiptod"; + + return pnv_chiptod_dt_xscom(dev, fdt, xscom_offset, compat, sizeof(compat)); +} + +static void pnv_chiptod_power10_class_init(ObjectClass *klass, void *data) 
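The dt_xscom hook above publishes the ChipTOD as a chiptod@<base> node carrying "reg" and "compatible" properties plus an empty "primary"/"secondary" flag. A minimal standalone libfdt sketch of the same pattern follows; the base/size values and node name are placeholders, not the PNV9_XSCOM_CHIPTOD_* constants.

#include <stdio.h>
#include <libfdt.h>                 /* from dtc/libfdt */

int main(void)
{
    static char buf[4096];
    /* Placeholder cells; the real base/size come from PNV9_XSCOM_CHIPTOD_* */
    fdt32_t reg[2] = { cpu_to_fdt32(0x40000), cpu_to_fdt32(0x34) };
    const char compat[] = "ibm,power-chiptod\0ibm,power9-chiptod";
    int node;

    if (fdt_create_empty_tree(buf, sizeof(buf)) < 0) {
        return 1;
    }
    node = fdt_add_subnode(buf, 0, "chiptod@40000");   /* 0 = root node */
    if (node < 0) {
        return 1;
    }
    fdt_setprop(buf, node, "primary", NULL, 0);        /* empty property */
    fdt_setprop(buf, node, "reg", reg, sizeof(reg));   /* <base size>    */
    fdt_setprop(buf, node, "compatible", compat, sizeof(compat));
    printf("node offset %d\n", node);
    return 0;
}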
+{ + PnvChipTODClass *pctc = PNV_CHIPTOD_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); + + dc->desc = "PowerNV ChipTOD Controller (POWER10)"; + device_class_set_props(dc, pnv_chiptod_properties); + + xdc->dt_xscom = pnv_chiptod_power10_dt_xscom; + + pctc->broadcast_ttype = chiptod_power10_broadcast_ttype; + pctc->tx_ttype_target = chiptod_power10_tx_ttype_target; + + pctc->xscom_size = PNV_XSCOM_CHIPTOD_SIZE; +} + +static const TypeInfo pnv_chiptod_power10_type_info = { + .name = TYPE_PNV10_CHIPTOD, + .parent = TYPE_PNV_CHIPTOD, + .instance_size = sizeof(PnvChipTOD), + .class_init = pnv_chiptod_power10_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static void pnv_chiptod_reset(void *dev) +{ + PnvChipTOD *chiptod = PNV_CHIPTOD(dev); + + chiptod->pss_mss_ctrl_reg = 0; + if (chiptod->primary) { + chiptod->pss_mss_ctrl_reg |= PPC_BIT(1); /* TOD is master */ + } + /* Drawer is master (we do not simulate multi-drawer) */ + chiptod->pss_mss_ctrl_reg |= PPC_BIT(2); + + chiptod->tod_error = 0; + chiptod->tod_state = tod_error; +} + +static void pnv_chiptod_realize(DeviceState *dev, Error **errp) +{ + PnvChipTOD *chiptod = PNV_CHIPTOD(dev); + PnvChipTODClass *pctc = PNV_CHIPTOD_GET_CLASS(chiptod); + + /* XScom regions for ChipTOD registers */ + pnv_xscom_region_init(&chiptod->xscom_regs, OBJECT(dev), + &pnv_chiptod_xscom_ops, chiptod, "xscom-chiptod", + pctc->xscom_size); + + qemu_register_reset(pnv_chiptod_reset, chiptod); +} + +static void pnv_chiptod_unrealize(DeviceState *dev) +{ + PnvChipTOD *chiptod = PNV_CHIPTOD(dev); + + qemu_unregister_reset(pnv_chiptod_reset, chiptod); +} + +static void pnv_chiptod_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pnv_chiptod_realize; + dc->unrealize = pnv_chiptod_unrealize; + dc->desc = "PowerNV ChipTOD Controller"; + dc->user_creatable = false; +} + +static const TypeInfo pnv_chiptod_type_info = { + .name = TYPE_PNV_CHIPTOD, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PnvChipTOD), + .class_init = pnv_chiptod_class_init, + .class_size = sizeof(PnvChipTODClass), + .abstract = true, +}; + +static void pnv_chiptod_register_types(void) +{ + type_register_static(&pnv_chiptod_type_info); + type_register_static(&pnv_chiptod_power9_type_info); + type_register_static(&pnv_chiptod_power10_type_info); +} + +type_init(pnv_chiptod_register_types); diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c index 8c7afe037f0..f40ab721d6f 100644 --- a/hw/ppc/pnv_core.c +++ b/hw/ppc/pnv_core.c @@ -226,7 +226,7 @@ static void pnv_core_cpu_realize(PnvCore *pc, PowerPCCPU *cpu, Error **errp, int thread_index) { CPUPPCState *env = &cpu->env; - int core_pir; + int core_hwid; ppc_spr_t *pir = &env->spr_cb[SPR_PIR]; ppc_spr_t *tir = &env->spr_cb[SPR_TIR]; Error *local_err = NULL; @@ -242,10 +242,10 @@ static void pnv_core_cpu_realize(PnvCore *pc, PowerPCCPU *cpu, Error **errp, return; } - core_pir = object_property_get_uint(OBJECT(pc), "pir", &error_abort); + core_hwid = object_property_get_uint(OBJECT(pc), "hwid", &error_abort); tir->default_value = thread_index; - pir->default_value = core_pir + thread_index; + pir->default_value = pcc->chip_pir(pc->chip, core_hwid, thread_index); /* Set time-base frequency to 512 MHz */ cpu_ppc_tb_init(env, PNV_TIMEBASE_FREQ); @@ -342,7 +342,7 @@ static void pnv_core_unrealize(DeviceState *dev) } static Property pnv_core_properties[] = { - DEFINE_PROP_UINT32("pir", 
PnvCore, pir, 0), + DEFINE_PROP_UINT32("hwid", PnvCore, hwid, 0), DEFINE_PROP_UINT64("hrmor", PnvCore, hrmor, 0), DEFINE_PROP_LINK("chip", PnvCore, chip, TYPE_PNV_CHIP, PnvChip *), DEFINE_PROP_END_OF_LIST(), diff --git a/hw/ppc/pnv_i2c.c b/hw/ppc/pnv_i2c.c index 656a48eebe5..eec5047ce83 100644 --- a/hw/ppc/pnv_i2c.c +++ b/hw/ppc/pnv_i2c.c @@ -22,136 +22,7 @@ #include -/* I2C FIFO register */ -#define I2C_FIFO_REG 0x4 -#define I2C_FIFO PPC_BITMASK(0, 7) - -/* I2C command register */ -#define I2C_CMD_REG 0x5 -#define I2C_CMD_WITH_START PPC_BIT(0) -#define I2C_CMD_WITH_ADDR PPC_BIT(1) -#define I2C_CMD_READ_CONT PPC_BIT(2) -#define I2C_CMD_WITH_STOP PPC_BIT(3) -#define I2C_CMD_INTR_STEERING PPC_BITMASK(6, 7) /* P9 */ -#define I2C_CMD_INTR_STEER_HOST 1 -#define I2C_CMD_INTR_STEER_OCC 2 -#define I2C_CMD_DEV_ADDR PPC_BITMASK(8, 14) -#define I2C_CMD_READ_NOT_WRITE PPC_BIT(15) -#define I2C_CMD_LEN_BYTES PPC_BITMASK(16, 31) -#define I2C_MAX_TFR_LEN 0xfff0ull - -/* I2C mode register */ -#define I2C_MODE_REG 0x6 -#define I2C_MODE_BIT_RATE_DIV PPC_BITMASK(0, 15) -#define I2C_MODE_PORT_NUM PPC_BITMASK(16, 21) -#define I2C_MODE_ENHANCED PPC_BIT(28) -#define I2C_MODE_DIAGNOSTIC PPC_BIT(29) -#define I2C_MODE_PACING_ALLOW PPC_BIT(30) -#define I2C_MODE_WRAP PPC_BIT(31) - -/* I2C watermark register */ -#define I2C_WATERMARK_REG 0x7 -#define I2C_WATERMARK_HIGH PPC_BITMASK(16, 19) -#define I2C_WATERMARK_LOW PPC_BITMASK(24, 27) - -/* - * I2C interrupt mask and condition registers - * - * NB: The function of 0x9 and 0xa changes depending on whether you're reading - * or writing to them. When read they return the interrupt condition bits - * and on writes they update the interrupt mask register. - * - * The bit definitions are the same for all the interrupt registers. - */ -#define I2C_INTR_MASK_REG 0x8 - -#define I2C_INTR_RAW_COND_REG 0x9 /* read */ -#define I2C_INTR_MASK_OR_REG 0x9 /* write*/ - -#define I2C_INTR_COND_REG 0xa /* read */ -#define I2C_INTR_MASK_AND_REG 0xa /* write */ - -#define I2C_INTR_ALL PPC_BITMASK(16, 31) -#define I2C_INTR_INVALID_CMD PPC_BIT(16) -#define I2C_INTR_LBUS_PARITY_ERR PPC_BIT(17) -#define I2C_INTR_BKEND_OVERRUN_ERR PPC_BIT(18) -#define I2C_INTR_BKEND_ACCESS_ERR PPC_BIT(19) -#define I2C_INTR_ARBT_LOST_ERR PPC_BIT(20) -#define I2C_INTR_NACK_RCVD_ERR PPC_BIT(21) -#define I2C_INTR_DATA_REQ PPC_BIT(22) -#define I2C_INTR_CMD_COMP PPC_BIT(23) -#define I2C_INTR_STOP_ERR PPC_BIT(24) -#define I2C_INTR_I2C_BUSY PPC_BIT(25) -#define I2C_INTR_NOT_I2C_BUSY PPC_BIT(26) -#define I2C_INTR_SCL_EQ_1 PPC_BIT(28) -#define I2C_INTR_SCL_EQ_0 PPC_BIT(29) -#define I2C_INTR_SDA_EQ_1 PPC_BIT(30) -#define I2C_INTR_SDA_EQ_0 PPC_BIT(31) - -/* I2C status register */ -#define I2C_RESET_I2C_REG 0xb /* write */ -#define I2C_RESET_ERRORS 0xc -#define I2C_STAT_REG 0xb /* read */ -#define I2C_STAT_INVALID_CMD PPC_BIT(0) -#define I2C_STAT_LBUS_PARITY_ERR PPC_BIT(1) -#define I2C_STAT_BKEND_OVERRUN_ERR PPC_BIT(2) -#define I2C_STAT_BKEND_ACCESS_ERR PPC_BIT(3) -#define I2C_STAT_ARBT_LOST_ERR PPC_BIT(4) -#define I2C_STAT_NACK_RCVD_ERR PPC_BIT(5) -#define I2C_STAT_DATA_REQ PPC_BIT(6) -#define I2C_STAT_CMD_COMP PPC_BIT(7) -#define I2C_STAT_STOP_ERR PPC_BIT(8) -#define I2C_STAT_UPPER_THRS PPC_BITMASK(9, 15) -#define I2C_STAT_ANY_I2C_INTR PPC_BIT(16) -#define I2C_STAT_PORT_HISTORY_BUSY PPC_BIT(19) -#define I2C_STAT_SCL_INPUT_LEVEL PPC_BIT(20) -#define I2C_STAT_SDA_INPUT_LEVEL PPC_BIT(21) -#define I2C_STAT_PORT_BUSY PPC_BIT(22) -#define I2C_STAT_INTERFACE_BUSY PPC_BIT(23) -#define I2C_STAT_FIFO_ENTRY_COUNT PPC_BITMASK(24, 31) - 
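The register definitions being moved out of this file rely on QEMU's IBM-style bit numbering, where bit 0 is the most significant bit of a 64-bit word. The macro bodies below are re-derived to mirror the QEMU originals, so treat them as an approximation; the example extracts the 7-bit I2C_CMD_DEV_ADDR field defined as PPC_BITMASK(8, 14).

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* IBM numbering: bit 0 = MSB. Re-derivations of QEMU's macros. */
#define PPC_BIT(bit)        (0x8000000000000000ULL >> (bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

int main(void)
{
    /* I2C_CMD_DEV_ADDR is PPC_BITMASK(8, 14): a 7-bit device address */
    uint64_t dev_addr_mask = PPC_BITMASK(8, 14);
    /* Place address 0x50 into the field and read it back */
    uint64_t cmd  = ((uint64_t)0x50 << (63 - 14)) & dev_addr_mask;
    uint64_t addr = (cmd & dev_addr_mask) >> (63 - 14);

    printf("mask=0x%016" PRIx64 " addr=0x%02" PRIx64 "\n", dev_addr_mask, addr);
    return 0;
}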
-#define I2C_STAT_ANY_ERR (I2C_STAT_INVALID_CMD | I2C_STAT_LBUS_PARITY_ERR | \ - I2C_STAT_BKEND_OVERRUN_ERR | \ - I2C_STAT_BKEND_ACCESS_ERR | I2C_STAT_ARBT_LOST_ERR | \ - I2C_STAT_NACK_RCVD_ERR | I2C_STAT_STOP_ERR) - - -#define I2C_INTR_ACTIVE \ - ((I2C_STAT_ANY_ERR >> 16) | I2C_INTR_CMD_COMP | I2C_INTR_DATA_REQ) - -/* Pseudo-status used for timeouts */ -#define I2C_STAT_PSEUDO_TIMEOUT PPC_BIT(63) - -/* I2C extended status register */ -#define I2C_EXTD_STAT_REG 0xc -#define I2C_EXTD_STAT_FIFO_SIZE PPC_BITMASK(0, 7) -#define I2C_EXTD_STAT_MSM_CURSTATE PPC_BITMASK(11, 15) -#define I2C_EXTD_STAT_SCL_IN_SYNC PPC_BIT(16) -#define I2C_EXTD_STAT_SDA_IN_SYNC PPC_BIT(17) -#define I2C_EXTD_STAT_S_SCL PPC_BIT(18) -#define I2C_EXTD_STAT_S_SDA PPC_BIT(19) -#define I2C_EXTD_STAT_M_SCL PPC_BIT(20) -#define I2C_EXTD_STAT_M_SDA PPC_BIT(21) -#define I2C_EXTD_STAT_HIGH_WATER PPC_BIT(22) -#define I2C_EXTD_STAT_LOW_WATER PPC_BIT(23) -#define I2C_EXTD_STAT_I2C_BUSY PPC_BIT(24) -#define I2C_EXTD_STAT_SELF_BUSY PPC_BIT(25) -#define I2C_EXTD_STAT_I2C_VERSION PPC_BITMASK(27, 31) - -/* I2C residual front end/back end length */ -#define I2C_RESIDUAL_LEN_REG 0xd -#define I2C_RESIDUAL_FRONT_END PPC_BITMASK(0, 15) -#define I2C_RESIDUAL_BACK_END PPC_BITMASK(16, 31) - -/* Port busy register */ -#define I2C_PORT_BUSY_REG 0xe -#define I2C_SET_S_SCL_REG 0xd -#define I2C_RESET_S_SCL_REG 0xf -#define I2C_SET_S_SDA_REG 0x10 -#define I2C_RESET_S_SDA_REG 0x11 - -#define PNV_I2C_FIFO_SIZE 8 -#define PNV_I2C_MAX_BUSSES 64 +#include "hw/i2c/pnv_i2c_regs.h" static I2CBus *pnv_i2c_get_bus(PnvI2C *i2c) { @@ -629,6 +500,19 @@ static int pnv_i2c_dt_xscom(PnvXScomInterface *dev, void *fdt, return 0; } +static void pnv_i2c_sys_reset(void *dev) +{ + int port; + PnvI2C *i2c = PNV_I2C(dev); + + pnv_i2c_reset(dev); + + /* reset all buses connected to this i2c controller */ + for (port = 0; port < i2c->num_busses; port++) { + bus_cold_reset(BUS(i2c->busses[port])); + } +} + static void pnv_i2c_realize(DeviceState *dev, Error **errp) { PnvI2C *i2c = PNV_I2C(dev); @@ -654,7 +538,7 @@ static void pnv_i2c_realize(DeviceState *dev, Error **errp) fifo8_create(&i2c->fifo, PNV_I2C_FIFO_SIZE); - qemu_register_reset(pnv_i2c_reset, dev); + qemu_register_reset(pnv_i2c_sys_reset, dev); qdev_init_gpio_out(DEVICE(dev), &i2c->psi_irq, 1); } @@ -673,6 +557,9 @@ static void pnv_i2c_class_init(ObjectClass *klass, void *data) xscomc->dt_xscom = pnv_i2c_dt_xscom; + /* Reason: This device is part of the CPU and cannot be used separately */ + dc->user_creatable = false; + dc->desc = "PowerNV I2C"; dc->realize = pnv_i2c_realize; device_class_set_props(dc, pnv_i2c_properties); diff --git a/hw/ppc/pnv_n1_chiplet.c b/hw/ppc/pnv_n1_chiplet.c new file mode 100644 index 00000000000..03ff9fbad0d --- /dev/null +++ b/hw/ppc/pnv_n1_chiplet.c @@ -0,0 +1,173 @@ +/* + * QEMU PowerPC N1 chiplet model + * + * Copyright (c) 2023, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/qdev-properties.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/ppc/pnv_n1_chiplet.h" +#include "hw/ppc/pnv_nest_pervasive.h" + +/* + * The n1 chiplet contains chiplet control unit, + * PowerBus/RaceTrack/Bridge logic, nest Memory Management Unit(nMMU) + * and more. + * + * In this model Nest1 chiplet control registers are modelled via common + * nest pervasive model and few PowerBus racetrack registers are modelled. 
+ */ + +#define PB_SCOM_EQ0_HP_MODE2_CURR 0xe +#define PB_SCOM_ES3_MODE 0x8a + +static uint64_t pnv_n1_chiplet_pb_scom_eq_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvN1Chiplet *n1_chiplet = PNV_N1_CHIPLET(opaque); + uint32_t reg = addr >> 3; + uint64_t val = ~0ull; + + switch (reg) { + case PB_SCOM_EQ0_HP_MODE2_CURR: + val = n1_chiplet->eq[0].hp_mode2_curr; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Invalid xscom read at 0x%" PRIx32 "\n", + __func__, reg); + } + return val; +} + +static void pnv_n1_chiplet_pb_scom_eq_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvN1Chiplet *n1_chiplet = PNV_N1_CHIPLET(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PB_SCOM_EQ0_HP_MODE2_CURR: + n1_chiplet->eq[0].hp_mode2_curr = val; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Invalid xscom write at 0x%" PRIx32 "\n", + __func__, reg); + } +} + +static const MemoryRegionOps pnv_n1_chiplet_pb_scom_eq_ops = { + .read = pnv_n1_chiplet_pb_scom_eq_read, + .write = pnv_n1_chiplet_pb_scom_eq_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static uint64_t pnv_n1_chiplet_pb_scom_es_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvN1Chiplet *n1_chiplet = PNV_N1_CHIPLET(opaque); + uint32_t reg = addr >> 3; + uint64_t val = ~0ull; + + switch (reg) { + case PB_SCOM_ES3_MODE: + val = n1_chiplet->es[3].mode; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Invalid xscom read at 0x%" PRIx32 "\n", + __func__, reg); + } + return val; +} + +static void pnv_n1_chiplet_pb_scom_es_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvN1Chiplet *n1_chiplet = PNV_N1_CHIPLET(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PB_SCOM_ES3_MODE: + n1_chiplet->es[3].mode = val; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Invalid xscom write at 0x%" PRIx32 "\n", + __func__, reg); + } +} + +static const MemoryRegionOps pnv_n1_chiplet_pb_scom_es_ops = { + .read = pnv_n1_chiplet_pb_scom_es_read, + .write = pnv_n1_chiplet_pb_scom_es_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_n1_chiplet_realize(DeviceState *dev, Error **errp) +{ + PnvN1Chiplet *n1_chiplet = PNV_N1_CHIPLET(dev); + + /* Realize nest pervasive common chiplet model */ + if (!qdev_realize(DEVICE(&n1_chiplet->nest_pervasive), NULL, errp)) { + return; + } + + /* Nest1 chiplet power bus EQ xscom region */ + pnv_xscom_region_init(&n1_chiplet->xscom_pb_eq_mr, OBJECT(n1_chiplet), + &pnv_n1_chiplet_pb_scom_eq_ops, n1_chiplet, + "xscom-n1-chiplet-pb-scom-eq", + PNV10_XSCOM_N1_PB_SCOM_EQ_SIZE); + + /* Nest1 chiplet power bus ES xscom region */ + pnv_xscom_region_init(&n1_chiplet->xscom_pb_es_mr, OBJECT(n1_chiplet), + &pnv_n1_chiplet_pb_scom_es_ops, n1_chiplet, + "xscom-n1-chiplet-pb-scom-es", + PNV10_XSCOM_N1_PB_SCOM_ES_SIZE); +} + +static void pnv_n1_chiplet_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PowerNV n1 chiplet"; + dc->realize = pnv_n1_chiplet_realize; +} + +static void pnv_n1_chiplet_instance_init(Object *obj) +{ + PnvN1Chiplet *n1_chiplet = PNV_N1_CHIPLET(obj); + + object_initialize_child(OBJECT(n1_chiplet), "nest-pervasive-common", + &n1_chiplet->nest_pervasive, + TYPE_PNV_NEST_CHIPLET_PERVASIVE); +} + +static const TypeInfo pnv_n1_chiplet_info = { 
+ .name = TYPE_PNV_N1_CHIPLET, + .parent = TYPE_DEVICE, + .instance_init = pnv_n1_chiplet_instance_init, + .instance_size = sizeof(PnvN1Chiplet), + .class_init = pnv_n1_chiplet_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static void pnv_n1_chiplet_register_types(void) +{ + type_register_static(&pnv_n1_chiplet_info); +} + +type_init(pnv_n1_chiplet_register_types); diff --git a/hw/ppc/pnv_nest_pervasive.c b/hw/ppc/pnv_nest_pervasive.c new file mode 100644 index 00000000000..77476753a40 --- /dev/null +++ b/hw/ppc/pnv_nest_pervasive.c @@ -0,0 +1,208 @@ +/* + * QEMU PowerPC nest pervasive common chiplet model + * + * Copyright (c) 2023, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/qdev-properties.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/ppc/pnv_nest_pervasive.h" + +/* + * Status, configuration, and control units in POWER chips is provided + * by the pervasive subsystem, which connects registers to the SCOM bus, + * which can be programmed by processor cores, other units on the chip, + * BMCs, or other POWER chips. + * + * A POWER10 chip is divided into logical units called chiplets. Chiplets + * are broadly divided into "core chiplets" (with the processor cores) and + * "nest chiplets" (with everything else). Each chiplet has an attachment + * to the pervasive bus (PIB) and with chiplet-specific registers. + * All nest chiplets have a common basic set of registers. + * + * This model will provide the registers functionality for common registers of + * nest unit (PB Chiplet, PCI Chiplets, MC Chiplet, PAU Chiplets) + * + * Currently this model provide the read/write functionality of chiplet control + * scom registers. 
+ */ + +#define CPLT_CONF0 0x08 +#define CPLT_CONF0_OR 0x18 +#define CPLT_CONF0_CLEAR 0x28 +#define CPLT_CONF1 0x09 +#define CPLT_CONF1_OR 0x19 +#define CPLT_CONF1_CLEAR 0x29 +#define CPLT_STAT0 0x100 +#define CPLT_MASK0 0x101 +#define CPLT_PROTECT_MODE 0x3FE +#define CPLT_ATOMIC_CLOCK 0x3FF + +static uint64_t pnv_chiplet_ctrl_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvNestChipletPervasive *nest_pervasive = PNV_NEST_CHIPLET_PERVASIVE( + opaque); + uint32_t reg = addr >> 3; + uint64_t val = ~0ull; + + /* CPLT_CTRL0 to CPLT_CTRL5 */ + for (int i = 0; i < PNV_CPLT_CTRL_SIZE; i++) { + if (reg == i) { + return nest_pervasive->control_regs.cplt_ctrl[i]; + } else if ((reg == (i + 0x10)) || (reg == (i + 0x20))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Write only register, ignoring " + "xscom read at 0x%" PRIx32 "\n", + __func__, reg); + return val; + } + } + + switch (reg) { + case CPLT_CONF0: + val = nest_pervasive->control_regs.cplt_cfg0; + break; + case CPLT_CONF0_OR: + case CPLT_CONF0_CLEAR: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Write only register, ignoring " + "xscom read at 0x%" PRIx32 "\n", + __func__, reg); + break; + case CPLT_CONF1: + val = nest_pervasive->control_regs.cplt_cfg1; + break; + case CPLT_CONF1_OR: + case CPLT_CONF1_CLEAR: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Write only register, ignoring " + "xscom read at 0x%" PRIx32 "\n", + __func__, reg); + break; + case CPLT_STAT0: + val = nest_pervasive->control_regs.cplt_stat0; + break; + case CPLT_MASK0: + val = nest_pervasive->control_regs.cplt_mask0; + break; + case CPLT_PROTECT_MODE: + val = nest_pervasive->control_regs.ctrl_protect_mode; + break; + case CPLT_ATOMIC_CLOCK: + val = nest_pervasive->control_regs.ctrl_atomic_lock; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Chiplet_control_regs: Invalid xscom " + "read at 0x%" PRIx32 "\n", __func__, reg); + } + return val; +} + +static void pnv_chiplet_ctrl_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvNestChipletPervasive *nest_pervasive = PNV_NEST_CHIPLET_PERVASIVE( + opaque); + uint32_t reg = addr >> 3; + + /* CPLT_CTRL0 to CPLT_CTRL5 */ + for (int i = 0; i < PNV_CPLT_CTRL_SIZE; i++) { + if (reg == i) { + nest_pervasive->control_regs.cplt_ctrl[i] = val; + return; + } else if (reg == (i + 0x10)) { + nest_pervasive->control_regs.cplt_ctrl[i] |= val; + return; + } else if (reg == (i + 0x20)) { + nest_pervasive->control_regs.cplt_ctrl[i] &= ~val; + return; + } + } + + switch (reg) { + case CPLT_CONF0: + nest_pervasive->control_regs.cplt_cfg0 = val; + break; + case CPLT_CONF0_OR: + nest_pervasive->control_regs.cplt_cfg0 |= val; + break; + case CPLT_CONF0_CLEAR: + nest_pervasive->control_regs.cplt_cfg0 &= ~val; + break; + case CPLT_CONF1: + nest_pervasive->control_regs.cplt_cfg1 = val; + break; + case CPLT_CONF1_OR: + nest_pervasive->control_regs.cplt_cfg1 |= val; + break; + case CPLT_CONF1_CLEAR: + nest_pervasive->control_regs.cplt_cfg1 &= ~val; + break; + case CPLT_STAT0: + nest_pervasive->control_regs.cplt_stat0 = val; + break; + case CPLT_MASK0: + nest_pervasive->control_regs.cplt_mask0 = val; + break; + case CPLT_PROTECT_MODE: + nest_pervasive->control_regs.ctrl_protect_mode = val; + break; + case CPLT_ATOMIC_CLOCK: + nest_pervasive->control_regs.ctrl_atomic_lock = val; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Chiplet_control_regs: Invalid xscom " + "write at 0x%" PRIx32 "\n", + __func__, reg); + } +} + +static const MemoryRegionOps pnv_nest_pervasive_control_xscom_ops = { + .read = pnv_chiplet_ctrl_read, + .write = 
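The CPLT_CTRLn and CPLT_CONFn registers handled above follow a common SCOM pattern: the base address takes a plain write, the address at +0x10 ORs bits in, and the address at +0x20 clears the written bits. A standalone sketch of that write behaviour, with a single register standing in for the chiplet control set:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* One register exposed at three SCOM offsets: base, base+0x10, base+0x20 */
struct scom_reg {
    uint64_t val;
};

static void scom_write(struct scom_reg *r, uint32_t offset_from_base,
                       uint64_t val)
{
    switch (offset_from_base) {
    case 0x00:                /* plain write */
        r->val = val;
        break;
    case 0x10:                /* OR: set the bits that are 1 in val */
        r->val |= val;
        break;
    case 0x20:                /* CLEAR: clear the bits that are 1 in val */
        r->val &= ~val;
        break;
    default:
        fprintf(stderr, "unhandled offset 0x%" PRIx32 "\n", offset_from_base);
    }
}

int main(void)
{
    struct scom_reg cplt_conf0 = { 0 };

    scom_write(&cplt_conf0, 0x00, 0x00f0);   /* set initial value */
    scom_write(&cplt_conf0, 0x10, 0x000f);   /* OR in low nibble  */
    scom_write(&cplt_conf0, 0x20, 0x0030);   /* clear two bits    */
    printf("CPLT_CONF0 = 0x%04" PRIx64 "\n", cplt_conf0.val);   /* 0x00cf */
    return 0;
}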
pnv_chiplet_ctrl_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_nest_pervasive_realize(DeviceState *dev, Error **errp) +{ + PnvNestChipletPervasive *nest_pervasive = PNV_NEST_CHIPLET_PERVASIVE(dev); + + /* Chiplet control scoms */ + pnv_xscom_region_init(&nest_pervasive->xscom_ctrl_regs_mr, + OBJECT(nest_pervasive), + &pnv_nest_pervasive_control_xscom_ops, + nest_pervasive, "pervasive-control", + PNV10_XSCOM_CHIPLET_CTRL_REGS_SIZE); +} + +static void pnv_nest_pervasive_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PowerNV nest pervasive chiplet"; + dc->realize = pnv_nest_pervasive_realize; +} + +static const TypeInfo pnv_nest_pervasive_info = { + .name = TYPE_PNV_NEST_CHIPLET_PERVASIVE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PnvNestChipletPervasive), + .class_init = pnv_nest_pervasive_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static void pnv_nest_pervasive_register_types(void) +{ + type_register_static(&pnv_nest_pervasive_info); +} + +type_init(pnv_nest_pervasive_register_types); diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c index 805b1d0c874..a17816d0722 100644 --- a/hw/ppc/pnv_xscom.c +++ b/hw/ppc/pnv_xscom.c @@ -44,15 +44,12 @@ static void xscom_complete(CPUState *cs, uint64_t hmer_bits) * passed for the cpu, and no CPU completion is generated. */ if (cs) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; - /* * TODO: Need a CPU helper to set HMER, also handle generation * of HMIs */ cpu_synchronize_state(cs); - env->spr[SPR_HMER] |= hmer_bits; + cpu_env(cs)->spr[SPR_HMER] |= hmer_bits; } } diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index be167710a35..e6fa5580c01 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -47,7 +47,7 @@ void ppc_set_irq(PowerPCCPU *cpu, int irq, int level) unsigned int old_pending; /* We may already have the BQL if coming from the reset path */ - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); old_pending = env->pending_interrupts; @@ -314,7 +314,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val) { PowerPCCPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); switch ((val >> 28) & 0x3) { case 0x0: @@ -334,7 +334,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val) break; } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* PowerPC 40x internal IRQ controller */ @@ -633,6 +633,16 @@ void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value) ((uint64_t)value << 32) | tb); } +void cpu_ppc_increase_tb_by_offset(CPUPPCState *env, int64_t offset) +{ + env->tb_env->tb_offset += offset; +} + +void cpu_ppc_decrease_tb_by_offset(CPUPPCState *env, int64_t offset) +{ + env->tb_env->tb_offset -= offset; +} + uint64_t cpu_ppc_load_vtb(CPUPPCState *env) { ppc_tb_t *tb_env = env->tb_env; @@ -1066,7 +1076,7 @@ const VMStateDescription vmstate_ppc_timebase = { .version_id = 1, .minimum_version_id = 1, .pre_save = timebase_pre_save, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT64(guest_timebase, PPCTimebase), VMSTATE_INT64(time_of_the_day_ns, PPCTimebase), VMSTATE_END_OF_LIST() diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c index a189942de4c..e18f57efce9 100644 --- a/hw/ppc/ppc440_bamboo.c +++ b/hw/ppc/ppc440_bamboo.c @@ -24,6 +24,7 @@ #include "elf.h" #include "hw/char/serial.h" #include "hw/ppc/ppc.h" +#include 
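The new cpu_ppc_increase_tb_by_offset()/cpu_ppc_decrease_tb_by_offset() helpers only nudge tb_env->tb_offset; the guest-visible timebase is derived from a host reference plus that offset. A simplified standalone sketch of the relationship (the scaling by tb_freq that QEMU performs is elided here, so this is only an approximation):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

struct tb_env {
    int64_t tb_offset;        /* guest TB = host reference + tb_offset */
};

static uint64_t load_guest_tb(const struct tb_env *tb, uint64_t host_ticks)
{
    return host_ticks + tb->tb_offset;
}

static void increase_tb_by_offset(struct tb_env *tb, int64_t offset)
{
    tb->tb_offset += offset;
}

int main(void)
{
    struct tb_env tb = { .tb_offset = 0 };
    uint64_t host = 1000000;

    printf("guest TB = %" PRIu64 "\n", load_guest_tb(&tb, host));
    increase_tb_by_offset(&tb, 512);      /* e.g. adjust after a TOD sync */
    printf("guest TB = %" PRIu64 "\n", load_guest_tb(&tb, host));
    return 0;
}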
"hw/pci-host/ppc4xx.h" #include "sysemu/sysemu.h" #include "sysemu/reset.h" #include "hw/sysbus.h" @@ -161,7 +162,6 @@ static void bamboo_init(MachineState *machine) DeviceState *uicdev; SysBusDevice *uicsbd; int success; - int i; if (kvm_enabled()) { error_report("machine %s does not support the KVM accelerator", @@ -234,14 +234,11 @@ static void bamboo_init(MachineState *machine) } if (pcibus) { - /* Register network interfaces. */ - for (i = 0; i < nb_nics; i++) { - /* - * There are no PCI NICs on the Bamboo board, but there are - * PCI slots, so we can pick whatever default model we want. - */ - pci_nic_init_nofail(&nd_table[i], pcibus, mc->default_nic, NULL); - } + /* + * There are no PCI NICs on the Bamboo board, but there are + * PCI slots, so we can pick whatever default model we want. + */ + pci_init_nic_devices(pcibus, mc->default_nic); } /* Load kernel. */ diff --git a/hw/ppc/ppc440_uc.c b/hw/ppc/ppc440_uc.c index 7d6ca703875..1312aa2080e 100644 --- a/hw/ppc/ppc440_uc.c +++ b/hw/ppc/ppc440_uc.c @@ -14,6 +14,7 @@ #include "qemu/log.h" #include "hw/irq.h" #include "hw/ppc/ppc4xx.h" +#include "hw/pci-host/ppc4xx.h" #include "hw/qdev-properties.h" #include "hw/pci/pci.h" #include "sysemu/reset.h" diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c index bbce63e8a42..dfbe759481a 100644 --- a/hw/ppc/ppce500_spin.c +++ b/hw/ppc/ppce500_spin.c @@ -90,8 +90,7 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env, static void spin_kick(CPUState *cs, run_on_cpu_data data) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); SpinInfo *curspin = data.host_ptr; hwaddr map_size = 64 * MiB; hwaddr map_start; diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index 137276bcb92..4eb54770690 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -241,7 +241,6 @@ static void ibm_40p_init(MachineState *machine) ISADevice *isa_dev; ISABus *isa_bus; void *fw_cfg; - int i; uint32_t kernel_base = 0, initrd_base = 0; long kernel_size = 0, initrd_size = 0; char boot_device; @@ -279,9 +278,9 @@ static void ibm_40p_init(MachineState *machine) /* PCI -> ISA bridge */ i82378_dev = DEVICE(pci_new(PCI_DEVFN(11, 0), "i82378")); + qdev_realize_and_unref(i82378_dev, BUS(pci_bus), &error_fatal); qdev_connect_gpio_out(i82378_dev, 0, qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT)); - qdev_realize_and_unref(i82378_dev, BUS(pci_bus), &error_fatal); sysbus_connect_irq(pcihost, 0, qdev_get_gpio_in(i82378_dev, 15)); isa_bus = ISA_BUS(qdev_get_child_bus(i82378_dev, "isa.0")); @@ -336,10 +335,9 @@ static void ibm_40p_init(MachineState *machine) /* XXX: s3-trio at PCI_DEVFN(2, 0) */ pci_vga_init(pci_bus); - for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, - i == 0 ? 
"3" : NULL); - } + /* First PCNET device at PCI_DEVFN(3, 0) */ + pci_init_nic_in_slot(pci_bus, mc->default_nic, NULL, "3"); + pci_init_nic_devices(pci_bus, mc->default_nic); } /* Prepare firmware configuration for OpenBIOS */ diff --git a/hw/ppc/prep_systemio.c b/hw/ppc/prep_systemio.c index c96cefb13d5..4d3a251ed82 100644 --- a/hw/ppc/prep_systemio.c +++ b/hw/ppc/prep_systemio.c @@ -277,7 +277,7 @@ static const VMStateDescription vmstate_prep_systemio = { .name = "prep_systemio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(sreset, PrepSystemIoState), VMSTATE_UINT8(system_control, PrepSystemIoState), VMSTATE_UINT8(iomap_type, PrepSystemIoState), diff --git a/hw/ppc/rs6000_mc.c b/hw/ppc/rs6000_mc.c index c0bc212e924..e6ec4b4c406 100644 --- a/hw/ppc/rs6000_mc.c +++ b/hw/ppc/rs6000_mc.c @@ -143,7 +143,6 @@ static void rs6000mc_realize(DeviceState *dev, Error **errp) RS6000MCState *s = RS6000MC(dev); int socket = 0; unsigned int ram_size = s->ram_size / MiB; - Error *local_err = NULL; while (socket < 6) { if (ram_size >= 64) { @@ -165,10 +164,8 @@ static void rs6000mc_realize(DeviceState *dev, Error **errp) if (s->simm_size[socket]) { char name[] = "simm.?"; name[5] = socket + '0'; - memory_region_init_ram(&s->simm[socket], OBJECT(dev), name, - s->simm_size[socket] * MiB, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram(&s->simm[socket], OBJECT(dev), name, + s->simm_size[socket] * MiB, errp)) { return; } memory_region_add_subregion_overlap(get_system_memory(), 0, @@ -202,7 +199,7 @@ static const VMStateDescription vmstate_rs6000mc = { .name = "rs6000-mc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(port0820_index, RS6000MCState), VMSTATE_END_OF_LIST() }, diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c index 1e615b8d355..d42b6778987 100644 --- a/hw/ppc/sam460ex.c +++ b/hw/ppc/sam460ex.c @@ -25,6 +25,7 @@ #include "elf.h" #include "exec/memory.h" #include "ppc440.h" +#include "hw/pci-host/ppc4xx.h" #include "hw/block/flash.h" #include "sysemu/sysemu.h" #include "sysemu/reset.h" @@ -32,6 +33,7 @@ #include "hw/char/serial.h" #include "hw/i2c/ppc4xx_i2c.h" #include "hw/i2c/smbus_eeprom.h" +#include "hw/ide/pci.h" #include "hw/usb/hcd-ehci.h" #include "hw/ppc/fdt.h" #include "hw/qdev-properties.h" @@ -273,6 +275,7 @@ static void sam460ex_init(MachineState *machine) DeviceState *uic[4]; int i; PCIBus *pci_bus; + USBBus *usb_bus; PowerPCCPU *cpu; CPUPPCState *env; I2CBus *i2c; @@ -420,8 +423,10 @@ static void sam460ex_init(MachineState *machine) sysbus_realize_and_unref(sbdev, &error_fatal); sysbus_mmio_map(sbdev, 0, 0x4bffd0000); sysbus_connect_irq(sbdev, 0, qdev_get_gpio_in(uic[2], 30)); - usb_create_simple(usb_bus_find(-1), "usb-kbd"); - usb_create_simple(usb_bus_find(-1), "usb-mouse"); + usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS, + &error_abort)); + usb_create_simple(usb_bus, "usb-kbd"); + usb_create_simple(usb_bus, "usb-mouse"); /* PCIe buses */ dev = qdev_new(TYPE_PPC460EX_PCIE_HOST); @@ -445,15 +450,27 @@ static void sam460ex_init(MachineState *machine) /* PCI devices */ pci_create_simple(pci_bus, PCI_DEVFN(6, 0), "sm501"); - /* SoC has a single SATA port but we don't emulate that yet + /* + * SoC has a single SATA port but we don't emulate that * However, firmware and usual clients have driver for SiI311x - * so add one for convenience by default */ + * PCI SATA 
card so add one for convenience by default + */ if (defaults_enabled()) { - pci_create_simple(pci_bus, -1, "sii3112"); + PCIIDEState *s = PCI_IDE(pci_create_simple(pci_bus, -1, "sii3112")); + DriveInfo *di; + + di = drive_get_by_index(IF_IDE, 0); + if (di) { + ide_bus_create_drive(&s->bus[0], 0, di); + } + /* Use index 2 only if 1 does not exist, this allows -cdrom */ + di = drive_get_by_index(IF_IDE, 1) ?: drive_get_by_index(IF_IDE, 2); + if (di) { + ide_bus_create_drive(&s->bus[1], 0, di); + } } - /* SoC has 4 UARTs - * but board has only one wired and two are present in fdt */ + /* SoC has 4 UARTs but board has only one wired and two described in fdt */ if (serial_hd(0) != NULL) { serial_mm_init(get_system_memory(), 0x4ef600300, 0, qdev_get_gpio_in(uic[1], 1), @@ -527,6 +544,7 @@ static void sam460ex_machine_init(MachineClass *mc) { mc->desc = "aCube Sam460ex"; mc->init = sam460ex_init; + mc->block_default_type = IF_IDE; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("460exb"); mc->default_ram_size = 512 * MiB; mc->default_ram_id = "ppc4xx.sdram"; diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index df09aa9d6a0..e9bc97fee08 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -35,6 +35,7 @@ #include "sysemu/sysemu.h" #include "sysemu/hostmem.h" #include "sysemu/numa.h" +#include "sysemu/tcg.h" #include "sysemu/qtest.h" #include "sysemu/reset.h" #include "sysemu/runstate.h" @@ -152,7 +153,7 @@ static const VMStateDescription pre_2_10_vmstate_dummy_icp = { .version_id = 1, .minimum_version_id = 1, .needed = pre_2_10_vmstate_dummy_icp_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UNUSED(4), /* uint32_t xirr */ VMSTATE_UNUSED(1), /* uint8_t pending_priority */ VMSTATE_UNUSED(1), /* uint8_t mfrr */ @@ -233,29 +234,66 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr, PowerPCCPU *cpu, void *fdt, int offset) { + /* + * SSO (SAO) ordering is supported on KVM and thread=single hosts, + * but not MTTCG, so disable it. To advertise it, a cap would have + * to be added, or support implemented for MTTCG. + * + * Copy/paste is not supported by TCG, so it is not advertised. KVM + * can execute them but it has no accelerator drivers which are usable, + * so there isn't much need for it anyway. + */ + + /* These should be kept in sync with pnv */ uint8_t pa_features_206[] = { 6, 0, - 0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 }; + 0xf6, 0x1f, 0xc7, 0x00, 0x00, 0xc0 }; uint8_t pa_features_207[] = { 24, 0, - 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, + 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x00, 0x00 }; uint8_t pa_features_300[] = { 66, 0, /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ - /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */ - 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */ + /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */ + 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */ + /* 6: DS207 */ + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ + /* 16: Vector */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ + /* 18: Vec. Scalar, 20: Vec. XOR */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ + /* 24: Ext. 
Dec, 26: 64 bit ftrs, 28: PM ftrs */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ + /* 32: LE atomic, 34: EBB + ext EBB */ + 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ + /* 40: Radix MMU */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */ + /* 42: PM, 44: PC RA, 46: SC vec'd */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ + /* 48: SIMD, 50: QP BFP, 52: String */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ + /* 54: DecFP, 56: DecI, 58: SHA */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ + /* 60: NM atomic, 62: RNG */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ + }; + /* 3.1 removes SAO, HTM support */ + uint8_t pa_features_31[] = { 74, 0, + /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ + /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */ + 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */ /* 6: DS207 */ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ /* 16: Vector */ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ - /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */ + /* 18: Vec. Scalar, 20: Vec. XOR */ 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */ 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ - /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */ - 0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ - /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */ - 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */ + /* 32: LE atomic, 34: EBB + ext EBB */ + 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ + /* 40: Radix MMU */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */ /* 42: PM, 44: PC RA, 46: SC vec'd */ 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ /* 48: SIMD, 50: QP BFP, 52: String */ @@ -264,6 +302,10 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ /* 60: NM atomic, 62: RNG */ 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ + /* 68: DEXCR[SBHE|IBRTPDUS|SRAPD|NPHIE|PHIE] */ + 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 66 - 71 */ + /* 72: [P]HASHST/[P]HASHCHK */ + 0x80, 0x00, /* 72 - 73 */ }; uint8_t *pa_features = NULL; size_t pa_size; @@ -280,6 +322,10 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr, pa_features = pa_features_300; pa_size = sizeof(pa_features_300); } + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, cpu->compat_pvr)) { + pa_features = pa_features_31; + pa_size = sizeof(pa_features_31); + } if (!pa_features) { return; } @@ -1304,7 +1350,7 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, CPUPPCState *env = &cpu->env; /* The TCG path should also be holding the BQL at this point */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); g_assert(!vhyp_cpu_in_nested(cpu)); @@ -1362,7 +1408,6 @@ void spapr_init_all_lpcrs(target_ulong value, target_ulong mask) } } - static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu, target_ulong lpid, ppc_v3_pate_t *entry) { @@ -1375,33 +1420,16 @@ static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu, /* Copy PATE1:GR into PATE0:HR */ entry->dw0 = spapr->patb_entry & PATE0_HR; entry->dw1 = spapr->patb_entry; - + return true; } else { - uint64_t patb, pats; - - assert(lpid != 0); - - patb = spapr->nested_ptcr & PTCR_PATB; - pats = spapr->nested_ptcr & PTCR_PATS; - - /* Check if partition table is properly aligned */ - if (patb & MAKE_64BIT_MASK(0, pats + 12)) { - return false; - } - - /* Calculate number of entries */ - pats = 1ull << (pats + 12 - 4); - if (pats <= 
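The pa_features arrays above follow the ibm,pa-features layout, assumed here to be: a two-byte header (attribute byte count, descriptor), then attribute bytes where bit 0 is the most significant bit of each byte, which is why the inline comments index entries such as "40: Radix MMU". A standalone check of one bit under that assumed layout:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Returns nonzero if attribute byte 'byte', bit 'bit' (0 = MSB) is set.
 * The first two property bytes are treated as a header, as in the
 * pa_features_* arrays above. */
static int pa_feature_set(const uint8_t *pa, size_t len,
                          unsigned byte, unsigned bit)
{
    if (len < 2 || byte >= pa[0] || 2 + byte >= len) {
        return 0;
    }
    return pa[2 + byte] & (0x80u >> bit);
}

int main(void)
{
    /* Truncated example: header {42, 0} plus attribute bytes 0..41,
     * with byte 40 (Radix MMU in the comments above) set to 0x80. */
    uint8_t pa[44] = { 42, 0 };

    pa[2 + 40] = 0x80;
    printf("radix advertised: %s\n",
           pa_feature_set(pa, sizeof(pa), 40, 0) ? "yes" : "no");
    return 0;
}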
lpid) { - return false; + if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) { + return spapr_get_pate_nested_hv(spapr, cpu, lpid, entry); + } else if (spapr_nested_api(spapr) == NESTED_API_PAPR) { + return spapr_get_pate_nested_papr(spapr, cpu, lpid, entry); + } else { + g_assert_not_reached(); } - - /* Grab entry */ - patb += 16 * lpid; - entry->dw0 = ldq_phys(CPU(cpu)->as, patb); - entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8); } - - return true; } #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) @@ -1689,6 +1717,7 @@ static void spapr_machine_reset(MachineState *machine, ShutdownCause reason) pef_kvm_reset(machine->cgs, &error_fatal); spapr_caps_apply(spapr); + spapr_nested_reset(spapr); first_ppc_cpu = POWERPC_CPU(first_cpu); if (kvm_enabled() && kvmppc_has_cap_mmu_radix() && @@ -1919,7 +1948,7 @@ static const VMStateDescription vmstate_spapr_event_entry = { .name = "spapr_event_log_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(summary, SpaprEventLogEntry), VMSTATE_UINT32(extended_length, SpaprEventLogEntry), VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0, @@ -1933,7 +1962,7 @@ static const VMStateDescription vmstate_spapr_pending_events = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_pending_events_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1, vmstate_spapr_event_entry, SpaprEventLogEntry, next), VMSTATE_END_OF_LIST() @@ -1989,7 +2018,7 @@ static const VMStateDescription vmstate_spapr_ov5_cas = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_ov5_cas_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1, vmstate_spapr_ovec, SpaprOptionVector), VMSTATE_END_OF_LIST() @@ -2008,7 +2037,7 @@ static const VMStateDescription vmstate_spapr_patb_entry = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_patb_entry_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(patb_entry, SpaprMachineState), VMSTATE_END_OF_LIST() }, @@ -2026,7 +2055,7 @@ static const VMStateDescription vmstate_spapr_irq_map = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_irq_map_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr), VMSTATE_END_OF_LIST() }, @@ -2056,7 +2085,7 @@ static const VMStateDescription vmstate_spapr_dtb = { .minimum_version_id = 1, .needed = spapr_dtb_needed, .pre_load = spapr_dtb_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(fdt_initial_size, SpaprMachineState), VMSTATE_UINT32(fdt_size, SpaprMachineState), VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL, @@ -2094,7 +2123,7 @@ static const VMStateDescription vmstate_spapr_fwnmi = { .minimum_version_id = 1, .needed = spapr_fwnmi_needed, .pre_save = spapr_fwnmi_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState), VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState), VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState), @@ -2109,7 +2138,7 @@ static const VMStateDescription vmstate_spapr = { .pre_load = spapr_pre_load, .post_load = spapr_post_load, .pre_save = spapr_pre_save, - .fields = (VMStateField[]) { + .fields = (const 
VMStateField[]) { /* used to be @next_irq */ VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4), @@ -2119,7 +2148,7 @@ static const VMStateDescription vmstate_spapr = { VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_spapr_ov5_cas, &vmstate_spapr_patb_entry, &vmstate_spapr_pending_events, @@ -2138,6 +2167,7 @@ static const VMStateDescription vmstate_spapr = { &vmstate_spapr_cap_fwnmi, &vmstate_spapr_fwnmi, &vmstate_spapr_cap_rpt_invalidate, + &vmstate_spapr_cap_nested_papr, NULL } }; @@ -2796,6 +2826,7 @@ static void spapr_machine_init(MachineState *machine) MemoryRegion *sysmem = get_system_memory(); long load_limit, fw_size; Error *resize_hpt_err = NULL; + NICInfo *nd; if (!filename) { error_report("Could not find LPAR firmware '%s'", bios_name); @@ -2996,21 +3027,12 @@ static void spapr_machine_init(MachineState *machine) phb = spapr_create_default_phb(); - for (i = 0; i < nb_nics; i++) { - NICInfo *nd = &nd_table[i]; - - if (!nd->model) { - nd->model = g_strdup("spapr-vlan"); - } - - if (g_str_equal(nd->model, "spapr-vlan") || - g_str_equal(nd->model, "ibmveth")) { - spapr_vlan_create(spapr->vio_bus, nd); - } else { - pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL); - } + while ((nd = qemu_find_nic_info("spapr-vlan", true, "ibmveth"))) { + spapr_vlan_create(spapr->vio_bus, nd); } + pci_init_nic_devices(phb->bus, NULL); + for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) { spapr_vscsi_create(spapr->vio_bus); } @@ -3032,8 +3054,10 @@ static void spapr_machine_init(MachineState *machine) } if (has_vga) { - USBBus *usb_bus = usb_bus_find(-1); + USBBus *usb_bus; + usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS, + &error_abort)); usb_create_simple(usb_bus, "usb-kbd"); usb_create_simple(usb_bus, "usb-mouse"); } @@ -3487,8 +3511,7 @@ static void spapr_machine_finalizefn(Object *obj) void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg) { SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); cpu_synchronize_state(cs); /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */ @@ -3985,7 +4008,6 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev) SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev)); CPUCore *cc = CPU_CORE(dev); - CPUState *cs; SpaprDrc *drc; CPUArchId *core_slot; int index; @@ -4019,7 +4041,7 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev) } } - core_slot->cpu = OBJECT(dev); + core_slot->cpu = CPU(dev); /* * Set compatibility mode to match the boot CPU, which was either set @@ -4035,7 +4057,7 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev) if (smc->pre_2_10_has_unused_icps) { for (i = 0; i < cc->nr_threads; i++) { - cs = CPU(core->threads[i]); + CPUState *cs = CPU(core->threads[i]); pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index); } } @@ -4647,13 +4669,10 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) mc->block_default_type = IF_SCSI; /* - * Setting max_cpus to INT32_MAX. Both KVM and TCG max_cpus values - * should be limited by the host capability instead of hardcoded. - * max_cpus for KVM guests will be checked in kvm_init(), and TCG - * guests are welcome to have as many CPUs as the host are capable - * of emulate. 
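The recurring .fields = (const VMStateField[]) { ... } change relies on a C99 compound literal; adding const lets the array live in read-only storage. A minimal standalone illustration of the same idiom with a stand-in struct (not the real VMStateField definition):

#include <stdio.h>

/* Stand-in for VMStateField: just enough structure to show the idiom */
typedef struct {
    const char *name;
    int size;
} Field;

typedef struct {
    const char *name;
    const Field *fields;      /* points at a const compound literal */
} Description;

static const Description desc = {
    .name = "example",
    .fields = (const Field[]) {
        { "reg_a", 4 },
        { "reg_b", 8 },
        { NULL, 0 },          /* terminator, like VMSTATE_END_OF_LIST() */
    },
};

int main(void)
{
    for (const Field *f = desc.fields; f->name; f++) {
        printf("%s: %d bytes\n", f->name, f->size);
    }
    return 0;
}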
+ * While KVM determines max cpus in kvm_init() using kvm_max_vcpus(), + * In TCG the limit is restricted by the range of CPU IPIs available. */ - mc->max_cpus = INT32_MAX; + mc->max_cpus = SPAPR_IRQ_NR_IPIS; mc->no_parallel = 1; mc->default_boot_order = ""; @@ -4675,7 +4694,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) smc->dr_lmb_enabled = true; smc->update_dt_enabled = true; - mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.2"); + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0"); mc->has_hotpluggable_cpus = true; mc->nvdimm_supported = true; smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED; @@ -4713,6 +4732,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND; smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */ smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF; + smc->default_caps.caps[SPAPR_CAP_NESTED_PAPR] = SPAPR_CAP_OFF; smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON; smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON; smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON; @@ -4785,15 +4805,26 @@ static void spapr_machine_latest_class_options(MachineClass *mc) } \ type_init(spapr_machine_register_##suffix) +/* + * pseries-9.0 + */ +static void spapr_machine_9_0_class_options(MachineClass *mc) +{ + /* Defaults for the latest behaviour inherited from the base class */ +} + +DEFINE_SPAPR_MACHINE(9_0, "9.0", true); + /* * pseries-8.2 */ static void spapr_machine_8_2_class_options(MachineClass *mc) { - /* Defaults for the latest behaviour inherited from the base class */ + spapr_machine_9_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); } -DEFINE_SPAPR_MACHINE(8_2, "8.2", true); +DEFINE_SPAPR_MACHINE(8_2, "8.2", false); /* * pseries-8.1 @@ -5083,6 +5114,7 @@ static void spapr_machine_2_11_class_options(MachineClass *mc) spapr_machine_2_12_class_options(mc); smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON; compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len); + mc->deprecation_reason = "old and not maintained - use a 2.12+ version"; } DEFINE_SPAPR_MACHINE(2_11, "2.11", false); diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c index 5a0755d34fb..0a15415a1d0 100644 --- a/hw/ppc/spapr_caps.c +++ b/hw/ppc/spapr_caps.c @@ -194,8 +194,7 @@ static void cap_htm_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) static void cap_vsx_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { ERRP_GUARD(); - PowerPCCPU *cpu = POWERPC_CPU(first_cpu); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(first_cpu); if (!val) { /* TODO: We don't support disabling vsx yet */ @@ -213,14 +212,12 @@ static void cap_vsx_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) static void cap_dfp_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) { ERRP_GUARD(); - PowerPCCPU *cpu = POWERPC_CPU(first_cpu); - CPUPPCState *env = &cpu->env; if (!val) { /* TODO: We don't support disabling dfp yet */ return; } - if (!(env->insns_flags2 & PPC2_DFP)) { + if (!(cpu_env(first_cpu)->insns_flags2 & PPC2_DFP)) { error_setg(errp, "DFP support not available"); error_append_hint(errp, "Try appending -machine cap-dfp=off\n"); } @@ -487,6 +484,50 @@ static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr, error_append_hint(errp, "Try appending -machine cap-nested-hv=off " "or use threads=1 with -smp\n"); } + if (spapr_nested_api(spapr) && + 
spapr_nested_api(spapr) != NESTED_API_KVM_HV) { + error_setg(errp, "Nested-HV APIs are mutually exclusive"); + error_append_hint(errp, "Please use either cap-nested-hv or " + "cap-nested-papr to proceed.\n"); + return; + } else { + spapr->nested.api = NESTED_API_KVM_HV; + } + } +} + +static void cap_nested_papr_apply(SpaprMachineState *spapr, + uint8_t val, Error **errp) +{ + ERRP_GUARD(); + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + CPUPPCState *env = &cpu->env; + + if (!val) { + /* capability disabled by default */ + return; + } + + if (tcg_enabled()) { + if (!(env->insns_flags2 & PPC2_ISA300)) { + error_setg(errp, "Nested-PAPR only supported on POWER9 and later"); + error_append_hint(errp, + "Try appending -machine cap-nested-papr=off\n"); + return; + } + if (spapr_nested_api(spapr) && + spapr_nested_api(spapr) != NESTED_API_PAPR) { + error_setg(errp, "Nested-HV APIs are mutually exclusive"); + error_append_hint(errp, "Please use either cap-nested-hv or " + "cap-nested-papr to proceed.\n"); + return; + } else { + spapr->nested.api = NESTED_API_PAPR; + } + } else if (kvm_enabled()) { + error_setg(errp, "KVM implementation does not support Nested-PAPR"); + error_append_hint(errp, + "Try appending -machine cap-nested-papr=off\n"); } } @@ -735,6 +776,15 @@ SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = { .type = "bool", .apply = cap_nested_kvm_hv_apply, }, + [SPAPR_CAP_NESTED_PAPR] = { + .name = "nested-papr", + .description = "Allow Nested HV (PAPR API)", + .index = SPAPR_CAP_NESTED_PAPR, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_nested_papr_apply, + }, [SPAPR_CAP_LARGE_DECREMENTER] = { .name = "large-decr", .description = "Allow Large Decrementer", @@ -904,7 +954,7 @@ const VMStateDescription vmstate_spapr_cap_##sname = { \ .version_id = 1, \ .minimum_version_id = 1, \ .needed = spapr_cap_##sname##_needed, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT8(mig.caps[cap], \ SpaprMachineState), \ VMSTATE_END_OF_LIST() \ @@ -919,6 +969,7 @@ SPAPR_CAP_MIG_STATE(sbbc, SPAPR_CAP_SBBC); SPAPR_CAP_MIG_STATE(ibs, SPAPR_CAP_IBS); SPAPR_CAP_MIG_STATE(hpt_maxpagesize, SPAPR_CAP_HPT_MAXPAGESIZE); SPAPR_CAP_MIG_STATE(nested_kvm_hv, SPAPR_CAP_NESTED_KVM_HV); +SPAPR_CAP_MIG_STATE(nested_papr, SPAPR_CAP_NESTED_PAPR); SPAPR_CAP_MIG_STATE(large_decr, SPAPR_CAP_LARGE_DECREMENTER); SPAPR_CAP_MIG_STATE(ccf_assist, SPAPR_CAP_CCF_ASSIST); SPAPR_CAP_MIG_STATE(fwnmi, SPAPR_CAP_FWNMI); diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 91fae56573e..e7c9edd033c 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -39,9 +39,13 @@ static void spapr_reset_vcpu(PowerPCCPU *cpu) /* * "PowerPC Processor binding to IEEE 1275" defines the initial MSR state - * as 32bit (MSR_SF=0) in "8.2.1. Initial Register Values". + * as 32bit (MSR_SF=0) with MSR_ME=1 and MSR_FP=1 in "8.2.1. Initial + * Register Values". This can also be found in "LoPAPR 1.1" "C.9.2.1 + * Initial Register Values". 
*/ env->msr &= ~(1ULL << MSR_SF); + env->msr |= (1ULL << MSR_ME) | (1ULL << MSR_FP); + env->spr[SPR_HIOR] = 0; lpcr = env->spr[SPR_LPCR]; @@ -127,7 +131,7 @@ static const VMStateDescription vmstate_spapr_cpu_slb_shadow = { .version_id = 1, .minimum_version_id = 1, .needed = slb_shadow_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(slb_shadow_addr, SpaprCpuState), VMSTATE_UINT64(slb_shadow_size, SpaprCpuState), VMSTATE_END_OF_LIST() @@ -146,7 +150,7 @@ static const VMStateDescription vmstate_spapr_cpu_dtl = { .version_id = 1, .minimum_version_id = 1, .needed = dtl_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(dtl_addr, SpaprCpuState), VMSTATE_UINT64(dtl_size, SpaprCpuState), VMSTATE_END_OF_LIST() @@ -165,11 +169,11 @@ static const VMStateDescription vmstate_spapr_cpu_vpa = { .version_id = 1, .minimum_version_id = 1, .needed = vpa_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(vpa_addr, SpaprCpuState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_spapr_cpu_slb_shadow, &vmstate_spapr_cpu_dtl, NULL @@ -180,10 +184,10 @@ static const VMStateDescription vmstate_spapr_cpu_state = { .name = "spapr_cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_spapr_cpu_vpa, NULL } @@ -245,8 +249,7 @@ static void spapr_cpu_core_unrealize(DeviceState *dev) * spapr_cpu_core_realize(), make sure we only unrealize * vCPUs that have already been realized. */ - if (object_property_get_bool(OBJECT(sc->threads[i]), "realized", - &error_abort)) { + if (qdev_is_realized(DEVICE(sc->threads[i]))) { spapr_unrealize_vcpu(sc->threads[i], sc); } spapr_delete_vcpu(sc->threads[i]); @@ -306,7 +309,7 @@ static PowerPCCPU *spapr_create_vcpu(SpaprCpuCore *sc, int i, Error **errp) * All CPUs start halted. CPU0 is unhalted from the machine level reset code * and the rest are explicitly started up by the guest using an RTAS call. 
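spapr_reset_vcpu() now starts vCPUs with MSR_SF clear and MSR_ME | MSR_FP set, per the binding text quoted above. Taking the architected bit positions as an assumption (SF = bit 63, FP = bit 13, ME = bit 12, i.e. 0x2000 and 0x1000), the resulting initial value can be worked through in a standalone sketch:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Architected MSR bit positions, assumed here for illustration */
#define MSR_SF  63   /* 64-bit mode           */
#define MSR_FP  13   /* FP facility available */
#define MSR_ME  12   /* machine check enable  */

int main(void)
{
    uint64_t msr = 1ULL << MSR_SF;       /* e.g. left in 64-bit mode */

    msr &= ~(1ULL << MSR_SF);            /* binding: start in 32-bit mode */
    msr |= (1ULL << MSR_ME) | (1ULL << MSR_FP);

    printf("initial MSR = 0x%" PRIx64 "\n", msr);   /* 0x3000 */
    return 0;
}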
*/ - cs->start_powered_off = true; + qdev_prop_set_bit(DEVICE(obj), "start-powered-off", true); cs->cpu_index = cc->core_id + i; if (!spapr_set_vcpu_id(cpu, cs->cpu_index, errp)) { return NULL; @@ -389,16 +392,14 @@ static const TypeInfo spapr_cpu_core_type_infos[] = { DEFINE_SPAPR_CPU_CORE_TYPE("970_v2.2"), DEFINE_SPAPR_CPU_CORE_TYPE("970mp_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("970mp_v1.1"), - DEFINE_SPAPR_CPU_CORE_TYPE("power5+_v2.1"), + DEFINE_SPAPR_CPU_CORE_TYPE("power5p_v2.1"), DEFINE_SPAPR_CPU_CORE_TYPE("power7_v2.3"), - DEFINE_SPAPR_CPU_CORE_TYPE("power7+_v2.1"), + DEFINE_SPAPR_CPU_CORE_TYPE("power7p_v2.1"), DEFINE_SPAPR_CPU_CORE_TYPE("power8_v2.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power8e_v2.1"), DEFINE_SPAPR_CPU_CORE_TYPE("power8nvl_v1.0"), - DEFINE_SPAPR_CPU_CORE_TYPE("power9_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.2"), - DEFINE_SPAPR_CPU_CORE_TYPE("power10_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"), #ifdef CONFIG_KVM DEFINE_SPAPR_CPU_CORE_TYPE("host"), diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c index 2b99d3b4b1a..1484e3209d9 100644 --- a/hw/ppc/spapr_drc.c +++ b/hw/ppc/spapr_drc.c @@ -471,7 +471,7 @@ static const VMStateDescription vmstate_spapr_drc_unplug_requested = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_drc_unplug_requested_needed, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_BOOL(unplug_requested, SpaprDrc), VMSTATE_END_OF_LIST() } @@ -504,11 +504,11 @@ static const VMStateDescription vmstate_spapr_drc = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_drc_needed, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(state, SpaprDrc), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_spapr_drc_unplug_requested, NULL } @@ -611,7 +611,7 @@ static const VMStateDescription vmstate_spapr_drc_physical = { .version_id = 1, .minimum_version_id = 1, .needed = drc_physical_needed, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(dr_indicator, SpaprDrcPhysical), VMSTATE_END_OF_LIST() } diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c index deb46415057..cb0eeee5874 100644 --- a/hw/ppc/spapr_events.c +++ b/hw/ppc/spapr_events.c @@ -899,7 +899,7 @@ void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered) } return; } - qemu_cond_wait_iothread(&spapr->fwnmi_machine_check_interlock_cond); + qemu_cond_wait_bql(&spapr->fwnmi_machine_check_interlock_cond); if (spapr->fwnmi_machine_check_addr == -1) { /* * If the machine was reset while waiting for the interlock, diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 522a2396c7f..5e1d020e3df 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -8,7 +8,6 @@ #include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/error-report.h" -#include "exec/exec-all.h" #include "exec/tb-flush.h" #include "helper_regs.h" #include "hw/ppc/ppc.h" @@ -124,9 +123,11 @@ static target_ulong h_resize_hpt_prepare(PowerPCCPU *cpu, if (kvm_enabled()) { return H_HARDWARE; + } else if (tcg_enabled()) { + return vhyp_mmu_resize_hpt_prepare(cpu, spapr, shift); + } else { + g_assert_not_reached(); } - - return softmmu_resize_hpt_prepare(cpu, spapr, shift); } static void do_push_sregs_to_kvm_pr(CPUState *cs, run_on_cpu_data data) @@ -192,9 +193,11 @@ static target_ulong h_resize_hpt_commit(PowerPCCPU *cpu, if (kvm_enabled()) { return H_HARDWARE; + } else 
if (tcg_enabled()) { + return vhyp_mmu_resize_hpt_commit(cpu, spapr, flags, shift); + } else { + g_assert_not_reached(); } - - return softmmu_resize_hpt_commit(cpu, spapr, flags, shift); } @@ -1522,6 +1525,28 @@ void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn) *slot = fn; } +void spapr_unregister_hypercall(target_ulong opcode) +{ + spapr_hcall_fn *slot; + + if (opcode <= MAX_HCALL_OPCODE) { + assert((opcode & 0x3) == 0); + + slot = &papr_hypercall_table[opcode / 4]; + } else if (opcode >= SVM_HCALL_BASE && opcode <= SVM_HCALL_MAX) { + /* we only have SVM-related hcall numbers assigned in multiples of 4 */ + assert((opcode & 0x3) == 0); + + slot = &svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4]; + } else { + assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX)); + + slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE]; + } + + *slot = NULL; +} + target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode, target_ulong *args) { @@ -1635,8 +1660,6 @@ static void hypercall_register_types(void) spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support); spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt); - - spapr_register_nested(); } type_init(hypercall_register_types) diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c index 5e3973fc5fb..e3c01ef44f8 100644 --- a/hw/ppc/spapr_iommu.c +++ b/hw/ppc/spapr_iommu.c @@ -270,7 +270,7 @@ static const VMStateDescription vmstate_spapr_tce_table_ex = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_tce_table_ex_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(bus_offset, SpaprTceTable), VMSTATE_UINT32(page_shift, SpaprTceTable), VMSTATE_END_OF_LIST() @@ -283,7 +283,7 @@ static const VMStateDescription vmstate_spapr_tce_table = { .minimum_version_id = 2, .pre_save = spapr_tce_table_pre_save, .post_load = spapr_tce_table_post_load, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { /* Sanity check */ VMSTATE_UINT32_EQUAL(liobn, SpaprTceTable, NULL), @@ -296,7 +296,7 @@ static const VMStateDescription vmstate_spapr_tce_table = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_spapr_tce_table_ex, NULL } diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index a0d1e1298e1..97b2fc42ab0 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -23,6 +23,8 @@ #include "trace.h" +QEMU_BUILD_BUG_ON(SPAPR_IRQ_NR_IPIS > SPAPR_XIRQ_BASE); + static const TypeInfo spapr_intc_info = { .name = TYPE_SPAPR_INTC, .parent = TYPE_INTERFACE, @@ -329,7 +331,7 @@ void spapr_irq_init(SpaprMachineState *spapr, Error **errp) int i; dev = qdev_new(TYPE_SPAPR_XIVE); - qdev_prop_set_uint32(dev, "nr-irqs", smc->nr_xirqs + SPAPR_XIRQ_BASE); + qdev_prop_set_uint32(dev, "nr-irqs", smc->nr_xirqs + SPAPR_IRQ_NR_IPIS); /* * 8 XIVE END structures per CPU. 
One for each available * priority @@ -356,7 +358,7 @@ void spapr_irq_init(SpaprMachineState *spapr, Error **errp) } spapr->qirqs = qemu_allocate_irqs(spapr_set_irq, spapr, - smc->nr_xirqs + SPAPR_XIRQ_BASE); + smc->nr_xirqs + SPAPR_IRQ_NR_IPIS); /* * Mostly we don't actually need this until reset, except that not diff --git a/hw/ppc/spapr_nested.c b/hw/ppc/spapr_nested.c index 121aa96ddcd..c02785756c1 100644 --- a/hw/ppc/spapr_nested.c +++ b/hw/ppc/spapr_nested.c @@ -6,8 +6,85 @@ #include "hw/ppc/spapr.h" #include "hw/ppc/spapr_cpu_core.h" #include "hw/ppc/spapr_nested.h" +#include "mmu-book3s-v3.h" +#include "cpu-models.h" +#include "qemu/log.h" + +void spapr_nested_reset(SpaprMachineState *spapr) +{ + if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) { + spapr_unregister_nested_hv(); + spapr_register_nested_hv(); + } else if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_PAPR)) { + spapr->nested.capabilities_set = false; + spapr_unregister_nested_papr(); + spapr_register_nested_papr(); + spapr_nested_gsb_init(); + } else { + spapr->nested.api = 0; + } +} + +uint8_t spapr_nested_api(SpaprMachineState *spapr) +{ + return spapr->nested.api; +} #ifdef CONFIG_TCG + +bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry) +{ + uint64_t patb, pats; + + assert(lpid != 0); + + patb = spapr->nested.ptcr & PTCR_PATB; + pats = spapr->nested.ptcr & PTCR_PATS; + + /* Check if partition table is properly aligned */ + if (patb & MAKE_64BIT_MASK(0, pats + 12)) { + return false; + } + + /* Calculate number of entries */ + pats = 1ull << (pats + 12 - 4); + if (pats <= lpid) { + return false; + } + + /* Grab entry */ + patb += 16 * lpid; + entry->dw0 = ldq_phys(CPU(cpu)->as, patb); + entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8); + return true; +} + +static +SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr, + target_ulong guestid) +{ + SpaprMachineStateNestedGuest *guest; + + guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid)); + return guest; +} + +bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry) +{ + SpaprMachineStateNestedGuest *guest; + assert(lpid != 0); + guest = spapr_get_nested_guest(spapr, lpid); + if (!guest) { + return false; + } + + entry->dw0 = guest->parttbl[0]; + entry->dw1 = guest->parttbl[1]; + return true; +} + #define PRTS_MASK 0x1f static target_ulong h_set_ptbl(PowerPCCPU *cpu, @@ -25,7 +102,7 @@ static target_ulong h_set_ptbl(PowerPCCPU *cpu, return H_PARAMETER; } - spapr->nested_ptcr = ptcr; /* Save new partition table */ + spapr->nested.ptcr = ptcr; /* Save new partition table */ return H_SUCCESS; } @@ -59,6 +136,7 @@ static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu, static void nested_save_state(struct nested_ppc_state *save, PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); memcpy(save->gpr, env->gpr, sizeof(save->gpr)); @@ -85,13 +163,79 @@ static void nested_save_state(struct nested_ppc_state *save, PowerPCCPU *cpu) save->pidr = env->spr[SPR_BOOKS_PID]; save->ppr = env->spr[SPR_PPR]; - save->tb_offset = env->tb_env->tb_offset; + if (spapr_nested_api(spapr) == NESTED_API_PAPR) { + save->amor = env->spr[SPR_AMOR]; + save->dawr0 = env->spr[SPR_DAWR0]; + save->dawrx0 = env->spr[SPR_DAWRX0]; + save->ciabr = env->spr[SPR_CIABR]; + save->purr = env->spr[SPR_PURR]; + save->spurr = env->spr[SPR_SPURR]; + save->ic = env->spr[SPR_IC]; + save->vtb = 
env->spr[SPR_VTB]; + save->hdar = env->spr[SPR_HDAR]; + save->hdsisr = env->spr[SPR_HDSISR]; + save->heir = env->spr[SPR_HEIR]; + save->asdr = env->spr[SPR_ASDR]; + save->dawr1 = env->spr[SPR_DAWR1]; + save->dawrx1 = env->spr[SPR_DAWRX1]; + save->dexcr = env->spr[SPR_DEXCR]; + save->hdexcr = env->spr[SPR_HDEXCR]; + save->hashkeyr = env->spr[SPR_HASHKEYR]; + save->hashpkeyr = env->spr[SPR_HASHPKEYR]; + memcpy(save->vsr, env->vsr, sizeof(save->vsr)); + save->ebbhr = env->spr[SPR_EBBHR]; + save->tar = env->spr[SPR_TAR]; + save->ebbrr = env->spr[SPR_EBBRR]; + save->bescr = env->spr[SPR_BESCR]; + save->iamr = env->spr[SPR_IAMR]; + save->amr = env->spr[SPR_AMR]; + save->uamor = env->spr[SPR_UAMOR]; + save->dscr = env->spr[SPR_DSCR]; + save->fscr = env->spr[SPR_FSCR]; + save->pspb = env->spr[SPR_PSPB]; + save->ctrl = env->spr[SPR_CTRL]; + save->vrsave = env->spr[SPR_VRSAVE]; + save->dar = env->spr[SPR_DAR]; + save->dsisr = env->spr[SPR_DSISR]; + save->pmc1 = env->spr[SPR_POWER_PMC1]; + save->pmc2 = env->spr[SPR_POWER_PMC2]; + save->pmc3 = env->spr[SPR_POWER_PMC3]; + save->pmc4 = env->spr[SPR_POWER_PMC4]; + save->pmc5 = env->spr[SPR_POWER_PMC5]; + save->pmc6 = env->spr[SPR_POWER_PMC6]; + save->mmcr0 = env->spr[SPR_POWER_MMCR0]; + save->mmcr1 = env->spr[SPR_POWER_MMCR1]; + save->mmcr2 = env->spr[SPR_POWER_MMCR2]; + save->mmcra = env->spr[SPR_POWER_MMCRA]; + save->sdar = env->spr[SPR_POWER_SDAR]; + save->siar = env->spr[SPR_POWER_SIAR]; + save->sier = env->spr[SPR_POWER_SIER]; + save->vscr = ppc_get_vscr(env); + save->fpscr = env->fpscr; + } else if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) { + save->tb_offset = env->tb_env->tb_offset; + } +} + +static void nested_post_load_state(CPUPPCState *env, CPUState *cs) +{ + /* + * compute hflags and possible interrupts. + */ + hreg_compute_hflags(env); + ppc_maybe_interrupt(env); + /* + * Nested HV does not tag TLB entries between L1 and L2, so must + * flush on transition. + */ + tlb_flush(cs); + env->reserve_addr = -1; /* Reset the reservation */ } static void nested_load_state(PowerPCCPU *cpu, struct nested_ppc_state *load) { - CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); memcpy(env->gpr, load->gpr, sizeof(env->gpr)); @@ -118,20 +262,58 @@ static void nested_load_state(PowerPCCPU *cpu, struct nested_ppc_state *load) env->spr[SPR_BOOKS_PID] = load->pidr; env->spr[SPR_PPR] = load->ppr; - env->tb_env->tb_offset = load->tb_offset; - - /* - * MSR updated, compute hflags and possible interrupts. - */ - hreg_compute_hflags(env); - ppc_maybe_interrupt(env); - - /* - * Nested HV does not tag TLB entries between L1 and L2, so must - * flush on transition. 
-     */
-    tlb_flush(cs);
-    env->reserve_addr = -1; /* Reset the reservation */
+    if (spapr_nested_api(spapr) == NESTED_API_PAPR) {
+        env->spr[SPR_AMOR] = load->amor;
+        env->spr[SPR_DAWR0] = load->dawr0;
+        env->spr[SPR_DAWRX0] = load->dawrx0;
+        env->spr[SPR_CIABR] = load->ciabr;
+        env->spr[SPR_PURR] = load->purr;
+        env->spr[SPR_SPURR] = load->spurr;
+        env->spr[SPR_IC] = load->ic;
+        env->spr[SPR_VTB] = load->vtb;
+        env->spr[SPR_HDAR] = load->hdar;
+        env->spr[SPR_HDSISR] = load->hdsisr;
+        env->spr[SPR_HEIR] = load->heir;
+        env->spr[SPR_ASDR] = load->asdr;
+        env->spr[SPR_DAWR1] = load->dawr1;
+        env->spr[SPR_DAWRX1] = load->dawrx1;
+        env->spr[SPR_DEXCR] = load->dexcr;
+        env->spr[SPR_HDEXCR] = load->hdexcr;
+        env->spr[SPR_HASHKEYR] = load->hashkeyr;
+        env->spr[SPR_HASHPKEYR] = load->hashpkeyr;
+        memcpy(env->vsr, load->vsr, sizeof(env->vsr));
+        env->spr[SPR_EBBHR] = load->ebbhr;
+        env->spr[SPR_TAR] = load->tar;
+        env->spr[SPR_EBBRR] = load->ebbrr;
+        env->spr[SPR_BESCR] = load->bescr;
+        env->spr[SPR_IAMR] = load->iamr;
+        env->spr[SPR_AMR] = load->amr;
+        env->spr[SPR_UAMOR] = load->uamor;
+        env->spr[SPR_DSCR] = load->dscr;
+        env->spr[SPR_FSCR] = load->fscr;
+        env->spr[SPR_PSPB] = load->pspb;
+        env->spr[SPR_CTRL] = load->ctrl;
+        env->spr[SPR_VRSAVE] = load->vrsave;
+        env->spr[SPR_DAR] = load->dar;
+        env->spr[SPR_DSISR] = load->dsisr;
+        env->spr[SPR_POWER_PMC1] = load->pmc1;
+        env->spr[SPR_POWER_PMC2] = load->pmc2;
+        env->spr[SPR_POWER_PMC3] = load->pmc3;
+        env->spr[SPR_POWER_PMC4] = load->pmc4;
+        env->spr[SPR_POWER_PMC5] = load->pmc5;
+        env->spr[SPR_POWER_PMC6] = load->pmc6;
+        env->spr[SPR_POWER_MMCR0] = load->mmcr0;
+        env->spr[SPR_POWER_MMCR1] = load->mmcr1;
+        env->spr[SPR_POWER_MMCR2] = load->mmcr2;
+        env->spr[SPR_POWER_MMCRA] = load->mmcra;
+        env->spr[SPR_POWER_SDAR] = load->sdar;
+        env->spr[SPR_POWER_SIAR] = load->siar;
+        env->spr[SPR_POWER_SIER] = load->sier;
+        ppc_store_vscr(env, load->vscr);
+        ppc_store_fpscr(env, load->fpscr);
+    } else if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) {
+        env->tb_env->tb_offset = load->tb_offset;
+    }
 }
 
 /*
@@ -146,6 +328,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
 {
     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
     CPUPPCState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
     struct nested_ppc_state l2_state;
     target_ulong hv_ptr = args[0];
@@ -157,7 +340,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
     struct kvmppc_pt_regs *regs;
     hwaddr len;
 
-    if (spapr->nested_ptcr == 0) {
+    if (spapr->nested.ptcr == 0) {
         return H_NOT_AVAILABLE;
     }
 
@@ -244,6 +427,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
      * Switch to the nested guest environment and start the "hdec" timer.
*/ nested_load_state(cpu, &l2_state); + nested_post_load_state(env, cs); hdec = hv_state.hdec_expiry - now; cpu_ppc_hdecr_init(env); @@ -272,9 +456,10 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu, return env->gpr[3]; } -void spapr_exit_nested(PowerPCCPU *cpu, int excp) +static void spapr_exit_nested_hv(PowerPCCPU *cpu, int excp) { CPUPPCState *env = &cpu->env; + CPUState *cs = CPU(cpu); SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); struct nested_ppc_state l2_state; target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4]; @@ -284,8 +469,6 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp) struct kvmppc_pt_regs *regs; hwaddr len; - assert(spapr_cpu->in_nested); - nested_save_state(&l2_state, cpu); hsrr0 = env->spr[SPR_HSRR0]; hsrr1 = env->spr[SPR_HSRR1]; @@ -298,6 +481,7 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp) */ assert(env->spr[SPR_LPIDR] != 0); nested_load_state(cpu, spapr_cpu->nested_host_state); + nested_post_load_state(env, cs); env->gpr[3] = env->excp_vectors[excp]; /* hcall return value */ cpu_ppc_hdecr_exit(env); @@ -375,21 +559,1347 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp) address_space_unmap(CPU(cpu)->as, regs, len, len, true); } -void spapr_register_nested(void) +static bool spapr_nested_vcpu_check(SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid, bool inoutbuf) +{ + struct SpaprMachineStateNestedGuestVcpu *vcpu; + /* + * Perform sanity checks for the provided vcpuid of a guest. + * For now, ensure its valid, allocated and enabled for use. + */ + + if (vcpuid >= PAPR_NESTED_GUEST_VCPU_MAX) { + return false; + } + + if (!(vcpuid < guest->nr_vcpus)) { + return false; + } + + vcpu = &guest->vcpus[vcpuid]; + if (!vcpu->enabled) { + return false; + } + + if (!inoutbuf) { + return true; + } + + /* Check to see if the in/out buffers are registered */ + if (vcpu->runbufin.addr && vcpu->runbufout.addr) { + return true; + } + + return false; +} + +static void *get_vcpu_state_ptr(SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid) +{ + assert(spapr_nested_vcpu_check(guest, vcpuid, false)); + return &guest->vcpus[vcpuid].state; +} + +static void *get_vcpu_ptr(SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid) +{ + assert(spapr_nested_vcpu_check(guest, vcpuid, false)); + return &guest->vcpus[vcpuid]; +} + +static void *get_guest_ptr(SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid) +{ + return guest; /* for GSBE_NESTED */ +} + +/* + * set=1 means the L1 is trying to set some state + * set=0 means the L1 is trying to get some state + */ +static void copy_state_8to8(void *a, void *b, bool set) +{ + /* set takes from the Big endian element_buf and sets internal buffer */ + + if (set) { + *(uint64_t *)a = be64_to_cpu(*(uint64_t *)b); + } else { + *(uint64_t *)b = cpu_to_be64(*(uint64_t *)a); + } +} + +static void copy_state_4to4(void *a, void *b, bool set) +{ + if (set) { + *(uint32_t *)a = be32_to_cpu(*(uint32_t *)b); + } else { + *(uint32_t *)b = cpu_to_be32(*((uint32_t *)a)); + } +} + +static void copy_state_16to16(void *a, void *b, bool set) +{ + uint64_t *src, *dst; + + if (set) { + src = b; + dst = a; + + dst[1] = be64_to_cpu(src[0]); + dst[0] = be64_to_cpu(src[1]); + } else { + src = a; + dst = b; + + dst[1] = cpu_to_be64(src[0]); + dst[0] = cpu_to_be64(src[1]); + } +} + +static void copy_state_4to8(void *a, void *b, bool set) +{ + if (set) { + *(uint64_t *)a = (uint64_t) be32_to_cpu(*(uint32_t *)b); + } else { + *(uint32_t *)b = cpu_to_be32((uint32_t) (*((uint64_t *)a))); + } +} + +static void 
copy_state_pagetbl(void *a, void *b, bool set) +{ + uint64_t *pagetbl; + uint64_t *buf; /* 3 double words */ + uint64_t rts; + + assert(set); + + pagetbl = a; + buf = b; + + *pagetbl = be64_to_cpu(buf[0]); + /* as per ISA section 6.7.6.1 */ + *pagetbl |= PATE0_HR; /* Host Radix bit is 1 */ + + /* RTS */ + rts = be64_to_cpu(buf[1]); + assert(rts == 52); + rts = rts - 31; /* since radix tree size = 2^(RTS+31) */ + *pagetbl |= ((rts & 0x7) << 5); /* RTS2 is bit 56:58 */ + *pagetbl |= (((rts >> 3) & 0x3) << 61); /* RTS1 is bit 1:2 */ + + /* RPDS {Size = 2^(RPDS+3) , RPDS >=5} */ + *pagetbl |= 63 - clz64(be64_to_cpu(buf[2])) - 3; +} + +static void copy_state_proctbl(void *a, void *b, bool set) +{ + uint64_t *proctbl; + uint64_t *buf; /* 2 double words */ + + assert(set); + + proctbl = a; + buf = b; + /* PRTB: Process Table Base */ + *proctbl = be64_to_cpu(buf[0]); + /* PRTS: Process Table Size = 2^(12+PRTS) */ + if (be64_to_cpu(buf[1]) == (1ULL << 12)) { + *proctbl |= 0; + } else if (be64_to_cpu(buf[1]) == (1ULL << 24)) { + *proctbl |= 12; + } else { + g_assert_not_reached(); + } +} + +static void copy_state_runbuf(void *a, void *b, bool set) +{ + uint64_t *buf; /* 2 double words */ + struct SpaprMachineStateNestedGuestVcpuRunBuf *runbuf; + + assert(set); + + runbuf = a; + buf = b; + + runbuf->addr = be64_to_cpu(buf[0]); + assert(runbuf->addr); + + /* per spec */ + assert(be64_to_cpu(buf[1]) <= 16384); + + /* + * This will also hit in the input buffer but should be fine for + * now. If not we can split this function. + */ + assert(be64_to_cpu(buf[1]) >= VCPU_OUT_BUF_MIN_SZ); + + runbuf->size = be64_to_cpu(buf[1]); +} + +/* tell the L1 how big we want the output vcpu run buffer */ +static void out_buf_min_size(void *a, void *b, bool set) +{ + uint64_t *buf; /* 1 double word */ + + assert(!set); + + buf = b; + + buf[0] = cpu_to_be64(VCPU_OUT_BUF_MIN_SZ); +} + +static void copy_logical_pvr(void *a, void *b, bool set) +{ + SpaprMachineStateNestedGuest *guest; + uint32_t *buf; /* 1 word */ + uint32_t *pvr_logical_ptr; + uint32_t pvr_logical; + target_ulong pcr = 0; + + pvr_logical_ptr = a; + buf = b; + + if (!set) { + buf[0] = cpu_to_be32(*pvr_logical_ptr); + return; + } + + pvr_logical = be32_to_cpu(buf[0]); + + *pvr_logical_ptr = pvr_logical; + + if (*pvr_logical_ptr) { + switch (*pvr_logical_ptr) { + case CPU_POWERPC_LOGICAL_3_10: + pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00; + break; + case CPU_POWERPC_LOGICAL_3_00: + pcr = PCR_COMPAT_3_00; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "Could not set PCR for LPVR=0x%08x\n", + *pvr_logical_ptr); + return; + } + } + + guest = container_of(pvr_logical_ptr, + struct SpaprMachineStateNestedGuest, + pvr_logical); + for (int i = 0; i < guest->nr_vcpus; i++) { + guest->vcpus[i].state.pcr = ~pcr | HVMASK_PCR; + } +} + +static void copy_tb_offset(void *a, void *b, bool set) +{ + SpaprMachineStateNestedGuest *guest; + uint64_t *buf; /* 1 double word */ + uint64_t *tb_offset_ptr; + uint64_t tb_offset; + + tb_offset_ptr = a; + buf = b; + + if (!set) { + buf[0] = cpu_to_be64(*tb_offset_ptr); + return; + } + + tb_offset = be64_to_cpu(buf[0]); + /* need to copy this to the individual tb_offset for each vcpu */ + guest = container_of(tb_offset_ptr, + struct SpaprMachineStateNestedGuest, + tb_offset); + for (int i = 0; i < guest->nr_vcpus; i++) { + guest->vcpus[i].tb_offset = tb_offset; + } +} + +static void copy_state_hdecr(void *a, void *b, bool set) +{ + uint64_t *buf; /* 1 double word */ + uint64_t *hdecr_expiry_tb; + + hdecr_expiry_tb = a; + buf = b; + 
+ if (!set) { + buf[0] = cpu_to_be64(*hdecr_expiry_tb); + return; + } + + *hdecr_expiry_tb = be64_to_cpu(buf[0]); +} + +struct guest_state_element_type guest_state_element_types[] = { + GUEST_STATE_ELEMENT_NOP(GSB_HV_VCPU_IGNORED_ID, 0), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR0, gpr[0]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR1, gpr[1]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR2, gpr[2]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR3, gpr[3]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR4, gpr[4]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR5, gpr[5]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR6, gpr[6]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR7, gpr[7]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR8, gpr[8]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR9, gpr[9]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR10, gpr[10]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR11, gpr[11]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR12, gpr[12]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR13, gpr[13]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR14, gpr[14]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR15, gpr[15]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR16, gpr[16]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR17, gpr[17]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR18, gpr[18]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR19, gpr[19]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR20, gpr[20]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR21, gpr[21]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR22, gpr[22]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR23, gpr[23]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR24, gpr[24]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR25, gpr[25]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR26, gpr[26]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR27, gpr[27]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR28, gpr[28]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR29, gpr[29]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR30, gpr[30]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR31, gpr[31]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_NIA, nip), + GSE_ENV_DWM(GSB_VCPU_SPR_MSR, msr, HVMASK_MSR), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTR, ctr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_LR, lr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_XER, xer), + GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_CR, cr), + GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_MMCR3), + GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER2), + GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER3), + GUEST_STATE_ELEMENT_NOP_W(GSB_VCPU_SPR_WORT), + GSE_ENV_DWM(GSB_VCPU_SPR_LPCR, lpcr, HVMASK_LPCR), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMOR, amor), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HFSCR, hfscr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR0, dawr0), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX0, dawrx0), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CIABR, ciabr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PURR, purr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPURR, spurr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IC, ic), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_VTB, vtb), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HDAR, hdar), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HDSISR, hdsisr), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HEIR, heir), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_ASDR, asdr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR0, srr0), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR1, srr1), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG0, sprg0), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG1, sprg1), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG2, sprg2), + 
GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG3, sprg3), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PIDR, pidr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CFAR, cfar), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PPR, ppr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR1, dawr1), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX1, dawrx1), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DEXCR, dexcr), + GSE_ENV_DWM(GSB_VCPU_SPR_HDEXCR, hdexcr, HVMASK_HDEXCR), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHKEYR, hashkeyr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHPKEYR, hashpkeyr), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR0, vsr[0]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR1, vsr[1]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR2, vsr[2]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR3, vsr[3]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR4, vsr[4]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR5, vsr[5]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR6, vsr[6]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR7, vsr[7]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR8, vsr[8]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR9, vsr[9]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR10, vsr[10]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR11, vsr[11]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR12, vsr[12]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR13, vsr[13]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR14, vsr[14]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR15, vsr[15]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR16, vsr[16]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR17, vsr[17]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR18, vsr[18]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR19, vsr[19]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR20, vsr[20]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR21, vsr[21]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR22, vsr[22]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR23, vsr[23]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR24, vsr[24]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR25, vsr[25]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR26, vsr[26]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR27, vsr[27]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR28, vsr[28]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR29, vsr[29]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR30, vsr[30]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR31, vsr[31]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR32, vsr[32]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR33, vsr[33]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR34, vsr[34]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR35, vsr[35]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR36, vsr[36]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR37, vsr[37]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR38, vsr[38]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR39, vsr[39]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR40, vsr[40]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR41, vsr[41]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR42, vsr[42]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR43, vsr[43]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR44, vsr[44]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR45, vsr[45]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR46, vsr[46]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR47, vsr[47]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR48, vsr[48]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR49, vsr[49]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR50, 
vsr[50]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR51, vsr[51]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR52, vsr[52]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR53, vsr[53]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR54, vsr[54]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR55, vsr[55]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR56, vsr[56]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR57, vsr[57]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR58, vsr[58]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR59, vsr[59]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR60, vsr[60]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR61, vsr[61]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR62, vsr[62]), + GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR63, vsr[63]), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBHR, ebbhr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_TAR, tar), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBRR, ebbrr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_BESCR, bescr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IAMR, iamr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMR, amr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_UAMOR, uamor), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DSCR, dscr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FSCR, fscr), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PSPB, pspb), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTRL, ctrl), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_VRSAVE, vrsave), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAR, dar), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DSISR, dsisr), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC1, pmc1), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC2, pmc2), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC3, pmc3), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC4, pmc4), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC5, pmc5), + GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC6, pmc6), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR0, mmcr0), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR1, mmcr1), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR2, mmcr2), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCRA, mmcra), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SDAR , sdar), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIAR , siar), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIER , sier), + GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_VSCR, vscr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FPSCR, fpscr), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_DEC_EXPIRE_TB, dec_expiry_tb), + GSBE_NESTED(GSB_PART_SCOPED_PAGETBL, 0x18, parttbl[0], copy_state_pagetbl), + GSBE_NESTED(GSB_PROCESS_TBL, 0x10, parttbl[1], copy_state_proctbl), + GSBE_NESTED(GSB_VCPU_LPVR, 0x4, pvr_logical, copy_logical_pvr), + GSBE_NESTED_MSK(GSB_TB_OFFSET, 0x8, tb_offset, copy_tb_offset, + HVMASK_TB_OFFSET), + GSBE_NESTED_VCPU(GSB_VCPU_IN_BUFFER, 0x10, runbufin, copy_state_runbuf), + GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUFFER, 0x10, runbufout, copy_state_runbuf), + GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUF_MIN_SZ, 0x8, runbufout, out_buf_min_size), + GSBE_NESTED_VCPU(GSB_VCPU_HDEC_EXPIRY_TB, 0x8, hdecr_expiry_tb, + copy_state_hdecr) +}; + +void spapr_nested_gsb_init(void) +{ + struct guest_state_element_type *type; + + /* Init the guest state elements lookup table, flags for now */ + for (int i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) { + type = &guest_state_element_types[i]; + + assert(type->id <= GSB_LAST); + if (type->id >= GSB_VCPU_SPR_HDAR) + /* 0xf000 - 0xf005 Thread + RO */ + type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY; + else if (type->id >= GSB_VCPU_IN_BUFFER) + /* 0x0c00 - 
0xf000 Thread + RW */ + type->flags = 0; + else if (type->id >= GSB_VCPU_LPVR) + /* 0x0003 - 0x0bff Guest + RW */ + type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE; + else if (type->id >= GSB_HV_VCPU_STATE_SIZE) + /* 0x0001 - 0x0002 Guest + RO */ + type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY | + GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE; + } +} + +static struct guest_state_element *guest_state_element_next( + struct guest_state_element *element, + int64_t *len, + int64_t *num_elements) +{ + uint16_t size; + + /* size is of element->value[] only. Not whole guest_state_element */ + size = be16_to_cpu(element->size); + + if (len) { + *len -= size + offsetof(struct guest_state_element, value); + } + + if (num_elements) { + *num_elements -= 1; + } + + return (struct guest_state_element *)(element->value + size); +} + +static +struct guest_state_element_type *guest_state_element_type_find(uint16_t id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) + if (id == guest_state_element_types[i].id) { + return &guest_state_element_types[i]; + } + + return NULL; +} + +static void log_element(struct guest_state_element *element, + struct guest_state_request *gsr) +{ + qemu_log_mask(LOG_GUEST_ERROR, "h_guest_%s_state id:0x%04x size:0x%04x", + gsr->flags & GUEST_STATE_REQUEST_SET ? "set" : "get", + be16_to_cpu(element->id), be16_to_cpu(element->size)); + qemu_log_mask(LOG_GUEST_ERROR, "buf:0x%016"PRIx64" ...\n", + be64_to_cpu(*(uint64_t *)element->value)); +} + +static bool guest_state_request_check(struct guest_state_request *gsr) +{ + int64_t num_elements, len = gsr->len; + struct guest_state_buffer *gsb = gsr->gsb; + struct guest_state_element *element; + struct guest_state_element_type *type; + uint16_t id, size; + + /* gsb->num_elements = 0 == 32 bits long */ + assert(len >= 4); + + num_elements = be32_to_cpu(gsb->num_elements); + element = gsb->elements; + len -= sizeof(gsb->num_elements); + + /* Walk the buffer to validate the length */ + while (num_elements) { + + id = be16_to_cpu(element->id); + size = be16_to_cpu(element->size); + + if (false) { + log_element(element, gsr); + } + /* buffer size too small */ + if (len < 0) { + return false; + } + + type = guest_state_element_type_find(id); + if (!type) { + qemu_log_mask(LOG_GUEST_ERROR, "Element ID %04x unknown\n", id); + log_element(element, gsr); + return false; + } + + if (id == GSB_HV_VCPU_IGNORED_ID) { + goto next_element; + } + + if (size != type->size) { + qemu_log_mask(LOG_GUEST_ERROR, "Size mismatch. Element ID:%04x." 
+ "Size Exp:%i Got:%i\n", id, type->size, size); + log_element(element, gsr); + return false; + } + + if ((type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY) && + (gsr->flags & GUEST_STATE_REQUEST_SET)) { + qemu_log_mask(LOG_GUEST_ERROR, "Trying to set a read-only Element " + "ID:%04x.\n", id); + return false; + } + + if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) { + /* guest wide element type */ + if (!(gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE)) { + qemu_log_mask(LOG_GUEST_ERROR, "trying to set a guest wide " + "Element ID:%04x.\n", id); + return false; + } + } else { + /* thread wide element type */ + if (gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE) { + qemu_log_mask(LOG_GUEST_ERROR, "trying to set a thread wide " + "Element ID:%04x.\n", id); + return false; + } + } +next_element: + element = guest_state_element_next(element, &len, &num_elements); + + } + return true; +} + +static bool is_gsr_invalid(struct guest_state_request *gsr, + struct guest_state_element *element, + struct guest_state_element_type *type) +{ + if ((gsr->flags & GUEST_STATE_REQUEST_SET) && + (*(uint64_t *)(element->value) & ~(type->mask))) { + log_element(element, gsr); + qemu_log_mask(LOG_GUEST_ERROR, "L1 can't set reserved bits " + "(allowed mask: 0x%08"PRIx64")\n", type->mask); + return true; + } + return false; +} + +static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + target_ulong flags = args[0]; + + if (flags) { /* don't handle any flags capabilities for now */ + return H_PARAMETER; + } + + /* P10 capabilities */ + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, + spapr->max_compat_pvr)) { + env->gpr[4] |= H_GUEST_CAPABILITIES_P10_MODE; + } + + /* P9 capabilities */ + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, + spapr->max_compat_pvr)) { + env->gpr[4] |= H_GUEST_CAPABILITIES_P9_MODE; + } + + return H_SUCCESS; +} + +static target_ulong h_guest_set_capabilities(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + target_ulong flags = args[0]; + target_ulong capabilities = args[1]; + env->gpr[4] = 0; + + if (flags) { /* don't handle any flags capabilities for now */ + return H_PARAMETER; + } + + if (capabilities & H_GUEST_CAPABILITIES_COPY_MEM) { + env->gpr[4] = 1; + return H_P2; /* isn't supported */ + } + + /* + * If there are no capabilities configured, set the R5 to the index of + * the first supported Power Processor Mode + */ + if (!capabilities) { + env->gpr[4] = 1; + + /* set R5 to the first supported Power Processor Mode */ + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, + spapr->max_compat_pvr)) { + env->gpr[5] = H_GUEST_CAP_P10_MODE_BMAP; + } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, + spapr->max_compat_pvr)) { + env->gpr[5] = H_GUEST_CAP_P9_MODE_BMAP; + } + + return H_P2; + } + + /* + * If an invalid capability is set, R5 should contain the index of the + * invalid capability bit + */ + if (capabilities & ~H_GUEST_CAP_VALID_MASK) { + env->gpr[4] = 1; + + /* Set R5 to the index of the invalid capability */ + env->gpr[5] = 63 - ctz64(capabilities); + + return H_P2; + } + + if (!spapr->nested.capabilities_set) { + spapr->nested.capabilities_set = true; + spapr->nested.pvr_base = env->spr[SPR_PVR]; + return H_SUCCESS; + } else { + return H_STATE; + } +} + +static void +destroy_guest_helper(gpointer value) +{ + struct SpaprMachineStateNestedGuest 
*guest = value; + g_free(guest->vcpus); + g_free(guest); +} + +static target_ulong h_guest_create(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + target_ulong flags = args[0]; + target_ulong continue_token = args[1]; + uint64_t guestid; + int nguests = 0; + struct SpaprMachineStateNestedGuest *guest; + + if (flags) { /* don't handle any flags for now */ + return H_UNSUPPORTED_FLAG; + } + + if (continue_token != -1) { + return H_P2; + } + + if (!spapr->nested.capabilities_set) { + return H_STATE; + } + + if (!spapr->nested.guests) { + spapr->nested.guests = g_hash_table_new_full(NULL, + NULL, + NULL, + destroy_guest_helper); + } + + nguests = g_hash_table_size(spapr->nested.guests); + + if (nguests == PAPR_NESTED_GUEST_MAX) { + return H_NO_MEM; + } + + /* Lookup for available guestid */ + for (guestid = 1; guestid < PAPR_NESTED_GUEST_MAX; guestid++) { + if (!(g_hash_table_lookup(spapr->nested.guests, + GINT_TO_POINTER(guestid)))) { + break; + } + } + + if (guestid == PAPR_NESTED_GUEST_MAX) { + return H_NO_MEM; + } + + guest = g_try_new0(struct SpaprMachineStateNestedGuest, 1); + if (!guest) { + return H_NO_MEM; + } + + guest->pvr_logical = spapr->nested.pvr_base; + g_hash_table_insert(spapr->nested.guests, GINT_TO_POINTER(guestid), guest); + env->gpr[4] = guestid; + + return H_SUCCESS; +} + +static target_ulong h_guest_delete(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong flags = args[0]; + target_ulong guestid = args[1]; + struct SpaprMachineStateNestedGuest *guest; + + /* + * handle flag deleteAllGuests, if set: + * guestid is ignored and all guests are deleted + * + */ + if (flags & ~H_GUEST_DELETE_ALL_FLAG) { + return H_UNSUPPORTED_FLAG; /* other flag bits reserved */ + } else if (flags & H_GUEST_DELETE_ALL_FLAG) { + g_hash_table_destroy(spapr->nested.guests); + return H_SUCCESS; + } + + guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid)); + if (!guest) { + return H_P2; + } + + g_hash_table_remove(spapr->nested.guests, GINT_TO_POINTER(guestid)); + + return H_SUCCESS; +} + +static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong flags = args[0]; + target_ulong guestid = args[1]; + target_ulong vcpuid = args[2]; + SpaprMachineStateNestedGuest *guest; + + if (flags) { /* don't handle any flags for now */ + return H_UNSUPPORTED_FLAG; + } + + guest = spapr_get_nested_guest(spapr, guestid); + if (!guest) { + return H_P2; + } + + if (vcpuid < guest->nr_vcpus) { + qemu_log_mask(LOG_UNIMP, "vcpuid " TARGET_FMT_ld " already in use.", + vcpuid); + return H_IN_USE; + } + /* linear vcpuid allocation only */ + assert(vcpuid == guest->nr_vcpus); + + if (guest->nr_vcpus >= PAPR_NESTED_GUEST_VCPU_MAX) { + return H_P3; + } + + SpaprMachineStateNestedGuestVcpu *vcpus, *curr_vcpu; + vcpus = g_try_renew(struct SpaprMachineStateNestedGuestVcpu, + guest->vcpus, + guest->nr_vcpus + 1); + if (!vcpus) { + return H_NO_MEM; + } + guest->vcpus = vcpus; + curr_vcpu = &vcpus[guest->nr_vcpus]; + memset(curr_vcpu, 0, sizeof(SpaprMachineStateNestedGuestVcpu)); + + curr_vcpu->enabled = true; + guest->nr_vcpus++; + + return H_SUCCESS; +} + +static target_ulong getset_state(SpaprMachineStateNestedGuest *guest, + uint64_t vcpuid, + struct guest_state_request *gsr) +{ + void *ptr; + uint16_t id; + struct guest_state_element *element; + struct guest_state_element_type *type; 
+ int64_t lenleft, num_elements; + + lenleft = gsr->len; + + if (!guest_state_request_check(gsr)) { + return H_P3; + } + + num_elements = be32_to_cpu(gsr->gsb->num_elements); + element = gsr->gsb->elements; + /* Process the elements */ + while (num_elements) { + type = NULL; + /* log_element(element, gsr); */ + + id = be16_to_cpu(element->id); + if (id == GSB_HV_VCPU_IGNORED_ID) { + goto next_element; + } + + type = guest_state_element_type_find(id); + assert(type); + + /* Get pointer to guest data to get/set */ + if (type->location && type->copy) { + ptr = type->location(guest, vcpuid); + assert(ptr); + if (!~(type->mask) && is_gsr_invalid(gsr, element, type)) { + return H_INVALID_ELEMENT_VALUE; + } + type->copy(ptr + type->offset, element->value, + gsr->flags & GUEST_STATE_REQUEST_SET ? true : false); + } + +next_element: + element = guest_state_element_next(element, &lenleft, &num_elements); + } + + return H_SUCCESS; +} + +static target_ulong map_and_getset_state(PowerPCCPU *cpu, + SpaprMachineStateNestedGuest *guest, + uint64_t vcpuid, + struct guest_state_request *gsr) +{ + target_ulong rc; + int64_t len; + bool is_write; + + len = gsr->len; + /* only get_state would require write access to the provided buffer */ + is_write = (gsr->flags & GUEST_STATE_REQUEST_SET) ? false : true; + gsr->gsb = address_space_map(CPU(cpu)->as, gsr->buf, (uint64_t *)&len, + is_write, MEMTXATTRS_UNSPECIFIED); + if (!gsr->gsb) { + rc = H_P3; + goto out1; + } + + if (len != gsr->len) { + rc = H_P3; + goto out1; + } + + rc = getset_state(guest, vcpuid, gsr); + +out1: + address_space_unmap(CPU(cpu)->as, gsr->gsb, len, is_write, len); + return rc; +} + +static target_ulong h_guest_getset_state(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong *args, + bool set) +{ + target_ulong flags = args[0]; + target_ulong lpid = args[1]; + target_ulong vcpuid = args[2]; + target_ulong buf = args[3]; + target_ulong buflen = args[4]; + struct guest_state_request gsr; + SpaprMachineStateNestedGuest *guest; + + guest = spapr_get_nested_guest(spapr, lpid); + if (!guest) { + return H_P2; + } + gsr.buf = buf; + assert(buflen <= GSB_MAX_BUF_SIZE); + gsr.len = buflen; + gsr.flags = 0; + if (flags & H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) { + gsr.flags |= GUEST_STATE_REQUEST_GUEST_WIDE; + } + if (flags & ~H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) { + return H_PARAMETER; /* flag not supported yet */ + } + + if (set) { + gsr.flags |= GUEST_STATE_REQUEST_SET; + } + return map_and_getset_state(cpu, guest, vcpuid, &gsr); +} + +static target_ulong h_guest_set_state(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + return h_guest_getset_state(cpu, spapr, args, true); +} + +static target_ulong h_guest_get_state(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + return h_guest_getset_state(cpu, spapr, args, false); +} + +static void exit_nested_store_l2(PowerPCCPU *cpu, int excp, + SpaprMachineStateNestedGuestVcpu *vcpu) +{ + CPUPPCState *env = &cpu->env; + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + target_ulong now, hdar, hdsisr, asdr; + + assert(sizeof(env->gpr) == sizeof(vcpu->state.gpr)); /* sanity check */ + + now = cpu_ppc_load_tbl(env); /* L2 timebase */ + now -= vcpu->tb_offset; /* L1 timebase */ + vcpu->state.dec_expiry_tb = now - cpu_ppc_load_decr(env); + cpu_ppc_store_decr(env, spapr_cpu->nested_host_state->dec_expiry_tb - now); + /* backup hdar, hdsisr, asdr if reqd later below */ + hdar = vcpu->state.hdar; + hdsisr = 
vcpu->state.hdsisr; + asdr = vcpu->state.asdr; + + nested_save_state(&vcpu->state, cpu); + + if (excp == POWERPC_EXCP_MCHECK || + excp == POWERPC_EXCP_RESET || + excp == POWERPC_EXCP_SYSCALL) { + vcpu->state.nip = env->spr[SPR_SRR0]; + vcpu->state.msr = env->spr[SPR_SRR1] & env->msr_mask; + } else { + vcpu->state.nip = env->spr[SPR_HSRR0]; + vcpu->state.msr = env->spr[SPR_HSRR1] & env->msr_mask; + } + + /* hdar, hdsisr, asdr should be retained unless certain exceptions */ + if ((excp != POWERPC_EXCP_HDSI) && (excp != POWERPC_EXCP_HISI)) { + vcpu->state.asdr = asdr; + } else if (excp != POWERPC_EXCP_HDSI) { + vcpu->state.hdar = hdar; + vcpu->state.hdsisr = hdsisr; + } +} + +static int get_exit_ids(uint64_t srr0, uint16_t ids[16]) +{ + int nr; + + switch (srr0) { + case 0xc00: + nr = 10; + ids[0] = GSB_VCPU_GPR3; + ids[1] = GSB_VCPU_GPR4; + ids[2] = GSB_VCPU_GPR5; + ids[3] = GSB_VCPU_GPR6; + ids[4] = GSB_VCPU_GPR7; + ids[5] = GSB_VCPU_GPR8; + ids[6] = GSB_VCPU_GPR9; + ids[7] = GSB_VCPU_GPR10; + ids[8] = GSB_VCPU_GPR11; + ids[9] = GSB_VCPU_GPR12; + break; + case 0xe00: + nr = 5; + ids[0] = GSB_VCPU_SPR_HDAR; + ids[1] = GSB_VCPU_SPR_HDSISR; + ids[2] = GSB_VCPU_SPR_ASDR; + ids[3] = GSB_VCPU_SPR_NIA; + ids[4] = GSB_VCPU_SPR_MSR; + break; + case 0xe20: + nr = 4; + ids[0] = GSB_VCPU_SPR_HDAR; + ids[1] = GSB_VCPU_SPR_ASDR; + ids[2] = GSB_VCPU_SPR_NIA; + ids[3] = GSB_VCPU_SPR_MSR; + break; + case 0xe40: + nr = 3; + ids[0] = GSB_VCPU_SPR_HEIR; + ids[1] = GSB_VCPU_SPR_NIA; + ids[2] = GSB_VCPU_SPR_MSR; + break; + case 0xf80: + nr = 3; + ids[0] = GSB_VCPU_SPR_HFSCR; + ids[1] = GSB_VCPU_SPR_NIA; + ids[2] = GSB_VCPU_SPR_MSR; + break; + default: + nr = 0; + break; + } + + return nr; +} + +static void exit_process_output_buffer(PowerPCCPU *cpu, + SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid, + target_ulong *r3) +{ + SpaprMachineStateNestedGuestVcpu *vcpu = &guest->vcpus[vcpuid]; + struct guest_state_request gsr; + struct guest_state_buffer *gsb; + struct guest_state_element *element; + struct guest_state_element_type *type; + int exit_id_count = 0; + uint16_t exit_cause_ids[16]; + hwaddr len; + + len = vcpu->runbufout.size; + gsb = address_space_map(CPU(cpu)->as, vcpu->runbufout.addr, &len, true, + MEMTXATTRS_UNSPECIFIED); + if (!gsb || len != vcpu->runbufout.size) { + address_space_unmap(CPU(cpu)->as, gsb, len, true, len); + *r3 = H_P2; + return; + } + + exit_id_count = get_exit_ids(*r3, exit_cause_ids); + + /* Create a buffer of elements to send back */ + gsb->num_elements = cpu_to_be32(exit_id_count); + element = gsb->elements; + for (int i = 0; i < exit_id_count; i++) { + type = guest_state_element_type_find(exit_cause_ids[i]); + assert(type); + element->id = cpu_to_be16(exit_cause_ids[i]); + element->size = cpu_to_be16(type->size); + element = guest_state_element_next(element, NULL, NULL); + } + gsr.gsb = gsb; + gsr.len = VCPU_OUT_BUF_MIN_SZ; + gsr.flags = 0; /* get + never guest wide */ + getset_state(guest, vcpuid, &gsr); + + address_space_unmap(CPU(cpu)->as, gsb, len, true, len); + return; +} + +static +void spapr_exit_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, int excp) +{ + CPUPPCState *env = &cpu->env; + CPUState *cs = CPU(cpu); + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */ + target_ulong lpid = 0, vcpuid = 0; + struct SpaprMachineStateNestedGuestVcpu *vcpu = NULL; + struct SpaprMachineStateNestedGuest *guest = NULL; + + lpid = spapr_cpu->nested_host_state->gpr[5]; + vcpuid = 
spapr_cpu->nested_host_state->gpr[6]; + guest = spapr_get_nested_guest(spapr, lpid); + assert(guest); + spapr_nested_vcpu_check(guest, vcpuid, false); + vcpu = &guest->vcpus[vcpuid]; + + exit_nested_store_l2(cpu, excp, vcpu); + /* do the output buffer for run_vcpu*/ + exit_process_output_buffer(cpu, guest, vcpuid, &r3_return); + + assert(env->spr[SPR_LPIDR] != 0); + nested_load_state(cpu, spapr_cpu->nested_host_state); + cpu_ppc_decrease_tb_by_offset(env, vcpu->tb_offset); + env->gpr[3] = H_SUCCESS; + env->gpr[4] = r3_return; + nested_post_load_state(env, cs); + cpu_ppc_hdecr_exit(env); + + spapr_cpu->in_nested = false; + g_free(spapr_cpu->nested_host_state); + spapr_cpu->nested_host_state = NULL; +} + +void spapr_exit_nested(PowerPCCPU *cpu, int excp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + assert(spapr_cpu->in_nested); + if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) { + spapr_exit_nested_hv(cpu, excp); + } else if (spapr_nested_api(spapr) == NESTED_API_PAPR) { + spapr_exit_nested_papr(spapr, cpu, excp); + } else { + g_assert_not_reached(); + } +} + +static void nested_papr_load_l2(PowerPCCPU *cpu, + CPUPPCState *env, + SpaprMachineStateNestedGuestVcpu *vcpu, + target_ulong now) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + target_ulong lpcr, lpcr_mask, hdec; + lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER; + + assert(vcpu); + assert(sizeof(env->gpr) == sizeof(vcpu->state.gpr)); + nested_load_state(cpu, &vcpu->state); + lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | + (vcpu->state.lpcr & lpcr_mask); + lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE; + lpcr &= ~LPCR_LPES0; + env->spr[SPR_LPCR] = lpcr & pcc->lpcr_mask; + + hdec = vcpu->hdecr_expiry_tb - now; + cpu_ppc_store_decr(env, vcpu->state.dec_expiry_tb - now); + cpu_ppc_hdecr_init(env); + cpu_ppc_store_hdecr(env, hdec); + + cpu_ppc_increase_tb_by_offset(env, vcpu->tb_offset); +} + +static void nested_papr_run_vcpu(PowerPCCPU *cpu, + uint64_t lpid, + SpaprMachineStateNestedGuestVcpu *vcpu) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + CPUPPCState *env = &cpu->env; + CPUState *cs = CPU(cpu); + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + target_ulong now = cpu_ppc_load_tbl(env); + + assert(env->spr[SPR_LPIDR] == 0); + assert(spapr->nested.api); /* ensure API version is initialized */ + spapr_cpu->nested_host_state = g_try_new(struct nested_ppc_state, 1); + assert(spapr_cpu->nested_host_state); + nested_save_state(spapr_cpu->nested_host_state, cpu); + spapr_cpu->nested_host_state->dec_expiry_tb = now - cpu_ppc_load_decr(env); + nested_papr_load_l2(cpu, env, vcpu, now); + env->spr[SPR_LPIDR] = lpid; /* post load l2 */ + + spapr_cpu->in_nested = true; + nested_post_load_state(env, cs); +} + +static target_ulong h_guest_run_vcpu(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + target_ulong flags = args[0]; + target_ulong lpid = args[1]; + target_ulong vcpuid = args[2]; + struct SpaprMachineStateNestedGuestVcpu *vcpu; + struct guest_state_request gsr; + SpaprMachineStateNestedGuest *guest; + target_ulong rc; + + if (flags) /* don't handle any flags for now */ + return H_PARAMETER; + + guest = spapr_get_nested_guest(spapr, lpid); + if (!guest) { + return H_P2; + } + if (!spapr_nested_vcpu_check(guest, vcpuid, true)) { + return H_P3; + } + + if (guest->parttbl[0] == 0) { + /* At least need a partition scoped 
radix tree */ + return H_NOT_AVAILABLE; + } + + vcpu = &guest->vcpus[vcpuid]; + + /* Read run_vcpu input buffer to update state */ + gsr.buf = vcpu->runbufin.addr; + gsr.len = vcpu->runbufin.size; + gsr.flags = GUEST_STATE_REQUEST_SET; /* Thread wide + writing */ + rc = map_and_getset_state(cpu, guest, vcpuid, &gsr); + if (rc == H_SUCCESS) { + nested_papr_run_vcpu(cpu, lpid, vcpu); + } else { + env->gpr[3] = rc; + } + return env->gpr[3]; +} + +void spapr_register_nested_hv(void) { spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl); spapr_register_hypercall(KVMPPC_H_ENTER_NESTED, h_enter_nested); spapr_register_hypercall(KVMPPC_H_TLB_INVALIDATE, h_tlb_invalidate); spapr_register_hypercall(KVMPPC_H_COPY_TOFROM_GUEST, h_copy_tofrom_guest); } + +void spapr_unregister_nested_hv(void) +{ + spapr_unregister_hypercall(KVMPPC_H_SET_PARTITION_TABLE); + spapr_unregister_hypercall(KVMPPC_H_ENTER_NESTED); + spapr_unregister_hypercall(KVMPPC_H_TLB_INVALIDATE); + spapr_unregister_hypercall(KVMPPC_H_COPY_TOFROM_GUEST); +} + +void spapr_register_nested_papr(void) +{ + spapr_register_hypercall(H_GUEST_GET_CAPABILITIES, + h_guest_get_capabilities); + spapr_register_hypercall(H_GUEST_SET_CAPABILITIES, + h_guest_set_capabilities); + spapr_register_hypercall(H_GUEST_CREATE, h_guest_create); + spapr_register_hypercall(H_GUEST_DELETE, h_guest_delete); + spapr_register_hypercall(H_GUEST_CREATE_VCPU, h_guest_create_vcpu); + spapr_register_hypercall(H_GUEST_SET_STATE, h_guest_set_state); + spapr_register_hypercall(H_GUEST_GET_STATE, h_guest_get_state); + spapr_register_hypercall(H_GUEST_RUN_VCPU, h_guest_run_vcpu); +} + +void spapr_unregister_nested_papr(void) +{ + spapr_unregister_hypercall(H_GUEST_GET_CAPABILITIES); + spapr_unregister_hypercall(H_GUEST_SET_CAPABILITIES); + spapr_unregister_hypercall(H_GUEST_CREATE); + spapr_unregister_hypercall(H_GUEST_DELETE); + spapr_unregister_hypercall(H_GUEST_CREATE_VCPU); + spapr_unregister_hypercall(H_GUEST_SET_STATE); + spapr_unregister_hypercall(H_GUEST_GET_STATE); + spapr_unregister_hypercall(H_GUEST_RUN_VCPU); +} + #else void spapr_exit_nested(PowerPCCPU *cpu, int excp) { g_assert_not_reached(); } -void spapr_register_nested(void) +void spapr_register_nested_hv(void) { /* DO NOTHING */ } + +void spapr_unregister_nested_hv(void) +{ + /* DO NOTHING */ +} + +bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry) +{ + return false; +} + +bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry) +{ + return false; +} + +void spapr_register_nested_papr(void) +{ + /* DO NOTHING */ +} + +void spapr_unregister_nested_papr(void) +{ + /* DO NOTHING */ +} + +void spapr_nested_gsb_init(void) +{ + /* DO NOTHING */ +} + #endif diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c index ad7afe75444..7d2dfe5e3d2 100644 --- a/hw/ppc/spapr_nvdimm.c +++ b/hw/ppc/spapr_nvdimm.c @@ -528,7 +528,7 @@ static const VMStateDescription vmstate_spapr_nvdimm_flush_state = { .name = "spapr_nvdimm_flush_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(continue_token, SpaprNVDIMMDeviceFlushState), VMSTATE_INT64(hcall_ret, SpaprNVDIMMDeviceFlushState), VMSTATE_UINT32(drcidx, SpaprNVDIMMDeviceFlushState), @@ -541,7 +541,7 @@ const VMStateDescription vmstate_spapr_nvdimm_states = { .version_id = 1, .minimum_version_id = 1, .post_load = spapr_nvdimm_flush_post_load, - .fields = 
(VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(hcall_flush_required, SpaprNVDIMMDevice), VMSTATE_UINT64(nvdimm_flush_token, SpaprNVDIMMDevice), VMSTATE_QLIST_V(completed_nvdimm_flush_states, SpaprNVDIMMDevice, 1, diff --git a/hw/ppc/spapr_ovec.c b/hw/ppc/spapr_ovec.c index b2567caa5cf..88e29536aa7 100644 --- a/hw/ppc/spapr_ovec.c +++ b/hw/ppc/spapr_ovec.c @@ -36,7 +36,7 @@ const VMStateDescription vmstate_spapr_ovec = { .name = "spapr_option_vector", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BITMAP(bitmap, SpaprOptionVector, 1, bitmap_size), VMSTATE_END_OF_LIST() } diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 6760823e13f..25e0295d6fd 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -2115,7 +2115,7 @@ static const VMStateDescription vmstate_spapr_pci_lsi = { .name = "spapr_pci/lsi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_EQUAL(irq, SpaprPciLsi, NULL), VMSTATE_END_OF_LIST() @@ -2126,7 +2126,7 @@ static const VMStateDescription vmstate_spapr_pci_msi = { .name = "spapr_pci/msi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(key, SpaprPciMsiMig), VMSTATE_UINT32(value.first_irq, SpaprPciMsiMig), VMSTATE_UINT32(value.num, SpaprPciMsiMig), @@ -2216,7 +2216,7 @@ static const VMStateDescription vmstate_spapr_pci = { .pre_save = spapr_pci_pre_save, .post_save = spapr_pci_post_save, .post_load = spapr_pci_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_EQUAL(buid, SpaprPhbState, NULL), VMSTATE_UINT32_TEST(mig_liobn, SpaprPhbState, pre_2_8_migration), VMSTATE_UINT64_TEST(mig_mem_win_addr, SpaprPhbState, pre_2_8_migration), diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c index f283f7e38d6..76b2a3487b5 100644 --- a/hw/ppc/spapr_pci_vfio.c +++ b/hw/ppc/spapr_pci_vfio.c @@ -26,10 +26,12 @@ #include "hw/pci/pci_device.h" #include "hw/vfio/vfio-common.h" #include "qemu/error-report.h" +#include CONFIG_DEVICES /* CONFIG_VFIO_PCI */ /* * Interfaces for IBM EEH (Enhanced Error Handling) */ +#ifdef CONFIG_VFIO_PCI static bool vfio_eeh_container_ok(VFIOContainer *container) { /* @@ -84,27 +86,27 @@ static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op) static VFIOContainer *vfio_eeh_as_container(AddressSpace *as) { VFIOAddressSpace *space = vfio_get_address_space(as); - VFIOContainer *container = NULL; + VFIOContainerBase *bcontainer = NULL; if (QLIST_EMPTY(&space->containers)) { /* No containers to act on */ goto out; } - container = QLIST_FIRST(&space->containers); + bcontainer = QLIST_FIRST(&space->containers); - if (QLIST_NEXT(container, next)) { + if (QLIST_NEXT(bcontainer, next)) { /* * We don't yet have logic to synchronize EEH state across * multiple containers */ - container = NULL; + bcontainer = NULL; goto out; } out: vfio_put_address_space(space); - return container; + return container_of(bcontainer, VFIOContainer, bcontainer); } static bool vfio_eeh_as_ok(AddressSpace *as) @@ -314,3 +316,37 @@ int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb) return RTAS_OUT_SUCCESS; } + +#else + +bool spapr_phb_eeh_available(SpaprPhbState *sphb) +{ + return false; +} + +void spapr_phb_vfio_reset(DeviceState *qdev) +{ +} + +int spapr_phb_vfio_eeh_set_option(SpaprPhbState *sphb, + unsigned int addr, int option) +{ + return RTAS_OUT_NOT_SUPPORTED; +} + 
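The vfio_eeh_as_container() hunk above now walks the address space's container list as VFIOContainerBase objects and only converts back to the legacy VFIOContainer at the end via container_of(). As a rough standalone sketch of that idiom (the Base/Derived types and the main() harness are invented for illustration; QEMU's headers provide the real container_of()):

#include <stddef.h>
#include <stdio.h>

/* Stand-in for QEMU's container_of(): recover the enclosing struct from a member. */
#define container_of(ptr, type, member) \
    ((type *) ((char *) (ptr) - offsetof(type, member)))

typedef struct Base {
    int id;
} Base;

typedef struct Derived {
    Base base;          /* embedded base object, like VFIOContainer's bcontainer */
    const char *name;   /* state only the derived type knows about */
} Derived;

int main(void)
{
    Derived d = { .base = { .id = 42 }, .name = "eeh-capable" };
    Base *b = &d.base;                  /* generic code sees only the base type */

    /* Convert the embedded-base pointer back to the enclosing Derived. */
    Derived *back = container_of(b, Derived, base);
    printf("%d %s\n", back->base.id, back->name);
    return 0;
}

The point of the pattern is that common code can keep lists of the base type while a backend that knows which concrete type it registered can recover it without carrying a separate back-pointer.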
+int spapr_phb_vfio_eeh_get_state(SpaprPhbState *sphb, int *state) +{ + return RTAS_OUT_NOT_SUPPORTED; +} + +int spapr_phb_vfio_eeh_reset(SpaprPhbState *sphb, int option) +{ + return RTAS_OUT_NOT_SUPPORTED; +} + +int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb) +{ + return RTAS_OUT_NOT_SUPPORTED; +} + +#endif /* CONFIG_VFIO_PCI */ diff --git a/hw/ppc/spapr_rng.c b/hw/ppc/spapr_rng.c index df5c4b96873..c2fda7ad209 100644 --- a/hw/ppc/spapr_rng.c +++ b/hw/ppc/spapr_rng.c @@ -82,9 +82,9 @@ static target_ulong h_random(PowerPCCPU *cpu, SpaprMachineState *spapr, while (hrdata.received < 8) { rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received, random_recv, &hrdata); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_sem_wait(&hrdata.sem); - qemu_mutex_lock_iothread(); + bql_lock(); } qemu_sem_destroy(&hrdata.sem); diff --git a/hw/ppc/spapr_rtc.c b/hw/ppc/spapr_rtc.c index d55b4b0c50d..deb3ea4e495 100644 --- a/hw/ppc/spapr_rtc.c +++ b/hw/ppc/spapr_rtc.c @@ -157,7 +157,7 @@ static const VMStateDescription vmstate_spapr_rtc = { .name = "spapr/rtc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(ns_offset, SpaprRtcState), VMSTATE_END_OF_LIST() }, diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_vhyp_mmu.c similarity index 97% rename from hw/ppc/spapr_softmmu.c rename to hw/ppc/spapr_vhyp_mmu.c index 278666317ef..b3dd8b3a59d 100644 --- a/hw/ppc/spapr_softmmu.c +++ b/hw/ppc/spapr_vhyp_mmu.c @@ -1,3 +1,12 @@ +/* + * MMU hypercalls for the sPAPR (pseries) vHyp hypervisor that is used by TCG + * + * Copyright (c) 2004-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * Copyright (c) 2010 David Gibson, IBM Corporation. + * + * SPDX-License-Identifier: MIT + */ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "qemu/memalign.h" @@ -334,7 +343,7 @@ static void *hpt_prepare_thread(void *opaque) pending->ret = H_NO_MEM; } - qemu_mutex_lock_iothread(); + bql_lock(); if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) { /* Ready to go */ @@ -344,7 +353,7 @@ static void *hpt_prepare_thread(void *opaque) free_pending_hpt(pending); } - qemu_mutex_unlock_iothread(); + bql_unlock(); return NULL; } @@ -369,7 +378,7 @@ static void cancel_hpt_prepare(SpaprMachineState *spapr) free_pending_hpt(pending); } -target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu, +target_ulong vhyp_mmu_resize_hpt_prepare(PowerPCCPU *cpu, SpaprMachineState *spapr, target_ulong shift) { @@ -553,7 +562,7 @@ static int rehash_hpt(PowerPCCPU *cpu, return H_SUCCESS; } -target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu, +target_ulong vhyp_mmu_resize_hpt_commit(PowerPCCPU *cpu, SpaprMachineState *spapr, target_ulong flags, target_ulong shift) diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c index f8ef2b6fa87..3221874848d 100644 --- a/hw/ppc/spapr_vio.c +++ b/hw/ppc/spapr_vio.c @@ -616,7 +616,7 @@ const VMStateDescription vmstate_spapr_vio = { .name = "spapr_vio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Sanity check */ VMSTATE_UINT32_EQUAL(reg, SpaprVioDevice, NULL), VMSTATE_UINT32_EQUAL(irq, SpaprVioDevice, NULL), diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events index f670e8906cc..bf29bbfd4b4 100644 --- a/hw/ppc/trace-events +++ b/hw/ppc/trace-events @@ -95,6 +95,10 @@ vof_write(uint32_t ih, unsigned cb, const char *msg) "ih=0x%x [%u] \"%s\"" vof_avail(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx64" 
size=0x%"PRIx64 vof_claimed(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx64" size=0x%"PRIx64 +# pnv_chiptod.c +pnv_chiptod_xscom_read(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64 +pnv_chiptod_xscom_write(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64 + # pnv_sbe.c pnv_sbe_xscom_ctrl_read(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64 pnv_sbe_xscom_ctrl_write(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64 @@ -146,18 +150,6 @@ rs6000mc_size_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" rs6000mc_size_write(uint32_t addr, uint32_t val) "write addr=0x%x val=0x%x" rs6000mc_parity_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" -# ppc4xx_pci.c -ppc4xx_pci_map_irq(int32_t devfn, int irq_num, int slot) "devfn 0x%x irq %d -> %d" -ppc4xx_pci_set_irq(int irq_num) "PCI irq %d" - -# ppc440_pcix.c -ppc440_pcix_map_irq(int32_t devfn, int irq_num, int slot) "devfn 0x%x irq %d -> %d" -ppc440_pcix_set_irq(int irq_num) "PCI irq %d" -ppc440_pcix_update_pim(int idx, uint64_t size, uint64_t la) "Added window %d of size=0x%" PRIx64 " to CPU=0x%" PRIx64 -ppc440_pcix_update_pom(int idx, uint32_t size, uint64_t la, uint64_t pcia) "Added window %d of size=0x%x from CPU=0x%" PRIx64 " to PCI=0x%" PRIx64 -ppc440_pcix_reg_read(uint64_t addr, uint32_t val) "addr 0x%" PRIx64 " = 0x%" PRIx32 -ppc440_pcix_reg_write(uint64_t addr, uint32_t val, uint32_t size) "addr 0x%" PRIx64 " = 0x%" PRIx32 " size 0x%" PRIx32 - # ppc405_boards.c opba_readb(uint64_t addr, uint32_t val) "addr 0x%" PRIx64 " = 0x%" PRIx32 opba_writeb(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " = 0x%" PRIx64 diff --git a/hw/remote/meson.build b/hw/remote/meson.build index a3aa29aaf17..41eb4971d98 100644 --- a/hw/remote/meson.build +++ b/hw/remote/meson.build @@ -11,7 +11,6 @@ remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('iommu.c')) remote_ss.add(when: 'CONFIG_VFIO_USER_SERVER', if_true: libvfio_user_dep) remote_ss.add(when: 'CONFIG_VFIO_USER_SERVER', if_true: files('vfio-user-obj.c'), if_false: files('vfio-user-obj-stub.c')) -remote_ss.add(when: 'CONFIG_ALL', if_true: files('vfio-user-obj-stub.c')) specific_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('memory.c')) specific_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('proxy-memory-listener.c')) diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c index 9bd98e82197..4394dc4d821 100644 --- a/hw/remote/mpqemu-link.c +++ b/hw/remote/mpqemu-link.c @@ -33,7 +33,7 @@ */ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) { - bool iolock = qemu_mutex_iothread_locked(); + bool drop_bql = bql_locked(); bool iothread = qemu_in_iothread(); struct iovec send[2] = {}; int *fds = NULL; @@ -58,13 +58,13 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) assert(qemu_in_coroutine() || !iothread); /* - * Skip unlocking/locking iothread lock when the IOThread is running + * Skip unlocking/locking BQL when the IOThread is running * in co-routine context. Co-routine context is asserted above * for IOThread case. * Also skip lock handling while in a co-routine in the main context. 
*/ - if (iolock && !iothread && !qemu_in_coroutine()) { - qemu_mutex_unlock_iothread(); + if (drop_bql && !iothread && !qemu_in_coroutine()) { + bql_unlock(); } if (!qio_channel_writev_full_all(ioc, send, G_N_ELEMENTS(send), @@ -74,9 +74,9 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) trace_mpqemu_send_io_error(msg->cmd, msg->size, nfds); } - if (iolock && !iothread && !qemu_in_coroutine()) { + if (drop_bql && !iothread && !qemu_in_coroutine()) { /* See above comment why skip locking here. */ - qemu_mutex_lock_iothread(); + bql_lock(); } return ret; @@ -96,7 +96,7 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds, size_t *nfds, Error **errp) { struct iovec iov = { .iov_base = buf, .iov_len = len }; - bool iolock = qemu_mutex_iothread_locked(); + bool drop_bql = bql_locked(); bool iothread = qemu_in_iothread(); int ret = -1; @@ -106,14 +106,14 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds, */ assert(qemu_in_coroutine() || !iothread); - if (iolock && !iothread && !qemu_in_coroutine()) { - qemu_mutex_unlock_iothread(); + if (drop_bql && !iothread && !qemu_in_coroutine()) { + bql_unlock(); } ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, errp); - if (iolock && !iothread && !qemu_in_coroutine()) { - qemu_mutex_lock_iothread(); + if (drop_bql && !iothread && !qemu_in_coroutine()) { + bql_lock(); } return (ret <= 0) ? ret : iov.iov_len; diff --git a/hw/remote/remote-obj.c b/hw/remote/remote-obj.c index 65b6f7cc863..dc27cc8da1f 100644 --- a/hw/remote/remote-obj.c +++ b/hw/remote/remote-obj.c @@ -49,6 +49,7 @@ struct RemoteObject { static void remote_object_set_fd(Object *obj, const char *str, Error **errp) { + ERRP_GUARD(); RemoteObject *o = REMOTE_OBJECT(obj); int fd = -1; diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c index 8b10c32a3c6..d9b879e056b 100644 --- a/hw/remote/vfio-user-obj.c +++ b/hw/remote/vfio-user-obj.c @@ -400,7 +400,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset, } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); release_lock = false; } diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig index b6a5eb4452e..5d644eb7b16 100644 --- a/hw/riscv/Kconfig +++ b/hw/riscv/Kconfig @@ -41,10 +41,12 @@ config RISCV_VIRT select RISCV_IMSIC select SIFIVE_PLIC select SIFIVE_TEST + select SMBIOS select VIRTIO_MMIO select FW_CFG_DMA select PLATFORM_BUS select ACPI + select ACPI_PCI config SHAKTI_C bool diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c index 0ffca05189f..09878e722cf 100644 --- a/hw/riscv/boot.c +++ b/hw/riscv/boot.c @@ -36,7 +36,8 @@ bool riscv_is_32bit(RISCVHartArrayState *harts) { - return harts->harts[0].env.misa_mxl_max == MXL_RV32; + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(&harts->harts[0]); + return mcc->misa_mxl_max == MXL_RV32; } /* @@ -188,13 +189,13 @@ static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry) * kernel is uncompressed it will not clobber the initrd. However * on boards without much RAM we must ensure that we still leave * enough room for a decent sized initrd, and on boards with large - * amounts of RAM we must avoid the initrd being so far up in RAM - * that it is outside lowmem and inaccessible to the kernel. - * So for boards with less than 256MB of RAM we put the initrd - * halfway into RAM, and for boards with 256MB of RAM or more we put - * the initrd at 128MB. + * amounts of RAM, we put the initrd at 512MB to allow large kernels + * to boot. 
+ * So for boards with less than 1GB of RAM we put the initrd + * halfway into RAM, and for boards with 1GB of RAM or more we put + * the initrd at 512MB. */ - start = kernel_entry + MIN(mem_size / 2, 128 * MiB); + start = kernel_entry + MIN(mem_size / 2, 512 * MiB); size = load_ramdisk(filename, start, mem_size - start); if (size == -1) { diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c index b775aa89464..7725dfbde55 100644 --- a/hw/riscv/microchip_pfsoc.c +++ b/hw/riscv/microchip_pfsoc.c @@ -202,7 +202,6 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) MemoryRegion *envm_data = g_new(MemoryRegion, 1); MemoryRegion *qspi_xip_mem = g_new(MemoryRegion, 1); char *plic_hart_config; - NICInfo *nd; int i; sysbus_realize(SYS_BUS_DEVICE(&s->e_cpus), &error_abort); @@ -411,17 +410,8 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) memmap[MICROCHIP_PFSOC_USB].size); /* GEMs */ - - nd = &nd_table[0]; - if (nd->used) { - qemu_check_nic_model(nd, TYPE_CADENCE_GEM); - qdev_set_nic_properties(DEVICE(&s->gem0), nd); - } - nd = &nd_table[1]; - if (nd->used) { - qemu_check_nic_model(nd, TYPE_CADENCE_GEM); - qdev_set_nic_properties(DEVICE(&s->gem1), nd); - } + qemu_configure_nic_device(DEVICE(&s->gem0), true, NULL); + qemu_configure_nic_device(DEVICE(&s->gem1), true, NULL); object_property_set_int(OBJECT(&s->gem0), "revision", GEM_REVISION, errp); object_property_set_int(OBJECT(&s->gem0), "phy-addr", 8, errp); diff --git a/hw/riscv/numa.c b/hw/riscv/numa.c index d319aefb451..cf686f4ff1e 100644 --- a/hw/riscv/numa.c +++ b/hw/riscv/numa.c @@ -167,7 +167,8 @@ void riscv_socket_fdt_write_id(const MachineState *ms, const char *node_name, void riscv_socket_fdt_write_distance_matrix(const MachineState *ms) { int i, j, idx; - uint32_t *dist_matrix, dist_matrix_size; + g_autofree uint32_t *dist_matrix = NULL; + uint32_t dist_matrix_size; if (numa_enabled(ms) && ms->numa_state->have_numa_distance) { dist_matrix_size = riscv_socket_count(ms) * riscv_socket_count(ms); @@ -189,7 +190,6 @@ void riscv_socket_fdt_write_distance_matrix(const MachineState *ms) "numa-distance-map-v1"); qemu_fdt_setprop(ms->fdt, "/distance-map", "distance-matrix", dist_matrix, dist_matrix_size); - g_free(dist_matrix); } } diff --git a/hw/riscv/shakti_c.c b/hw/riscv/shakti_c.c index 12ea74b0324..3888034c2b9 100644 --- a/hw/riscv/shakti_c.c +++ b/hw/riscv/shakti_c.c @@ -28,7 +28,6 @@ #include "exec/address-spaces.h" #include "hw/riscv/boot.h" - static const struct MemmapEntry { hwaddr base; hwaddr size; @@ -47,12 +46,6 @@ static void shakti_c_machine_state_init(MachineState *mstate) ShaktiCMachineState *sms = RISCV_SHAKTI_MACHINE(mstate); MemoryRegion *system_memory = get_system_memory(); - /* Allow only Shakti C CPU for this platform */ - if (strcmp(mstate->cpu_type, TYPE_RISCV_CPU_SHAKTI_C) != 0) { - error_report("This board can only be used with Shakti C CPU"); - exit(1); - } - /* Initialize SoC */ object_initialize_child(OBJECT(mstate), "soc", &sms->soc, TYPE_RISCV_SHAKTI_SOC); @@ -82,9 +75,15 @@ static void shakti_c_machine_instance_init(Object *obj) static void shakti_c_machine_class_init(ObjectClass *klass, void *data) { MachineClass *mc = MACHINE_CLASS(klass); + static const char * const valid_cpu_types[] = { + RISCV_CPU_TYPE_NAME("shakti-c"), + NULL + }; + mc->desc = "RISC-V Board compatible with Shakti SDK"; mc->init = shakti_c_machine_state_init; mc->default_cpu_type = TYPE_RISCV_CPU_SHAKTI_C; + mc->valid_cpu_types = valid_cpu_types; mc->default_ram_id = 
"riscv.shakti.c.ram"; } diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c index ec76dce6c95..af5f923f541 100644 --- a/hw/riscv/sifive_u.c +++ b/hw/riscv/sifive_u.c @@ -171,7 +171,6 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, int cpu_phandle = phandle++; nodename = g_strdup_printf("/cpus/cpu@%d", cpu); char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu); - char *isa; qemu_fdt_add_subnode(fdt, nodename); /* cpu 0 is the management hart that does not have mmu */ if (cpu != 0) { @@ -180,11 +179,10 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, } else { qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48"); } - isa = riscv_isa_string(&s->soc.u_cpus.harts[cpu - 1]); + riscv_isa_write_fdt(&s->soc.u_cpus.harts[cpu - 1], fdt, nodename); } else { - isa = riscv_isa_string(&s->soc.e_cpus.harts[0]); + riscv_isa_write_fdt(&s->soc.e_cpus.harts[0], fdt, nodename); } - qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa); qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv"); qemu_fdt_setprop_string(fdt, nodename, "status", "okay"); qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu); @@ -194,7 +192,6 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc"); qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0); qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1); - g_free(isa); g_free(intc); g_free(nodename); } @@ -789,7 +786,6 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp) MemoryRegion *l2lim_mem = g_new(MemoryRegion, 1); char *plic_hart_config; int i, j; - NICInfo *nd = &nd_table[0]; qdev_prop_set_uint32(DEVICE(&s->u_cpus), "num-harts", ms->smp.cpus - 1); qdev_prop_set_uint32(DEVICE(&s->u_cpus), "hartid-base", 1); @@ -893,11 +889,7 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp) } sysbus_mmio_map(SYS_BUS_DEVICE(&s->otp), 0, memmap[SIFIVE_U_DEV_OTP].base); - /* FIXME use qdev NIC properties instead of nd_table[] */ - if (nd->used) { - qemu_check_nic_model(nd, TYPE_CADENCE_GEM); - qdev_set_nic_properties(DEVICE(&s->gem), nd); - } + qemu_configure_nic_device(DEVICE(&s->gem), true, NULL); object_property_set_int(OBJECT(&s->gem), "revision", GEM_REVISION, &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->gem), errp)) { diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index 81f7e53aedd..64074395bc5 100644 --- a/hw/riscv/spike.c +++ b/hw/riscv/spike.c @@ -59,7 +59,7 @@ static void create_fdt(SpikeState *s, const MemMapEntry *memmap, MachineState *ms = MACHINE(s); uint32_t *clint_cells; uint32_t cpu_phandle, intc_phandle, phandle = 1; - char *name, *mem_name, *clint_name, *clust_name; + char *mem_name, *clint_name, *clust_name; char *core_name, *cpu_name, *intc_name; static const char * const clint_compat[2] = { "sifive,clint0", "riscv,clint0" @@ -113,9 +113,7 @@ static void create_fdt(SpikeState *s, const MemMapEntry *memmap, } else { qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv48"); } - name = riscv_isa_string(&s->soc[socket].harts[cpu]); - qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name); - g_free(name); + riscv_isa_write_fdt(&s->soc[socket].harts[cpu], fdt, cpu_name); qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv"); qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay"); qemu_fdt_setprop_cell(fdt, cpu_name, "reg", diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c index 171d56a6adc..0925528160f 100644 --- 
a/hw/riscv/virt-acpi-build.c +++ b/hw/riscv/virt-acpi-build.c @@ -27,16 +27,21 @@ #include "hw/acpi/acpi-defs.h" #include "hw/acpi/acpi.h" #include "hw/acpi/aml-build.h" +#include "hw/acpi/pci.h" #include "hw/acpi/utils.h" +#include "hw/intc/riscv_aclint.h" +#include "hw/nvram/fw_cfg_acpi.h" +#include "hw/pci-host/gpex.h" +#include "hw/riscv/virt.h" +#include "hw/riscv/numa.h" +#include "hw/virtio/virtio-acpi.h" +#include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "sysemu/reset.h" -#include "migration/vmstate.h" -#include "hw/riscv/virt.h" -#include "hw/riscv/numa.h" -#include "hw/intc/riscv_aclint.h" #define ACPI_BUILD_TABLE_SIZE 0x20000 +#define ACPI_BUILD_INTC_ID(socket, index) ((socket << 24) | (index)) typedef struct AcpiBuildState { /* Copy of table in RAM (for patching) */ @@ -58,17 +63,56 @@ static void acpi_align_size(GArray *blob, unsigned align) static void riscv_acpi_madt_add_rintc(uint32_t uid, const CPUArchIdList *arch_ids, - GArray *entry) + GArray *entry, + RISCVVirtState *s) { + uint8_t guest_index_bits = imsic_num_bits(s->aia_guests + 1); uint64_t hart_id = arch_ids->cpus[uid].arch_id; + uint32_t imsic_size, local_cpu_id, socket_id; + uint64_t imsic_socket_addr, imsic_addr; + MachineState *ms = MACHINE(s); + socket_id = arch_ids->cpus[uid].props.node_id; + local_cpu_id = (arch_ids->cpus[uid].arch_id - + riscv_socket_first_hartid(ms, socket_id)) % + riscv_socket_hart_count(ms, socket_id); + imsic_socket_addr = s->memmap[VIRT_IMSIC_S].base + + (socket_id * VIRT_IMSIC_GROUP_MAX_SIZE); + imsic_size = IMSIC_HART_SIZE(guest_index_bits); + imsic_addr = imsic_socket_addr + local_cpu_id * imsic_size; build_append_int_noprefix(entry, 0x18, 1); /* Type */ - build_append_int_noprefix(entry, 20, 1); /* Length */ + build_append_int_noprefix(entry, 36, 1); /* Length */ build_append_int_noprefix(entry, 1, 1); /* Version */ build_append_int_noprefix(entry, 0, 1); /* Reserved */ build_append_int_noprefix(entry, 0x1, 4); /* Flags */ build_append_int_noprefix(entry, hart_id, 8); /* Hart ID */ build_append_int_noprefix(entry, uid, 4); /* ACPI Processor UID */ + /* External Interrupt Controller ID */ + if (s->aia_type == VIRT_AIA_TYPE_APLIC) { + build_append_int_noprefix(entry, + ACPI_BUILD_INTC_ID( + arch_ids->cpus[uid].props.node_id, + local_cpu_id), + 4); + } else if (s->aia_type == VIRT_AIA_TYPE_NONE) { + build_append_int_noprefix(entry, + ACPI_BUILD_INTC_ID( + arch_ids->cpus[uid].props.node_id, + 2 * local_cpu_id + 1), + 4); + } else { + build_append_int_noprefix(entry, 0, 4); + } + + if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { + /* IMSIC Base address */ + build_append_int_noprefix(entry, imsic_addr, 8); + /* IMSIC Size */ + build_append_int_noprefix(entry, imsic_size, 4); + } else { + build_append_int_noprefix(entry, 0, 8); + build_append_int_noprefix(entry, 0, 4); + } } static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s) @@ -87,7 +131,7 @@ static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s) aml_int(arch_ids->cpus[i].arch_id))); /* build _MAT object */ - riscv_acpi_madt_add_rintc(i, arch_ids, madt_buf); + riscv_acpi_madt_add_rintc(i, arch_ids, madt_buf, s); aml_append(dev, aml_name_decl("_MAT", aml_buffer(madt_buf->len, (uint8_t *)madt_buf->data))); @@ -97,22 +141,75 @@ static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s) } } -static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap) +static void +acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap, + uint32_t uart_irq) { - Aml *dev 
= aml_device("FWCF"); - aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); - - /* device present, functioning, decoding, not shown in UI */ - aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); - aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + Aml *dev = aml_device("COM0"); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); Aml *crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base, - fw_cfg_memmap->size, AML_READ_WRITE)); + aml_append(crs, aml_memory32_fixed(uart_memmap->base, + uart_memmap->size, AML_READ_WRITE)); + aml_append(crs, + aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &uart_irq, 1)); aml_append(dev, aml_name_decl("_CRS", crs)); + + Aml *pkg = aml_package(2); + aml_append(pkg, aml_string("clock-frequency")); + aml_append(pkg, aml_int(3686400)); + + Aml *UUID = aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301"); + + Aml *pkg1 = aml_package(1); + aml_append(pkg1, pkg); + + Aml *package = aml_package(2); + aml_append(package, UUID); + aml_append(package, pkg1); + + aml_append(dev, aml_name_decl("_DSD", package)); aml_append(scope, dev); } +/* + * Serial Port Console Redirection Table (SPCR) + * Rev: 1.07 + */ + +static void +spcr_setup(GArray *table_data, BIOSLinker *linker, RISCVVirtState *s) +{ + AcpiSpcrData serial = { + .interface_type = 0, /* 16550 compatible */ + .base_addr.id = AML_AS_SYSTEM_MEMORY, + .base_addr.width = 32, + .base_addr.offset = 0, + .base_addr.size = 1, + .base_addr.addr = s->memmap[VIRT_UART0].base, + .interrupt_type = (1 << 4),/* Bit[4] RISC-V PLIC/APLIC */ + .pc_interrupt = 0, + .interrupt = UART0_IRQ, + .baud_rate = 7, /* 15200 */ + .parity = 0, + .stop_bits = 1, + .flow_control = 0, + .terminal_type = 3, /* ANSI */ + .language = 0, /* Language */ + .pci_device_id = 0xffff, /* not a PCI device*/ + .pci_vendor_id = 0xffff, /* not a PCI device*/ + .pci_bus = 0, + .pci_device = 0, + .pci_function = 0, + .pci_flags = 0, + .pci_segment = 0, + }; + + build_spcr(table_data, linker, &serial, 2, s->oem_id, s->oem_table_id); +} + /* RHCT Node[N] starts at offset 56 */ #define RHCT_NODE_ARRAY_OFFSET 56 @@ -121,6 +218,7 @@ static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap) * 5.2.36 RISC-V Hart Capabilities Table (RHCT) * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/16 * https://drive.google.com/file/d/1nP3nFiH4jkPMp6COOxP6123DCZKR-tia/view + * https://drive.google.com/file/d/1sKbOa8m1UZw1JkquZYe3F1zQBN1xXsaf/view */ static void build_rhct(GArray *table_data, BIOSLinker *linker, @@ -130,8 +228,10 @@ static void build_rhct(GArray *table_data, MachineState *ms = MACHINE(s); const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); size_t len, aligned_len; - uint32_t isa_offset, num_rhct_nodes; - RISCVCPU *cpu; + uint32_t isa_offset, num_rhct_nodes, cmo_offset = 0; + RISCVCPU *cpu = &s->soc[0].harts[0]; + uint32_t mmu_offset = 0; + uint8_t satp_mode_max; g_autofree char *isa = NULL; AcpiTable table = { .sig = "RHCT", .rev = 1, .oem_id = s->oem_id, @@ -147,6 +247,13 @@ static void build_rhct(GArray *table_data, /* ISA + N hart info */ num_rhct_nodes = 1 + ms->smp.cpus; + if (cpu->cfg.ext_zicbom || cpu->cfg.ext_zicboz) { + num_rhct_nodes++; + } + + if (cpu->cfg.satp_mode.supported != 0) { + num_rhct_nodes++; + } /* Number of RHCT nodes*/ build_append_int_noprefix(table_data, num_rhct_nodes, 4); @@ -158,7 +265,6 @@ static void build_rhct(GArray *table_data, isa_offset = 
table_data->len - table.table_offset; build_append_int_noprefix(table_data, 0, 2); /* Type 0 */ - cpu = &s->soc[0].harts[0]; isa = riscv_isa_string(cpu); len = 8 + strlen(isa) + 1; aligned_len = (len % 2) ? (len + 1) : len; @@ -174,14 +280,87 @@ static void build_rhct(GArray *table_data, build_append_int_noprefix(table_data, 0x0, 1); /* Optional Padding */ } + /* CMO node */ + if (cpu->cfg.ext_zicbom || cpu->cfg.ext_zicboz) { + cmo_offset = table_data->len - table.table_offset; + build_append_int_noprefix(table_data, 1, 2); /* Type */ + build_append_int_noprefix(table_data, 10, 2); /* Length */ + build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ + build_append_int_noprefix(table_data, 0, 1); /* Reserved */ + + /* CBOM block size */ + if (cpu->cfg.cbom_blocksize) { + build_append_int_noprefix(table_data, + __builtin_ctz(cpu->cfg.cbom_blocksize), + 1); + } else { + build_append_int_noprefix(table_data, 0, 1); + } + + /* CBOP block size */ + build_append_int_noprefix(table_data, 0, 1); + + /* CBOZ block size */ + if (cpu->cfg.cboz_blocksize) { + build_append_int_noprefix(table_data, + __builtin_ctz(cpu->cfg.cboz_blocksize), + 1); + } else { + build_append_int_noprefix(table_data, 0, 1); + } + } + + /* MMU node structure */ + if (cpu->cfg.satp_mode.supported != 0) { + satp_mode_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); + mmu_offset = table_data->len - table.table_offset; + build_append_int_noprefix(table_data, 2, 2); /* Type */ + build_append_int_noprefix(table_data, 8, 2); /* Length */ + build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ + build_append_int_noprefix(table_data, 0, 1); /* Reserved */ + /* MMU Type */ + if (satp_mode_max == VM_1_10_SV57) { + build_append_int_noprefix(table_data, 2, 1); /* Sv57 */ + } else if (satp_mode_max == VM_1_10_SV48) { + build_append_int_noprefix(table_data, 1, 1); /* Sv48 */ + } else if (satp_mode_max == VM_1_10_SV39) { + build_append_int_noprefix(table_data, 0, 1); /* Sv39 */ + } else { + assert(1); + } + } + /* Hart Info Node */ for (int i = 0; i < arch_ids->len; i++) { + len = 16; + int num_offsets = 1; build_append_int_noprefix(table_data, 0xFFFF, 2); /* Type */ - build_append_int_noprefix(table_data, 16, 2); /* Length */ - build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ - build_append_int_noprefix(table_data, 1, 2); /* Number of offsets */ - build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ - build_append_int_noprefix(table_data, isa_offset, 4); /* Offsets[0] */ + + /* Length */ + if (cmo_offset) { + len += 4; + num_offsets++; + } + + if (mmu_offset) { + len += 4; + num_offsets++; + } + + build_append_int_noprefix(table_data, len, 2); + build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ + /* Number of offsets */ + build_append_int_noprefix(table_data, num_offsets, 2); + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + /* Offsets */ + build_append_int_noprefix(table_data, isa_offset, 4); + if (cmo_offset) { + build_append_int_noprefix(table_data, cmo_offset, 4); + } + + if (mmu_offset) { + build_append_int_noprefix(table_data, mmu_offset, 4); + } } acpi_table_end(linker, &table); @@ -209,6 +388,8 @@ static void build_dsdt(GArray *table_data, RISCVVirtState *s) { Aml *scope, *dsdt; + MachineState *ms = MACHINE(s); + uint8_t socket_count; const MemMapEntry *memmap = s->memmap; AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = s->oem_id, .oem_table_id = s->oem_table_id }; @@ -226,7 +407,30 @@ static void build_dsdt(GArray *table_data, scope = 
aml_scope("\\_SB"); acpi_dsdt_add_cpus(scope, s); - acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]); + fw_cfg_acpi_dsdt_add(scope, &memmap[VIRT_FW_CFG]); + + socket_count = riscv_socket_count(ms); + + acpi_dsdt_add_uart(scope, &memmap[VIRT_UART0], UART0_IRQ); + + if (socket_count == 1) { + virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base, + memmap[VIRT_VIRTIO].size, + VIRTIO_IRQ, 0, VIRTIO_COUNT); + acpi_dsdt_add_gpex_host(scope, PCIE_IRQ); + } else if (socket_count == 2) { + virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base, + memmap[VIRT_VIRTIO].size, + VIRTIO_IRQ + VIRT_IRQCHIP_NUM_SOURCES, 0, + VIRTIO_COUNT); + acpi_dsdt_add_gpex_host(scope, PCIE_IRQ + VIRT_IRQCHIP_NUM_SOURCES); + } else { + virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base, + memmap[VIRT_VIRTIO].size, + VIRTIO_IRQ + VIRT_IRQCHIP_NUM_SOURCES, 0, + VIRTIO_COUNT); + acpi_dsdt_add_gpex_host(scope, PCIE_IRQ + VIRT_IRQCHIP_NUM_SOURCES * 2); + } aml_append(dsdt, scope); @@ -242,6 +446,7 @@ static void build_dsdt(GArray *table_data, * 5.2.12 Multiple APIC Description Table (MADT) * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/15 * https://drive.google.com/file/d/1R6k4MshhN3WTT-hwqAquu5nX6xSEqK2l/view + * https://drive.google.com/file/d/1oMGPyOD58JaPgMl1pKasT-VKsIKia7zR/view */ static void build_madt(GArray *table_data, BIOSLinker *linker, @@ -250,6 +455,21 @@ static void build_madt(GArray *table_data, MachineClass *mc = MACHINE_GET_CLASS(s); MachineState *ms = MACHINE(s); const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); + uint8_t group_index_bits = imsic_num_bits(riscv_socket_count(ms)); + uint8_t guest_index_bits = imsic_num_bits(s->aia_guests + 1); + uint16_t imsic_max_hart_per_socket = 0; + uint8_t hart_index_bits; + uint64_t aplic_addr; + uint32_t gsi_base; + uint8_t socket; + + for (socket = 0; socket < riscv_socket_count(ms); socket++) { + if (imsic_max_hart_per_socket < s->soc[socket].num_harts) { + imsic_max_hart_per_socket = s->soc[socket].num_harts; + } + } + + hart_index_bits = imsic_num_bits(imsic_max_hart_per_socket); AcpiTable table = { .sig = "APIC", .rev = 6, .oem_id = s->oem_id, .oem_table_id = s->oem_table_id }; @@ -261,7 +481,133 @@ static void build_madt(GArray *table_data, /* RISC-V Local INTC structures per HART */ for (int i = 0; i < arch_ids->len; i++) { - riscv_acpi_madt_add_rintc(i, arch_ids, table_data); + riscv_acpi_madt_add_rintc(i, arch_ids, table_data, s); + } + + /* IMSIC */ + if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { + /* IMSIC */ + build_append_int_noprefix(table_data, 0x19, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, 0, 1); /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + /* Number of supervisor mode Interrupt Identities */ + build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_MSIS, 2); + /* Number of guest mode Interrupt Identities */ + build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_MSIS, 2); + /* Guest Index Bits */ + build_append_int_noprefix(table_data, guest_index_bits, 1); + /* Hart Index Bits */ + build_append_int_noprefix(table_data, hart_index_bits, 1); + /* Group Index Bits */ + build_append_int_noprefix(table_data, group_index_bits, 1); + /* Group Index Shift */ + build_append_int_noprefix(table_data, IMSIC_MMIO_GROUP_MIN_SHIFT, 1); + } + + if (s->aia_type != VIRT_AIA_TYPE_NONE) { + /* APLICs */ + for (socket = 0; socket < riscv_socket_count(ms); socket++) { + aplic_addr 
= s->memmap[VIRT_APLIC_S].base + + s->memmap[VIRT_APLIC_S].size * socket; + gsi_base = VIRT_IRQCHIP_NUM_SOURCES * socket; + build_append_int_noprefix(table_data, 0x1A, 1); /* Type */ + build_append_int_noprefix(table_data, 36, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, socket, 1); /* APLIC ID */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + build_append_int_noprefix(table_data, 0, 8); /* Hardware ID */ + /* Number of IDCs */ + if (s->aia_type == VIRT_AIA_TYPE_APLIC) { + build_append_int_noprefix(table_data, + s->soc[socket].num_harts, + 2); + } else { + build_append_int_noprefix(table_data, 0, 2); + } + /* Total External Interrupt Sources Supported */ + build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_SOURCES, 2); + /* Global System Interrupt Base */ + build_append_int_noprefix(table_data, gsi_base, 4); + /* APLIC Address */ + build_append_int_noprefix(table_data, aplic_addr, 8); + /* APLIC size */ + build_append_int_noprefix(table_data, + s->memmap[VIRT_APLIC_S].size, 4); + } + } else { + /* PLICs */ + for (socket = 0; socket < riscv_socket_count(ms); socket++) { + aplic_addr = s->memmap[VIRT_PLIC].base + + s->memmap[VIRT_PLIC].size * socket; + gsi_base = VIRT_IRQCHIP_NUM_SOURCES * socket; + build_append_int_noprefix(table_data, 0x1B, 1); /* Type */ + build_append_int_noprefix(table_data, 36, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, socket, 1); /* PLIC ID */ + build_append_int_noprefix(table_data, 0, 8); /* Hardware ID */ + /* Total External Interrupt Sources Supported */ + build_append_int_noprefix(table_data, + VIRT_IRQCHIP_NUM_SOURCES - 1, 2); + build_append_int_noprefix(table_data, 0, 2); /* Max Priority */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + /* PLIC Size */ + build_append_int_noprefix(table_data, s->memmap[VIRT_PLIC].size, 4); + /* PLIC Address */ + build_append_int_noprefix(table_data, aplic_addr, 8); + /* Global System Interrupt Vector Base */ + build_append_int_noprefix(table_data, gsi_base, 4); + } + } + + acpi_table_end(linker, &table); +} + +/* + * ACPI spec, Revision 6.5+ + * 5.2.16 System Resource Affinity Table (SRAT) + * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/25 + * https://drive.google.com/file/d/1YTdDx2IPm5IeZjAW932EYU-tUtgS08tX/view + */ +static void +build_srat(GArray *table_data, BIOSLinker *linker, RISCVVirtState *vms) +{ + int i; + uint64_t mem_base; + MachineClass *mc = MACHINE_GET_CLASS(vms); + MachineState *ms = MACHINE(vms); + const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(ms); + AcpiTable table = { .sig = "SRAT", .rev = 3, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + + acpi_table_begin(&table, table_data); + build_append_int_noprefix(table_data, 1, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ + + for (i = 0; i < cpu_list->len; ++i) { + uint32_t nodeid = cpu_list->cpus[i].props.node_id; + /* + * 5.2.16.8 RINTC Affinity Structure + */ + build_append_int_noprefix(table_data, 7, 1); /* Type */ + build_append_int_noprefix(table_data, 20, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, nodeid, 4); /* Proximity Domain */ + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + /* Flags, Table 5-70 */ + build_append_int_noprefix(table_data, 1 /* Flags: Enabled */, 4); + build_append_int_noprefix(table_data, 
0, 4); /* Clock Domain */ + } + + mem_base = vms->memmap[VIRT_DRAM].base; + for (i = 0; i < ms->numa_state->num_nodes; ++i) { + if (ms->numa_state->nodes[i].node_mem > 0) { + build_srat_memory(table_data, mem_base, + ms->numa_state->nodes[i].node_mem, i, + MEM_AFFINITY_ENABLED); + mem_base += ms->numa_state->nodes[i].node_mem; + } } acpi_table_end(linker, &table); @@ -272,6 +618,7 @@ static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables) GArray *table_offsets; unsigned dsdt, xsdt; GArray *tables_blob = tables->table_data; + MachineState *ms = MACHINE(s); table_offsets = g_array_new(false, true, sizeof(uint32_t)); @@ -294,6 +641,29 @@ static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables) acpi_add_table(table_offsets, tables_blob); build_rhct(tables_blob, tables->linker, s); + acpi_add_table(table_offsets, tables_blob); + spcr_setup(tables_blob, tables->linker, s); + + acpi_add_table(table_offsets, tables_blob); + { + AcpiMcfgInfo mcfg = { + .base = s->memmap[VIRT_PCIE_ECAM].base, + .size = s->memmap[VIRT_PCIE_ECAM].size, + }; + build_mcfg(tables_blob, tables->linker, &mcfg, s->oem_id, + s->oem_table_id); + } + + if (ms->numa_state->num_nodes > 0) { + acpi_add_table(table_offsets, tables_blob); + build_srat(tables_blob, tables->linker, s); + if (ms->numa_state->have_numa_distance) { + acpi_add_table(table_offsets, tables_blob); + build_slit(tables_blob, tables->linker, ms, s->oem_id, + s->oem_table_id); + } + } + /* XSDT is pointed to by RSDP */ xsdt = tables_blob->len; build_xsdt(tables_blob, tables->linker, table_offsets, s->oem_id, @@ -374,7 +744,7 @@ static const VMStateDescription vmstate_virt_acpi_build = { .name = "virt_acpi_build", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(patched, AcpiBuildState), VMSTATE_END_OF_LIST() }, diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index d2eac241561..d171e74f7b8 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -36,9 +36,9 @@ #include "hw/riscv/boot.h" #include "hw/riscv/numa.h" #include "kvm/kvm_riscv.h" +#include "hw/firmware/smbios.h" #include "hw/intc/riscv_aclint.h" #include "hw/intc/riscv_aplic.h" -#include "hw/intc/riscv_imsic.h" #include "hw/intc/sifive_plic.h" #include "hw/misc/sifive_test.h" #include "hw/platform-bus.h" @@ -48,33 +48,13 @@ #include "sysemu/tcg.h" #include "sysemu/kvm.h" #include "sysemu/tpm.h" +#include "sysemu/qtest.h" #include "hw/pci/pci.h" #include "hw/pci-host/gpex.h" #include "hw/display/ramfb.h" #include "hw/acpi/aml-build.h" #include "qapi/qapi-visit-common.h" - -/* - * The virt machine physical address space used by some of the devices - * namely ACLINT, PLIC, APLIC, and IMSIC depend on number of Sockets, - * number of CPUs, and number of IMSIC guest files. - * - * Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS, - * and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization - * of virt machine physical address space. - */ - -#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT) -#if VIRT_IMSIC_GROUP_MAX_SIZE < \ - IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS) -#error "Can't accommodate single IMSIC group in address space" -#endif - -#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \ - VIRT_IMSIC_GROUP_MAX_SIZE) -#if 0x4000000 < VIRT_IMSIC_MAX_SIZE -#error "Can't accommodate all IMSIC groups in address space" -#endif +#include "hw/virtio/virtio-iommu.h" /* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. 
*/ static bool virt_use_kvm_aia(RISCVVirtState *s) @@ -82,6 +62,11 @@ static bool virt_use_kvm_aia(RISCVVirtState *s) return kvm_irqchip_in_kernel() && s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC; } +static bool virt_aclint_allowed(void) +{ + return tcg_enabled() || qtest_enabled(); +} + static const MemMapEntry virt_memmap[] = { [VIRT_DEBUG] = { 0x0, 0x100 }, [VIRT_MROM] = { 0x1000, 0xf000 }, @@ -238,12 +223,15 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, int cpu; uint32_t cpu_phandle; MachineState *ms = MACHINE(s); - char *name, *cpu_name, *core_name, *intc_name, *sv_name; bool is_32_bit = riscv_is_32bit(&s->soc[0]); uint8_t satp_mode_max; for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) { RISCVCPU *cpu_ptr = &s->soc[socket].harts[cpu]; + g_autofree char *cpu_name = NULL; + g_autofree char *core_name = NULL; + g_autofree char *intc_name = NULL; + g_autofree char *sv_name = NULL; cpu_phandle = (*phandle)++; @@ -256,12 +244,9 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, sv_name = g_strdup_printf("riscv,%s", satp_mode_str(satp_mode_max, is_32_bit)); qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", sv_name); - g_free(sv_name); } - name = riscv_isa_string(cpu_ptr); - qemu_fdt_setprop_string(ms->fdt, cpu_name, "riscv,isa", name); - g_free(name); + riscv_isa_write_fdt(cpu_ptr, ms->fdt, cpu_name); if (cpu_ptr->cfg.ext_zicbom) { qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbom-block-size", @@ -273,6 +258,11 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, cpu_ptr->cfg.cboz_blocksize); } + if (cpu_ptr->cfg.ext_zicbop) { + qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbop-block-size", + cpu_ptr->cfg.cbop_blocksize); + } + qemu_fdt_setprop_string(ms->fdt, cpu_name, "compatible", "riscv"); qemu_fdt_setprop_string(ms->fdt, cpu_name, "status", "okay"); qemu_fdt_setprop_cell(ms->fdt, cpu_name, "reg", @@ -295,17 +285,13 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, core_name = g_strdup_printf("%s/core%d", clust_name, cpu); qemu_fdt_add_subnode(ms->fdt, core_name); qemu_fdt_setprop_cell(ms->fdt, core_name, "cpu", cpu_phandle); - - g_free(core_name); - g_free(intc_name); - g_free(cpu_name); } } static void create_fdt_socket_memory(RISCVVirtState *s, const MemMapEntry *memmap, int socket) { - char *mem_name; + g_autofree char *mem_name = NULL; uint64_t addr, size; MachineState *ms = MACHINE(s); @@ -317,7 +303,6 @@ static void create_fdt_socket_memory(RISCVVirtState *s, addr >> 32, addr, size >> 32, size); qemu_fdt_setprop_string(ms->fdt, mem_name, "device_type", "memory"); riscv_socket_fdt_write_id(ms, mem_name, socket); - g_free(mem_name); } static void create_fdt_socket_clint(RISCVVirtState *s, @@ -325,8 +310,8 @@ static void create_fdt_socket_clint(RISCVVirtState *s, uint32_t *intc_phandles) { int cpu; - char *clint_name; - uint32_t *clint_cells; + g_autofree char *clint_name = NULL; + g_autofree uint32_t *clint_cells = NULL; unsigned long clint_addr; MachineState *ms = MACHINE(s); static const char * const clint_compat[2] = { @@ -353,9 +338,6 @@ static void create_fdt_socket_clint(RISCVVirtState *s, qemu_fdt_setprop(ms->fdt, clint_name, "interrupts-extended", clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); riscv_socket_fdt_write_id(ms, clint_name, socket); - g_free(clint_name); - - g_free(clint_cells); } static void create_fdt_socket_aclint(RISCVVirtState *s, @@ -366,9 +348,9 @@ static void create_fdt_socket_aclint(RISCVVirtState *s, char *name; unsigned long addr, size; uint32_t 
aclint_cells_size; - uint32_t *aclint_mswi_cells; - uint32_t *aclint_sswi_cells; - uint32_t *aclint_mtimer_cells; + g_autofree uint32_t *aclint_mswi_cells = NULL; + g_autofree uint32_t *aclint_sswi_cells = NULL; + g_autofree uint32_t *aclint_mtimer_cells = NULL; MachineState *ms = MACHINE(s); aclint_mswi_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); @@ -440,10 +422,6 @@ static void create_fdt_socket_aclint(RISCVVirtState *s, riscv_socket_fdt_write_id(ms, name, socket); g_free(name); } - - g_free(aclint_mswi_cells); - g_free(aclint_mtimer_cells); - g_free(aclint_sswi_cells); } static void create_fdt_socket_plic(RISCVVirtState *s, @@ -452,32 +430,14 @@ static void create_fdt_socket_plic(RISCVVirtState *s, uint32_t *plic_phandles) { int cpu; - char *plic_name; - uint32_t *plic_cells; + g_autofree char *plic_name = NULL; + g_autofree uint32_t *plic_cells; unsigned long plic_addr; MachineState *ms = MACHINE(s); static const char * const plic_compat[2] = { "sifive,plic-1.0.0", "riscv,plic0" }; - if (kvm_enabled()) { - plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); - } else { - plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4); - } - - for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { - if (kvm_enabled()) { - plic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); - plic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT); - } else { - plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]); - plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT); - plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]); - plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT); - } - } - plic_phandles[socket] = (*phandle)++; plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket); plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr); @@ -490,8 +450,33 @@ static void create_fdt_socket_plic(RISCVVirtState *s, (char **)&plic_compat, ARRAY_SIZE(plic_compat)); qemu_fdt_setprop(ms->fdt, plic_name, "interrupt-controller", NULL, 0); - qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended", - plic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); + + if (kvm_enabled()) { + plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); + + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + plic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + plic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT); + } + + qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended", + plic_cells, + s->soc[socket].num_harts * sizeof(uint32_t) * 2); + } else { + plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4); + + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]); + plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT); + plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]); + plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT); + } + + qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended", + plic_cells, + s->soc[socket].num_harts * sizeof(uint32_t) * 4); + } + qemu_fdt_setprop_cells(ms->fdt, plic_name, "reg", 0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size); qemu_fdt_setprop_cell(ms->fdt, plic_name, "riscv,ndev", @@ -506,13 +491,9 @@ static void create_fdt_socket_plic(RISCVVirtState *s, memmap[VIRT_PLATFORM_BUS].size, VIRT_PLATFORM_BUS_IRQ); } - - g_free(plic_name); - - g_free(plic_cells); } -static uint32_t imsic_num_bits(uint32_t count) +uint32_t imsic_num_bits(uint32_t count) { uint32_t ret = 0; @@ -528,11 +509,12 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr, bool m_mode, 
uint32_t imsic_guest_bits) { int cpu, socket; - char *imsic_name; + g_autofree char *imsic_name = NULL; MachineState *ms = MACHINE(s); int socket_count = riscv_socket_count(ms); - uint32_t imsic_max_hart_per_socket; - uint32_t *imsic_cells, *imsic_regs, imsic_addr, imsic_size; + uint32_t imsic_max_hart_per_socket, imsic_addr, imsic_size; + g_autofree uint32_t *imsic_cells = NULL; + g_autofree uint32_t *imsic_regs = NULL; imsic_cells = g_new0(uint32_t, ms->smp.cpus * 2); imsic_regs = g_new0(uint32_t, socket_count * 4); @@ -584,10 +566,6 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr, IMSIC_MMIO_GROUP_MIN_SHIFT); } qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", msi_phandle); - - g_free(imsic_name); - g_free(imsic_regs); - g_free(imsic_cells); } static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap, @@ -619,12 +597,10 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket, bool m_mode, int num_harts) { int cpu; - char *aplic_name; - uint32_t *aplic_cells; + g_autofree char *aplic_name = NULL; + g_autofree uint32_t *aplic_cells = g_new0(uint32_t, num_harts * 2); MachineState *ms = MACHINE(s); - aplic_cells = g_new0(uint32_t, num_harts * 2); - for (cpu = 0; cpu < num_harts; cpu++) { aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); aplic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT); @@ -659,9 +635,6 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket, riscv_socket_fdt_write_id(ms, aplic_name, socket); qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_phandle); - - g_free(aplic_name); - g_free(aplic_cells); } static void create_fdt_socket_aplic(RISCVVirtState *s, @@ -673,7 +646,7 @@ static void create_fdt_socket_aplic(RISCVVirtState *s, uint32_t *aplic_phandles, int num_harts) { - char *aplic_name; + g_autofree char *aplic_name = NULL; unsigned long aplic_addr; MachineState *ms = MACHINE(s); uint32_t aplic_m_phandle, aplic_s_phandle; @@ -708,23 +681,18 @@ static void create_fdt_socket_aplic(RISCVVirtState *s, VIRT_PLATFORM_BUS_IRQ); } - g_free(aplic_name); - aplic_phandles[socket] = aplic_s_phandle; } static void create_fdt_pmu(RISCVVirtState *s) { - char *pmu_name; + g_autofree char *pmu_name = g_strdup_printf("/pmu"); MachineState *ms = MACHINE(s); RISCVCPU hart = s->soc[0].harts[0]; - pmu_name = g_strdup_printf("/pmu"); qemu_fdt_add_subnode(ms->fdt, pmu_name); qemu_fdt_setprop_string(ms->fdt, pmu_name, "compatible", "riscv,pmu"); riscv_pmu_generate_fdt_node(ms->fdt, hart.pmu_avail_ctrs, pmu_name); - - g_free(pmu_name); } static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, @@ -734,15 +702,17 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, uint32_t *irq_virtio_phandle, uint32_t *msi_pcie_phandle) { - char *clust_name; int socket, phandle_pos; MachineState *ms = MACHINE(s); uint32_t msi_m_phandle = 0, msi_s_phandle = 0; - uint32_t *intc_phandles, xplic_phandles[MAX_NODES]; + uint32_t xplic_phandles[MAX_NODES]; + g_autofree uint32_t *intc_phandles = NULL; int socket_count = riscv_socket_count(ms); qemu_fdt_add_subnode(ms->fdt, "/cpus"); qemu_fdt_setprop_cell(ms->fdt, "/cpus", "timebase-frequency", + kvm_enabled() ? 
+ kvm_riscv_get_timebase_frequency(first_cpu) : RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ); qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0); qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1); @@ -752,6 +722,7 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, phandle_pos = ms->smp.cpus; for (socket = (socket_count - 1); socket >= 0; socket--) { + g_autofree char *clust_name = NULL; phandle_pos -= s->soc[socket].num_harts; clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket); @@ -762,16 +733,12 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, create_fdt_socket_memory(s, memmap, socket); - g_free(clust_name); - - if (tcg_enabled()) { - if (s->have_aclint) { - create_fdt_socket_aclint(s, memmap, socket, - &intc_phandles[phandle_pos]); - } else { - create_fdt_socket_clint(s, memmap, socket, - &intc_phandles[phandle_pos]); - } + if (virt_aclint_allowed() && s->have_aclint) { + create_fdt_socket_aclint(s, memmap, socket, + &intc_phandles[phandle_pos]); + } else if (tcg_enabled()) { + create_fdt_socket_clint(s, memmap, socket, + &intc_phandles[phandle_pos]); } } @@ -806,8 +773,6 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, } } - g_free(intc_phandles); - if (kvm_enabled() && virt_use_kvm_aia(s)) { *irq_mmio_phandle = xplic_phandles[0]; *irq_virtio_phandle = xplic_phandles[0]; @@ -836,12 +801,12 @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap, uint32_t irq_virtio_phandle) { int i; - char *name; MachineState *ms = MACHINE(s); for (i = 0; i < VIRTIO_COUNT; i++) { - name = g_strdup_printf("/soc/virtio_mmio@%lx", + g_autofree char *name = g_strdup_printf("/soc/virtio_mmio@%lx", (long)(memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size)); + qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_setprop_string(ms->fdt, name, "compatible", "virtio,mmio"); qemu_fdt_setprop_cells(ms->fdt, name, "reg", @@ -856,7 +821,6 @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap, qemu_fdt_setprop_cells(ms->fdt, name, "interrupts", VIRTIO_IRQ + i, 0x4); } - g_free(name); } } @@ -864,12 +828,11 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap, uint32_t irq_pcie_phandle, uint32_t msi_pcie_phandle) { - char *name; + g_autofree char *name = NULL; MachineState *ms = MACHINE(s); name = g_strdup_printf("/soc/pci@%lx", (long) memmap[VIRT_PCIE_ECAM].base); - qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_setprop_cell(ms->fdt, name, "#address-cells", FDT_PCI_ADDR_CELLS); qemu_fdt_setprop_cell(ms->fdt, name, "#interrupt-cells", @@ -898,7 +861,6 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap, 2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size); create_pcie_irq_map(s, ms->fdt, name, irq_pcie_phandle); - g_free(name); } static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap, @@ -945,7 +907,7 @@ static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap, static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap, uint32_t irq_mmio_phandle) { - char *name; + g_autofree char *name = NULL; MachineState *ms = MACHINE(s); name = g_strdup_printf("/soc/serial@%lx", (long)memmap[VIRT_UART0].base); @@ -963,13 +925,12 @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap, } qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", name); - g_free(name); } static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap, uint32_t 
irq_mmio_phandle) { - char *name; + g_autofree char *name = NULL; MachineState *ms = MACHINE(s); name = g_strdup_printf("/soc/rtc@%lx", (long)memmap[VIRT_RTC].base); @@ -985,41 +946,64 @@ static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap, } else { qemu_fdt_setprop_cells(ms->fdt, name, "interrupts", RTC_IRQ, 0x4); } - g_free(name); } static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap) { - char *name; MachineState *ms = MACHINE(s); hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2; hwaddr flashbase = virt_memmap[VIRT_FLASH].base; + g_autofree char *name = g_strdup_printf("/flash@%" PRIx64, flashbase); - name = g_strdup_printf("/flash@%" PRIx64, flashbase); qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_setprop_string(ms->fdt, name, "compatible", "cfi-flash"); qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg", 2, flashbase, 2, flashsize, 2, flashbase + flashsize, 2, flashsize); qemu_fdt_setprop_cell(ms->fdt, name, "bank-width", 4); - g_free(name); } static void create_fdt_fw_cfg(RISCVVirtState *s, const MemMapEntry *memmap) { - char *nodename; MachineState *ms = MACHINE(s); hwaddr base = memmap[VIRT_FW_CFG].base; hwaddr size = memmap[VIRT_FW_CFG].size; + g_autofree char *nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base); - nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base); qemu_fdt_add_subnode(ms->fdt, nodename); qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "qemu,fw-cfg-mmio"); qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base, 2, size); qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); - g_free(nodename); +} + +static void create_fdt_virtio_iommu(RISCVVirtState *s, uint16_t bdf) +{ + const char compat[] = "virtio,pci-iommu\0pci1af4,1057"; + void *fdt = MACHINE(s)->fdt; + uint32_t iommu_phandle; + g_autofree char *iommu_node = NULL; + g_autofree char *pci_node = NULL; + + pci_node = g_strdup_printf("/soc/pci@%lx", + (long) virt_memmap[VIRT_PCIE_ECAM].base); + iommu_node = g_strdup_printf("%s/virtio_iommu@%x,%x", pci_node, + PCI_SLOT(bdf), PCI_FUNC(bdf)); + iommu_phandle = qemu_fdt_alloc_phandle(fdt); + + qemu_fdt_add_subnode(fdt, iommu_node); + + qemu_fdt_setprop(fdt, iommu_node, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(fdt, iommu_node, "reg", + 1, bdf << 8, 1, 0, 1, 0, + 1, 0, 1, 0); + qemu_fdt_setprop_cell(fdt, iommu_node, "#iommu-cells", 1); + qemu_fdt_setprop_cell(fdt, iommu_node, "phandle", iommu_phandle); + + qemu_fdt_setprop_cells(fdt, pci_node, "iommu-map", + 0, iommu_phandle, 0, bdf, + bdf + 1, iommu_phandle, bdf + 1, 0xffff - bdf); } static void finalize_fdt(RISCVVirtState *s) @@ -1046,6 +1030,7 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap) { MachineState *ms = MACHINE(s); uint8_t rng_seed[32]; + g_autofree char *name = NULL; ms->fdt = create_device_tree(&s->fdt_size); if (!ms->fdt) { @@ -1064,6 +1049,13 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap) qemu_fdt_setprop_cell(ms->fdt, "/soc", "#size-cells", 0x2); qemu_fdt_setprop_cell(ms->fdt, "/soc", "#address-cells", 0x2); + /* + * The "/soc/pci@..." node is needed for PCIE hotplugs + * that might happen before finalize_fdt(). 
+ */ + name = g_strdup_printf("/soc/pci@%lx", (long) memmap[VIRT_PCIE_ECAM].base); + qemu_fdt_add_subnode(ms->fdt, name); + qemu_fdt_add_subnode(ms->fdt, "/chosen"); /* Pass seed to RNG */ @@ -1077,21 +1069,45 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap) } static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, - hwaddr ecam_base, hwaddr ecam_size, - hwaddr mmio_base, hwaddr mmio_size, - hwaddr high_mmio_base, - hwaddr high_mmio_size, - hwaddr pio_base, - DeviceState *irqchip) + DeviceState *irqchip, + RISCVVirtState *s) { DeviceState *dev; MemoryRegion *ecam_alias, *ecam_reg; MemoryRegion *mmio_alias, *high_mmio_alias, *mmio_reg; + hwaddr ecam_base = s->memmap[VIRT_PCIE_ECAM].base; + hwaddr ecam_size = s->memmap[VIRT_PCIE_ECAM].size; + hwaddr mmio_base = s->memmap[VIRT_PCIE_MMIO].base; + hwaddr mmio_size = s->memmap[VIRT_PCIE_MMIO].size; + hwaddr high_mmio_base = virt_high_pcie_memmap.base; + hwaddr high_mmio_size = virt_high_pcie_memmap.size; + hwaddr pio_base = s->memmap[VIRT_PCIE_PIO].base; + hwaddr pio_size = s->memmap[VIRT_PCIE_PIO].size; qemu_irq irq; int i; dev = qdev_new(TYPE_GPEX_HOST); + /* Set GPEX object properties for the virt machine */ + object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_BASE, + ecam_base, NULL); + object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_SIZE, + ecam_size, NULL); + object_property_set_uint(OBJECT(GPEX_HOST(dev)), + PCI_HOST_BELOW_4G_MMIO_BASE, + mmio_base, NULL); + object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_BELOW_4G_MMIO_SIZE, + mmio_size, NULL); + object_property_set_uint(OBJECT(GPEX_HOST(dev)), + PCI_HOST_ABOVE_4G_MMIO_BASE, + high_mmio_base, NULL); + object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ABOVE_4G_MMIO_SIZE, + high_mmio_size, NULL); + object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_BASE, + pio_base, NULL); + object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_SIZE, + pio_size, NULL); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); ecam_alias = g_new0(MemoryRegion, 1); @@ -1122,6 +1138,7 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i); } + GPEX_HOST(dev)->gpex_cfg.bus = PCI_HOST_BRIDGE(GPEX_HOST(dev))->bus; return dev; } @@ -1141,7 +1158,7 @@ static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket, int base_hartid, int hart_count) { DeviceState *ret; - char *plic_hart_config; + g_autofree char *plic_hart_config = NULL; /* Per-socket PLIC hart topology configuration string */ plic_hart_config = riscv_plic_hart_config_string(hart_count); @@ -1160,8 +1177,6 @@ static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket, VIRT_PLIC_CONTEXT_STRIDE, memmap[VIRT_PLIC].size); - g_free(plic_hart_config); - return ret; } @@ -1249,6 +1264,45 @@ static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip) sysbus_mmio_get_region(sysbus, 0)); } +static void virt_build_smbios(RISCVVirtState *s) +{ + MachineClass *mc = MACHINE_GET_CLASS(s); + MachineState *ms = MACHINE(s); + uint8_t *smbios_tables, *smbios_anchor; + size_t smbios_tables_len, smbios_anchor_len; + struct smbios_phys_mem_area mem_array; + const char *product = "QEMU Virtual Machine"; + + if (kvm_enabled()) { + product = "KVM Virtual Machine"; + } + + smbios_set_defaults("QEMU", product, mc->name, true); + + if (riscv_is_32bit(&s->soc[0])) { + smbios_set_default_processor_family(0x200); + } else { + smbios_set_default_processor_family(0x201); + } + + /* 
build the array of physical mem area from base_memmap */ + mem_array.address = s->memmap[VIRT_DRAM].base; + mem_array.length = ms->ram_size; + + smbios_get_tables(ms, SMBIOS_ENTRY_POINT_TYPE_64, + &mem_array, 1, + &smbios_tables, &smbios_tables_len, + &smbios_anchor, &smbios_anchor_len, + &error_fatal); + + if (smbios_anchor) { + fw_cfg_add_file(s->fw_cfg, "etc/smbios/smbios-tables", + smbios_tables, smbios_tables_len); + fw_cfg_add_file(s->fw_cfg, "etc/smbios/smbios-anchor", + smbios_anchor, smbios_anchor_len); + } +} + static void virt_machine_done(Notifier *notifier, void *data) { RISCVVirtState *s = container_of(notifier, RISCVVirtState, @@ -1337,6 +1391,8 @@ static void virt_machine_done(Notifier *notifier, void *data) riscv_setup_direct_kernel(kernel_entry, fdt_load_addr); } + virt_build_smbios(s); + if (virt_is_acpi_enabled(s)) { virt_acpi_setup(s); } @@ -1348,7 +1404,6 @@ static void virt_machine_init(MachineState *machine) RISCVVirtState *s = RISCV_VIRT_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); MemoryRegion *mask_rom = g_new(MemoryRegion, 1); - char *soc_name; DeviceState *mmio_irqchip, *virtio_irqchip, *pcie_irqchip; int i, base_hartid, hart_count; int socket_count = riscv_socket_count(machine); @@ -1360,7 +1415,7 @@ static void virt_machine_init(MachineState *machine) exit(1); } - if (!tcg_enabled() && s->have_aclint) { + if (!virt_aclint_allowed() && s->have_aclint) { error_report("'aclint' is only available with TCG acceleration"); exit(1); } @@ -1368,6 +1423,8 @@ static void virt_machine_init(MachineState *machine) /* Initialize sockets */ mmio_irqchip = virtio_irqchip = pcie_irqchip = NULL; for (i = 0; i < socket_count; i++) { + g_autofree char *soc_name = g_strdup_printf("soc%d", i); + if (!riscv_socket_check_hartids(machine, i)) { error_report("discontinuous hartids in socket%d", i); exit(1); @@ -1385,10 +1442,8 @@ static void virt_machine_init(MachineState *machine) exit(1); } - soc_name = g_strdup_printf("soc%d", i); object_initialize_child(OBJECT(machine), soc_name, &s->soc[i], TYPE_RISCV_HART_ARRAY); - g_free(soc_name); object_property_set_str(OBJECT(&s->soc[i]), "cpu-type", machine->cpu_type, &error_abort); object_property_set_int(OBJECT(&s->soc[i]), "hartid-base", @@ -1397,23 +1452,22 @@ static void virt_machine_init(MachineState *machine) hart_count, &error_abort); sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_fatal); - if (tcg_enabled()) { - if (s->have_aclint) { - if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { - /* Per-socket ACLINT MTIMER */ - riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + + if (virt_aclint_allowed() && s->have_aclint) { + if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { + /* Per-socket ACLINT MTIMER */ + riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE, RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count, RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true); - } else { - /* Per-socket ACLINT MSWI, MTIMER, and SSWI */ - riscv_aclint_swi_create(memmap[VIRT_CLINT].base + + } else { + /* Per-socket ACLINT MSWI, MTIMER, and SSWI */ + riscv_aclint_swi_create(memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size, base_hartid, hart_count, false); - riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + + riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE, RISCV_ACLINT_DEFAULT_MTIMER_SIZE, @@ -1421,21 +1475,20 @@ static void virt_machine_init(MachineState *machine) 
RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true); - riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base + + riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base + i * memmap[VIRT_ACLINT_SSWI].size, base_hartid, hart_count, true); - } - } else { - /* Per-socket SiFive CLINT */ - riscv_aclint_swi_create( + } + } else if (tcg_enabled()) { + /* Per-socket SiFive CLINT */ + riscv_aclint_swi_create( memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size, base_hartid, hart_count, false); - riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + + riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE, RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count, RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true); - } } /* Per-socket interrupt controller */ @@ -1517,15 +1570,7 @@ static void virt_machine_init(MachineState *machine) qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i)); } - gpex_pcie_init(system_memory, - memmap[VIRT_PCIE_ECAM].base, - memmap[VIRT_PCIE_ECAM].size, - memmap[VIRT_PCIE_MMIO].base, - memmap[VIRT_PCIE_MMIO].size, - virt_high_pcie_memmap.base, - virt_high_pcie_memmap.size, - memmap[VIRT_PCIE_PIO].base, - pcie_irqchip); + gpex_pcie_init(system_memory, pcie_irqchip, s); create_platform_bus(s, mmio_irqchip); @@ -1668,7 +1713,8 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, { MachineClass *mc = MACHINE_GET_CLASS(machine); - if (device_is_dynamic_sysbus(mc, dev)) { + if (device_is_dynamic_sysbus(mc, dev) || + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { return HOTPLUG_HANDLER(machine); } return NULL; @@ -1687,6 +1733,10 @@ static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev, SYS_BUS_DEVICE(dev)); } } + + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { + create_fdt_virtio_iommu(s, pci_get_bdf(PCI_DEVICE(dev))); + } } static void virt_machine_class_init(ObjectClass *oc, void *data) diff --git a/hw/rtc/allwinner-rtc.c b/hw/rtc/allwinner-rtc.c index 7e493f0e79d..2ac50b30cb8 100644 --- a/hw/rtc/allwinner-rtc.c +++ b/hw/rtc/allwinner-rtc.c @@ -305,7 +305,7 @@ static const VMStateDescription allwinner_rtc_vmstate = { .name = "allwinner-rtc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwRtcState, AW_RTC_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/rtc/aspeed_rtc.c b/hw/rtc/aspeed_rtc.c index fa861e2d494..589d9a5a7a8 100644 --- a/hw/rtc/aspeed_rtc.c +++ b/hw/rtc/aspeed_rtc.c @@ -137,7 +137,7 @@ static const MemoryRegionOps aspeed_rtc_ops = { static const VMStateDescription vmstate_aspeed_rtc = { .name = TYPE_ASPEED_RTC, .version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, AspeedRtcState, 0x18), VMSTATE_INT64(offset, AspeedRtcState), VMSTATE_END_OF_LIST() diff --git a/hw/rtc/ds1338.c b/hw/rtc/ds1338.c index 36d8121ddda..e479661c391 100644 --- a/hw/rtc/ds1338.c +++ b/hw/rtc/ds1338.c @@ -46,7 +46,7 @@ static const VMStateDescription vmstate_ds1338 = { .name = "ds1338", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(parent_obj, DS1338State), VMSTATE_INT64(offset, DS1338State), VMSTATE_UINT8_V(wday_offset, DS1338State, 2), diff --git a/hw/rtc/exynos4210_rtc.c b/hw/rtc/exynos4210_rtc.c index cc7101c530a..319371f97d1 100644 --- 
a/hw/rtc/exynos4210_rtc.c +++ b/hw/rtc/exynos4210_rtc.c @@ -122,7 +122,7 @@ static const VMStateDescription vmstate_exynos4210_rtc_state = { .name = "exynos4210.rtc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(reg_intp, Exynos4210RTCState), VMSTATE_UINT32(reg_rtccon, Exynos4210RTCState), VMSTATE_UINT32(reg_ticcnt, Exynos4210RTCState), diff --git a/hw/rtc/goldfish_rtc.c b/hw/rtc/goldfish_rtc.c index 19a56402a0c..01acf30b278 100644 --- a/hw/rtc/goldfish_rtc.c +++ b/hw/rtc/goldfish_rtc.c @@ -242,7 +242,7 @@ static const VMStateDescription goldfish_rtc_vmstate = { .version_id = 2, .pre_save = goldfish_rtc_pre_save, .post_load = goldfish_rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tick_offset_vmstate, GoldfishRTCState), VMSTATE_UINT64(alarm_next, GoldfishRTCState), VMSTATE_UINT32(alarm_running, GoldfishRTCState), diff --git a/hw/rtc/ls7a_rtc.c b/hw/rtc/ls7a_rtc.c index 1f9e38a735b..ac28c1165bf 100644 --- a/hw/rtc/ls7a_rtc.c +++ b/hw/rtc/ls7a_rtc.c @@ -454,7 +454,7 @@ static const VMStateDescription vmstate_ls7a_rtc = { .minimum_version_id = 1, .pre_save = ls7a_rtc_pre_save, .post_load = ls7a_rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(offset_toy, LS7ARtcState), VMSTATE_INT64(offset_rtc, LS7ARtcState), VMSTATE_UINT32_ARRAY(toymatch, LS7ARtcState, TIMER_NUMS), diff --git a/hw/rtc/m48t59.c b/hw/rtc/m48t59.c index 2e2c849985c..1585a2d3997 100644 --- a/hw/rtc/m48t59.c +++ b/hw/rtc/m48t59.c @@ -36,6 +36,7 @@ #include "qemu/bcd.h" #include "qemu/module.h" #include "trace.h" +#include "sysemu/watchdog.h" #include "m48t59-internal.h" #include "migration/vmstate.h" @@ -163,8 +164,7 @@ static void watchdog_cb (void *opaque) if (NVRAM->buffer[0x1FF7] & 0x80) { NVRAM->buffer[0x1FF7] = 0x00; NVRAM->buffer[0x1FFC] &= ~0x40; - /* May it be a hw CPU Reset instead ? 
*/ - qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + watchdog_perform_action(); } else { qemu_set_irq(NVRAM->IRQ, 1); qemu_set_irq(NVRAM->IRQ, 0); @@ -526,7 +526,7 @@ static const VMStateDescription vmstate_m48t59 = { .name = "m48t59", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(lock, M48t59State), VMSTATE_UINT16(addr, M48t59State), VMSTATE_VBUFFER_UINT32(buffer, M48t59State, 0, NULL, size), diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c index 2d391a83969..f4c18692325 100644 --- a/hw/rtc/mc146818rtc.c +++ b/hw/rtc/mc146818rtc.c @@ -817,7 +817,7 @@ static const VMStateDescription vmstate_rtc_irq_reinject_on_ack_count = { .version_id = 1, .minimum_version_id = 1, .needed = rtc_irq_reinject_on_ack_count_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(irq_reinject_on_ack_count, MC146818RtcState), VMSTATE_END_OF_LIST() } @@ -829,7 +829,7 @@ static const VMStateDescription vmstate_rtc = { .minimum_version_id = 1, .pre_save = rtc_pre_save, .post_load = rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(cmos_data, MC146818RtcState), VMSTATE_UINT8(cmos_index, MC146818RtcState), VMSTATE_UNUSED(7*4), @@ -845,7 +845,7 @@ static const VMStateDescription vmstate_rtc = { VMSTATE_UINT64_V(next_alarm_time, MC146818RtcState, 3), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_rtc_irq_reinject_on_ack_count, NULL } diff --git a/hw/rtc/pl031.c b/hw/rtc/pl031.c index 2f3cd04eeb4..563bb4b446e 100644 --- a/hw/rtc/pl031.c +++ b/hw/rtc/pl031.c @@ -291,7 +291,7 @@ static const VMStateDescription vmstate_pl031_tick_offset = { .minimum_version_id = 1, .needed = pl031_tick_offset_needed, .post_load = pl031_tick_offset_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tick_offset, PL031State), VMSTATE_END_OF_LIST() } @@ -304,7 +304,7 @@ static const VMStateDescription vmstate_pl031 = { .pre_save = pl031_pre_save, .pre_load = pl031_pre_load, .post_load = pl031_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tick_offset_vmstate, PL031State), VMSTATE_UINT32(mr, PL031State), VMSTATE_UINT32(lr, PL031State), @@ -313,7 +313,7 @@ static const VMStateDescription vmstate_pl031 = { VMSTATE_UINT32(is, PL031State), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_pl031_tick_offset, NULL } diff --git a/hw/rtc/sun4v-rtc.c b/hw/rtc/sun4v-rtc.c index e037acd1b56..ffcc0aa25d9 100644 --- a/hw/rtc/sun4v-rtc.c +++ b/hw/rtc/sun4v-rtc.c @@ -5,7 +5,7 @@ * * Copyright (c) 2016 Artyom Tarasenko * - * This code is licensed under the GNU GPL v3 or (at your option) any later + * This code is licensed under the GNU GPL v2 or (at your option) any later * version. 
*/ diff --git a/hw/rtc/twl92230.c b/hw/rtc/twl92230.c index 64c61c3daeb..efd19a76e61 100644 --- a/hw/rtc/twl92230.c +++ b/hw/rtc/twl92230.c @@ -768,7 +768,7 @@ static const VMStateDescription vmstate_menelaus_tm = { .name = "menelaus_tm", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16_HACK(tm_sec, struct tm), VMSTATE_UINT16_HACK(tm_min, struct tm), VMSTATE_UINT16_HACK(tm_hour, struct tm), @@ -810,7 +810,7 @@ static const VMStateDescription vmstate_menelaus = { .minimum_version_id = 0, .pre_save = menelaus_pre_save, .post_load = menelaus_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(firstbyte, MenelausState), VMSTATE_UINT8(reg, MenelausState), VMSTATE_UINT8_ARRAY(vcore, MenelausState, 5), diff --git a/hw/rtc/xlnx-zynqmp-rtc.c b/hw/rtc/xlnx-zynqmp-rtc.c index 3e7d61a41c1..613c6407a60 100644 --- a/hw/rtc/xlnx-zynqmp-rtc.c +++ b/hw/rtc/xlnx-zynqmp-rtc.c @@ -244,7 +244,7 @@ static const VMStateDescription vmstate_rtc = { .minimum_version_id = 1, .pre_save = rtc_pre_save, .post_load = rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPRTC, XLNX_ZYNQMP_RTC_R_MAX), VMSTATE_UINT32(tick_offset, XlnxZynqMPRTC), VMSTATE_END_OF_LIST(), diff --git a/hw/rx/rx-gdbsim.c b/hw/rx/rx-gdbsim.c index 47c17026c73..bb4746c5569 100644 --- a/hw/rx/rx-gdbsim.c +++ b/hw/rx/rx-gdbsim.c @@ -20,6 +20,7 @@ #include "qemu/cutils.h" #include "qemu/error-report.h" #include "qemu/guest-random.h" +#include "qemu/units.h" #include "qapi/error.h" #include "hw/loader.h" #include "hw/rx/rx62n.h" diff --git a/hw/rx/rx62n.c b/hw/rx/rx62n.c index 4dc44afd9d4..560f53a58a6 100644 --- a/hw/rx/rx62n.c +++ b/hw/rx/rx62n.c @@ -23,6 +23,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/error-report.h" +#include "qemu/units.h" #include "hw/rx/rx62n.h" #include "hw/loader.h" #include "hw/sysbus.h" @@ -147,14 +148,11 @@ static void register_icu(RX62NState *s) qlist_append_int(trigger_level, levelirq[i]); } qdev_prop_set_array(DEVICE(icu), "trigger-level", trigger_level); - - for (i = 0; i < NR_IRQS; i++) { - s->irq[i] = qdev_get_gpio_in(DEVICE(icu), i); - } sysbus_realize(icu, &error_abort); + sysbus_connect_irq(icu, 0, qdev_get_gpio_in(DEVICE(&s->cpu), RX_CPU_IRQ)); sysbus_connect_irq(icu, 1, qdev_get_gpio_in(DEVICE(&s->cpu), RX_CPU_FIR)); - sysbus_connect_irq(icu, 2, s->irq[SWI]); + sysbus_connect_irq(icu, 2, qdev_get_gpio_in(DEVICE(&s->icu), SWI)); sysbus_mmio_map(icu, 0, RX62N_ICU_BASE); } @@ -171,7 +169,8 @@ static void register_tmr(RX62NState *s, int unit) irqbase = RX62N_TMR_IRQ + TMR_NR_IRQ * unit; for (i = 0; i < TMR_NR_IRQ; i++) { - sysbus_connect_irq(tmr, i, s->irq[irqbase + i]); + sysbus_connect_irq(tmr, i, + qdev_get_gpio_in(DEVICE(&s->icu), irqbase + i)); } sysbus_mmio_map(tmr, 0, RX62N_TMR_BASE + unit * 0x10); } @@ -189,7 +188,8 @@ static void register_cmt(RX62NState *s, int unit) irqbase = RX62N_CMT_IRQ + CMT_NR_IRQ * unit; for (i = 0; i < CMT_NR_IRQ; i++) { - sysbus_connect_irq(cmt, i, s->irq[irqbase + i]); + sysbus_connect_irq(cmt, i, + qdev_get_gpio_in(DEVICE(&s->icu), irqbase + i)); } sysbus_mmio_map(cmt, 0, RX62N_CMT_BASE + unit * 0x10); } @@ -208,7 +208,8 @@ static void register_sci(RX62NState *s, int unit) irqbase = RX62N_SCI_IRQ + SCI_NR_IRQ * unit; for (i = 0; i < SCI_NR_IRQ; i++) { - sysbus_connect_irq(sci, i, s->irq[irqbase + i]); + sysbus_connect_irq(sci, i, + qdev_get_gpio_in(DEVICE(&s->icu), irqbase + 
i)); } sysbus_mmio_map(sci, 0, RX62N_SCI_BASE + unit * 0x08); } diff --git a/hw/s390x/Kconfig b/hw/s390x/Kconfig index 4c068d7960b..26ad1044858 100644 --- a/hw/s390x/Kconfig +++ b/hw/s390x/Kconfig @@ -6,6 +6,7 @@ config S390_CCW_VIRTIO imply VFIO_CCW imply WDT_DIAG288 imply PCIE_DEVICES + imply IOMMUFD select PCI_EXPRESS select S390_FLIC select S390_FLIC_KVM if KVM diff --git a/hw/s390x/ccw-device.c b/hw/s390x/ccw-device.c index 95f269ab441..fb8c1acc64d 100644 --- a/hw/s390x/ccw-device.c +++ b/hw/s390x/ccw-device.c @@ -66,7 +66,7 @@ const VMStateDescription vmstate_ccw_dev = { .name = "s390_ccw_dev", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER(sch, CcwDevice, vmstate_subch_dev, SubchDev), VMSTATE_END_OF_LIST() } diff --git a/hw/s390x/css-bridge.c b/hw/s390x/css-bridge.c index 15d26efc951..34639f21435 100644 --- a/hw/s390x/css-bridge.c +++ b/hw/s390x/css-bridge.c @@ -56,7 +56,7 @@ static void ccw_device_unplug(HotplugHandler *hotplug_dev, qdev_unrealize(dev); } -static void virtual_css_bus_reset(BusState *qbus) +static void virtual_css_bus_reset_hold(Object *obj) { /* This should actually be modelled via the generic css */ css_reset(); @@ -81,8 +81,9 @@ static char *virtual_css_bus_get_dev_path(DeviceState *dev) static void virtual_css_bus_class_init(ObjectClass *klass, void *data) { BusClass *k = BUS_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); - k->reset = virtual_css_bus_reset; + rc->phases.hold = virtual_css_bus_reset_hold; k->get_dev_path = virtual_css_bus_get_dev_path; } diff --git a/hw/s390x/css.c b/hw/s390x/css.c index bcedec2fc82..295530963a6 100644 --- a/hw/s390x/css.c +++ b/hw/s390x/css.c @@ -32,7 +32,7 @@ static const VMStateDescription vmstate_crw = { .name = "s390_crw", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(flags, CRW), VMSTATE_UINT16(rsid, CRW), VMSTATE_END_OF_LIST() @@ -43,7 +43,7 @@ static const VMStateDescription vmstate_crw_container = { .name = "s390_crw_container", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(crw, CrwContainer, 0, vmstate_crw, CRW), VMSTATE_END_OF_LIST() }, @@ -59,7 +59,7 @@ static const VMStateDescription vmstate_chp_info = { .name = "s390_chp_info", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(in_use, ChpInfo), VMSTATE_UINT8(type, ChpInfo), VMSTATE_UINT8(is_virtual, ChpInfo), @@ -77,7 +77,7 @@ static const VMStateDescription vmstate_scsw = { .name = "s390_scsw", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(flags, SCSW), VMSTATE_UINT16(ctrl, SCSW), VMSTATE_UINT32(cpa, SCSW), @@ -92,7 +92,7 @@ static const VMStateDescription vmstate_pmcw = { .name = "s390_pmcw", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(intparm, PMCW), VMSTATE_UINT16(flags, PMCW), VMSTATE_UINT16(devno, PMCW), @@ -113,7 +113,7 @@ static const VMStateDescription vmstate_schib = { .name = "s390_schib", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(pmcw, SCHIB, 0, vmstate_pmcw, PMCW), VMSTATE_STRUCT(scsw, SCHIB, 0, vmstate_scsw, SCSW), VMSTATE_UINT64(mba, SCHIB), @@ -127,7 +127,7 @@ static const 
VMStateDescription vmstate_ccw1 = { .name = "s390_ccw1", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(cmd_code, CCW1), VMSTATE_UINT8(flags, CCW1), VMSTATE_UINT16(count, CCW1), @@ -140,7 +140,7 @@ static const VMStateDescription vmstate_ciw = { .name = "s390_ciw", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(type, CIW), VMSTATE_UINT8(command, CIW), VMSTATE_UINT16(count, CIW), @@ -152,7 +152,7 @@ static const VMStateDescription vmstate_sense_id = { .name = "s390_sense_id", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(reserved, SenseId), VMSTATE_UINT16(cu_type, SenseId), VMSTATE_UINT8(cu_model, SenseId), @@ -168,7 +168,7 @@ static const VMStateDescription vmstate_orb = { .name = "s390_orb", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(intparm, ORB), VMSTATE_UINT16(ctrl0, ORB), VMSTATE_UINT8(lpm, ORB), @@ -188,7 +188,7 @@ static const VMStateDescription vmstate_schdev_orb = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_schdev_orb_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(orb, SubchDev, 1, vmstate_orb, ORB), VMSTATE_END_OF_LIST() } @@ -207,7 +207,7 @@ const VMStateDescription vmstate_subch_dev = { .minimum_version_id = 1, .post_load = subch_dev_post_load, .pre_save = subch_dev_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_EQUAL(cssid, SubchDev, "Bug!"), VMSTATE_UINT8_EQUAL(ssid, SubchDev, "Bug!"), VMSTATE_UINT16(migrated_schid, SubchDev), @@ -223,7 +223,7 @@ const VMStateDescription vmstate_subch_dev = { VMSTATE_UINT8(ccw_no_data_cnt, SubchDev), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_schdev_orb, NULL } @@ -264,12 +264,12 @@ static int pre_save_ind_addr(void *opaque) return 0; } -const VMStateDescription vmstate_ind_addr_tmp = { +static const VMStateDescription vmstate_ind_addr_tmp = { .name = "s390_ind_addr_tmp", .pre_save = pre_save_ind_addr, .post_load = post_load_ind_addr, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(len, IndAddrPtrTmp), VMSTATE_UINT64(addr, IndAddrPtrTmp), VMSTATE_END_OF_LIST() @@ -278,7 +278,7 @@ const VMStateDescription vmstate_ind_addr_tmp = { const VMStateDescription vmstate_ind_addr = { .name = "s390_ind_addr_tmp", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP(IndAddr*, IndAddrPtrTmp, vmstate_ind_addr_tmp), VMSTATE_END_OF_LIST() } @@ -293,7 +293,7 @@ static const VMStateDescription vmstate_css_img = { .name = "s390_css_img", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Subchannel sets have no relevant state. 
*/ VMSTATE_STRUCT_ARRAY(chpids, CssImage, MAX_CHPID + 1, 0, vmstate_chp_info, ChpInfo), @@ -330,7 +330,7 @@ static const VMStateDescription vmstate_css = { .name = "s390_css", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_QTAILQ_V(pending_crws, ChannelSubSys, 1, vmstate_crw_container, CrwContainer, sibling), VMSTATE_BOOL(sei_pending, ChannelSubSys), diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c index 6891e3cd73b..f9829de9532 100644 --- a/hw/s390x/event-facility.c +++ b/hw/s390x/event-facility.c @@ -367,7 +367,7 @@ static const VMStateDescription vmstate_event_facility_mask64 = { .version_id = 0, .minimum_version_id = 0, .needed = vmstate_event_facility_mask64_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_LOWER], SCLPEventFacility), VMSTATE_END_OF_LIST() } @@ -378,7 +378,7 @@ static const VMStateDescription vmstate_event_facility_mask_length = { .version_id = 0, .minimum_version_id = 0, .needed = vmstate_event_facility_mask_length_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(mask_length, SCLPEventFacility), VMSTATE_END_OF_LIST() } @@ -388,11 +388,11 @@ static const VMStateDescription vmstate_event_facility = { .name = "vmstate-event-facility", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_UPPER], SCLPEventFacility), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_event_facility_mask64, &vmstate_event_facility_mask_length, NULL diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c index 515dcf51b5f..e934bf89d15 100644 --- a/hw/s390x/ipl.c +++ b/hw/s390x/ipl.c @@ -35,7 +35,6 @@ #include "qemu/cutils.h" #include "qemu/option.h" #include "standard-headers/linux/virtio_ids.h" -#include "exec/exec-all.h" #define KERN_IMAGE_START 0x010000UL #define LINUX_MAGIC_ADDR 0x010008UL @@ -60,7 +59,7 @@ static const VMStateDescription vmstate_iplb_extended = { .version_id = 0, .minimum_version_id = 0, .needed = iplb_extended_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(reserved_ext, IplParameterBlock, 4096 - 200), VMSTATE_END_OF_LIST() } @@ -70,13 +69,13 @@ static const VMStateDescription vmstate_iplb = { .name = "ipl/iplb", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(reserved1, IplParameterBlock, 110), VMSTATE_UINT16(devno, IplParameterBlock), VMSTATE_UINT8_ARRAY(reserved2, IplParameterBlock, 88), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_iplb_extended, NULL } @@ -86,7 +85,7 @@ static const VMStateDescription vmstate_ipl = { .name = "ipl", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(compat_start_addr, S390IPLState), VMSTATE_UINT64(compat_bios_start_addr, S390IPLState), VMSTATE_STRUCT(iplb, S390IPLState, 0, vmstate_iplb, IplParameterBlock), @@ -703,7 +702,7 @@ static void s390_ipl_prepare_qipl(S390CPU *cpu) cpu_physical_memory_unmap(addr, len, 1, len); } -int s390_ipl_prepare_pv_header(void) +int s390_ipl_prepare_pv_header(Error **errp) { IplParameterBlock *ipib = s390_ipl_get_iplb_pv(); 
IPLBlockPV *ipib_pv = &ipib->pv; @@ -712,8 +711,7 @@ int s390_ipl_prepare_pv_header(void) cpu_physical_memory_read(ipib_pv->pv_header_addr, hdr, ipib_pv->pv_header_len); - rc = s390_pv_set_sec_parms((uintptr_t)hdr, - ipib_pv->pv_header_len); + rc = s390_pv_set_sec_parms((uintptr_t)hdr, ipib_pv->pv_header_len, errp); g_free(hdr); return rc; } diff --git a/hw/s390x/ipl.h b/hw/s390x/ipl.h index 7fc86e79054..57cd1257697 100644 --- a/hw/s390x/ipl.h +++ b/hw/s390x/ipl.h @@ -107,7 +107,7 @@ typedef union IplParameterBlock IplParameterBlock; int s390_ipl_set_loadparm(uint8_t *loadparm); void s390_ipl_update_diag308(IplParameterBlock *iplb); -int s390_ipl_prepare_pv_header(void); +int s390_ipl_prepare_pv_header(Error **errp); int s390_ipl_pv_unpack(void); void s390_ipl_prepare_cpu(S390CPU *cpu); IplParameterBlock *s390_ipl_get_iplb(void); diff --git a/hw/s390x/s390-ccw.c b/hw/s390x/s390-ccw.c index e2d86d96e72..5261e66724f 100644 --- a/hw/s390x/s390-ccw.c +++ b/hw/s390x/s390-ccw.c @@ -76,7 +76,9 @@ static void s390_ccw_get_dev_info(S390CCWDevice *cdev, Error **errp) { unsigned int cssid, ssid, devid; - char dev_path[PATH_MAX] = {0}, *tmp; + char dev_path[PATH_MAX] = {0}; + g_autofree char *tmp_dir = NULL; + g_autofree char *tmp = NULL; if (!sysfsdev) { error_setg(errp, "No host device provided"); @@ -92,7 +94,8 @@ static void s390_ccw_get_dev_info(S390CCWDevice *cdev, cdev->mdevid = g_path_get_basename(dev_path); - tmp = basename(dirname(dev_path)); + tmp_dir = g_path_get_dirname(dev_path); + tmp = g_path_get_basename(tmp_dir); if (sscanf(tmp, "%2x.%1x.%4x", &cssid, &ssid, &devid) != 3) { error_setg_errno(errp, errno, "Failed to read %s", tmp); return; diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c index 8f5159d85dc..5c535d483e9 100644 --- a/hw/s390x/s390-skeys.c +++ b/hw/s390x/s390-skeys.c @@ -153,7 +153,7 @@ void qmp_dump_skeys(const char *filename, Error **errp) goto out; } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); guest_phys_blocks_init(&guest_phys_blocks); guest_phys_blocks_append(&guest_phys_blocks); diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 2d6b86624f1..b1dcb3857f0 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -229,16 +229,9 @@ static void s390_init_ipl_dev(const char *kernel_filename, static void s390_create_virtio_net(BusState *bus, const char *name) { - int i; - - for (i = 0; i < nb_nics; i++) { - NICInfo *nd = &nd_table[i]; - DeviceState *dev; - - qemu_check_nic_model(nd, "virtio"); + DeviceState *dev; - dev = qdev_new(name); - qdev_set_nic_properties(dev, nd); + while ((dev = qemu_create_nic_device(name, true, "virtio"))) { qdev_realize_and_unref(dev, bus, &error_fatal); } } @@ -319,12 +312,12 @@ static void ccw_init(MachineState *machine) static void s390_cpu_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { + ERRP_GUARD(); MachineState *ms = MACHINE(hotplug_dev); S390CPU *cpu = S390_CPU(dev); - ERRP_GUARD(); g_assert(!ms->possible_cpus->cpus[cpu->env.core_id].cpu); - ms->possible_cpus->cpus[cpu->env.core_id].cpu = OBJECT(dev); + ms->possible_cpus->cpus[cpu->env.core_id].cpu = CPU(dev); if (s390_has_topology()) { s390_topology_setup_cpu(ms, cpu, errp); @@ -399,7 +392,7 @@ static int s390_machine_protect(S390CcwMachineState *ms) } /* Set SE header and unpack */ - rc = s390_ipl_prepare_pv_header(); + rc = s390_ipl_prepare_pv_header(&local_err); if (rc) { goto out_err; } @@ -418,6 +411,9 @@ static int s390_machine_protect(S390CcwMachineState *ms) return rc; out_err: + if 
(local_err) { + error_report_err(local_err); + } s390_machine_unprotect(ms); return rc; } @@ -863,14 +859,26 @@ bool css_migration_enabled(void) } \ type_init(ccw_machine_register_##suffix) +static void ccw_machine_9_0_instance_options(MachineState *machine) +{ +} + +static void ccw_machine_9_0_class_options(MachineClass *mc) +{ +} +DEFINE_CCW_MACHINE(9_0, "9.0", true); + static void ccw_machine_8_2_instance_options(MachineState *machine) { + ccw_machine_9_0_instance_options(machine); } static void ccw_machine_8_2_class_options(MachineClass *mc) { + ccw_machine_9_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); } -DEFINE_CCW_MACHINE(8_2, "8.2", true); +DEFINE_CCW_MACHINE(8_2, "8.2", false); static void ccw_machine_8_1_instance_options(MachineState *machine) { diff --git a/hw/s390x/sclpcpu.c b/hw/s390x/sclpcpu.c index f2b1a4b0371..fa79891f5a4 100644 --- a/hw/s390x/sclpcpu.c +++ b/hw/s390x/sclpcpu.c @@ -73,7 +73,7 @@ static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr, return 1; } -static void cpu_class_init(ObjectClass *oc, void *data) +static void sclp_cpu_class_init(ObjectClass *oc, void *data) { SCLPEventClass *k = SCLP_EVENT_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -94,7 +94,7 @@ static const TypeInfo sclp_cpu_info = { .name = TYPE_SCLP_CPU_HOTPLUG, .parent = TYPE_SCLP_EVENT, .instance_size = sizeof(SCLPEvent), - .class_init = cpu_class_init, + .class_init = sclp_cpu_class_init, .class_size = sizeof(SCLPEventClass), }; diff --git a/hw/s390x/sclpquiesce.c b/hw/s390x/sclpquiesce.c index a641089929b..14936aa94ba 100644 --- a/hw/s390x/sclpquiesce.c +++ b/hw/s390x/sclpquiesce.c @@ -72,7 +72,7 @@ static const VMStateDescription vmstate_sclpquiesce = { .name = TYPE_SCLP_QUIESCE, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(event_pending, SCLPEvent), VMSTATE_END_OF_LIST() } diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index 80453718a33..b4676909dd6 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -87,7 +87,7 @@ const VMStateDescription vmstate_virtio_ccw_dev_tmp = { .name = "s390_virtio_ccw_dev_tmp", .pre_save = virtio_ccw_dev_tmp_pre_save, .post_load = virtio_ccw_dev_tmp_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp), VMSTATE_END_OF_LIST() } @@ -98,7 +98,7 @@ const VMStateDescription vmstate_virtio_ccw_dev = { .version_id = 1, .minimum_version_id = 1, .post_load = virtio_ccw_dev_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice), VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice), VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice), diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c index 51f01579340..42d9d2e4835 100644 --- a/hw/scsi/esp-pci.c +++ b/hw/scsi/esp-pci.c @@ -364,7 +364,7 @@ static const VMStateDescription vmstate_esp_pci_scsi = { .version_id = 2, .minimum_version_id = 1, .pre_save = esp_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIESPState), VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)), VMSTATE_UINT8_V(esp.mig_version_id, PCIESPState, 2), diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c index 9b11d8c5738..5d9b52632e3 100644 --- a/hw/scsi/esp.c +++ b/hw/scsi/esp.c @@ -3,6 +3,7 @@ * * Copyright (c) 2005-2006 Fabrice Bellard * Copyright (c) 2012 Herve 
Poussineau + * Copyright (c) 2023 Mark Cave-Ayland * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -62,14 +63,38 @@ static void esp_lower_irq(ESPState *s) static void esp_raise_drq(ESPState *s) { - qemu_irq_raise(s->irq_data); - trace_esp_raise_drq(); + if (!(s->drq_state)) { + qemu_irq_raise(s->drq_irq); + trace_esp_raise_drq(); + s->drq_state = true; + } } static void esp_lower_drq(ESPState *s) { - qemu_irq_lower(s->irq_data); - trace_esp_lower_drq(); + if (s->drq_state) { + qemu_irq_lower(s->drq_irq); + trace_esp_lower_drq(); + s->drq_state = false; + } +} + +static const char *esp_phase_names[8] = { + "DATA OUT", "DATA IN", "COMMAND", "STATUS", + "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN" +}; + +static void esp_set_phase(ESPState *s, uint8_t phase) +{ + s->rregs[ESP_RSTAT] &= ~7; + s->rregs[ESP_RSTAT] |= phase; + + trace_esp_set_phase(esp_phase_names[phase]); +} + +static uint8_t esp_get_phase(ESPState *s) +{ + return s->rregs[ESP_RSTAT] & 7; } void esp_dma_enable(ESPState *s, int irq, int level) @@ -99,42 +124,117 @@ void esp_request_cancelled(SCSIRequest *req) } } -static void esp_fifo_push(Fifo8 *fifo, uint8_t val) +static void esp_update_drq(ESPState *s) { - if (fifo8_num_used(fifo) == fifo->capacity) { - trace_esp_error_fifo_overrun(); + bool to_device; + + switch (esp_get_phase(s)) { + case STAT_MO: + case STAT_CD: + case STAT_DO: + to_device = true; + break; + + case STAT_DI: + case STAT_ST: + case STAT_MI: + to_device = false; + break; + + default: return; } - fifo8_push(fifo, val); + if (s->dma) { + /* DMA request so update DRQ according to transfer direction */ + if (to_device) { + if (fifo8_num_free(&s->fifo) < 2) { + esp_lower_drq(s); + } else { + esp_raise_drq(s); + } + } else { + if (fifo8_num_used(&s->fifo) < 2) { + esp_lower_drq(s); + } else { + esp_raise_drq(s); + } + } + } else { + /* Not a DMA request */ + esp_lower_drq(s); + } } -static uint8_t esp_fifo_pop(Fifo8 *fifo) +static void esp_fifo_push(ESPState *s, uint8_t val) { - if (fifo8_is_empty(fifo)) { - return 0; + if (fifo8_num_used(&s->fifo) == s->fifo.capacity) { + trace_esp_error_fifo_overrun(); + } else { + fifo8_push(&s->fifo, val); + } + + esp_update_drq(s); +} + +static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len) +{ + fifo8_push_all(&s->fifo, buf, len); + esp_update_drq(s); +} + +static uint8_t esp_fifo_pop(ESPState *s) +{ + uint8_t val; + + if (fifo8_is_empty(&s->fifo)) { + val = 0; + } else { + val = fifo8_pop(&s->fifo); } - return fifo8_pop(fifo); + esp_update_drq(s); + return val; } -static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen) +static uint32_t esp_fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen) { const uint8_t *buf; - uint32_t n; + uint32_t n, n2; + int len; if (maxlen == 0) { return 0; } - buf = fifo8_pop_buf(fifo, maxlen, &n); + len = maxlen; + buf = fifo8_pop_buf(fifo, len, &n); if (dest) { memcpy(dest, buf, n); } + /* Add FIFO wraparound if needed */ + len -= n; + len = MIN(len, fifo8_num_used(fifo)); + if (len) { + buf = fifo8_pop_buf(fifo, len, &n2); + if (dest) { + memcpy(&dest[n], buf, n2); + } + n += n2; + } + return n; } +static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen) +{ + uint32_t len = esp_fifo8_pop_buf(&s->fifo, dest, maxlen); + + esp_update_drq(s); + return len; +} + static uint32_t esp_get_tc(ESPState *s) { uint32_t dmalen; @@ -148,9 +248,15 @@ static uint32_t esp_get_tc(ESPState *s) 
static void esp_set_tc(ESPState *s, uint32_t dmalen) { + uint32_t old_tc = esp_get_tc(s); + s->rregs[ESP_TCLO] = dmalen; s->rregs[ESP_TCMID] = dmalen >> 8; s->rregs[ESP_TCHI] = dmalen >> 16; + + if (old_tc && dmalen == 0) { + s->rregs[ESP_RSTAT] |= STAT_TC; + } } static uint32_t esp_get_stc(ESPState *s) @@ -168,12 +274,7 @@ static uint8_t esp_pdma_read(ESPState *s) { uint8_t val; - if (s->do_cmd) { - val = esp_fifo_pop(&s->cmdfifo); - } else { - val = esp_fifo_pop(&s->fifo); - } - + val = esp_fifo_pop(s); return val; } @@ -181,23 +282,12 @@ static void esp_pdma_write(ESPState *s, uint8_t val) { uint32_t dmalen = esp_get_tc(s); - if (dmalen == 0) { - return; - } + esp_fifo_push(s, val); - if (s->do_cmd) { - esp_fifo_push(&s->cmdfifo, val); - } else { - esp_fifo_push(&s->fifo, val); + if (dmalen && s->drq_state) { + dmalen--; + esp_set_tc(s, dmalen); } - - dmalen--; - esp_set_tc(s, dmalen); -} - -static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb) -{ - s->pdma_cb = cb; } static int esp_select(ESPState *s) @@ -207,75 +297,31 @@ static int esp_select(ESPState *s) target = s->wregs[ESP_WBUSID] & BUSID_DID; s->ti_size = 0; - fifo8_reset(&s->fifo); + s->rregs[ESP_RSEQ] = SEQ_0; + + if (s->current_req) { + /* Started a new command before the old one finished. Cancel it. */ + scsi_req_cancel(s->current_req); + } s->current_dev = scsi_device_find(&s->bus, 0, target, 0); if (!s->current_dev) { /* No such drive */ s->rregs[ESP_RSTAT] = 0; s->rregs[ESP_RINTR] = INTR_DC; - s->rregs[ESP_RSEQ] = SEQ_0; esp_raise_irq(s); return -1; } /* * Note that we deliberately don't raise the IRQ here: this will be done - * either in do_command_phase() for DATA OUT transfers or by the deferred - * IRQ mechanism in esp_transfer_data() for DATA IN transfers + * either in esp_transfer_data() or esp_command_complete() */ - s->rregs[ESP_RINTR] |= INTR_FC; - s->rregs[ESP_RSEQ] = SEQ_CD; return 0; } -static uint32_t get_cmd(ESPState *s, uint32_t maxlen) -{ - uint8_t buf[ESP_CMDFIFO_SZ]; - uint32_t dmalen, n; - int target; - - if (s->current_req) { - /* Started a new command before the old one finished. Cancel it. 
*/ - scsi_req_cancel(s->current_req); - } - - target = s->wregs[ESP_WBUSID] & BUSID_DID; - if (s->dma) { - dmalen = MIN(esp_get_tc(s), maxlen); - if (dmalen == 0) { - return 0; - } - if (s->dma_memory_read) { - s->dma_memory_read(s->dma_opaque, buf, dmalen); - dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen); - fifo8_push_all(&s->cmdfifo, buf, dmalen); - } else { - if (esp_select(s) < 0) { - fifo8_reset(&s->cmdfifo); - return -1; - } - esp_raise_drq(s); - fifo8_reset(&s->cmdfifo); - return 0; - } - } else { - dmalen = MIN(fifo8_num_used(&s->fifo), maxlen); - if (dmalen == 0) { - return 0; - } - n = esp_fifo_pop_buf(&s->fifo, buf, dmalen); - n = MIN(fifo8_num_free(&s->cmdfifo), n); - fifo8_push_all(&s->cmdfifo, buf, n); - } - trace_esp_get_cmd(dmalen, target); - - if (esp_select(s) < 0) { - fifo8_reset(&s->cmdfifo); - return -1; - } - return dmalen; -} +static void esp_do_dma(ESPState *s); +static void esp_do_nodma(ESPState *s); static void do_command_phase(ESPState *s) { @@ -289,30 +335,32 @@ static void do_command_phase(ESPState *s) if (!cmdlen || !s->current_dev) { return; } - esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen); + esp_fifo8_pop_buf(&s->cmdfifo, buf, cmdlen); current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun); + if (!current_lun) { + /* No such drive */ + s->rregs[ESP_RSTAT] = 0; + s->rregs[ESP_RINTR] = INTR_DC; + s->rregs[ESP_RSEQ] = SEQ_0; + esp_raise_irq(s); + return; + } + s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s); datalen = scsi_req_enqueue(s->current_req); s->ti_size = datalen; fifo8_reset(&s->cmdfifo); + s->data_ready = false; if (datalen != 0) { - s->rregs[ESP_RSTAT] = STAT_TC; - s->rregs[ESP_RSEQ] = SEQ_CD; - s->ti_cmd = 0; - esp_set_tc(s, 0); + /* + * Switch to DATA phase but wait until initial data xfer is + * complete before raising the command completion interrupt + */ if (datalen > 0) { - /* - * Switch to DATA IN phase but wait until initial data xfer is - * complete before raising the command completion interrupt - */ - s->data_in_ready = false; - s->rregs[ESP_RSTAT] |= STAT_DI; + esp_set_phase(s, STAT_DI); } else { - s->rregs[ESP_RSTAT] |= STAT_DO; - s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; - esp_raise_irq(s); - esp_lower_drq(s); + esp_set_phase(s, STAT_DO); } scsi_req_continue(s->current_req); return; @@ -322,7 +370,8 @@ static void do_command_phase(ESPState *s) static void do_message_phase(ESPState *s) { if (s->cmdfifo_cdb_offset) { - uint8_t message = esp_fifo_pop(&s->cmdfifo); + uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 
0 : + fifo8_pop(&s->cmdfifo); trace_esp_do_identify(message); s->lun = message & 7; @@ -332,7 +381,7 @@ static void do_message_phase(ESPState *s) /* Ignore extended messages for now */ if (s->cmdfifo_cdb_offset) { int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo)); - esp_fifo_pop_buf(&s->cmdfifo, NULL, len); + esp_fifo8_pop_buf(&s->cmdfifo, NULL, len); s->cmdfifo_cdb_offset = 0; } } @@ -344,472 +393,579 @@ static void do_cmd(ESPState *s) do_command_phase(s); } -static void satn_pdma_cb(ESPState *s) -{ - if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) { - s->cmdfifo_cdb_offset = 1; - s->do_cmd = 0; - do_cmd(s); - } -} - static void handle_satn(ESPState *s) { - int32_t cmdlen; - if (s->dma && !s->dma_enabled) { s->dma_cb = handle_satn; return; } - esp_set_pdma_cb(s, SATN_PDMA_CB); - cmdlen = get_cmd(s, ESP_CMDFIFO_SZ); - if (cmdlen > 0) { - s->cmdfifo_cdb_offset = 1; - s->do_cmd = 0; - do_cmd(s); - } else if (cmdlen == 0) { - s->do_cmd = 1; - /* Target present, but no cmd yet - switch to command phase */ - s->rregs[ESP_RSEQ] = SEQ_CD; - s->rregs[ESP_RSTAT] = STAT_CD; + + if (esp_select(s) < 0) { + return; } -} -static void s_without_satn_pdma_cb(ESPState *s) -{ - if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) { - s->cmdfifo_cdb_offset = 0; - s->do_cmd = 0; - do_cmd(s); + esp_set_phase(s, STAT_MO); + + if (s->dma) { + esp_do_dma(s); + } else { + esp_do_nodma(s); } } static void handle_s_without_atn(ESPState *s) { - int32_t cmdlen; - if (s->dma && !s->dma_enabled) { s->dma_cb = handle_s_without_atn; return; } - esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB); - cmdlen = get_cmd(s, ESP_CMDFIFO_SZ); - if (cmdlen > 0) { - s->cmdfifo_cdb_offset = 0; - s->do_cmd = 0; - do_cmd(s); - } else if (cmdlen == 0) { - s->do_cmd = 1; - /* Target present, but no cmd yet - switch to command phase */ - s->rregs[ESP_RSEQ] = SEQ_CD; - s->rregs[ESP_RSTAT] = STAT_CD; + + if (esp_select(s) < 0) { + return; } -} -static void satn_stop_pdma_cb(ESPState *s) -{ - if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) { - trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo)); - s->do_cmd = 1; - s->cmdfifo_cdb_offset = 1; - s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; - s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; - s->rregs[ESP_RSEQ] = SEQ_CD; - esp_raise_irq(s); + esp_set_phase(s, STAT_CD); + s->cmdfifo_cdb_offset = 0; + + if (s->dma) { + esp_do_dma(s); + } else { + esp_do_nodma(s); } } static void handle_satn_stop(ESPState *s) { - int32_t cmdlen; - if (s->dma && !s->dma_enabled) { s->dma_cb = handle_satn_stop; return; } - esp_set_pdma_cb(s, SATN_STOP_PDMA_CB); - cmdlen = get_cmd(s, 1); - if (cmdlen > 0) { - trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo)); - s->do_cmd = 1; - s->cmdfifo_cdb_offset = 1; - s->rregs[ESP_RSTAT] = STAT_MO; - s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; - s->rregs[ESP_RSEQ] = SEQ_MO; - esp_raise_irq(s); - } else if (cmdlen == 0) { - s->do_cmd = 1; - /* Target present, switch to message out phase */ - s->rregs[ESP_RSEQ] = SEQ_MO; - s->rregs[ESP_RSTAT] = STAT_MO; + + if (esp_select(s) < 0) { + return; + } + + esp_set_phase(s, STAT_MO); + s->cmdfifo_cdb_offset = 0; + + if (s->dma) { + esp_do_dma(s); + } else { + esp_do_nodma(s); } } -static void write_response_pdma_cb(ESPState *s) +static void handle_pad(ESPState *s) { - s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST; - s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; - s->rregs[ESP_RSEQ] = SEQ_CD; - esp_raise_irq(s); + if (s->dma) { + esp_do_dma(s); + } else { + esp_do_nodma(s); + } } static void write_response(ESPState *s) { - uint8_t buf[2]; 
- trace_esp_write_response(s->status); - buf[0] = s->status; - buf[1] = 0; - if (s->dma) { - if (s->dma_memory_write) { - s->dma_memory_write(s->dma_opaque, buf, 2); - s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST; - s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; - s->rregs[ESP_RSEQ] = SEQ_CD; - } else { - esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB); - esp_raise_drq(s); - return; - } + esp_do_dma(s); } else { - fifo8_reset(&s->fifo); - fifo8_push_all(&s->fifo, buf, 2); - s->rregs[ESP_RFLAGS] = 2; + esp_do_nodma(s); } - esp_raise_irq(s); } -static void esp_dma_done(ESPState *s) +static bool esp_cdb_ready(ESPState *s) { - s->rregs[ESP_RSTAT] |= STAT_TC; - s->rregs[ESP_RINTR] |= INTR_BS; - s->rregs[ESP_RFLAGS] = 0; - esp_set_tc(s, 0); - esp_raise_irq(s); + int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset; + const uint8_t *pbuf; + uint32_t n; + int cdblen; + + if (len <= 0) { + return false; + } + + pbuf = fifo8_peek_buf(&s->cmdfifo, len, &n); + if (n < len) { + /* + * In normal use the cmdfifo should never wrap, but include this check + * to prevent a malicious guest from reading past the end of the + * cmdfifo data buffer below + */ + return false; + } + + cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]); + + return cdblen < 0 ? false : (len >= cdblen); } -static void do_dma_pdma_cb(ESPState *s) +static void esp_dma_ti_check(ESPState *s) { - int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO); - int len; - uint32_t n; + if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) { + s->rregs[ESP_RINTR] |= INTR_BS; + esp_raise_irq(s); + } +} - if (s->do_cmd) { - /* Ensure we have received complete command after SATN and stop */ - if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) { - return; - } +static void esp_do_dma(ESPState *s) +{ + uint32_t len, cmdlen; + uint8_t buf[ESP_CMDFIFO_SZ]; - s->ti_size = 0; - if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) { - /* No command received */ - if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) { - return; - } + len = esp_get_tc(s); - /* Command has been received */ - s->do_cmd = 0; - do_cmd(s); + switch (esp_get_phase(s)) { + case STAT_MO: + if (s->dma_memory_read) { + len = MIN(len, fifo8_num_free(&s->cmdfifo)); + s->dma_memory_read(s->dma_opaque, buf, len); + esp_set_tc(s, esp_get_tc(s) - len); } else { - /* - * Extra message out bytes received: update cmdfifo_cdb_offset - * and then switch to command phase - */ - s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo); - s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; - s->rregs[ESP_RSEQ] = SEQ_CD; - s->rregs[ESP_RINTR] |= INTR_BS; - esp_raise_irq(s); + len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo)); + len = MIN(fifo8_num_free(&s->cmdfifo), len); } - return; - } - if (!s->current_req) { - return; - } + fifo8_push_all(&s->cmdfifo, buf, len); + s->cmdfifo_cdb_offset += len; - if (to_device) { - /* Copy FIFO data to device */ - len = MIN(s->async_len, ESP_FIFO_SZ); - len = MIN(len, fifo8_num_used(&s->fifo)); - n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len); - s->async_buf += n; - s->async_len -= n; - s->ti_size += n; - - if (n < len) { - /* Unaligned accesses can cause FIFO wraparound */ - len = len - n; - n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len); - s->async_buf += n; - s->async_len -= n; - s->ti_size += n; - } + switch (s->rregs[ESP_CMD]) { + case CMD_SELATN | CMD_DMA: + if (fifo8_num_used(&s->cmdfifo) >= 1) { + /* First byte received, switch to command phase */ + esp_set_phase(s, STAT_CD); + s->rregs[ESP_RSEQ] = SEQ_CD; + s->cmdfifo_cdb_offset = 1; - if (s->async_len == 
0) { - scsi_req_continue(s->current_req); - return; - } - - if (esp_get_tc(s) == 0) { - esp_lower_drq(s); - esp_dma_done(s); - } + if (fifo8_num_used(&s->cmdfifo) > 1) { + /* Process any additional command phase data */ + esp_do_dma(s); + } + } + break; - return; - } else { - if (s->async_len == 0) { - /* Defer until the scsi layer has completed */ - scsi_req_continue(s->current_req); - s->data_in_ready = false; - return; - } + case CMD_SELATNS | CMD_DMA: + if (fifo8_num_used(&s->cmdfifo) == 1) { + /* First byte received, stop in message out phase */ + s->rregs[ESP_RSEQ] = SEQ_MO; + s->cmdfifo_cdb_offset = 1; - if (esp_get_tc(s) != 0) { - /* Copy device data to FIFO */ - len = MIN(s->async_len, esp_get_tc(s)); - len = MIN(len, fifo8_num_free(&s->fifo)); - fifo8_push_all(&s->fifo, s->async_buf, len); - s->async_buf += len; - s->async_len -= len; - s->ti_size -= len; - esp_set_tc(s, esp_get_tc(s) - len); + /* Raise command completion interrupt */ + s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; + esp_raise_irq(s); + } + break; + case CMD_TI | CMD_DMA: + /* ATN remains asserted until TC == 0 */ if (esp_get_tc(s) == 0) { - /* Indicate transfer to FIFO is complete */ - s->rregs[ESP_RSTAT] |= STAT_TC; + esp_set_phase(s, STAT_CD); + s->rregs[ESP_CMD] = 0; + s->rregs[ESP_RINTR] |= INTR_BS; + esp_raise_irq(s); } - return; + break; } + break; - /* Partially filled a scsi buffer. Complete immediately. */ - esp_lower_drq(s); - esp_dma_done(s); - } -} - -static void esp_do_dma(ESPState *s) -{ - uint32_t len, cmdlen; - int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO); - uint8_t buf[ESP_CMDFIFO_SZ]; - - len = esp_get_tc(s); - if (s->do_cmd) { - /* - * handle_ti_cmd() case: esp_do_dma() is called only from - * handle_ti_cmd() with do_cmd != NULL (see the assert()) - */ + case STAT_CD: cmdlen = fifo8_num_used(&s->cmdfifo); trace_esp_do_dma(cmdlen, len); if (s->dma_memory_read) { len = MIN(len, fifo8_num_free(&s->cmdfifo)); s->dma_memory_read(s->dma_opaque, buf, len); fifo8_push_all(&s->cmdfifo, buf, len); + esp_set_tc(s, esp_get_tc(s) - len); } else { - esp_set_pdma_cb(s, DO_DMA_PDMA_CB); - esp_raise_drq(s); - return; + len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo)); + len = MIN(fifo8_num_free(&s->cmdfifo), len); + fifo8_push_all(&s->cmdfifo, buf, len); } trace_esp_handle_ti_cmd(cmdlen); s->ti_size = 0; - if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) { - /* No command received */ - if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) { - return; - } - + if (esp_get_tc(s) == 0) { /* Command has been received */ - s->do_cmd = 0; do_cmd(s); - } else { - /* - * Extra message out bytes received: update cmdfifo_cdb_offset - * and then switch to command phase - */ - s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo); - s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; - s->rregs[ESP_RSEQ] = SEQ_CD; - s->rregs[ESP_RINTR] |= INTR_BS; - esp_raise_irq(s); } - return; - } - if (!s->current_req) { - return; - } - if (s->async_len == 0) { - /* Defer until data is available. 
*/ - return; - } - if (len > s->async_len) { - len = s->async_len; - } - if (to_device) { - if (s->dma_memory_read) { - s->dma_memory_read(s->dma_opaque, s->async_buf, len); - } else { - esp_set_pdma_cb(s, DO_DMA_PDMA_CB); - esp_raise_drq(s); + break; + + case STAT_DO: + if (!s->current_req) { return; } - } else { - if (s->dma_memory_write) { - s->dma_memory_write(s->dma_opaque, s->async_buf, len); - } else { - /* Adjust TC for any leftover data in the FIFO */ - if (!fifo8_is_empty(&s->fifo)) { - esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo)); + if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) { + /* Defer until data is available. */ + return; + } + if (len > s->async_len) { + len = s->async_len; + } + + switch (s->rregs[ESP_CMD]) { + case CMD_TI | CMD_DMA: + if (s->dma_memory_read) { + s->dma_memory_read(s->dma_opaque, s->async_buf, len); + esp_set_tc(s, esp_get_tc(s) - len); + } else { + /* Copy FIFO data to device */ + len = MIN(s->async_len, ESP_FIFO_SZ); + len = MIN(len, fifo8_num_used(&s->fifo)); + len = esp_fifo_pop_buf(s, s->async_buf, len); + } + + s->async_buf += len; + s->async_len -= len; + s->ti_size += len; + break; + + case CMD_PAD | CMD_DMA: + /* Copy TC zero bytes into the incoming stream */ + if (!s->dma_memory_read) { + len = MIN(s->async_len, ESP_FIFO_SZ); + len = MIN(len, fifo8_num_free(&s->fifo)); + } + + memset(s->async_buf, 0, len); + + s->async_buf += len; + s->async_len -= len; + s->ti_size += len; + break; + } + + if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) { + /* Defer until the scsi layer has completed */ + scsi_req_continue(s->current_req); + return; + } + + esp_dma_ti_check(s); + break; + + case STAT_DI: + if (!s->current_req) { + return; + } + if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) { + /* Defer until data is available. 
*/ + return; + } + if (len > s->async_len) { + len = s->async_len; + } + + switch (s->rregs[ESP_CMD]) { + case CMD_TI | CMD_DMA: + if (s->dma_memory_write) { + s->dma_memory_write(s->dma_opaque, s->async_buf, len); + } else { + /* Copy device data to FIFO */ + len = MIN(len, fifo8_num_free(&s->fifo)); + esp_fifo_push_buf(s, s->async_buf, len); } - /* Copy device data to FIFO */ - len = MIN(len, fifo8_num_free(&s->fifo)); - fifo8_push_all(&s->fifo, s->async_buf, len); s->async_buf += len; s->async_len -= len; s->ti_size -= len; + esp_set_tc(s, esp_get_tc(s) - len); + break; - /* - * MacOS toolbox uses a TI length of 16 bytes for all commands, so - * commands shorter than this must be padded accordingly - */ - if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) { - while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) { - esp_fifo_push(&s->fifo, 0); - len++; - } + case CMD_PAD | CMD_DMA: + /* Drop TC bytes from the incoming stream */ + if (!s->dma_memory_write) { + len = MIN(len, fifo8_num_free(&s->fifo)); } + s->async_buf += len; + s->async_len -= len; + s->ti_size -= len; esp_set_tc(s, esp_get_tc(s) - len); - esp_set_pdma_cb(s, DO_DMA_PDMA_CB); - esp_raise_drq(s); + break; + } - /* Indicate transfer to FIFO is complete */ - s->rregs[ESP_RSTAT] |= STAT_TC; + if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) { + /* If the guest underflows TC then terminate SCSI request */ + scsi_req_continue(s->current_req); + return; + } + + if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) { + /* Defer until the scsi layer has completed */ + scsi_req_continue(s->current_req); return; } + + esp_dma_ti_check(s); + break; + + case STAT_ST: + switch (s->rregs[ESP_CMD]) { + case CMD_ICCS | CMD_DMA: + len = MIN(len, 1); + + if (len) { + buf[0] = s->status; + + if (s->dma_memory_write) { + s->dma_memory_write(s->dma_opaque, buf, len); + } else { + esp_fifo_push_buf(s, buf, len); + } + + esp_set_tc(s, esp_get_tc(s) - len); + esp_set_phase(s, STAT_MI); + + if (esp_get_tc(s) > 0) { + /* Process any message in phase data */ + esp_do_dma(s); + } + } + break; + + default: + /* Consume remaining data if the guest underflows TC */ + if (fifo8_num_used(&s->fifo) < 2) { + s->rregs[ESP_RINTR] |= INTR_BS; + esp_raise_irq(s); + } + break; + } + break; + + case STAT_MI: + switch (s->rregs[ESP_CMD]) { + case CMD_ICCS | CMD_DMA: + len = MIN(len, 1); + + if (len) { + buf[0] = 0; + + if (s->dma_memory_write) { + s->dma_memory_write(s->dma_opaque, buf, len); + } else { + esp_fifo_push_buf(s, buf, len); + } + + esp_set_tc(s, esp_get_tc(s) - len); + + /* Raise end of command interrupt */ + s->rregs[ESP_RINTR] |= INTR_FC; + esp_raise_irq(s); + } + break; + } + break; + } +} + +static void esp_nodma_ti_dataout(ESPState *s) +{ + int len; + + if (!s->current_req) { + return; + } + if (s->async_len == 0) { + /* Defer until data is available. */ + return; } - esp_set_tc(s, esp_get_tc(s) - len); + len = MIN(s->async_len, ESP_FIFO_SZ); + len = MIN(len, fifo8_num_used(&s->fifo)); + esp_fifo_pop_buf(s, s->async_buf, len); s->async_buf += len; s->async_len -= len; - if (to_device) { - s->ti_size += len; - } else { - s->ti_size -= len; - } + s->ti_size += len; + if (s->async_len == 0) { scsi_req_continue(s->current_req); - /* - * If there is still data to be read from the device then - * complete the DMA operation immediately. Otherwise defer - * until the scsi layer has completed. - */ - if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) { - return; - } + return; } - /* Partially filled a scsi buffer. Complete immediately. 
*/ - esp_dma_done(s); - esp_lower_drq(s); + s->rregs[ESP_RINTR] |= INTR_BS; + esp_raise_irq(s); } static void esp_do_nodma(ESPState *s) { - int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO); + uint8_t buf[ESP_FIFO_SZ]; uint32_t cmdlen; int len; - if (s->do_cmd) { - cmdlen = fifo8_num_used(&s->cmdfifo); - trace_esp_handle_ti_cmd(cmdlen); - s->ti_size = 0; - if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) { - /* No command received */ - if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) { - return; + switch (esp_get_phase(s)) { + case STAT_MO: + switch (s->rregs[ESP_CMD]) { + case CMD_SELATN: + /* Copy FIFO into cmdfifo */ + len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo)); + len = MIN(fifo8_num_free(&s->cmdfifo), len); + fifo8_push_all(&s->cmdfifo, buf, len); + + if (fifo8_num_used(&s->cmdfifo) >= 1) { + /* First byte received, switch to command phase */ + esp_set_phase(s, STAT_CD); + s->rregs[ESP_RSEQ] = SEQ_CD; + s->cmdfifo_cdb_offset = 1; + + if (fifo8_num_used(&s->cmdfifo) > 1) { + /* Process any additional command phase data */ + esp_do_nodma(s); + } } + break; - /* Command has been received */ - s->do_cmd = 0; - do_cmd(s); - } else { - /* - * Extra message out bytes received: update cmdfifo_cdb_offset - * and then switch to command phase - */ + case CMD_SELATNS: + /* Copy one byte from FIFO into cmdfifo */ + len = esp_fifo_pop_buf(s, buf, + MIN(fifo8_num_used(&s->fifo), 1)); + len = MIN(fifo8_num_free(&s->cmdfifo), len); + fifo8_push_all(&s->cmdfifo, buf, len); + + if (fifo8_num_used(&s->cmdfifo) >= 1) { + /* First byte received, stop in message out phase */ + s->rregs[ESP_RSEQ] = SEQ_MO; + s->cmdfifo_cdb_offset = 1; + + /* Raise command completion interrupt */ + s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; + esp_raise_irq(s); + } + break; + + case CMD_TI: + /* Copy FIFO into cmdfifo */ + len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo)); + len = MIN(fifo8_num_free(&s->cmdfifo), len); + fifo8_push_all(&s->cmdfifo, buf, len); + + /* ATN remains asserted until FIFO empty */ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo); - s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; - s->rregs[ESP_RSEQ] = SEQ_CD; + esp_set_phase(s, STAT_CD); + s->rregs[ESP_CMD] = 0; s->rregs[ESP_RINTR] |= INTR_BS; esp_raise_irq(s); + break; } - return; - } + break; - if (!s->current_req) { - return; - } + case STAT_CD: + switch (s->rregs[ESP_CMD]) { + case CMD_TI: + /* Copy FIFO into cmdfifo */ + len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo)); + len = MIN(fifo8_num_free(&s->cmdfifo), len); + fifo8_push_all(&s->cmdfifo, buf, len); - if (s->async_len == 0) { - /* Defer until data is available. */ - return; - } + cmdlen = fifo8_num_used(&s->cmdfifo); + trace_esp_handle_ti_cmd(cmdlen); - if (to_device) { - len = MIN(s->async_len, ESP_FIFO_SZ); - len = MIN(len, fifo8_num_used(&s->fifo)); - esp_fifo_pop_buf(&s->fifo, s->async_buf, len); - s->async_buf += len; - s->async_len -= len; - s->ti_size += len; - } else { + /* CDB may be transferred in one or more TI commands */ + if (esp_cdb_ready(s)) { + /* Command has been received */ + do_cmd(s); + } else { + /* + * If data was transferred from the FIFO then raise bus + * service interrupt to indicate transfer complete. Otherwise + * defer until the next FIFO write. 
+ */ + if (len) { + /* Raise interrupt to indicate transfer complete */ + s->rregs[ESP_RINTR] |= INTR_BS; + esp_raise_irq(s); + } + } + break; + + case CMD_SEL | CMD_DMA: + case CMD_SELATN | CMD_DMA: + /* Copy FIFO into cmdfifo */ + len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo)); + len = MIN(fifo8_num_free(&s->cmdfifo), len); + fifo8_push_all(&s->cmdfifo, buf, len); + + /* Handle when DMA transfer is terminated by non-DMA FIFO write */ + if (esp_cdb_ready(s)) { + /* Command has been received */ + do_cmd(s); + } + break; + + case CMD_SEL: + case CMD_SELATN: + /* FIFO already contain entire CDB: copy to cmdfifo and execute */ + len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo)); + len = MIN(fifo8_num_free(&s->cmdfifo), len); + fifo8_push_all(&s->cmdfifo, buf, len); + + do_cmd(s); + break; + } + break; + + case STAT_DO: + /* Accumulate data in FIFO until non-DMA TI is executed */ + break; + + case STAT_DI: + if (!s->current_req) { + return; + } + if (s->async_len == 0) { + /* Defer until data is available. */ + return; + } if (fifo8_is_empty(&s->fifo)) { - fifo8_push(&s->fifo, s->async_buf[0]); + esp_fifo_push(s, s->async_buf[0]); s->async_buf++; s->async_len--; s->ti_size--; } - } - if (s->async_len == 0) { - scsi_req_continue(s->current_req); - return; - } + if (s->async_len == 0) { + scsi_req_continue(s->current_req); + return; + } - s->rregs[ESP_RINTR] |= INTR_BS; - esp_raise_irq(s); -} + /* If preloading the FIFO, defer until TI command issued */ + if (s->rregs[ESP_CMD] != CMD_TI) { + return; + } -static void esp_pdma_cb(ESPState *s) -{ - switch (s->pdma_cb) { - case SATN_PDMA_CB: - satn_pdma_cb(s); - break; - case S_WITHOUT_SATN_PDMA_CB: - s_without_satn_pdma_cb(s); - break; - case SATN_STOP_PDMA_CB: - satn_stop_pdma_cb(s); + s->rregs[ESP_RINTR] |= INTR_BS; + esp_raise_irq(s); break; - case WRITE_RESPONSE_PDMA_CB: - write_response_pdma_cb(s); + + case STAT_ST: + switch (s->rregs[ESP_CMD]) { + case CMD_ICCS: + esp_fifo_push(s, s->status); + esp_set_phase(s, STAT_MI); + + /* Process any message in phase data */ + esp_do_nodma(s); + break; + } break; - case DO_DMA_PDMA_CB: - do_dma_pdma_cb(s); + + case STAT_MI: + switch (s->rregs[ESP_CMD]) { + case CMD_ICCS: + esp_fifo_push(s, 0); + + /* Raise end of command interrupt */ + s->rregs[ESP_RINTR] |= INTR_FC; + esp_raise_irq(s); + break; + } break; - default: - g_assert_not_reached(); } } void esp_command_complete(SCSIRequest *req, size_t resid) { ESPState *s = req->hba_private; - int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO); + int to_device = (esp_get_phase(s) == STAT_DO); trace_esp_command_complete(); @@ -821,7 +977,6 @@ void esp_command_complete(SCSIRequest *req, size_t resid) if (s->ti_size != 0) { trace_esp_command_complete_unexpected(); } - s->ti_size = 0; } s->async_len = 0; @@ -831,15 +986,35 @@ void esp_command_complete(SCSIRequest *req, size_t resid) s->status = req->status; /* - * If the transfer is finished, switch to status phase. For non-DMA - * transfers from the target the last byte is still in the FIFO + * Switch to status phase. 
For non-DMA transfers from the target the last + * byte is still in the FIFO */ - if (s->ti_size == 0) { - s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST; - esp_dma_done(s); - esp_lower_drq(s); + s->ti_size = 0; + + switch (s->rregs[ESP_CMD]) { + case CMD_SEL | CMD_DMA: + case CMD_SEL: + case CMD_SELATN | CMD_DMA: + case CMD_SELATN: + /* + * No data phase for sequencer command so raise deferred bus service + * and function complete interrupt + */ + s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; + s->rregs[ESP_RSEQ] = SEQ_CD; + break; + + case CMD_TI | CMD_DMA: + case CMD_TI: + s->rregs[ESP_CMD] = 0; + break; } + /* Raise bus service interrupt to indicate change to STATUS phase */ + esp_set_phase(s, STAT_ST); + s->rregs[ESP_RINTR] |= INTR_BS; + esp_raise_irq(s); + if (s->current_req) { scsi_req_unref(s->current_req); s->current_req = NULL; @@ -850,48 +1025,66 @@ void esp_command_complete(SCSIRequest *req, size_t resid) void esp_transfer_data(SCSIRequest *req, uint32_t len) { ESPState *s = req->hba_private; - int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO); uint32_t dmalen = esp_get_tc(s); - assert(!s->do_cmd); trace_esp_transfer_data(dmalen, s->ti_size); s->async_len = len; s->async_buf = scsi_req_get_buf(req); - if (!to_device && !s->data_in_ready) { - /* - * Initial incoming data xfer is complete so raise command - * completion interrupt - */ - s->data_in_ready = true; - s->rregs[ESP_RSTAT] |= STAT_TC; - s->rregs[ESP_RINTR] |= INTR_BS; - esp_raise_irq(s); - } + if (!s->data_ready) { + s->data_ready = true; - if (s->ti_cmd == 0) { - /* - * Always perform the initial transfer upon reception of the next TI - * command to ensure the DMA/non-DMA status of the command is correct. - * It is not possible to use s->dma directly in the section below as - * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the - * async data transfer is delayed then s->dma is set incorrectly. - */ - return; - } + switch (s->rregs[ESP_CMD]) { + case CMD_SEL | CMD_DMA: + case CMD_SEL: + case CMD_SELATN | CMD_DMA: + case CMD_SELATN: + /* + * Initial incoming data xfer is complete for sequencer command + * so raise deferred bus service and function complete interrupt + */ + s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; + s->rregs[ESP_RSEQ] = SEQ_CD; + break; + + case CMD_SELATNS | CMD_DMA: + case CMD_SELATNS: + /* + * Initial incoming data xfer is complete so raise command + * completion interrupt + */ + s->rregs[ESP_RINTR] |= INTR_BS; + s->rregs[ESP_RSEQ] = SEQ_MO; + break; - if (s->ti_cmd == (CMD_TI | CMD_DMA)) { - if (dmalen) { - esp_do_dma(s); - } else if (s->ti_size <= 0) { + case CMD_TI | CMD_DMA: + case CMD_TI: /* - * If this was the last part of a DMA transfer then the - * completion interrupt is deferred to here. + * Bus service interrupt raised because of initial change to + * DATA phase */ - esp_dma_done(s); - esp_lower_drq(s); + s->rregs[ESP_CMD] = 0; + s->rregs[ESP_RINTR] |= INTR_BS; + break; } - } else if (s->ti_cmd == CMD_TI) { + + esp_raise_irq(s); + } + + /* + * Always perform the initial transfer upon reception of the next TI + * command to ensure the DMA/non-DMA status of the command is correct. + * It is not possible to use s->dma directly in the section below as + * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the + * async data transfer is delayed then s->dma is set incorrectly. 
+ */ + + if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) { + /* When the SCSI layer returns more data, raise deferred INTR_BS */ + esp_dma_ti_check(s); + + esp_do_dma(s); + } else if (s->rregs[ESP_CMD] == CMD_TI) { esp_do_nodma(s); } } @@ -905,15 +1098,17 @@ static void handle_ti(ESPState *s) return; } - s->ti_cmd = s->rregs[ESP_CMD]; if (s->dma) { dmalen = esp_get_tc(s); trace_esp_handle_ti(dmalen); - s->rregs[ESP_RSTAT] &= ~STAT_TC; esp_do_dma(s); } else { trace_esp_handle_ti(s->ti_size); esp_do_nodma(s); + + if (esp_get_phase(s) == STAT_DO) { + esp_nodma_ti_dataout(s); + } } } @@ -927,7 +1122,6 @@ void esp_hard_reset(ESPState *s) fifo8_reset(&s->fifo); fifo8_reset(&s->cmdfifo); s->dma = 0; - s->do_cmd = 0; s->dma_cb = NULL; s->rregs[ESP_CFG1] = 7; @@ -936,7 +1130,7 @@ void esp_hard_reset(ESPState *s) static void esp_soft_reset(ESPState *s) { qemu_irq_lower(s->irq); - qemu_irq_lower(s->irq_data); + qemu_irq_lower(s->drq_irq); esp_hard_reset(s); } @@ -952,31 +1146,100 @@ static void parent_esp_reset(ESPState *s, int irq, int level) } } +static void esp_run_cmd(ESPState *s) +{ + uint8_t cmd = s->rregs[ESP_CMD]; + + if (cmd & CMD_DMA) { + s->dma = 1; + /* Reload DMA counter. */ + if (esp_get_stc(s) == 0) { + esp_set_tc(s, 0x10000); + } else { + esp_set_tc(s, esp_get_stc(s)); + } + } else { + s->dma = 0; + } + switch (cmd & CMD_CMD) { + case CMD_NOP: + trace_esp_mem_writeb_cmd_nop(cmd); + break; + case CMD_FLUSH: + trace_esp_mem_writeb_cmd_flush(cmd); + fifo8_reset(&s->fifo); + break; + case CMD_RESET: + trace_esp_mem_writeb_cmd_reset(cmd); + esp_soft_reset(s); + break; + case CMD_BUSRESET: + trace_esp_mem_writeb_cmd_bus_reset(cmd); + esp_bus_reset(s); + if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) { + s->rregs[ESP_RINTR] |= INTR_RST; + esp_raise_irq(s); + } + break; + case CMD_TI: + trace_esp_mem_writeb_cmd_ti(cmd); + handle_ti(s); + break; + case CMD_ICCS: + trace_esp_mem_writeb_cmd_iccs(cmd); + write_response(s); + break; + case CMD_MSGACC: + trace_esp_mem_writeb_cmd_msgacc(cmd); + s->rregs[ESP_RINTR] |= INTR_DC; + s->rregs[ESP_RSEQ] = 0; + s->rregs[ESP_RFLAGS] = 0; + esp_raise_irq(s); + break; + case CMD_PAD: + trace_esp_mem_writeb_cmd_pad(cmd); + handle_pad(s); + break; + case CMD_SATN: + trace_esp_mem_writeb_cmd_satn(cmd); + break; + case CMD_RSTATN: + trace_esp_mem_writeb_cmd_rstatn(cmd); + break; + case CMD_SEL: + trace_esp_mem_writeb_cmd_sel(cmd); + handle_s_without_atn(s); + break; + case CMD_SELATN: + trace_esp_mem_writeb_cmd_selatn(cmd); + handle_satn(s); + break; + case CMD_SELATNS: + trace_esp_mem_writeb_cmd_selatns(cmd); + handle_satn_stop(s); + break; + case CMD_ENSEL: + trace_esp_mem_writeb_cmd_ensel(cmd); + s->rregs[ESP_RINTR] = 0; + break; + case CMD_DISSEL: + trace_esp_mem_writeb_cmd_dissel(cmd); + s->rregs[ESP_RINTR] = 0; + esp_raise_irq(s); + break; + default: + trace_esp_error_unhandled_command(cmd); + break; + } +} + uint64_t esp_reg_read(ESPState *s, uint32_t saddr) { uint32_t val; switch (saddr) { case ESP_FIFO: - if (s->dma_memory_read && s->dma_memory_write && - (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) { - /* Data out. 
*/ - qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n"); - s->rregs[ESP_FIFO] = 0; - } else { - if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) { - if (s->ti_size) { - esp_do_nodma(s); - } else { - /* - * The last byte of a non-DMA transfer has been read out - * of the FIFO so switch to status phase - */ - s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST; - } - } - s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo); - } + s->rregs[ESP_FIFO] = esp_fifo_pop(s); val = s->rregs[ESP_FIFO]; break; case ESP_RINTR: @@ -986,7 +1249,8 @@ uint64_t esp_reg_read(ESPState *s, uint32_t saddr) */ val = s->rregs[ESP_RINTR]; s->rregs[ESP_RINTR] = 0; - s->rregs[ESP_RSTAT] &= ~STAT_TC; + esp_lower_irq(s); + s->rregs[ESP_RSTAT] &= STAT_TC | 7; /* * According to the datasheet ESP_RSEQ should be cleared, but as the * emulation currently defers information transfers to the next TI @@ -996,7 +1260,6 @@ uint64_t esp_reg_read(ESPState *s, uint32_t saddr) * * s->rregs[ESP_RSEQ] = SEQ_0; */ - esp_lower_irq(s); break; case ESP_TCHI: /* Return the unique id if the value has never been written */ @@ -1031,108 +1294,14 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val) s->rregs[ESP_RSTAT] &= ~STAT_TC; break; case ESP_FIFO: - if (s->do_cmd) { - esp_fifo_push(&s->cmdfifo, val); - - /* - * If any unexpected message out/command phase data is - * transferred using non-DMA, raise the interrupt - */ - if (s->rregs[ESP_CMD] == CMD_TI) { - s->rregs[ESP_RINTR] |= INTR_BS; - esp_raise_irq(s); - } - } else { - esp_fifo_push(&s->fifo, val); + if (!fifo8_is_full(&s->fifo)) { + esp_fifo_push(s, val); } + esp_do_nodma(s); break; case ESP_CMD: s->rregs[saddr] = val; - if (val & CMD_DMA) { - s->dma = 1; - /* Reload DMA counter. */ - if (esp_get_stc(s) == 0) { - esp_set_tc(s, 0x10000); - } else { - esp_set_tc(s, esp_get_stc(s)); - } - } else { - s->dma = 0; - } - switch (val & CMD_CMD) { - case CMD_NOP: - trace_esp_mem_writeb_cmd_nop(val); - break; - case CMD_FLUSH: - trace_esp_mem_writeb_cmd_flush(val); - fifo8_reset(&s->fifo); - break; - case CMD_RESET: - trace_esp_mem_writeb_cmd_reset(val); - esp_soft_reset(s); - break; - case CMD_BUSRESET: - trace_esp_mem_writeb_cmd_bus_reset(val); - esp_bus_reset(s); - if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) { - s->rregs[ESP_RINTR] |= INTR_RST; - esp_raise_irq(s); - } - break; - case CMD_TI: - trace_esp_mem_writeb_cmd_ti(val); - handle_ti(s); - break; - case CMD_ICCS: - trace_esp_mem_writeb_cmd_iccs(val); - write_response(s); - s->rregs[ESP_RINTR] |= INTR_FC; - s->rregs[ESP_RSTAT] |= STAT_MI; - break; - case CMD_MSGACC: - trace_esp_mem_writeb_cmd_msgacc(val); - s->rregs[ESP_RINTR] |= INTR_DC; - s->rregs[ESP_RSEQ] = 0; - s->rregs[ESP_RFLAGS] = 0; - esp_raise_irq(s); - break; - case CMD_PAD: - trace_esp_mem_writeb_cmd_pad(val); - s->rregs[ESP_RSTAT] = STAT_TC; - s->rregs[ESP_RINTR] |= INTR_FC; - s->rregs[ESP_RSEQ] = 0; - break; - case CMD_SATN: - trace_esp_mem_writeb_cmd_satn(val); - break; - case CMD_RSTATN: - trace_esp_mem_writeb_cmd_rstatn(val); - break; - case CMD_SEL: - trace_esp_mem_writeb_cmd_sel(val); - handle_s_without_atn(s); - break; - case CMD_SELATN: - trace_esp_mem_writeb_cmd_selatn(val); - handle_satn(s); - break; - case CMD_SELATNS: - trace_esp_mem_writeb_cmd_selatns(val); - handle_satn_stop(s); - break; - case CMD_ENSEL: - trace_esp_mem_writeb_cmd_ensel(val); - s->rregs[ESP_RINTR] = 0; - break; - case CMD_DISSEL: - trace_esp_mem_writeb_cmd_dissel(val); - s->rregs[ESP_RINTR] = 0; - esp_raise_irq(s); - break; - default: - trace_esp_error_unhandled_command(val); - break; - } + 
esp_run_cmd(s); break; case ESP_WBUSID ... ESP_WSYNO: break; @@ -1181,6 +1350,14 @@ static bool esp_is_version_6(void *opaque, int version_id) return version_id >= 6; } +static bool esp_is_between_version_5_and_6(void *opaque, int version_id) +{ + ESPState *s = ESP(opaque); + + version_id = MIN(version_id, s->mig_version_id); + return version_id >= 5 && version_id <= 6; +} + int esp_pre_save(void *opaque) { ESPState *s = ESP(object_resolve_path_component( @@ -1216,39 +1393,12 @@ static int esp_post_load(void *opaque, int version_id) return 0; } -/* - * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the - * guest CPU to perform the transfers between the SCSI bus and memory - * itself. This is indicated by the dma_memory_read and dma_memory_write - * functions being NULL (in contrast to the ESP PCI device) whilst - * dma_enabled is still set. - */ - -static bool esp_pdma_needed(void *opaque) -{ - ESPState *s = ESP(opaque); - - return s->dma_memory_read == NULL && s->dma_memory_write == NULL && - s->dma_enabled; -} - -static const VMStateDescription vmstate_esp_pdma = { - .name = "esp/pdma", - .version_id = 0, - .minimum_version_id = 0, - .needed = esp_pdma_needed, - .fields = (VMStateField[]) { - VMSTATE_UINT8(pdma_cb, ESPState), - VMSTATE_END_OF_LIST() - } -}; - const VMStateDescription vmstate_esp = { .name = "esp", - .version_id = 6, + .version_id = 7, .minimum_version_id = 3, .post_load = esp_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(rregs, ESPState), VMSTATE_BUFFER(wregs, ESPState), VMSTATE_INT32(ti_size, ESPState), @@ -1269,18 +1419,16 @@ const VMStateDescription vmstate_esp = { VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5), VMSTATE_UINT32(do_cmd, ESPState), VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5), - VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5), + VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5), VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5), VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5), VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5), - VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5), + VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState, + esp_is_between_version_5_and_6), VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6), + VMSTATE_BOOL(drq_state, ESPState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { - &vmstate_esp_pdma, - NULL - } }; static void sysbus_esp_mem_write(void *opaque, hwaddr addr, @@ -1329,7 +1477,7 @@ static void sysbus_esp_pdma_write(void *opaque, hwaddr addr, esp_pdma_write(s, val); break; } - esp_pdma_cb(s); + esp_do_dma(s); } static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr, @@ -1350,9 +1498,7 @@ static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr, val = (val << 8) | esp_pdma_read(s); break; } - if (fifo8_num_used(&s->fifo) < 2) { - esp_pdma_cb(s); - } + esp_do_dma(s); return val; } @@ -1412,7 +1558,7 @@ static void sysbus_esp_realize(DeviceState *dev, Error **errp) } sysbus_init_irq(sbd, &s->irq); - sysbus_init_irq(sbd, &s->irq_data); + sysbus_init_irq(sbd, &s->drq_irq); assert(sysbus->it_shift != -1); s->chip_id = TCHI_FAS100A; @@ -1448,7 +1594,7 @@ static const VMStateDescription vmstate_sysbus_esp_scsi = { .version_id = 2, .minimum_version_id = 1, .pre_save = esp_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2), VMSTATE_STRUCT(esp, SysBusESPState, 
0, vmstate_esp, ESPState), VMSTATE_END_OF_LIST() @@ -1465,14 +1611,6 @@ static void sysbus_esp_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } -static const TypeInfo sysbus_esp_info = { - .name = TYPE_SYSBUS_ESP, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_init = sysbus_esp_init, - .instance_size = sizeof(SysBusESPState), - .class_init = sysbus_esp_class_init, -}; - static void esp_finalize(Object *obj) { ESPState *s = ESP(obj); @@ -1498,19 +1636,22 @@ static void esp_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } -static const TypeInfo esp_info = { - .name = TYPE_ESP, - .parent = TYPE_DEVICE, - .instance_init = esp_init, - .instance_finalize = esp_finalize, - .instance_size = sizeof(ESPState), - .class_init = esp_class_init, +static const TypeInfo esp_info_types[] = { + { + .name = TYPE_SYSBUS_ESP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = sysbus_esp_init, + .instance_size = sizeof(SysBusESPState), + .class_init = sysbus_esp_class_init, + }, + { + .name = TYPE_ESP, + .parent = TYPE_DEVICE, + .instance_init = esp_init, + .instance_finalize = esp_finalize, + .instance_size = sizeof(ESPState), + .class_init = esp_class_init, + }, }; -static void esp_register_types(void) -{ - type_register_static(&sysbus_esp_info); - type_register_static(&esp_info); -} - -type_init(esp_register_types) +DEFINE_TYPES(esp_info_types) diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c index afbea0fefa4..eb9828dd5ef 100644 --- a/hw/scsi/lsi53c895a.c +++ b/hw/scsi/lsi53c895a.c @@ -188,7 +188,7 @@ static const char *names[] = { #define LSI_TAG_VALID (1 << 16) /* Maximum instructions to process. */ -#define LSI_MAX_INSN 10000 +#define LSI_MAX_INSN 100 typedef struct lsi_request { SCSIRequest *req; @@ -205,6 +205,7 @@ enum { LSI_WAIT_RESELECT, /* Wait Reselect instruction has been issued */ LSI_DMA_SCRIPTS, /* processing DMA from lsi_execute_script */ LSI_DMA_IN_PROGRESS, /* DMA operation is in progress */ + LSI_WAIT_SCRIPTS, /* SCRIPTS stopped because of instruction count limit */ }; enum { @@ -224,8 +225,9 @@ struct LSIState { MemoryRegion ram_io; MemoryRegion io_io; AddressSpace pci_io_as; + QEMUTimer *scripts_timer; - int carry; /* ??? Should this be an a visible register somewhere? */ + int carry; /* ??? Should this be in a visible register somewhere? */ int status; int msg_action; int msg_len; @@ -415,6 +417,7 @@ static void lsi_soft_reset(LSIState *s) s->sbr = 0; assert(QTAILQ_EMPTY(&s->queue)); assert(!s->current); + timer_del(s->scripts_timer); } static int lsi_dma_40bit(LSIState *s) @@ -570,8 +573,9 @@ static inline void lsi_set_phase(LSIState *s, int phase) s->sstat1 = (s->sstat1 & ~PHASE_MASK) | phase; } -static void lsi_bad_phase(LSIState *s, int out, int new_phase) +static int lsi_bad_phase(LSIState *s, int out, int new_phase) { + int ret = 0; /* Trigger a phase mismatch. 
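The lsi53c895a changes in this patch stop SCRIPTS execution after LSI_MAX_INSN instructions and let a 500 microsecond QEMU_CLOCK_VIRTUAL timer resume it, instead of forcing an unexpected-disconnect, so a guest that busy-waits on a memory location gets CPU time to update it. A minimal sketch of the pattern, built only from calls used in the hunks below (the function names here are illustrative):

    /* Sketch of the SCRIPTS watchdog pattern added by this patch. */
    static void scripts_timer_sketch_cb(void *opaque)
    {
        LSIState *s = opaque;

        s->waiting = LSI_NOWAIT;      /* leave the stopped state */
        lsi_execute_script(s);        /* resume SCRIPTS processing */
    }

    static void scripts_timer_sketch_start(LSIState *s)
    {
        /* Re-check the polled memory location 500us of guest time later */
        s->waiting = LSI_WAIT_SCRIPTS;
        timer_mod(s->scripts_timer,
                  qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 500);
    }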
*/ if (s->ccntl0 & LSI_CCNTL0_ENPMJ) { if ((s->ccntl0 & LSI_CCNTL0_PMJCTL)) { @@ -584,8 +588,10 @@ static void lsi_bad_phase(LSIState *s, int out, int new_phase) trace_lsi_bad_phase_interrupt(); lsi_script_scsi_interrupt(s, LSI_SIST0_MA, 0); lsi_stop_script(s); + ret = 1; } lsi_set_phase(s, new_phase); + return ret; } @@ -789,7 +795,7 @@ static int lsi_queue_req(LSIState *s, SCSIRequest *req, uint32_t len) static void lsi_command_complete(SCSIRequest *req, size_t resid) { LSIState *s = LSI53C895A(req->bus->qbus.parent); - int out; + int out, stop = 0; out = (s->sstat1 & PHASE_MASK) == PHASE_DO; trace_lsi_command_complete(req->status); @@ -797,7 +803,10 @@ static void lsi_command_complete(SCSIRequest *req, size_t resid) s->command_complete = 2; if (s->waiting && s->dbc != 0) { /* Raise phase mismatch for short transfers. */ - lsi_bad_phase(s, out, PHASE_ST); + stop = lsi_bad_phase(s, out, PHASE_ST); + if (stop) { + s->waiting = 0; + } } else { lsi_set_phase(s, PHASE_ST); } @@ -807,7 +816,9 @@ static void lsi_command_complete(SCSIRequest *req, size_t resid) lsi_request_free(s, s->current); scsi_req_unref(req); } - lsi_resume_script(s); + if (!stop) { + lsi_resume_script(s); + } } /* Callback to indicate that the SCSI layer has completed a transfer. */ @@ -916,13 +927,18 @@ static void lsi_do_msgin(LSIState *s) assert(len > 0 && len <= LSI_MAX_MSGIN_LEN); if (len > s->dbc) len = s->dbc; - pci_dma_write(PCI_DEVICE(s), s->dnad, s->msg, len); - /* Linux drivers rely on the last byte being in the SIDL. */ - s->sidl = s->msg[len - 1]; - s->msg_len -= len; - if (s->msg_len) { - memmove(s->msg, s->msg + len, s->msg_len); - } else { + + if (len) { + pci_dma_write(PCI_DEVICE(s), s->dnad, s->msg, len); + /* Linux drivers rely on the last byte being in the SIDL. */ + s->sidl = s->msg[len - 1]; + s->msg_len -= len; + if (s->msg_len) { + memmove(s->msg, s->msg + len, s->msg_len); + } + } + + if (!s->msg_len) { /* ??? Check if ATN (not yet implemented) is asserted and maybe switch to PHASE_MO. */ switch (s->msg_action) { @@ -1127,6 +1143,12 @@ static void lsi_wait_reselect(LSIState *s) } } +static void lsi_scripts_timer_start(LSIState *s) +{ + trace_lsi_scripts_timer_start(); + timer_mod(s->scripts_timer, qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 500); +} + static void lsi_execute_script(LSIState *s) { PCIDevice *pci_dev = PCI_DEVICE(s); @@ -1136,6 +1158,11 @@ static void lsi_execute_script(LSIState *s) int insn_processed = 0; static int reentrancy_level; + if (s->waiting == LSI_WAIT_SCRIPTS) { + timer_del(s->scripts_timer); + s->waiting = LSI_NOWAIT; + } + reentrancy_level++; s->istat1 |= LSI_ISTAT1_SRUN; @@ -1143,8 +1170,8 @@ static void lsi_execute_script(LSIState *s) /* * Some windows drivers make the device spin waiting for a memory location * to change. If we have executed more than LSI_MAX_INSN instructions then - * assume this is the case and force an unexpected device disconnect. This - * is apparently sufficient to beat the drivers into submission. + * assume this is the case and start a timer. Until the timer fires, the + * host CPU has a chance to run and change the memory location. * * Another issue (CVE-2023-0330) can occur if the script is programmed to * trigger itself again and again. Avoid this problem by stopping after @@ -1152,13 +1179,8 @@ static void lsi_execute_script(LSIState *s) * which should be enough for all valid use cases). */ if (++insn_processed > LSI_MAX_INSN || reentrancy_level > 8) { - if (!(s->sien0 & LSI_SIST0_UDC)) { - qemu_log_mask(LOG_GUEST_ERROR, - "lsi_scsi: inf. 
loop with UDC masked"); - } - lsi_script_scsi_interrupt(s, LSI_SIST0_UDC, 0); - lsi_disconnect(s); - trace_lsi_execute_script_stop(); + s->waiting = LSI_WAIT_SCRIPTS; + lsi_scripts_timer_start(s); reentrancy_level--; return; } @@ -2197,6 +2219,9 @@ static int lsi_post_load(void *opaque, int version_id) return -EINVAL; } + if (s->waiting == LSI_WAIT_SCRIPTS) { + lsi_scripts_timer_start(s); + } return 0; } @@ -2206,7 +2231,7 @@ static const VMStateDescription vmstate_lsi_scsi = { .minimum_version_id = 0, .pre_save = lsi_pre_save, .post_load = lsi_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, LSIState), VMSTATE_INT32(carry, LSIState), @@ -2294,6 +2319,15 @@ static const struct SCSIBusInfo lsi_scsi_info = { .cancel = lsi_request_cancelled }; +static void scripts_timer_cb(void *opaque) +{ + LSIState *s = opaque; + + trace_lsi_scripts_timer_triggered(); + s->waiting = LSI_NOWAIT; + lsi_execute_script(s); +} + static void lsi_scsi_realize(PCIDevice *dev, Error **errp) { LSIState *s = LSI53C895A(dev); @@ -2313,6 +2347,7 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp) "lsi-ram", 0x2000); memory_region_init_io(&s->io_io, OBJECT(s), &lsi_io_ops, s, "lsi-io", 256); + s->scripts_timer = timer_new_us(QEMU_CLOCK_VIRTUAL, scripts_timer_cb, s); /* * Since we use the address-space API to interact with ram_io, disable the @@ -2337,6 +2372,7 @@ static void lsi_scsi_exit(PCIDevice *dev) LSIState *s = LSI53C895A(dev); address_space_destroy(&s->pci_io_as); + timer_del(s->scripts_timer); } static void lsi_class_init(ObjectClass *klass, void *data) diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c index 32c70c9e997..2d0c6071771 100644 --- a/hw/scsi/megasas.c +++ b/hw/scsi/megasas.c @@ -2299,7 +2299,7 @@ static const VMStateDescription vmstate_megasas_gen1 = { .name = "megasas", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, MegasasState), VMSTATE_MSIX(parent_obj, MegasasState), @@ -2317,7 +2317,7 @@ static const VMStateDescription vmstate_megasas_gen2 = { .name = "megasas-gen2", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, MegasasState), VMSTATE_MSIX(parent_obj, MegasasState), diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c index 75d3ab8bd18..c5d3138c936 100644 --- a/hw/scsi/mptsas.c +++ b/hw/scsi/mptsas.c @@ -1366,7 +1366,7 @@ static const VMStateDescription vmstate_mptsas = { .version_id = 0, .minimum_version_id = 0, .post_load = mptsas_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, MPTSASState), VMSTATE_BOOL(msi_in_use, MPTSASState), VMSTATE_UINT32(state, MPTSASState), diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c index fc4b77fdb02..9e40b0c920b 100644 --- a/hw/scsi/scsi-bus.c +++ b/hw/scsi/scsi-bus.c @@ -85,6 +85,92 @@ SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun) return d; } +/* + * Invoke @fn() for each enqueued request in device @s. Must be called from the + * main loop thread while the guest is stopped. This is only suitable for + * vmstate ->put(), use scsi_device_for_each_req_async() for other cases. 
+ */ +static void scsi_device_for_each_req_sync(SCSIDevice *s, + void (*fn)(SCSIRequest *, void *), + void *opaque) +{ + SCSIRequest *req; + SCSIRequest *next_req; + + assert(!runstate_is_running()); + assert(qemu_in_main_thread()); + + QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) { + fn(req, opaque); + } +} + +typedef struct { + SCSIDevice *s; + void (*fn)(SCSIRequest *, void *); + void *fn_opaque; +} SCSIDeviceForEachReqAsyncData; + +static void scsi_device_for_each_req_async_bh(void *opaque) +{ + g_autofree SCSIDeviceForEachReqAsyncData *data = opaque; + SCSIDevice *s = data->s; + AioContext *ctx; + SCSIRequest *req; + SCSIRequest *next; + + /* + * The BB cannot have changed contexts between this BH being scheduled and + * now: BBs' AioContexts, when they have a node attached, can only be + * changed via bdrv_try_change_aio_context(), in a drained section. While + * we have the in-flight counter incremented, that drain must block. + */ + ctx = blk_get_aio_context(s->conf.blk); + assert(ctx == qemu_get_current_aio_context()); + + QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) { + data->fn(req, data->fn_opaque); + } + + /* Drop the reference taken by scsi_device_for_each_req_async() */ + object_unref(OBJECT(s)); + + /* Paired with blk_inc_in_flight() in scsi_device_for_each_req_async() */ + blk_dec_in_flight(s->conf.blk); +} + +/* + * Schedule @fn() to be invoked for each enqueued request in device @s. @fn() + * runs in the AioContext that is executing the request. + * Keeps the BlockBackend's in-flight counter incremented until everything is + * done, so draining it will settle all scheduled @fn() calls. + */ +static void scsi_device_for_each_req_async(SCSIDevice *s, + void (*fn)(SCSIRequest *, void *), + void *opaque) +{ + assert(qemu_in_main_thread()); + + SCSIDeviceForEachReqAsyncData *data = + g_new(SCSIDeviceForEachReqAsyncData, 1); + + data->s = s; + data->fn = fn; + data->fn_opaque = opaque; + + /* + * Hold a reference to the SCSIDevice until + * scsi_device_for_each_req_async_bh() finishes. 
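The purge path further down in this file shows the intended usage of the async iterator: schedule a cancellation for every queued request, then drain the BlockBackend so the one-shot BH and any cancelled I/O settle. A condensed usage sketch (function names here are illustrative; the real callers are scsi_device_purge_requests() and scsi_dma_restart_cb() below):

    /* Usage sketch for the async per-request iterator. */
    static void cancel_one_req_sketch(SCSIRequest *req, void *opaque)
    {
        scsi_req_cancel_async(req, NULL);
    }

    static void purge_requests_sketch(SCSIDevice *sdev)
    {
        scsi_device_for_each_req_async(sdev, cancel_one_req_sketch, NULL);
        blk_drain(sdev->conf.blk);   /* wait for the BH and cancelled I/O */
    }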
+ */ + object_ref(OBJECT(s)); + + /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */ + blk_inc_in_flight(s->conf.blk); + aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk), + scsi_device_for_each_req_async_bh, + data); +} + static void scsi_device_realize(SCSIDevice *s, Error **errp) { SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s); @@ -144,20 +230,18 @@ void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host, qbus_set_bus_hotplug_handler(BUS(bus)); } -static void scsi_dma_restart_bh(void *opaque) +void scsi_req_retry(SCSIRequest *req) { - SCSIDevice *s = opaque; - SCSIRequest *req, *next; - - qemu_bh_delete(s->bh); - s->bh = NULL; + req->retry = true; +} - aio_context_acquire(blk_get_aio_context(s->conf.blk)); - QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) { - scsi_req_ref(req); - if (req->retry) { - req->retry = false; - switch (req->cmd.mode) { +/* Called in the AioContext that is executing the request */ +static void scsi_dma_restart_req(SCSIRequest *req, void *opaque) +{ + scsi_req_ref(req); + if (req->retry) { + req->retry = false; + switch (req->cmd.mode) { case SCSI_XFER_FROM_DEV: case SCSI_XFER_TO_DEV: scsi_req_continue(req); @@ -166,37 +250,22 @@ static void scsi_dma_restart_bh(void *opaque) scsi_req_dequeue(req); scsi_req_enqueue(req); break; - } } - scsi_req_unref(req); } - aio_context_release(blk_get_aio_context(s->conf.blk)); - /* Drop the reference that was acquired in scsi_dma_restart_cb */ - object_unref(OBJECT(s)); -} - -void scsi_req_retry(SCSIRequest *req) -{ - /* No need to save a reference, because scsi_dma_restart_bh just - * looks at the request list. */ - req->retry = true; + scsi_req_unref(req); } static void scsi_dma_restart_cb(void *opaque, bool running, RunState state) { SCSIDevice *s = opaque; + assert(qemu_in_main_thread()); + if (!running) { return; } - if (!s->bh) { - AioContext *ctx = blk_get_aio_context(s->conf.blk); - /* The reference is dropped in scsi_dma_restart_bh.*/ - object_ref(OBJECT(s)); - s->bh = aio_bh_new_guarded(ctx, scsi_dma_restart_bh, s, - &DEVICE(s)->mem_reentrancy_guard); - qemu_bh_schedule(s->bh); - } + + scsi_device_for_each_req_async(s, scsi_dma_restart_req, NULL); } static bool scsi_bus_is_address_free(SCSIBus *bus, @@ -307,15 +376,13 @@ static void scsi_qdev_unrealize(DeviceState *qdev) /* handle legacy '-drive if=scsi,...' 
cmd line args */ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk, - int unit, bool removable, int bootindex, - bool share_rw, - BlockdevOnError rerror, - BlockdevOnError werror, + int unit, bool removable, BlockConf *conf, const char *serial, Error **errp) { const char *driver; char *name; DeviceState *dev; + SCSIDevice *s; DriveInfo *dinfo; if (blk_is_sg(blk)) { @@ -333,11 +400,10 @@ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk, object_property_add_child(OBJECT(bus), name, OBJECT(dev)); g_free(name); + s = SCSI_DEVICE(dev); + s->conf = *conf; + qdev_prop_set_uint32(dev, "scsi-id", unit); - if (bootindex >= 0) { - object_property_set_int(OBJECT(dev), "bootindex", bootindex, - &error_abort); - } if (object_property_find(OBJECT(dev), "removable")) { qdev_prop_set_bit(dev, "removable", removable); } @@ -348,19 +414,12 @@ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk, object_unparent(OBJECT(dev)); return NULL; } - if (!object_property_set_bool(OBJECT(dev), "share-rw", share_rw, errp)) { - object_unparent(OBJECT(dev)); - return NULL; - } - - qdev_prop_set_enum(dev, "rerror", rerror); - qdev_prop_set_enum(dev, "werror", werror); if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) { object_unparent(OBJECT(dev)); return NULL; } - return SCSI_DEVICE(dev); + return s; } void scsi_bus_legacy_handle_cmdline(SCSIBus *bus) @@ -368,6 +427,12 @@ void scsi_bus_legacy_handle_cmdline(SCSIBus *bus) Location loc; DriveInfo *dinfo; int unit; + BlockConf conf = { + .bootindex = -1, + .share_rw = false, + .rerror = BLOCKDEV_ON_ERROR_AUTO, + .werror = BLOCKDEV_ON_ERROR_AUTO, + }; loc_push_none(&loc); for (unit = 0; unit <= bus->info->max_target; unit++) { @@ -377,10 +442,7 @@ void scsi_bus_legacy_handle_cmdline(SCSIBus *bus) } qemu_opts_loc_restore(dinfo->opts); scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo), - unit, false, -1, false, - BLOCKDEV_ON_ERROR_AUTO, - BLOCKDEV_ON_ERROR_AUTO, - NULL, &error_fatal); + unit, false, &conf, NULL, &error_fatal); } loc_pop(&loc); } @@ -1657,17 +1719,25 @@ void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense) } } +static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque) +{ + scsi_req_cancel_async(req, NULL); +} + +/** + * Cancel all requests, and block until they are deleted. + */ void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense) { - SCSIRequest *req; + scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL); - aio_context_acquire(blk_get_aio_context(sdev->conf.blk)); - while (!QTAILQ_EMPTY(&sdev->requests)) { - req = QTAILQ_FIRST(&sdev->requests); - scsi_req_cancel_async(req, NULL); - } + /* + * Await all the scsi_device_purge_one_req() calls scheduled by + * scsi_device_for_each_req_async(), and all I/O requests that were + * cancelled this way, but may still take a bit of time to settle. + */ blk_drain(sdev->conf.blk); - aio_context_release(blk_get_aio_context(sdev->conf.blk)); + scsi_device_set_ua(sdev, sense); } @@ -1737,31 +1807,33 @@ static char *scsibus_get_fw_dev_path(DeviceState *dev) /* SCSI request list. For simplicity, pv points to the whole device */ +static void put_scsi_req(SCSIRequest *req, void *opaque) +{ + QEMUFile *f = opaque; + + assert(!req->io_canceled); + assert(req->status == -1 && req->host_status == -1); + assert(req->enqueued); + + qemu_put_sbyte(f, req->retry ? 
1 : 2); + qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf)); + qemu_put_be32s(f, &req->tag); + qemu_put_be32s(f, &req->lun); + if (req->bus->info->save_request) { + req->bus->info->save_request(f, req); + } + if (req->ops->save_request) { + req->ops->save_request(f, req); + } +} + static int put_scsi_requests(QEMUFile *f, void *pv, size_t size, const VMStateField *field, JSONWriter *vmdesc) { SCSIDevice *s = pv; - SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus); - SCSIRequest *req; - QTAILQ_FOREACH(req, &s->requests, next) { - assert(!req->io_canceled); - assert(req->status == -1 && req->host_status == -1); - assert(req->enqueued); - - qemu_put_sbyte(f, req->retry ? 1 : 2); - qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf)); - qemu_put_be32s(f, &req->tag); - qemu_put_be32s(f, &req->lun); - if (bus->info->save_request) { - bus->info->save_request(f, req); - } - if (req->ops->save_request) { - req->ops->save_request(f, req); - } - } + scsi_device_for_each_req_sync(s, put_scsi_req, f); qemu_put_sbyte(f, 0); - return 0; } @@ -1826,7 +1898,7 @@ static const VMStateDescription vmstate_scsi_sense_state = { .version_id = 1, .minimum_version_id = 1, .needed = scsi_sense_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, SCSI_SENSE_BUF_SIZE_OLD, SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD), @@ -1838,7 +1910,7 @@ const VMStateDescription vmstate_scsi_device = { .name = "SCSIDevice", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(unit_attention.key, SCSIDevice), VMSTATE_UINT8(unit_attention.asc, SCSIDevice), VMSTATE_UINT8(unit_attention.ascq, SCSIDevice), @@ -1856,7 +1928,7 @@ const VMStateDescription vmstate_scsi_device = { }, VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_scsi_sense_state, NULL } diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index 6691f5edb84..4bd7af9d0c2 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -273,7 +273,9 @@ static void scsi_aio_complete(void *opaque, int ret) SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); + /* The request must only run in the BlockBackend's AioContext */ + assert(blk_get_aio_context(s->qdev.conf.blk) == + qemu_get_current_aio_context()); assert(r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -286,7 +288,6 @@ static void scsi_aio_complete(void *opaque, int ret) scsi_req_complete(&r->req, GOOD); done: - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); scsi_req_unref(&r->req); } @@ -354,7 +355,6 @@ static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) scsi_req_unref(&r->req); } -/* Called with AioContext lock held */ static void scsi_dma_complete(void *opaque, int ret) { SCSIDiskReq *r = (SCSIDiskReq *)opaque; @@ -373,8 +373,13 @@ static void scsi_dma_complete(void *opaque, int ret) static void scsi_read_complete_noio(SCSIDiskReq *r, int ret) { + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; + /* The request must only run in the BlockBackend's AioContext */ + assert(blk_get_aio_context(s->qdev.conf.blk) == + qemu_get_current_aio_context()); + assert(r->req.aiocb == NULL); if (scsi_disk_req_check_error(r, ret, false)) { goto done; @@ -394,8 +399,6 @@ static void scsi_read_complete(void *opaque, int ret) 
SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); - assert(r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -406,7 +409,6 @@ static void scsi_read_complete(void *opaque, int ret) trace_scsi_disk_read_complete(r->req.tag, r->qiov.size); } scsi_read_complete_noio(r, ret); - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); } /* Actually issue a read to the block device. */ @@ -448,8 +450,6 @@ static void scsi_do_read_cb(void *opaque, int ret) SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); - assert (r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -459,7 +459,6 @@ static void scsi_do_read_cb(void *opaque, int ret) block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); } scsi_do_read(opaque, ret); - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); } /* Read more data from scsi device into buffer. */ @@ -505,8 +504,13 @@ static void scsi_read_data(SCSIRequest *req) static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) { + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; + /* The request must only run in the BlockBackend's AioContext */ + assert(blk_get_aio_context(s->qdev.conf.blk) == + qemu_get_current_aio_context()); + assert (r->req.aiocb == NULL); if (scsi_disk_req_check_error(r, ret, false)) { goto done; @@ -533,8 +537,6 @@ static void scsi_write_complete(void * opaque, int ret) SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); - assert (r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -544,7 +546,6 @@ static void scsi_write_complete(void * opaque, int ret) block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); } scsi_write_complete_noio(r, ret); - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); } static void scsi_write_data(SCSIRequest *req) @@ -1742,8 +1743,6 @@ static void scsi_unmap_complete(void *opaque, int ret) SCSIDiskReq *r = data->r; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); - assert(r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -1754,7 +1753,6 @@ static void scsi_unmap_complete(void *opaque, int ret) block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); scsi_unmap_complete_noio(data, ret); } - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); } static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) @@ -1822,8 +1820,6 @@ static void scsi_write_same_complete(void *opaque, int ret) SCSIDiskReq *r = data->r; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); - assert(r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -1847,7 +1843,6 @@ static void scsi_write_same_complete(void *opaque, int ret) data->sector << BDRV_SECTOR_BITS, &data->qiov, 0, scsi_write_same_complete, data); - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); return; } @@ -1857,7 +1852,6 @@ static void scsi_write_same_complete(void *opaque, int ret) scsi_req_unref(&r->req); qemu_vfree(data->iov.iov_base); g_free(data); - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); } static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) @@ -2344,14 +2338,10 @@ static void 
scsi_disk_reset(DeviceState *dev) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); uint64_t nb_sectors; - AioContext *ctx; scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); - ctx = blk_get_aio_context(s->qdev.conf.blk); - aio_context_acquire(ctx); blk_get_geometry(s->qdev.conf.blk, &nb_sectors); - aio_context_release(ctx); nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; if (nb_sectors) { @@ -2550,15 +2540,13 @@ static void scsi_unrealize(SCSIDevice *dev) static void scsi_hd_realize(SCSIDevice *dev, Error **errp) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); - AioContext *ctx = NULL; + /* can happen for devices without drive. The error message for missing * backend will be issued in scsi_realize */ if (s->qdev.conf.blk) { - ctx = blk_get_aio_context(s->qdev.conf.blk); - aio_context_acquire(ctx); if (!blkconf_blocksizes(&s->qdev.conf, errp)) { - goto out; + return; } } s->qdev.blocksize = s->qdev.conf.logical_block_size; @@ -2567,16 +2555,11 @@ static void scsi_hd_realize(SCSIDevice *dev, Error **errp) s->product = g_strdup("QEMU HARDDISK"); } scsi_realize(&s->qdev, errp); -out: - if (ctx) { - aio_context_release(ctx); - } } static void scsi_cd_realize(SCSIDevice *dev, Error **errp) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); - AioContext *ctx; int ret; uint32_t blocksize = 2048; @@ -2592,8 +2575,6 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp) blocksize = dev->conf.physical_block_size; } - ctx = blk_get_aio_context(dev->conf.blk); - aio_context_acquire(ctx); s->qdev.blocksize = blocksize; s->qdev.type = TYPE_ROM; s->features |= 1 << SCSI_DISK_F_REMOVABLE; @@ -2601,7 +2582,6 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp) s->product = g_strdup("QEMU CD-ROM"); } scsi_realize(&s->qdev, errp); - aio_context_release(ctx); } @@ -2732,7 +2712,6 @@ static int get_device_type(SCSIDiskState *s) static void scsi_block_realize(SCSIDevice *dev, Error **errp) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); - AioContext *ctx; int sg_version; int rc; @@ -2747,9 +2726,6 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp) "be removed in a future version"); } - ctx = blk_get_aio_context(s->qdev.conf.blk); - aio_context_acquire(ctx); - /* check we are using a driver managing SG_IO (version 3 and after) */ rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); if (rc < 0) { @@ -2757,18 +2733,18 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp) if (rc != -EPERM) { error_append_hint(errp, "Is this a SCSI device?\n"); } - goto out; + return; } if (sg_version < 30000) { error_setg(errp, "scsi generic interface too old"); - goto out; + return; } /* get device type from INQUIRY data */ rc = get_device_type(s); if (rc < 0) { error_setg(errp, "INQUIRY failed"); - goto out; + return; } /* Make a guess for the block size, we'll fix it when the guest sends. 
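The scsi-disk hunks on either side of this point drop the aio_context_acquire()/release() pairs: completion callbacks no longer take the AioContext lock and instead rely on already running in the BlockBackend's AioContext, which the patch asserts in scsi_read_complete_noio() and scsi_write_complete_noio(). A minimal sketch of that invariant check (the helper name is illustrative, not part of the patch):

    /* Sketch: the invariant asserted instead of taking the AioContext lock. */
    static void assert_in_blk_aio_context_sketch(SCSIDiskState *s)
    {
        /* The request must only run in the BlockBackend's AioContext */
        assert(blk_get_aio_context(s->qdev.conf.blk) ==
               qemu_get_current_aio_context());
    }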
@@ -2788,9 +2764,6 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp) scsi_realize(&s->qdev, errp); scsi_generic_read_device_inquiry(&s->qdev); - -out: - aio_context_release(ctx); } typedef struct SCSIBlockReq { @@ -2810,7 +2783,6 @@ static void scsi_block_sgio_complete(void *opaque, int ret) { SCSIBlockReq *req = (SCSIBlockReq *)opaque; SCSIDiskReq *r = &req->req; - SCSIDevice *s = r->req.dev; sg_io_hdr_t *io_hdr = &req->io_header; if (ret == 0) { @@ -2827,13 +2799,10 @@ static void scsi_block_sgio_complete(void *opaque, int ret) } if (ret > 0) { - aio_context_acquire(blk_get_aio_context(s->conf.blk)); if (scsi_handle_rw_error(r, ret, true)) { - aio_context_release(blk_get_aio_context(s->conf.blk)); scsi_req_unref(&r->req); return; } - aio_context_release(blk_get_aio_context(s->conf.blk)); /* Ignore error. */ ret = 0; @@ -3168,7 +3137,7 @@ static const VMStateDescription vmstate_scsi_disk_state = { .name = "scsi-disk", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), VMSTATE_BOOL(media_changed, SCSIDiskState), VMSTATE_BOOL(media_event, SCSIDiskState), diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c index 2417f0ad847..ee945f87e33 100644 --- a/hw/scsi/scsi-generic.c +++ b/hw/scsi/scsi-generic.c @@ -109,15 +109,11 @@ static void scsi_command_complete_noio(SCSIGenericReq *r, int ret) static void scsi_command_complete(void *opaque, int ret) { SCSIGenericReq *r = (SCSIGenericReq *)opaque; - SCSIDevice *s = r->req.dev; - - aio_context_acquire(blk_get_aio_context(s->conf.blk)); assert(r->req.aiocb != NULL); r->req.aiocb = NULL; scsi_command_complete_noio(r, ret); - aio_context_release(blk_get_aio_context(s->conf.blk)); } static int execute_command(BlockBackend *blk, @@ -274,14 +270,12 @@ static void scsi_read_complete(void * opaque, int ret) SCSIDevice *s = r->req.dev; int len; - aio_context_acquire(blk_get_aio_context(s->conf.blk)); - assert(r->req.aiocb != NULL); r->req.aiocb = NULL; if (ret || r->req.io_canceled) { scsi_command_complete_noio(r, ret); - goto done; + return; } len = r->io_header.dxfer_len - r->io_header.resid; @@ -320,7 +314,7 @@ static void scsi_read_complete(void * opaque, int ret) r->io_header.status != GOOD || len == 0) { scsi_command_complete_noio(r, 0); - goto done; + return; } /* Snoop READ CAPACITY output to set the blocksize. */ @@ -356,9 +350,6 @@ static void scsi_read_complete(void * opaque, int ret) req_complete: scsi_req_data(&r->req, len); scsi_req_unref(&r->req); - -done: - aio_context_release(blk_get_aio_context(s->conf.blk)); } /* Read more data from scsi device into buffer. */ @@ -391,14 +382,12 @@ static void scsi_write_complete(void * opaque, int ret) trace_scsi_generic_write_complete(ret); - aio_context_acquire(blk_get_aio_context(s->conf.blk)); - assert(r->req.aiocb != NULL); r->req.aiocb = NULL; if (ret || r->req.io_canceled) { scsi_command_complete_noio(r, ret); - goto done; + return; } if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 && @@ -408,9 +397,6 @@ static void scsi_write_complete(void * opaque, int ret) } scsi_command_complete_noio(r, ret); - -done: - aio_context_release(blk_get_aio_context(s->conf.blk)); } /* Write data to a scsi device. Returns nonzero on failure. @@ -766,7 +752,6 @@ static void scsi_generic_realize(SCSIDevice *s, Error **errp) /* Only used by scsi-block, but initialize it nevertheless to be clean. 
*/ s->default_scsi_version = -1; - s->io_timeout = DEFAULT_IO_TIMEOUT; scsi_generic_read_device_inquiry(s); } diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c index 5bbbef64ef3..c75a6c88079 100644 --- a/hw/scsi/spapr_vscsi.c +++ b/hw/scsi/spapr_vscsi.c @@ -605,7 +605,7 @@ static const VMStateDescription vmstate_spapr_vscsi_req = { .name = "spapr_vscsi_req", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(crq.raw, vscsi_req), VMSTATE_BUFFER(viosrp_iu_buf, vscsi_req), VMSTATE_UINT32(qtag, vscsi_req), @@ -1259,7 +1259,7 @@ static const VMStateDescription vmstate_spapr_vscsi = { .name = "spapr_vscsi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SPAPR_VIO(vdev, VSCSIState), /* VSCSI state */ /* ???? */ diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events index bdd4e2c7c78..f0f2a98c2ee 100644 --- a/hw/scsi/trace-events +++ b/hw/scsi/trace-events @@ -197,6 +197,7 @@ esp_mem_writeb_cmd_selatns(uint32_t val) "Select with ATN & stop (0x%2.2x)" esp_mem_writeb_cmd_ensel(uint32_t val) "Enable selection (0x%2.2x)" esp_mem_writeb_cmd_dissel(uint32_t val) "Disable selection (0x%2.2x)" esp_mem_writeb_cmd_ti(uint32_t val) "Transfer Information (0x%2.2x)" +esp_set_phase(const char *phase) "setting bus phase to %s" # esp-pci.c esp_pci_error_invalid_dma_direction(void) "invalid DMA transfer direction" @@ -301,6 +302,8 @@ lsi_execute_script_stop(void) "SCRIPTS execution stopped" lsi_awoken(void) "Woken by SIGP" lsi_reg_read(const char *name, int offset, uint8_t ret) "Read reg %s 0x%x = 0x%02x" lsi_reg_write(const char *name, int offset, uint8_t val) "Write reg %s 0x%x = 0x%02x" +lsi_scripts_timer_triggered(void) "SCRIPTS timer triggered" +lsi_scripts_timer_start(void) "SCRIPTS timer started" # virtio-scsi.c virtio_scsi_cmd_req(int lun, uint32_t tag, uint8_t cmd) "virtio_scsi_cmd_req lun=%u tag=0x%x cmd=0x%x" diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c index 3126df9e1d9..ae26bc19a45 100644 --- a/hw/scsi/vhost-scsi.c +++ b/hw/scsi/vhost-scsi.c @@ -91,13 +91,13 @@ static int vhost_scsi_start(VHostSCSI *s) ret = vhost_scsi_common_start(vsc, &local_err); if (ret < 0) { - error_reportf_err(local_err, "Error starting vhost-scsi"); + error_reportf_err(local_err, "Error starting vhost-scsi: "); return ret; } ret = vhost_scsi_set_endpoint(s); if (ret < 0) { - error_reportf_err(local_err, "Error setting vhost-scsi endpoint"); + error_report("Error setting vhost-scsi endpoint"); vhost_scsi_common_stop(vsc); } @@ -158,15 +158,69 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = { .name = "virtio-vhost_scsi", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, .pre_save = vhost_scsi_pre_save, }; +static int vhost_scsi_set_workers(VHostSCSICommon *vsc, bool per_virtqueue) +{ + struct vhost_dev *dev = &vsc->dev; + struct vhost_vring_worker vq_worker; + struct vhost_worker_state worker; + int i, ret; + + /* Use default worker */ + if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) { + return 0; + } + + /* + * ctl/evt share the first worker since it will be rare for them + * to send cmds while IO is running. 
+ */ + for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) { + memset(&worker, 0, sizeof(worker)); + + ret = dev->vhost_ops->vhost_new_worker(dev, &worker); + if (ret == -ENOTTY) { + /* + * worker ioctls are not implemented so just ignore and + * and continue device setup. + */ + warn_report("vhost-scsi: Backend supports a single worker. " + "Ignoring worker_per_virtqueue=true setting."); + ret = 0; + break; + } else if (ret) { + break; + } + + memset(&vq_worker, 0, sizeof(vq_worker)); + vq_worker.worker_id = worker.worker_id; + vq_worker.index = i; + + ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker); + if (ret == -ENOTTY) { + /* + * It's a bug for the kernel to have supported the worker creation + * ioctl but not attach. + */ + dev->vhost_ops->vhost_free_worker(dev, &worker); + break; + } else if (ret) { + break; + } + } + + return ret; +} + static void vhost_scsi_realize(DeviceState *dev, Error **errp) { + ERRP_GUARD(); VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev); VHostSCSICommon *vsc = VHOST_SCSI_COMMON(dev); Error *err = NULL; @@ -232,6 +286,13 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) goto free_vqs; } + ret = vhost_scsi_set_workers(vsc, vs->conf.worker_per_virtqueue); + if (ret < 0) { + error_setg(errp, "vhost-scsi: vhost worker setup failed: %s", + strerror(-ret)); + goto free_vqs; + } + /* At present, channel and lun both are 0 for bootable vhost-scsi disk */ vsc->channel = 0; vsc->lun = 0; @@ -297,6 +358,8 @@ static Property vhost_scsi_properties[] = { VIRTIO_SCSI_F_T10_PI, false), DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false), + DEFINE_PROP_BOOL("worker_per_virtqueue", VirtIOSCSICommon, + conf.worker_per_virtqueue, false), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c index 780f10559d2..a63b1f49482 100644 --- a/hw/scsi/vhost-user-scsi.c +++ b/hw/scsi/vhost-user-scsi.c @@ -83,7 +83,8 @@ static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status) if (should_start) { ret = vhost_user_scsi_start(s, &local_err); if (ret < 0) { - error_reportf_err(local_err, "unable to start vhost-user-scsi: %s", + error_reportf_err(local_err, + "unable to start vhost-user-scsi: %s: ", strerror(-ret)); qemu_chr_fe_disconnect(&vs->conf.chardev); } @@ -378,7 +379,7 @@ static const VMStateDescription vmstate_vhost_scsi = { .name = "virtio-scsi", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c index 1e684beebe2..2806a121b24 100644 --- a/hw/scsi/virtio-scsi-dataplane.c +++ b/hw/scsi/virtio-scsi-dataplane.c @@ -20,7 +20,7 @@ #include "scsi/constants.h" #include "hw/virtio/virtio-bus.h" -/* Context: QEMU global mutex held */ +/* Context: BQL held */ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); @@ -93,7 +93,7 @@ static void virtio_scsi_dataplane_stop_bh(void *opaque) } } -/* Context: QEMU global mutex held */ +/* Context: BQL held */ int virtio_scsi_dataplane_start(VirtIODevice *vdev) { int i; @@ -149,23 +149,17 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev) memory_region_transaction_commit(); - /* - * These fields are visible to the IOThread so we rely on implicit barriers - * in aio_context_acquire() on the write side and aio_notify_accept() on - * the read side. 
- */ s->dataplane_starting = false; s->dataplane_started = true; + smp_wmb(); /* paired with aio_notify_accept() */ if (s->bus.drain_count == 0) { - aio_context_acquire(s->ctx); virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx); virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx); for (i = 0; i < vs->conf.num_queues; i++) { virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx); } - aio_context_release(s->ctx); } return 0; @@ -191,7 +185,7 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev) return -ENOSYS; } -/* Context: QEMU global mutex held */ +/* Context: BQL held */ void virtio_scsi_dataplane_stop(VirtIODevice *vdev) { BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 4b0d476c8d0..9f02ceea099 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -123,6 +123,30 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req) virtio_scsi_free_req(req); } +static void virtio_scsi_complete_req_bh(void *opaque) +{ + VirtIOSCSIReq *req = opaque; + + virtio_scsi_complete_req(req); +} + +/* + * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop + * thread cannot touch the virtqueue since that could race with an IOThread. + */ +static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req) +{ + VirtIOSCSI *s = req->dev; + + if (!s->ctx || s->ctx == qemu_get_aio_context()) { + /* No need to schedule a BH when there is no IOThread */ + virtio_scsi_complete_req(req); + } else { + /* Run request completion in the IOThread */ + aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req); + } +} + static void virtio_scsi_bad_req(VirtIOSCSIReq *req) { virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers"); @@ -338,10 +362,7 @@ static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req) out: object_unref(OBJECT(d)); - - virtio_scsi_acquire(s); - virtio_scsi_complete_req(req); - virtio_scsi_release(s); + virtio_scsi_complete_req_from_main_loop(req); } /* Some TMFs must be processed from the main loop thread */ @@ -354,18 +375,16 @@ static void virtio_scsi_do_tmf_bh(void *opaque) GLOBAL_STATE_CODE(); - virtio_scsi_acquire(s); + WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) { + QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) { + QTAILQ_REMOVE(&s->tmf_bh_list, req, next); + QTAILQ_INSERT_TAIL(&reqs, req, next); + } - QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) { - QTAILQ_REMOVE(&s->tmf_bh_list, req, next); - QTAILQ_INSERT_TAIL(&reqs, req, next); + qemu_bh_delete(s->tmf_bh); + s->tmf_bh = NULL; } - qemu_bh_delete(s->tmf_bh); - s->tmf_bh = NULL; - - virtio_scsi_release(s); - QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) { QTAILQ_REMOVE(&reqs, req, next); virtio_scsi_do_one_tmf_bh(req); @@ -379,8 +398,7 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s) GLOBAL_STATE_CODE(); - virtio_scsi_acquire(s); - + /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */ if (s->tmf_bh) { qemu_bh_delete(s->tmf_bh); s->tmf_bh = NULL; @@ -393,19 +411,19 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s) req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE; virtio_scsi_complete_req(req); } - - virtio_scsi_release(s); } static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req) { VirtIOSCSI *s = req->dev; - QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next); + WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) { + QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next); - if (!s->tmf_bh) { - s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s); - 
qemu_bh_schedule(s->tmf_bh); + if (!s->tmf_bh) { + s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s); + qemu_bh_schedule(s->tmf_bh); + } } } @@ -624,9 +642,7 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) return; } - virtio_scsi_acquire(s); virtio_scsi_handle_ctrl_vq(s, vq); - virtio_scsi_release(s); } static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req) @@ -864,9 +880,7 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq) return; } - virtio_scsi_acquire(s); virtio_scsi_handle_cmd_vq(s, vq); - virtio_scsi_release(s); } static void virtio_scsi_get_config(VirtIODevice *vdev, @@ -1013,9 +1027,7 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq) return; } - virtio_scsi_acquire(s); virtio_scsi_handle_event_vq(s, vq); - virtio_scsi_release(s); } static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense) @@ -1034,9 +1046,7 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense) }, }; - virtio_scsi_acquire(s); virtio_scsi_push_event(s, &info); - virtio_scsi_release(s); } } @@ -1053,17 +1063,13 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev); VirtIOSCSI *s = VIRTIO_SCSI(vdev); SCSIDevice *sd = SCSI_DEVICE(dev); - AioContext *old_context; int ret; if (s->ctx && !s->dataplane_fenced) { if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) { return; } - old_context = blk_get_aio_context(sd->conf.blk); - aio_context_acquire(old_context); ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp); - aio_context_release(old_context); if (ret < 0) { return; } @@ -1079,10 +1085,8 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, }, }; - virtio_scsi_acquire(s); virtio_scsi_push_event(s, &info); scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); - virtio_scsi_release(s); } } @@ -1104,17 +1108,13 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev, qdev_simple_device_unplug_cb(hotplug_dev, dev, errp); if (s->ctx) { - virtio_scsi_acquire(s); /* If other users keep the BlockBackend in the iothread, that's ok */ blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL); - virtio_scsi_release(s); } if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { - virtio_scsi_acquire(s); virtio_scsi_push_event(s, &info); scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); - virtio_scsi_release(s); } } @@ -1240,6 +1240,7 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp) Error *err = NULL; QTAILQ_INIT(&s->tmf_bh_list); + qemu_mutex_init(&s->tmf_bh_lock); virtio_scsi_common_realize(dev, virtio_scsi_handle_ctrl, @@ -1282,6 +1283,7 @@ static void virtio_scsi_device_unrealize(DeviceState *dev) qbus_set_hotplug_handler(BUS(&s->bus), NULL); virtio_scsi_common_unrealize(dev); + qemu_mutex_destroy(&s->tmf_bh_lock); } static Property virtio_scsi_properties[] = { @@ -1308,7 +1310,7 @@ static const VMStateDescription vmstate_virtio_scsi = { .name = "virtio-scsi", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c index 4de34536e98..cd7bf6aa015 100644 --- a/hw/scsi/vmw_pvscsi.c +++ b/hw/scsi/vmw_pvscsi.c @@ -1249,7 +1249,7 @@ static bool pvscsi_vmstate_test_pci_device(void *opaque, int version_id) static const VMStateDescription 
vmstate_pvscsi_pcie_device = { .name = "pvscsi/pcie", .needed = pvscsi_vmstate_need_pcie_device, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState), VMSTATE_END_OF_LIST() } @@ -1261,7 +1261,7 @@ static const VMStateDescription vmstate_pvscsi = { .minimum_version_id = 0, .pre_save = pvscsi_pre_save, .post_load = pvscsi_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_TEST(parent_obj, PVSCSIState, pvscsi_vmstate_test_pci_device, 0, vmstate_pci_device, PCIDevice), @@ -1290,7 +1290,7 @@ static const VMStateDescription vmstate_pvscsi = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_pvscsi_pcie_device, NULL } diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c index 1a576d62ae2..a1b7230633e 100644 --- a/hw/sd/allwinner-sdhost.c +++ b/hw/sd/allwinner-sdhost.c @@ -773,7 +773,7 @@ static const VMStateDescription vmstate_allwinner_sdhost = { .name = "allwinner-sdhost", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(global_ctl, AwSdHostState), VMSTATE_UINT32(clock_ctl, AwSdHostState), VMSTATE_UINT32(timeout, AwSdHostState), diff --git a/hw/sd/aspeed_sdhci.c b/hw/sd/aspeed_sdhci.c index e53206d9594..3b63926c3a2 100644 --- a/hw/sd/aspeed_sdhci.c +++ b/hw/sd/aspeed_sdhci.c @@ -177,7 +177,7 @@ static void aspeed_sdhci_reset(DeviceState *dev) static const VMStateDescription vmstate_aspeed_sdhci = { .name = TYPE_ASPEED_SDHCI, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedSDHCIState, ASPEED_SDHCI_NUM_REGS), VMSTATE_END_OF_LIST(), }, diff --git a/hw/sd/bcm2835_sdhost.c b/hw/sd/bcm2835_sdhost.c index a600cf39e23..11c54dd4a73 100644 --- a/hw/sd/bcm2835_sdhost.c +++ b/hw/sd/bcm2835_sdhost.c @@ -381,7 +381,7 @@ static const VMStateDescription vmstate_bcm2835_sdhost = { .name = TYPE_BCM2835_SDHOST, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cmd, BCM2835SDHostState), VMSTATE_UINT32(cmdarg, BCM2835SDHostState), VMSTATE_UINT32(status, BCM2835SDHostState), diff --git a/hw/sd/cadence_sdhci.c b/hw/sd/cadence_sdhci.c index ef4e0d74e3e..7c8bc5464b9 100644 --- a/hw/sd/cadence_sdhci.c +++ b/hw/sd/cadence_sdhci.c @@ -159,7 +159,7 @@ static void cadence_sdhci_realize(DeviceState *dev, Error **errp) static const VMStateDescription vmstate_cadence_sdhci = { .name = TYPE_CADENCE_SDHCI, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, CadenceSDHCIState, CADENCE_SDHCI_NUM_REGS), VMSTATE_END_OF_LIST(), }, diff --git a/hw/sd/npcm7xx_sdhci.c b/hw/sd/npcm7xx_sdhci.c index 9958680090f..e93dab8dbd5 100644 --- a/hw/sd/npcm7xx_sdhci.c +++ b/hw/sd/npcm7xx_sdhci.c @@ -142,7 +142,7 @@ static void npcm7xx_sdhci_reset(DeviceState *dev) static const VMStateDescription vmstate_npcm7xx_sdhci = { .name = TYPE_NPCM7XX_SDHCI, .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(regs.boottoctrl, NPCM7xxSDHCIState), VMSTATE_END_OF_LIST(), }, diff --git a/hw/sd/pl181.c b/hw/sd/pl181.c index 2b33814d830..e3633c2e6fc 100644 --- a/hw/sd/pl181.c +++ b/hw/sd/pl181.c @@ -63,7 +63,7 @@ static const VMStateDescription vmstate_pl181 = { .name = "pl181", .version_id = 1, .minimum_version_id = 1, - .fields = 
(VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(clock, PL181State), VMSTATE_UINT32(power, PL181State), VMSTATE_UINT32(cmdarg, PL181State), diff --git a/hw/sd/pxa2xx_mmci.c b/hw/sd/pxa2xx_mmci.c index 5e8ea691886..82529708c8a 100644 --- a/hw/sd/pxa2xx_mmci.c +++ b/hw/sd/pxa2xx_mmci.c @@ -84,7 +84,7 @@ static const VMStateDescription vmstate_pxa2xx_mmci = { .name = "pxa2xx-mmci", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(status, PXA2xxMMCIState), VMSTATE_UINT32(clkrt, PXA2xxMMCIState), VMSTATE_UINT32(spi, PXA2xxMMCIState), diff --git a/hw/sd/sd.c b/hw/sd/sd.c index 1106ff7d785..807b5d3de32 100644 --- a/hw/sd/sd.c +++ b/hw/sd/sd.c @@ -681,7 +681,7 @@ static const VMStateDescription sd_ocr_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = sd_ocr_vmstate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ocr, SDState), VMSTATE_TIMER_PTR(ocr_power_timer, SDState), VMSTATE_END_OF_LIST() @@ -706,7 +706,7 @@ static const VMStateDescription sd_vmstate = { .version_id = 2, .minimum_version_id = 2, .pre_load = sd_vmstate_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(mode, SDState), VMSTATE_INT32(state, SDState), VMSTATE_UINT8_ARRAY(cid, SDState, 16), @@ -733,7 +733,7 @@ static const VMStateDescription sd_vmstate = { VMSTATE_BOOL(enable, SDState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &sd_ocr_vmstate, NULL }, diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index 40473b0db09..27673e1c70e 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -473,6 +473,7 @@ static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size) } for (i = 0; i < size; i++) { + assert(s->data_count < s->buf_maxsz); value |= s->fifo_buffer[s->data_count] << i * 8; s->data_count++; /* check if we've read all valid data (blksize bytes) from buffer */ @@ -561,6 +562,7 @@ static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size) } for (i = 0; i < size; i++) { + assert(s->data_count < s->buf_maxsz); s->fifo_buffer[s->data_count] = value & 0xFF; s->data_count++; value >>= 8; @@ -1208,6 +1210,12 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) { value &= ~SDHC_TRNS_DMA; } + + /* TRNMOD writes are inhibited while Command Inhibit (DAT) is true */ + if (s->prnsts & SDHC_DATA_INHIBIT) { + mask |= 0xffff; + } + MASKED_WRITE(s->trnmod, mask, value & SDHC_TRNMOD_MASK); MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16); @@ -1457,7 +1465,7 @@ static const VMStateDescription sdhci_pending_insert_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = sdhci_pending_insert_vmstate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(pending_insert_state, SDHCIState), VMSTATE_END_OF_LIST() }, @@ -1467,7 +1475,7 @@ const VMStateDescription sdhci_vmstate = { .name = "sdhci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(sdmasysad, SDHCIState), VMSTATE_UINT16(blksize, SDHCIState), VMSTATE_UINT16(blkcnt, SDHCIState), @@ -1498,7 +1506,7 @@ const VMStateDescription sdhci_vmstate = { VMSTATE_TIMER_PTR(transfer_timer, SDHCIState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { 
&sdhci_pending_insert_vmstate, NULL }, diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c index a6cc1ad6c89..2dd070f978c 100644 --- a/hw/sd/ssi-sd.c +++ b/hw/sd/ssi-sd.c @@ -350,7 +350,7 @@ static const VMStateDescription vmstate_ssi_sd = { .version_id = 7, .minimum_version_id = 7, .post_load = ssi_sd_post_load, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(mode, ssi_sd_state), VMSTATE_INT32(cmd, ssi_sd_state), VMSTATE_UINT8_ARRAY(cmdarg, ssi_sd_state, 4), diff --git a/hw/sensor/adm1266.c b/hw/sensor/adm1266.c index 5ae4f82ba16..5454b73a639 100644 --- a/hw/sensor/adm1266.c +++ b/hw/sensor/adm1266.c @@ -202,7 +202,7 @@ static const VMStateDescription vmstate_adm1266 = { .name = "ADM1266", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_PMBUS_DEVICE(parent, ADM1266State), VMSTATE_END_OF_LIST() } diff --git a/hw/sensor/adm1272.c b/hw/sensor/adm1272.c index 8f4a1c2cd4b..1f7c8abb838 100644 --- a/hw/sensor/adm1272.c +++ b/hw/sensor/adm1272.c @@ -457,7 +457,7 @@ static const VMStateDescription vmstate_adm1272 = { .name = "ADM1272", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_PMBUS_DEVICE(parent, ADM1272State), VMSTATE_UINT64(ein_ext, ADM1272State), VMSTATE_UINT32(pin_ext, ADM1272State), diff --git a/hw/sensor/dps310.c b/hw/sensor/dps310.c index addee99b196..01c776dd7a8 100644 --- a/hw/sensor/dps310.c +++ b/hw/sensor/dps310.c @@ -188,7 +188,7 @@ static const VMStateDescription vmstate_dps310 = { .name = "DPS310", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(len, DPS310State), VMSTATE_UINT8_ARRAY(regs, DPS310State, NUM_REGISTERS), VMSTATE_UINT8(pointer, DPS310State), diff --git a/hw/sensor/emc141x.c b/hw/sensor/emc141x.c index 7ce8f4e9794..95079558e87 100644 --- a/hw/sensor/emc141x.c +++ b/hw/sensor/emc141x.c @@ -228,7 +228,7 @@ static const VMStateDescription vmstate_emc141x = { .name = "EMC141X", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(len, EMC141XState), VMSTATE_UINT8(data, EMC141XState), VMSTATE_UINT8(pointer, EMC141XState), diff --git a/hw/sensor/lsm303dlhc_mag.c b/hw/sensor/lsm303dlhc_mag.c index bb8d48b2fdb..343ff989904 100644 --- a/hw/sensor/lsm303dlhc_mag.c +++ b/hw/sensor/lsm303dlhc_mag.c @@ -442,7 +442,7 @@ static const VMStateDescription vmstate_lsm303dlhc_mag = { .name = "LSM303DLHC_MAG", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(parent_obj, LSM303DLHCMagState), VMSTATE_UINT8(len, LSM303DLHCMagState), diff --git a/hw/sensor/max31785.c b/hw/sensor/max31785.c index 8b95e324814..916ed4d457b 100644 --- a/hw/sensor/max31785.c +++ b/hw/sensor/max31785.c @@ -487,7 +487,7 @@ static const VMStateDescription vmstate_max31785 = { .name = TYPE_MAX31785, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_PMBUS_DEVICE(parent, MAX31785State), VMSTATE_UINT16_ARRAY(mfr_mode, MAX31785State, MAX31785_TOTAL_NUM_PAGES), diff --git a/hw/sensor/max34451.c b/hw/sensor/max34451.c index 9db52ef6778..031ae53f594 100644 --- a/hw/sensor/max34451.c +++ b/hw/sensor/max34451.c @@ -654,7 +654,7 @@ static const VMStateDescription vmstate_max34451 = { .name = TYPE_MAX34451, .version_id = 0, .minimum_version_id = 0, - .fields = 
(VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_PMBUS_DEVICE(parent, MAX34451State), VMSTATE_UINT16_ARRAY(power_good_on, MAX34451State, MAX34451_NUM_PWR_DEVICES), diff --git a/hw/sensor/tmp105.c b/hw/sensor/tmp105.c index 20564494899..a8730d0b7f9 100644 --- a/hw/sensor/tmp105.c +++ b/hw/sensor/tmp105.c @@ -238,7 +238,7 @@ static const VMStateDescription vmstate_tmp105_detect_falling = { .version_id = 1, .minimum_version_id = 1, .needed = detect_falling_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(detect_falling, TMP105State), VMSTATE_END_OF_LIST() } @@ -249,7 +249,7 @@ static const VMStateDescription vmstate_tmp105 = { .version_id = 0, .minimum_version_id = 0, .post_load = tmp105_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(len, TMP105State), VMSTATE_UINT8_ARRAY(buf, TMP105State, 2), VMSTATE_UINT8(pointer, TMP105State), @@ -260,7 +260,7 @@ static const VMStateDescription vmstate_tmp105 = { VMSTATE_I2C_SLAVE(i2c, TMP105State), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_tmp105_detect_falling, NULL } diff --git a/hw/sensor/tmp421.c b/hw/sensor/tmp421.c index a3db57dcb5a..b6f0b62ab11 100644 --- a/hw/sensor/tmp421.c +++ b/hw/sensor/tmp421.c @@ -290,7 +290,7 @@ static const VMStateDescription vmstate_tmp421 = { .name = "TMP421", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(len, TMP421State), VMSTATE_UINT8_ARRAY(buf, TMP421State, 2), VMSTATE_UINT8(pointer, TMP421State), diff --git a/hw/sh4/Kconfig b/hw/sh4/Kconfig index ab733a3f760..e0c4ecd1a53 100644 --- a/hw/sh4/Kconfig +++ b/hw/sh4/Kconfig @@ -6,7 +6,6 @@ config R2D select I82378 if TEST_DEVICES select IDE_MMIO select PFLASH_CFI02 - select USB_OHCI_PCI select PCI select SM501 select SH7750 diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c index 4944994e9c8..e5ac6751bd5 100644 --- a/hw/sh4/r2d.c +++ b/hw/sh4/r2d.c @@ -240,11 +240,11 @@ static void r2d_init(MachineState *machine) MemoryRegion *sdram = g_new(MemoryRegion, 1); qemu_irq *irq; DriveInfo *dinfo; - int i; DeviceState *dev; SysBusDevice *busdev; MemoryRegion *address_space_mem = get_system_memory(); PCIBus *pci_bus; + USBBus *usb_bus; cpu = SUPERH_CPU(cpu_create(machine->cpu_type)); env = &cpu->env; @@ -286,9 +286,9 @@ static void r2d_init(MachineState *machine) dinfo = drive_get(IF_IDE, 0, 0); dev = qdev_new("mmio-ide"); busdev = SYS_BUS_DEVICE(dev); - sysbus_connect_irq(busdev, 0, irq[CF_IDE]); qdev_prop_set_uint32(dev, "shift", 1); sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_connect_irq(busdev, 0, irq[CF_IDE]); sysbus_mmio_map(busdev, 0, 0x14001000); sysbus_mmio_map(busdev, 1, 0x1400080c); mmio_ide_init_drives(dev, dinfo, NULL); @@ -309,12 +309,13 @@ static void r2d_init(MachineState *machine) 0x555, 0x2aa, 0); /* NIC: rtl8139 on-board, and 2 slots. */ - for (i = 0; i < nb_nics; i++) - pci_nic_init_nofail(&nd_table[i], pci_bus, - mc->default_nic, i == 0 ? 
"2" : NULL); + pci_init_nic_in_slot(pci_bus, mc->default_nic, NULL, "2"); + pci_init_nic_devices(pci_bus, mc->default_nic); /* USB keyboard */ - usb_create_simple(usb_bus_find(-1), "usb-kbd"); + usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS, + &error_abort)); + usb_create_simple(usb_bus, "usb-kbd"); /* Todo: register on board registers */ memset(&boot_params, 0, sizeof(boot_params)); diff --git a/hw/sh4/sh7750_regs.h b/hw/sh4/sh7750_regs.h index edb5d18f00c..946ad7b3aaf 100644 --- a/hw/sh4/sh7750_regs.h +++ b/hw/sh4/sh7750_regs.h @@ -172,7 +172,7 @@ /* - * Exeption-related registers + * Exception-related registers */ /* Immediate data for TRAPA instruction - TRA */ diff --git a/hw/sh4/shix.c b/hw/sh4/shix.c index aa812512f0c..eb3150b5bcf 100644 --- a/hw/sh4/shix.c +++ b/hw/sh4/shix.c @@ -80,6 +80,7 @@ static void shix_machine_init(MachineClass *mc) mc->init = shix_init; mc->is_default = true; mc->default_cpu_type = TYPE_SH7750R_CPU; + mc->deprecation_reason = "old and unmaintained"; } DEFINE_MACHINE("shix", shix_machine_init) diff --git a/hw/smbios/Kconfig b/hw/smbios/Kconfig index 553adf4bfcf..8d989a2f1bc 100644 --- a/hw/smbios/Kconfig +++ b/hw/smbios/Kconfig @@ -1,2 +1,4 @@ config SMBIOS bool +config SMBIOS_LEGACY + bool diff --git a/hw/smbios/meson.build b/hw/smbios/meson.build index 6eeae4b35c2..a59039f6692 100644 --- a/hw/smbios/meson.build +++ b/hw/smbios/meson.build @@ -4,10 +4,9 @@ smbios_ss.add(when: 'CONFIG_IPMI', if_true: files('smbios_type_38.c'), if_false: files('smbios_type_38-stub.c')) +smbios_ss.add(when: 'CONFIG_SMBIOS_LEGACY', + if_true: files('smbios_legacy.c'), + if_false: files('smbios_legacy_stub.c')) + system_ss.add_all(when: 'CONFIG_SMBIOS', if_true: smbios_ss) system_ss.add(when: 'CONFIG_SMBIOS', if_false: files('smbios-stub.c')) - -system_ss.add(when: 'CONFIG_ALL', if_true: files( - 'smbios-stub.c', - 'smbios_type_38-stub.c', -)) diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index 8a44d3f271d..eed5787b15d 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -19,7 +19,6 @@ #include "qemu/units.h" #include "qapi/error.h" #include "qemu/config-file.h" -#include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/option.h" #include "sysemu/sysemu.h" @@ -31,60 +30,31 @@ #include "hw/pci/pci_device.h" #include "smbios_build.h" -/* legacy structures and constants for <= 2.0 machines */ -struct smbios_header { - uint16_t length; - uint8_t type; -} QEMU_PACKED; - -struct smbios_field { - struct smbios_header header; - uint8_t type; - uint16_t offset; - uint8_t data[]; -} QEMU_PACKED; - -struct smbios_table { - struct smbios_header header; - uint8_t data[]; -} QEMU_PACKED; - -#define SMBIOS_FIELD_ENTRY 0 -#define SMBIOS_TABLE_ENTRY 1 - -static uint8_t *smbios_entries; -static size_t smbios_entries_len; -static bool smbios_legacy = true; static bool smbios_uuid_encoded = true; -/* end: legacy structures & constants for <= 2.0 machines */ - +/* + * SMBIOS tables provided by user with '-smbios file=' option + */ +uint8_t *usr_blobs; +size_t usr_blobs_len; +static unsigned usr_table_max; +static unsigned usr_table_cnt; uint8_t *smbios_tables; size_t smbios_tables_len; unsigned smbios_table_max; unsigned smbios_table_cnt; -static SmbiosEntryPointType smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_32; static SmbiosEntryPoint ep; static int smbios_type4_count = 0; -static bool smbios_immutable; static bool smbios_have_defaults; -static uint32_t smbios_cpuid_version, smbios_cpuid_features, smbios_smp_sockets; +static uint32_t smbios_cpuid_version, 
smbios_cpuid_features; -static DECLARE_BITMAP(have_binfile_bitmap, SMBIOS_MAX_TYPE+1); -static DECLARE_BITMAP(have_fields_bitmap, SMBIOS_MAX_TYPE+1); +DECLARE_BITMAP(smbios_have_binfile_bitmap, SMBIOS_MAX_TYPE + 1); +DECLARE_BITMAP(smbios_have_fields_bitmap, SMBIOS_MAX_TYPE + 1); -static struct { - const char *vendor, *version, *date; - bool have_major_minor, uefi; - uint8_t major, minor; -} type0; - -static struct { - const char *manufacturer, *product, *version, *serial, *sku, *family; - /* uuid is in qemu_uuid */ -} type1; +smbios_type0_t smbios_type0; +smbios_type1_t smbios_type1; static struct { const char *manufacturer, *product, *version, *serial, *asset, *location; @@ -102,6 +72,7 @@ static struct { #define DEFAULT_CPU_SPEED 2000 static struct { + uint16_t processor_family; const char *sock_pfx, *manufacturer, *version, *serial, *asset, *part; uint64_t max_speed; uint64_t current_speed; @@ -110,6 +81,7 @@ static struct { .max_speed = DEFAULT_CPU_SPEED, .current_speed = DEFAULT_CPU_SPEED, .processor_id = 0, + .processor_family = 0x01, /* Other */ }; struct type8_instance { @@ -119,6 +91,16 @@ struct type8_instance { }; static QTAILQ_HEAD(, type8_instance) type8 = QTAILQ_HEAD_INITIALIZER(type8); +/* type 9 instance for parsing */ +struct type9_instance { + const char *slot_designation, *pcidev; + uint8_t slot_type, slot_data_bus_width, current_usage, slot_length, + slot_characteristics1, slot_characteristics2; + uint16_t slot_id; + QTAILQ_ENTRY(type9_instance) next; +}; +static QTAILQ_HEAD(, type9_instance) type9 = QTAILQ_HEAD_INITIALIZER(type9); + static struct { size_t nvalues; char **values; @@ -337,6 +319,10 @@ static const QemuOptDesc qemu_smbios_type4_opts[] = { .name = "part", .type = QEMU_OPT_STRING, .help = "part number", + }, { + .name = "processor-family", + .type = QEMU_OPT_NUMBER, + .help = "processor family", }, { .name = "processor-id", .type = QEMU_OPT_NUMBER, @@ -374,6 +360,59 @@ static const QemuOptDesc qemu_smbios_type8_opts[] = { { /* end of list */ } }; +static const QemuOptDesc qemu_smbios_type9_opts[] = { + { + .name = "type", + .type = QEMU_OPT_NUMBER, + .help = "SMBIOS element type", + }, + { + .name = "slot_designation", + .type = QEMU_OPT_STRING, + .help = "string number for reference designation", + }, + { + .name = "slot_type", + .type = QEMU_OPT_NUMBER, + .help = "connector type", + }, + { + .name = "slot_data_bus_width", + .type = QEMU_OPT_NUMBER, + .help = "port type", + }, + { + .name = "current_usage", + .type = QEMU_OPT_NUMBER, + .help = "current usage", + }, + { + .name = "slot_length", + .type = QEMU_OPT_NUMBER, + .help = "system slot length", + }, + { + .name = "slot_id", + .type = QEMU_OPT_NUMBER, + .help = "system slot id", + }, + { + .name = "slot_characteristics1", + .type = QEMU_OPT_NUMBER, + .help = "slot characteristics1, see the spec", + }, + { + .name = "slot_characteristics2", + .type = QEMU_OPT_NUMBER, + .help = "slot characteristics2, see the spec", + }, + { + .name = "pci_device", + .type = QEMU_OPT_STRING, + .help = "PCI device, if provided." + } +}; + static const QemuOptDesc qemu_smbios_type11_opts[] = { { .name = "type", @@ -470,126 +509,33 @@ opts_init(smbios_register_config); */ #define SMBIOS_21_MAX_TABLES_LEN 0xffff -static void smbios_validate_table(MachineState *ms) +static bool smbios_check_type4_count(uint32_t expected_t4_count, Error **errp) { - uint32_t expect_t4_count = smbios_legacy ? 
- ms->smp.cpus : smbios_smp_sockets; - - if (smbios_type4_count && smbios_type4_count != expect_t4_count) { - error_report("Expected %d SMBIOS Type 4 tables, got %d instead", - expect_t4_count, smbios_type4_count); - exit(1); - } - - if (smbios_ep_type == SMBIOS_ENTRY_POINT_TYPE_32 && - smbios_tables_len > SMBIOS_21_MAX_TABLES_LEN) { - error_report("SMBIOS 2.1 table length %zu exceeds %d", - smbios_tables_len, SMBIOS_21_MAX_TABLES_LEN); - exit(1); - } -} - - -/* legacy setup functions for <= 2.0 machines */ -static void smbios_add_field(int type, int offset, const void *data, size_t len) -{ - struct smbios_field *field; - - if (!smbios_entries) { - smbios_entries_len = sizeof(uint16_t); - smbios_entries = g_malloc0(smbios_entries_len); - } - smbios_entries = g_realloc(smbios_entries, smbios_entries_len + - sizeof(*field) + len); - field = (struct smbios_field *)(smbios_entries + smbios_entries_len); - field->header.type = SMBIOS_FIELD_ENTRY; - field->header.length = cpu_to_le16(sizeof(*field) + len); - - field->type = type; - field->offset = cpu_to_le16(offset); - memcpy(field->data, data, len); - - smbios_entries_len += sizeof(*field) + len; - (*(uint16_t *)smbios_entries) = - cpu_to_le16(le16_to_cpu(*(uint16_t *)smbios_entries) + 1); -} - -static void smbios_maybe_add_str(int type, int offset, const char *data) -{ - if (data) { - smbios_add_field(type, offset, data, strlen(data) + 1); - } -} - -static void smbios_build_type_0_fields(void) -{ - smbios_maybe_add_str(0, offsetof(struct smbios_type_0, vendor_str), - type0.vendor); - smbios_maybe_add_str(0, offsetof(struct smbios_type_0, bios_version_str), - type0.version); - smbios_maybe_add_str(0, offsetof(struct smbios_type_0, - bios_release_date_str), - type0.date); - if (type0.have_major_minor) { - smbios_add_field(0, offsetof(struct smbios_type_0, - system_bios_major_release), - &type0.major, 1); - smbios_add_field(0, offsetof(struct smbios_type_0, - system_bios_minor_release), - &type0.minor, 1); - } -} - -static void smbios_build_type_1_fields(void) -{ - smbios_maybe_add_str(1, offsetof(struct smbios_type_1, manufacturer_str), - type1.manufacturer); - smbios_maybe_add_str(1, offsetof(struct smbios_type_1, product_name_str), - type1.product); - smbios_maybe_add_str(1, offsetof(struct smbios_type_1, version_str), - type1.version); - smbios_maybe_add_str(1, offsetof(struct smbios_type_1, serial_number_str), - type1.serial); - smbios_maybe_add_str(1, offsetof(struct smbios_type_1, sku_number_str), - type1.sku); - smbios_maybe_add_str(1, offsetof(struct smbios_type_1, family_str), - type1.family); - if (qemu_uuid_set) { - /* We don't encode the UUID in the "wire format" here because this - * function is for legacy mode and needs to keep the guest ABI, and - * because we don't know what's the SMBIOS version advertised by the - * BIOS. 
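The "wire format" mentioned here refers to the SMBIOS 2.6+ rule that the first three UUID fields are stored little-endian, whereas the raw qemu_uuid bytes follow RFC 4122 order. A small illustration of the difference for the UUID 00112233-4455-6677-8899-aabbccddeeff (example value only):

    #include <stdint.h>

    /* RFC 4122 byte order, i.e. what legacy mode copies verbatim */
    static const uint8_t uuid_raw[16] = {
        0x00, 0x11, 0x22, 0x33,  0x44, 0x55,  0x66, 0x77,
        0x88, 0x99,  0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
    };

    /* SMBIOS >= 2.6 "wire format": time_low, time_mid and
     * time_hi_and_version are byte-swapped to little-endian */
    static const uint8_t uuid_wire[16] = {
        0x33, 0x22, 0x11, 0x00,  0x55, 0x44,  0x77, 0x66,
        0x88, 0x99,  0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
    };

Legacy mode keeps the first form precisely because it cannot know which SMBIOS version the firmware will advertise.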
- */ - smbios_add_field(1, offsetof(struct smbios_type_1, uuid), - &qemu_uuid, 16); + if (smbios_type4_count && smbios_type4_count != expected_t4_count) { + error_setg(errp, "Expected %d SMBIOS Type 4 tables, got %d instead", + expected_t4_count, smbios_type4_count); + return false; } + return true; } -uint8_t *smbios_get_table_legacy(MachineState *ms, size_t *length) +bool smbios_validate_table(SmbiosEntryPointType ep_type, Error **errp) { - if (!smbios_legacy) { - *length = 0; - return NULL; - } - - if (!smbios_immutable) { - smbios_build_type_0_fields(); - smbios_build_type_1_fields(); - smbios_validate_table(ms); - smbios_immutable = true; + if (ep_type == SMBIOS_ENTRY_POINT_TYPE_32 && + smbios_tables_len > SMBIOS_21_MAX_TABLES_LEN) { + error_setg(errp, "SMBIOS 2.1 table length %zu exceeds %d", + smbios_tables_len, SMBIOS_21_MAX_TABLES_LEN); + return false; } - *length = smbios_entries_len; - return smbios_entries; + return true; } -/* end: legacy setup functions for <= 2.0 machines */ - bool smbios_skip_table(uint8_t type, bool required_table) { - if (test_bit(type, have_binfile_bitmap)) { + if (test_bit(type, smbios_have_binfile_bitmap)) { return true; /* user provided their own binary blob(s) */ } - if (test_bit(type, have_fields_bitmap)) { + if (test_bit(type, smbios_have_fields_bitmap)) { return false; /* user provided fields via command line */ } if (smbios_have_defaults && required_table) { @@ -603,6 +549,7 @@ bool smbios_skip_table(uint8_t type, bool required_table) #define T2_BASE 0x200 #define T3_BASE 0x300 #define T4_BASE 0x400 +#define T9_BASE 0x900 #define T11_BASE 0xe00 #define T16_BASE 0x1000 @@ -616,25 +563,25 @@ static void smbios_build_type_0_table(void) { SMBIOS_BUILD_TABLE_PRE(0, T0_BASE, false); /* optional, leave up to BIOS */ - SMBIOS_TABLE_SET_STR(0, vendor_str, type0.vendor); - SMBIOS_TABLE_SET_STR(0, bios_version_str, type0.version); + SMBIOS_TABLE_SET_STR(0, vendor_str, smbios_type0.vendor); + SMBIOS_TABLE_SET_STR(0, bios_version_str, smbios_type0.version); t->bios_starting_address_segment = cpu_to_le16(0xE800); /* from SeaBIOS */ - SMBIOS_TABLE_SET_STR(0, bios_release_date_str, type0.date); + SMBIOS_TABLE_SET_STR(0, bios_release_date_str, smbios_type0.date); t->bios_rom_size = 0; /* hardcoded in SeaBIOS with FIXME comment */ t->bios_characteristics = cpu_to_le64(0x08); /* Not supported */ t->bios_characteristics_extension_bytes[0] = 0; t->bios_characteristics_extension_bytes[1] = 0x14; /* TCD/SVVP | VM */ - if (type0.uefi) { + if (smbios_type0.uefi) { t->bios_characteristics_extension_bytes[1] |= 0x08; /* |= UEFI */ } - if (type0.have_major_minor) { - t->system_bios_major_release = type0.major; - t->system_bios_minor_release = type0.minor; + if (smbios_type0.have_major_minor) { + t->system_bios_major_release = smbios_type0.major; + t->system_bios_minor_release = smbios_type0.minor; } else { t->system_bios_major_release = 0; t->system_bios_minor_release = 0; @@ -664,18 +611,18 @@ static void smbios_build_type_1_table(void) { SMBIOS_BUILD_TABLE_PRE(1, T1_BASE, true); /* required */ - SMBIOS_TABLE_SET_STR(1, manufacturer_str, type1.manufacturer); - SMBIOS_TABLE_SET_STR(1, product_name_str, type1.product); - SMBIOS_TABLE_SET_STR(1, version_str, type1.version); - SMBIOS_TABLE_SET_STR(1, serial_number_str, type1.serial); + SMBIOS_TABLE_SET_STR(1, manufacturer_str, smbios_type1.manufacturer); + SMBIOS_TABLE_SET_STR(1, product_name_str, smbios_type1.product); + SMBIOS_TABLE_SET_STR(1, version_str, smbios_type1.version); + SMBIOS_TABLE_SET_STR(1, serial_number_str, 
smbios_type1.serial); if (qemu_uuid_set) { smbios_encode_uuid(&t->uuid, &qemu_uuid); } else { memset(&t->uuid, 0, 16); } t->wake_up_type = 0x06; /* power switch */ - SMBIOS_TABLE_SET_STR(1, sku_number_str, type1.sku); - SMBIOS_TABLE_SET_STR(1, family_str, type1.family); + SMBIOS_TABLE_SET_STR(1, sku_number_str, smbios_type1.sku); + SMBIOS_TABLE_SET_STR(1, family_str, smbios_type1.family); SMBIOS_BUILD_TABLE_POST; } @@ -721,14 +668,16 @@ static void smbios_build_type_3_table(void) SMBIOS_BUILD_TABLE_POST; } -static void smbios_build_type_4_table(MachineState *ms, unsigned instance) +static void smbios_build_type_4_table(MachineState *ms, unsigned instance, + SmbiosEntryPointType ep_type, + Error **errp) { char sock_str[128]; size_t tbl_len = SMBIOS_TYPE_4_LEN_V28; unsigned threads_per_socket; unsigned cores_per_socket; - if (smbios_ep_type == SMBIOS_ENTRY_POINT_TYPE_64) { + if (ep_type == SMBIOS_ENTRY_POINT_TYPE_64) { tbl_len = SMBIOS_TYPE_4_LEN_V30; } @@ -738,7 +687,7 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance) snprintf(sock_str, sizeof(sock_str), "%s%2x", type4.sock_pfx, instance); SMBIOS_TABLE_SET_STR(4, socket_designation_str, sock_str); t->processor_type = 0x03; /* CPU */ - t->processor_family = 0x01; /* Other */ + t->processor_family = 0xfe; /* use Processor Family 2 field */ SMBIOS_TABLE_SET_STR(4, processor_manufacturer_str, type4.manufacturer); if (type4.processor_id == 0) { t->processor_id[0] = cpu_to_le32(smbios_cpuid_version); @@ -770,11 +719,17 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance) t->thread_count = (threads_per_socket > 255) ? 0xFF : threads_per_socket; t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */ - t->processor_family2 = cpu_to_le16(0x01); /* Other */ + t->processor_family2 = cpu_to_le16(type4.processor_family); if (tbl_len == SMBIOS_TYPE_4_LEN_V30) { t->core_count2 = t->core_enabled2 = cpu_to_le16(cores_per_socket); t->thread_count2 = cpu_to_le16(threads_per_socket); + } else if (t->core_count == 0xFF || t->thread_count == 0xFF) { + error_setg(errp, "SMBIOS 2.0 doesn't support number of processor " + "cores/threads more than 255, use " + "-machine smbios-entry-point-type=64 option to enable " + "SMBIOS 3.0 support"); + return; } SMBIOS_BUILD_TABLE_POST; @@ -801,6 +756,65 @@ static void smbios_build_type_8_table(void) } } +static void smbios_build_type_9_table(Error **errp) +{ + unsigned instance = 0; + struct type9_instance *t9; + + QTAILQ_FOREACH(t9, &type9, next) { + SMBIOS_BUILD_TABLE_PRE(9, T9_BASE + instance, true); + + SMBIOS_TABLE_SET_STR(9, slot_designation, t9->slot_designation); + t->slot_type = t9->slot_type; + t->slot_data_bus_width = t9->slot_data_bus_width; + t->current_usage = t9->current_usage; + t->slot_length = t9->slot_length; + t->slot_id = t9->slot_id; + t->slot_characteristics1 = t9->slot_characteristics1; + t->slot_characteristics2 = t9->slot_characteristics2; + + if (t9->pcidev) { + PCIDevice *pdev = NULL; + int rc = pci_qdev_find_device(t9->pcidev, &pdev); + if (rc != 0) { + error_setg(errp, + "No PCI device %s for SMBIOS type 9 entry %s", + t9->pcidev, t9->slot_designation); + return; + } + /* + * We only handle the case were the device is attached to + * the PCI root bus. The general case is more complex as + * bridges are enumerated later and the table would need + * to be updated at this moment. 
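The Device/Function Number filled in just below is taken directly from devfn, which packs both values into one byte: device (slot) in bits 7..3, function in bits 2..0. A tiny standalone reminder of that encoding, with arbitrary example values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t devfn = (uint8_t)((3 << 3) | 0);   /* device 3, function 0 */

        printf("slot=%u func=%u\n",
               (unsigned)(devfn >> 3), (unsigned)(devfn & 7));
        return 0;
    }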
+ */ + if (!pci_bus_is_root(pci_get_bus(pdev))) { + error_setg(errp, + "Cannot create type 9 entry for PCI device %s: " + "not attached to the root bus", + t9->pcidev); + return; + } + t->segment_group_number = cpu_to_le16(0); + t->bus_number = pci_dev_bus_num(pdev); + t->device_number = pdev->devfn; + } else { + /* + * Per SMBIOS spec, For slots that are not of the PCI, AGP, PCI-X, + * or PCI-Express type that do not have bus/device/function + * information, 0FFh should be populated in the fields of Segment + * Group Number, Bus Number, Device/Function Number. + */ + t->segment_group_number = 0xff; + t->bus_number = 0xff; + t->device_number = 0xff; + } + + SMBIOS_BUILD_TABLE_POST; + instance++; + } +} + static void smbios_build_type_11_table(void) { char count_str[128]; @@ -995,32 +1009,23 @@ void smbios_set_cpuid(uint32_t version, uint32_t features) field = value; \ } +void smbios_set_default_processor_family(uint16_t processor_family) +{ + if (type4.processor_family <= 0x01) { + type4.processor_family = processor_family; + } +} + void smbios_set_defaults(const char *manufacturer, const char *product, - const char *version, bool legacy_mode, - bool uuid_encoded, SmbiosEntryPointType ep_type) + const char *version, + bool uuid_encoded) { smbios_have_defaults = true; - smbios_legacy = legacy_mode; smbios_uuid_encoded = uuid_encoded; - smbios_ep_type = ep_type; - - /* drop unwanted version of command-line file blob(s) */ - if (smbios_legacy) { - g_free(smbios_tables); - /* in legacy mode, also complain if fields were given for types > 1 */ - if (find_next_bit(have_fields_bitmap, - SMBIOS_MAX_TYPE+1, 2) < SMBIOS_MAX_TYPE+1) { - error_report("can't process fields for smbios " - "types > 1 on machine versions < 2.1!"); - exit(1); - } - } else { - g_free(smbios_entries); - } - SMBIOS_SET_DEFAULT(type1.manufacturer, manufacturer); - SMBIOS_SET_DEFAULT(type1.product, product); - SMBIOS_SET_DEFAULT(type1.version, version); + SMBIOS_SET_DEFAULT(smbios_type1.manufacturer, manufacturer); + SMBIOS_SET_DEFAULT(smbios_type1.product, product); + SMBIOS_SET_DEFAULT(smbios_type1.version, version); SMBIOS_SET_DEFAULT(type2.manufacturer, manufacturer); SMBIOS_SET_DEFAULT(type2.product, product); SMBIOS_SET_DEFAULT(type2.version, version); @@ -1033,9 +1038,9 @@ void smbios_set_defaults(const char *manufacturer, const char *product, SMBIOS_SET_DEFAULT(type17.manufacturer, manufacturer); } -static void smbios_entry_point_setup(void) +static void smbios_entry_point_setup(SmbiosEntryPointType ep_type) { - switch (smbios_ep_type) { + switch (ep_type) { case SMBIOS_ENTRY_POINT_TYPE_32: memcpy(ep.ep21.anchor_string, "_SM_", 4); memcpy(ep.ep21.intermediate_anchor_string, "_DMI_", 5); @@ -1084,7 +1089,8 @@ static void smbios_entry_point_setup(void) } } -void smbios_get_tables(MachineState *ms, +static bool smbios_get_tables_ep(MachineState *ms, + SmbiosEntryPointType ep_type, const struct smbios_phys_mem_area *mem_array, const unsigned int mem_array_size, uint8_t **tables, size_t *tables_len, @@ -1092,77 +1098,87 @@ void smbios_get_tables(MachineState *ms, Error **errp) { unsigned i, dimm_cnt, offset; + ERRP_GUARD(); - if (smbios_legacy) { - *tables = *anchor = NULL; - *tables_len = *anchor_len = 0; - return; - } + assert(ep_type == SMBIOS_ENTRY_POINT_TYPE_32 || + ep_type == SMBIOS_ENTRY_POINT_TYPE_64); + + g_free(smbios_tables); + smbios_type4_count = 0; + smbios_tables = g_memdup2(usr_blobs, usr_blobs_len); + smbios_tables_len = usr_blobs_len; + smbios_table_max = usr_table_max; + smbios_table_cnt = usr_table_cnt; - 
if (!smbios_immutable) { - smbios_build_type_0_table(); - smbios_build_type_1_table(); - smbios_build_type_2_table(); - smbios_build_type_3_table(); + smbios_build_type_0_table(); + smbios_build_type_1_table(); + smbios_build_type_2_table(); + smbios_build_type_3_table(); - smbios_smp_sockets = ms->smp.sockets; - assert(smbios_smp_sockets >= 1); + assert(ms->smp.sockets >= 1); - for (i = 0; i < smbios_smp_sockets; i++) { - smbios_build_type_4_table(ms, i); + for (i = 0; i < ms->smp.sockets; i++) { + smbios_build_type_4_table(ms, i, ep_type, errp); + if (*errp) { + goto err_exit; } + } - smbios_build_type_8_table(); - smbios_build_type_11_table(); + smbios_build_type_8_table(); + smbios_build_type_9_table(errp); + smbios_build_type_11_table(); #define MAX_DIMM_SZ (16 * GiB) #define GET_DIMM_SZ ((i < dimm_cnt - 1) ? MAX_DIMM_SZ \ : ((current_machine->ram_size - 1) % MAX_DIMM_SZ) + 1) - dimm_cnt = QEMU_ALIGN_UP(current_machine->ram_size, MAX_DIMM_SZ) / MAX_DIMM_SZ; + dimm_cnt = QEMU_ALIGN_UP(current_machine->ram_size, MAX_DIMM_SZ) / + MAX_DIMM_SZ; - /* - * The offset determines if we need to keep additional space between - * table 17 and table 19 header handle numbers so that they do - * not overlap. For example, for a VM with larger than 8 TB guest - * memory and DIMM like chunks of 16 GiB, the default space between - * the two tables (T19_BASE - T17_BASE = 512) is not enough. - */ - offset = (dimm_cnt > (T19_BASE - T17_BASE)) ? \ - dimm_cnt - (T19_BASE - T17_BASE) : 0; + /* + * The offset determines if we need to keep additional space between + * table 17 and table 19 header handle numbers so that they do + * not overlap. For example, for a VM with larger than 8 TB guest + * memory and DIMM like chunks of 16 GiB, the default space between + * the two tables (T19_BASE - T17_BASE = 512) is not enough. + */ + offset = (dimm_cnt > (T19_BASE - T17_BASE)) ? \ + dimm_cnt - (T19_BASE - T17_BASE) : 0; - smbios_build_type_16_table(dimm_cnt); + smbios_build_type_16_table(dimm_cnt); - for (i = 0; i < dimm_cnt; i++) { - smbios_build_type_17_table(i, GET_DIMM_SZ); - } + for (i = 0; i < dimm_cnt; i++) { + smbios_build_type_17_table(i, GET_DIMM_SZ); + } - for (i = 0; i < mem_array_size; i++) { - smbios_build_type_19_table(i, offset, mem_array[i].address, - mem_array[i].length); - } + for (i = 0; i < mem_array_size; i++) { + smbios_build_type_19_table(i, offset, mem_array[i].address, + mem_array[i].length); + } - /* - * make sure 16 bit handle numbers in the headers of tables 19 - * and 32 do not overlap. - */ - assert((mem_array_size + offset) < (T32_BASE - T19_BASE)); + /* + * make sure 16 bit handle numbers in the headers of tables 19 + * and 32 do not overlap. 
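The offset computed above and the handle-spacing assertions here are easiest to see with concrete numbers. A standalone calculation using the MAX_DIMM_SZ and T19_BASE - T17_BASE values quoted in the hunk (the 10 TiB guest size is only an example):

    #include <stdio.h>
    #include <stdint.h>

    #define GiB           (1024ULL * 1024 * 1024)
    #define MAX_DIMM_SZ   (16 * GiB)
    #define HANDLE_SPACE  512ULL   /* T19_BASE - T17_BASE, per the comment */

    int main(void)
    {
        uint64_t ram_size = 10 * 1024 * GiB;   /* a 10 TiB guest */
        uint64_t dimm_cnt = (ram_size + MAX_DIMM_SZ - 1) / MAX_DIMM_SZ;
        uint64_t offset   = dimm_cnt > HANDLE_SPACE ? dimm_cnt - HANDLE_SPACE
                                                    : 0;

        /* prints dimm_cnt=640 offset=128: type 19 handles start 128 higher */
        printf("dimm_cnt=%llu offset=%llu\n",
               (unsigned long long)dimm_cnt, (unsigned long long)offset);
        return 0;
    }

Below 8 TiB the offset stays zero and the default handle layout is unchanged.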
+ */ + assert((mem_array_size + offset) < (T32_BASE - T19_BASE)); - smbios_build_type_32_table(); - smbios_build_type_38_table(); - smbios_build_type_41_table(errp); - smbios_build_type_127_table(); + smbios_build_type_32_table(); + smbios_build_type_38_table(); + smbios_build_type_41_table(errp); + smbios_build_type_127_table(); - smbios_validate_table(ms); - smbios_entry_point_setup(); - smbios_immutable = true; + if (!smbios_check_type4_count(ms->smp.sockets, errp)) { + goto err_exit; } + if (!smbios_validate_table(ep_type, errp)) { + goto err_exit; + } + smbios_entry_point_setup(ep_type); /* return tables blob and entry point (anchor), and their sizes */ *tables = smbios_tables; *tables_len = smbios_tables_len; *anchor = (uint8_t *)&ep; - /* calculate length based on anchor string */ if (!strncmp((char *)&ep, "_SM_", 4)) { *anchor_len = sizeof(struct smbios_21_entry_point); @@ -1171,6 +1187,57 @@ void smbios_get_tables(MachineState *ms, } else { abort(); } + + return true; +err_exit: + g_free(smbios_tables); + smbios_tables = NULL; + return false; +} + +void smbios_get_tables(MachineState *ms, + SmbiosEntryPointType ep_type, + const struct smbios_phys_mem_area *mem_array, + const unsigned int mem_array_size, + uint8_t **tables, size_t *tables_len, + uint8_t **anchor, size_t *anchor_len, + Error **errp) +{ + Error *local_err = NULL; + bool is_valid; + ERRP_GUARD(); + + switch (ep_type) { + case SMBIOS_ENTRY_POINT_TYPE_AUTO: + case SMBIOS_ENTRY_POINT_TYPE_32: + is_valid = smbios_get_tables_ep(ms, SMBIOS_ENTRY_POINT_TYPE_32, + mem_array, mem_array_size, + tables, tables_len, + anchor, anchor_len, + &local_err); + if (is_valid || ep_type != SMBIOS_ENTRY_POINT_TYPE_AUTO) { + break; + } + /* + * fall through in case AUTO endpoint is selected and + * SMBIOS 2.x tables can't be generated, to try if SMBIOS 3.x + * tables would work + */ + case SMBIOS_ENTRY_POINT_TYPE_64: + error_free(local_err); + local_err = NULL; + is_valid = smbios_get_tables_ep(ms, SMBIOS_ENTRY_POINT_TYPE_64, + mem_array, mem_array_size, + tables, tables_len, + anchor, anchor_len, + &local_err); + break; + default: + abort(); + } + if (!is_valid) { + error_propagate(errp, local_err); + } } static void save_opt(const char **dest, QemuOpts *opts, const char *name) @@ -1256,13 +1323,10 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) { const char *val; - assert(!smbios_immutable); - val = qemu_opt_get(opts, "file"); if (val) { struct smbios_structure_header *header; - int size; - struct smbios_table *table; /* legacy mode only */ + size_t size; if (!qemu_opts_validate(opts, qemu_smbios_file_opts, errp)) { return; @@ -1279,9 +1343,9 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) * (except in legacy mode, where the second '\0' is implicit and * will be inserted by the BIOS). 
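The terminator in question is the empty string-set that closes every SMBIOS structure: the formatted area is followed by its strings, and the whole structure ends with an extra NUL (two NULs in a row when there are no strings). For instance, an end-of-table structure with no strings is just six bytes (handle value illustrative):

    #include <stdint.h>

    static const uint8_t smbios_type127_example[] = {
        127,          /* type: end-of-table */
        4,            /* length of the formatted area (header only) */
        0xff, 0x7f,   /* handle, little-endian (illustrative) */
        0x00, 0x00    /* empty string-set terminator */
    };

A blob passed with -smbios file= is expected to carry that trailing terminator itself, except in legacy mode where, as noted above, the second '\0' is implied.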
*/ - smbios_tables = g_realloc(smbios_tables, smbios_tables_len + size); - header = (struct smbios_structure_header *)(smbios_tables + - smbios_tables_len); + usr_blobs = g_realloc(usr_blobs, usr_blobs_len + size); + header = (struct smbios_structure_header *)(usr_blobs + + usr_blobs_len); if (load_image_size(val, (uint8_t *)header, size) != size) { error_setg(errp, "Failed to load SMBIOS file %s", val); @@ -1289,47 +1353,30 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) } if (header->type <= SMBIOS_MAX_TYPE) { - if (test_bit(header->type, have_fields_bitmap)) { + if (test_bit(header->type, smbios_have_fields_bitmap)) { error_setg(errp, "can't load type %d struct, fields already specified!", header->type); return; } - set_bit(header->type, have_binfile_bitmap); + set_bit(header->type, smbios_have_binfile_bitmap); } if (header->type == 4) { smbios_type4_count++; } - smbios_tables_len += size; - if (size > smbios_table_max) { - smbios_table_max = size; - } - smbios_table_cnt++; - - /* add a copy of the newly loaded blob to legacy smbios_entries */ - /* NOTE: This code runs before smbios_set_defaults(), so we don't - * yet know which mode (legacy vs. aggregate-table) will be - * required. We therefore add the binary blob to both legacy - * (smbios_entries) and aggregate (smbios_tables) tables, and - * delete the one we don't need from smbios_set_defaults(), - * once we know which machine version has been requested. + /* + * preserve blob size for legacy mode so it could build its + * blobs flavor from 'usr_blobs' */ - if (!smbios_entries) { - smbios_entries_len = sizeof(uint16_t); - smbios_entries = g_malloc0(smbios_entries_len); + smbios_add_usr_blob_size(size); + + usr_blobs_len += size; + if (size > usr_table_max) { + usr_table_max = size; } - smbios_entries = g_realloc(smbios_entries, smbios_entries_len + - size + sizeof(*table)); - table = (struct smbios_table *)(smbios_entries + smbios_entries_len); - table->header.type = SMBIOS_TABLE_ENTRY; - table->header.length = cpu_to_le16(sizeof(*table) + size); - memcpy(table->data, header, size); - smbios_entries_len += sizeof(*table) + size; - (*(uint16_t *)smbios_entries) = - cpu_to_le16(le16_to_cpu(*(uint16_t *)smbios_entries) + 1); - /* end: add a copy of the newly loaded blob to legacy smbios_entries */ + usr_table_cnt++; return; } @@ -1343,41 +1390,42 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) return; } - if (test_bit(type, have_binfile_bitmap)) { + if (test_bit(type, smbios_have_binfile_bitmap)) { error_setg(errp, "can't add fields, binary file already loaded!"); return; } - set_bit(type, have_fields_bitmap); + set_bit(type, smbios_have_fields_bitmap); switch (type) { case 0: if (!qemu_opts_validate(opts, qemu_smbios_type0_opts, errp)) { return; } - save_opt(&type0.vendor, opts, "vendor"); - save_opt(&type0.version, opts, "version"); - save_opt(&type0.date, opts, "date"); - type0.uefi = qemu_opt_get_bool(opts, "uefi", false); + save_opt(&smbios_type0.vendor, opts, "vendor"); + save_opt(&smbios_type0.version, opts, "version"); + save_opt(&smbios_type0.date, opts, "date"); + smbios_type0.uefi = qemu_opt_get_bool(opts, "uefi", false); val = qemu_opt_get(opts, "release"); if (val) { - if (sscanf(val, "%hhu.%hhu", &type0.major, &type0.minor) != 2) { + if (sscanf(val, "%hhu.%hhu", &smbios_type0.major, + &smbios_type0.minor) != 2) { error_setg(errp, "Invalid release"); return; } - type0.have_major_minor = true; + smbios_type0.have_major_minor = true; } return; case 1: if (!qemu_opts_validate(opts, qemu_smbios_type1_opts, 
errp)) { return; } - save_opt(&type1.manufacturer, opts, "manufacturer"); - save_opt(&type1.product, opts, "product"); - save_opt(&type1.version, opts, "version"); - save_opt(&type1.serial, opts, "serial"); - save_opt(&type1.sku, opts, "sku"); - save_opt(&type1.family, opts, "family"); + save_opt(&smbios_type1.manufacturer, opts, "manufacturer"); + save_opt(&smbios_type1.product, opts, "product"); + save_opt(&smbios_type1.version, opts, "version"); + save_opt(&smbios_type1.serial, opts, "serial"); + save_opt(&smbios_type1.sku, opts, "sku"); + save_opt(&smbios_type1.family, opts, "family"); val = qemu_opt_get(opts, "uuid"); if (val) { @@ -1414,6 +1462,9 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) return; } save_opt(&type4.sock_pfx, opts, "sock_pfx"); + type4.processor_family = qemu_opt_get_number(opts, + "processor-family", + 0x01 /* Other */); save_opt(&type4.manufacturer, opts, "manufacturer"); save_opt(&type4.version, opts, "version"); save_opt(&type4.serial, opts, "serial"); @@ -1444,6 +1495,27 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) t8_i->port_type = qemu_opt_get_number(opts, "port_type", 0); QTAILQ_INSERT_TAIL(&type8, t8_i, next); return; + case 9: { + if (!qemu_opts_validate(opts, qemu_smbios_type9_opts, errp)) { + return; + } + struct type9_instance *t; + t = g_new0(struct type9_instance, 1); + save_opt(&t->slot_designation, opts, "slot_designation"); + t->slot_type = qemu_opt_get_number(opts, "slot_type", 0); + t->slot_data_bus_width = + qemu_opt_get_number(opts, "slot_data_bus_width", 0); + t->current_usage = qemu_opt_get_number(opts, "current_usage", 0); + t->slot_length = qemu_opt_get_number(opts, "slot_length", 0); + t->slot_id = qemu_opt_get_number(opts, "slot_id", 0); + t->slot_characteristics1 = + qemu_opt_get_number(opts, "slot_characteristics1", 0); + t->slot_characteristics2 = + qemu_opt_get_number(opts, "slot_characteristics2", 0); + save_opt(&t->pcidev, opts, "pcidev"); + QTAILQ_INSERT_TAIL(&type9, t, next); + return; + } case 11: if (!qemu_opts_validate(opts, qemu_smbios_type11_opts, errp)) { return; diff --git a/hw/smbios/smbios_legacy.c b/hw/smbios/smbios_legacy.c new file mode 100644 index 00000000000..c37a8ee821f --- /dev/null +++ b/hw/smbios/smbios_legacy.c @@ -0,0 +1,192 @@ +/* + * SMBIOS legacy support + * + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. + * Copyright (C) 2013 Red Hat, Inc. + * + * Authors: + * Alex Williamson + * Markus Armbruster + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/bswap.h" +#include "hw/firmware/smbios.h" +#include "sysemu/sysemu.h" +#include "qapi/error.h" + +struct smbios_header { + uint16_t length; + uint8_t type; +} QEMU_PACKED; + +struct smbios_field { + struct smbios_header header; + uint8_t type; + uint16_t offset; + uint8_t data[]; +} QEMU_PACKED; + +struct smbios_table { + struct smbios_header header; + uint8_t data[]; +} QEMU_PACKED; + +#define SMBIOS_FIELD_ENTRY 0 +#define SMBIOS_TABLE_ENTRY 1 + +static uint8_t *smbios_entries; +static size_t smbios_entries_len; +GArray *usr_blobs_sizes; + +void smbios_add_usr_blob_size(size_t size) +{ + if (!usr_blobs_sizes) { + usr_blobs_sizes = g_array_new(false, false, sizeof(size_t)); + } + g_array_append_val(usr_blobs_sizes, size); +} + +static void smbios_add_field(int type, int offset, const void *data, size_t len) +{ + struct smbios_field *field; + + if (!smbios_entries) { + smbios_entries_len = sizeof(uint16_t); + smbios_entries = g_malloc0(smbios_entries_len); + } + smbios_entries = g_realloc(smbios_entries, smbios_entries_len + + sizeof(*field) + len); + field = (struct smbios_field *)(smbios_entries + smbios_entries_len); + field->header.type = SMBIOS_FIELD_ENTRY; + field->header.length = cpu_to_le16(sizeof(*field) + len); + + field->type = type; + field->offset = cpu_to_le16(offset); + memcpy(field->data, data, len); + + smbios_entries_len += sizeof(*field) + len; + (*(uint16_t *)smbios_entries) = + cpu_to_le16(le16_to_cpu(*(uint16_t *)smbios_entries) + 1); +} + +static void smbios_maybe_add_str(int type, int offset, const char *data) +{ + if (data) { + smbios_add_field(type, offset, data, strlen(data) + 1); + } +} + +static void smbios_build_type_0_fields(void) +{ + smbios_maybe_add_str(0, offsetof(struct smbios_type_0, vendor_str), + smbios_type0.vendor); + smbios_maybe_add_str(0, offsetof(struct smbios_type_0, bios_version_str), + smbios_type0.version); + smbios_maybe_add_str(0, offsetof(struct smbios_type_0, + bios_release_date_str), + smbios_type0.date); + if (smbios_type0.have_major_minor) { + smbios_add_field(0, offsetof(struct smbios_type_0, + system_bios_major_release), + &smbios_type0.major, 1); + smbios_add_field(0, offsetof(struct smbios_type_0, + system_bios_minor_release), + &smbios_type0.minor, 1); + } +} + +static void smbios_build_type_1_fields(void) +{ + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, manufacturer_str), + smbios_type1.manufacturer); + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, product_name_str), + smbios_type1.product); + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, version_str), + smbios_type1.version); + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, serial_number_str), + smbios_type1.serial); + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, sku_number_str), + smbios_type1.sku); + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, family_str), + smbios_type1.family); + if (qemu_uuid_set) { + /* + * We don't encode the UUID in the "wire format" here because this + * function is for legacy mode and needs to keep the guest ABI, and + * because we don't know what's the SMBIOS version advertised by the + * BIOS. 
+ */ + smbios_add_field(1, offsetof(struct smbios_type_1, uuid), + &qemu_uuid, 16); + } +} + +uint8_t *smbios_get_table_legacy(size_t *length, Error **errp) +{ + int i; + size_t usr_offset; + + /* complain if fields were given for types > 1 */ + if (find_next_bit(smbios_have_fields_bitmap, + SMBIOS_MAX_TYPE + 1, 2) < SMBIOS_MAX_TYPE + 1) { + error_setg(errp, "can't process fields for smbios " + "types > 1 on machine versions < 2.1!"); + goto err_exit; + } + + if (test_bit(4, smbios_have_binfile_bitmap)) { + error_setg(errp, "can't process table for smbios " + "type 4 on machine versions < 2.1!"); + goto err_exit; + } + + g_free(smbios_entries); + smbios_entries_len = sizeof(uint16_t); + smbios_entries = g_malloc0(smbios_entries_len); + + /* + * build a set of legacy smbios_table entries using user provided blobs + */ + for (i = 0, usr_offset = 0; usr_blobs_sizes && i < usr_blobs_sizes->len; + i++) + { + struct smbios_table *table; + struct smbios_structure_header *header; + size_t size = g_array_index(usr_blobs_sizes, size_t, i); + + header = (struct smbios_structure_header *)(usr_blobs + usr_offset); + smbios_entries = g_realloc(smbios_entries, smbios_entries_len + + size + sizeof(*table)); + table = (struct smbios_table *)(smbios_entries + smbios_entries_len); + table->header.type = SMBIOS_TABLE_ENTRY; + table->header.length = cpu_to_le16(sizeof(*table) + size); + memcpy(table->data, header, size); + smbios_entries_len += sizeof(*table) + size; + /* + * update number of entries in the blob, + * see SeaBIOS: qemu_cfg_legacy():QEMU_CFG_SMBIOS_ENTRIES + */ + (*(uint16_t *)smbios_entries) = + cpu_to_le16(le16_to_cpu(*(uint16_t *)smbios_entries) + 1); + usr_offset += size; + } + + smbios_build_type_0_fields(); + smbios_build_type_1_fields(); + if (!smbios_validate_table(SMBIOS_ENTRY_POINT_TYPE_32, errp)) { + goto err_exit; + } + + *length = smbios_entries_len; + return smbios_entries; +err_exit: + g_free(smbios_entries); + return NULL; +} diff --git a/hw/smbios/smbios_legacy_stub.c b/hw/smbios/smbios_legacy_stub.c new file mode 100644 index 00000000000..7d593dca986 --- /dev/null +++ b/hw/smbios/smbios_legacy_stub.c @@ -0,0 +1,20 @@ +/* + * IPMI SMBIOS firmware handling + * + * Copyright (c) 2024 Igor Mammedov, Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/firmware/smbios.h" + +void smbios_add_usr_blob_size(size_t size) +{ +} + +uint8_t *smbios_get_table_legacy(size_t *length, Error **errp) +{ + g_assert_not_reached(); +} diff --git a/hw/sparc/leon3.c b/hw/sparc/leon3.c index 1e39d2e2d0a..6aaa04cb191 100644 --- a/hw/sparc/leon3.c +++ b/hw/sparc/leon3.c @@ -1,7 +1,9 @@ /* * QEMU Leon3 System Emulator * - * Copyright (c) 2010-2019 AdaCore + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2010-2024 AdaCore * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -40,7 +42,9 @@ #include "elf.h" #include "trace.h" -#include "hw/sparc/grlib.h" +#include "hw/timer/grlib_gptimer.h" +#include "hw/char/grlib_uart.h" +#include "hw/intc/grlib_irqmp.h" #include "hw/misc/grlib_ahb_apb_pnp.h" /* Default system clock. 
*/ @@ -50,6 +54,8 @@ #define LEON3_PROM_OFFSET (0x00000000) #define LEON3_RAM_OFFSET (0x40000000) +#define MAX_CPUS 4 + #define LEON3_UART_OFFSET (0x80000100) #define LEON3_UART_IRQ (3) @@ -63,9 +69,11 @@ #define LEON3_AHB_PNP_OFFSET (0xFFFFF000) typedef struct ResetData { - SPARCCPU *cpu; - uint32_t entry; /* save kernel entry in case of reset */ - target_ulong sp; /* initial stack pointer */ + struct CPUResetData { + int id; + SPARCCPU *cpu; + } info[MAX_CPUS]; + uint32_t entry; /* save kernel entry in case of reset */ } ResetData; static uint32_t *gen_store_u32(uint32_t *code, hwaddr addr, uint32_t val) @@ -91,13 +99,26 @@ static uint32_t *gen_store_u32(uint32_t *code, hwaddr addr, uint32_t val) /* * When loading a kernel in RAM the machine is expected to be in a different - * state (eg: initialized by the bootloader). This little code reproduces - * this behavior. + * state (eg: initialized by the bootloader). This little code reproduces + * this behavior. Also this code can be executed by the secondary cpus as + * well since it looks at the %asr17 register before doing any + * initialization, it allows to use the same reset address for all the + * cpus. */ -static void write_bootloader(CPUSPARCState *env, uint8_t *base, - hwaddr kernel_addr) +static void write_bootloader(void *ptr, hwaddr kernel_addr) { - uint32_t *p = (uint32_t *) base; + uint32_t *p = ptr; + uint32_t *sec_cpu_branch_p = NULL; + + /* If we are running on a secondary CPU, jump directly to the kernel. */ + + stl_p(p++, 0x85444000); /* rd %asr17, %g2 */ + stl_p(p++, 0x8530a01c); /* srl %g2, 0x1c, %g2 */ + stl_p(p++, 0x80908000); /* tst %g2 */ + /* Filled below. */ + sec_cpu_branch_p = p; + stl_p(p++, 0x0BADC0DE); /* bne xxx */ + stl_p(p++, 0x01000000); /* nop */ /* Initialize the UARTs */ /* *UART_CONTROL = UART_RECEIVE_ENABLE | UART_TRANSMIT_ENABLE; */ @@ -111,6 +132,10 @@ static void write_bootloader(CPUSPARCState *env, uint8_t *base, /* *GPTIMER0_CONFIG = GPTIMER_ENABLE | GPTIMER_RESTART; */ p = gen_store_u32(p, 0x80000318, 3); + /* Now, the relative branch above can be computed. 
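The displacement patched in here is counted in instruction words: SPARC Bicc instructions carry a 22-bit word displacement, and 0x12800000 is the bne opcode with that field left at zero, so adding the word distance between the placeholder and the current emit position completes the instruction. A small standalone sketch of the same patching step, with made-up buffer positions:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t code[8] = { 0 };
        uint32_t *branch = &code[3];   /* where the 0x0BADC0DE placeholder went */
        uint32_t *p      = &code[7];   /* current emit position (branch target) */

        /* bne with a displacement of (p - branch) words */
        *branch = 0x12800000 + (uint32_t)(p - branch);

        printf("patched insn: 0x%08x\n", (unsigned)*branch);   /* 0x12800004 */
        return 0;
    }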
*/ + stl_p(sec_cpu_branch_p, 0x12800000 + + (p - sec_cpu_branch_p)); + /* JUMP to the entry point */ stl_p(p++, 0x82100000); /* mov %g0, %g1 */ stl_p(p++, 0x03000000 + extract32(kernel_addr, 10, 22)); @@ -121,18 +146,19 @@ static void write_bootloader(CPUSPARCState *env, uint8_t *base, stl_p(p++, 0x01000000); /* nop */ } -static void main_cpu_reset(void *opaque) +static void leon3_cpu_reset(void *opaque) { - ResetData *s = (ResetData *)opaque; - CPUState *cpu = CPU(s->cpu); - CPUSPARCState *env = &s->cpu->env; + struct CPUResetData *info = (struct CPUResetData *) opaque; + int id = info->id; + ResetData *s = container_of(info, ResetData, info[id]); + CPUState *cpu = CPU(s->info[id].cpu); + CPUSPARCState *env = cpu_env(cpu); cpu_reset(cpu); - cpu->halted = 0; - env->pc = s->entry; - env->npc = s->entry + 4; - env->regbase[6] = s->sp; + cpu->halted = cpu->cpu_index != 0; + env->pc = s->entry; + env->npc = s->entry + 4; } static void leon3_cache_control_int(CPUSPARCState *env) @@ -164,9 +190,10 @@ static void leon3_cache_control_int(CPUSPARCState *env) } } -static void leon3_irq_ack(void *irq_manager, int intno) +static void leon3_irq_ack(CPUSPARCState *env, int intno) { - grlib_irqmp_ack((DeviceState *)irq_manager, intno); + CPUState *cpu = CPU(env_cpu(env)); + grlib_irqmp_ack(env->irq_manager, cpu->cpu_index, intno); } /* @@ -175,9 +202,10 @@ static void leon3_irq_ack(void *irq_manager, int intno) */ static void leon3_set_pil_in(void *opaque, int n, int level) { - CPUSPARCState *env = opaque; + DeviceState *cpu = opaque; + CPUState *cs = CPU(cpu); + CPUSPARCState *env = cpu_env(cs); uint32_t pil_in = level; - CPUState *cs; assert(env != NULL); @@ -193,7 +221,6 @@ static void leon3_set_pil_in(void *opaque, int n, int level) env->interrupt_index = TT_EXTINT | i; if (old_interrupt != env->interrupt_index) { - cs = env_cpu(env); trace_leon3_set_irq(i); cpu_interrupt(cs, CPU_INTERRUPT_HARD); } @@ -201,16 +228,29 @@ static void leon3_set_pil_in(void *opaque, int n, int level) } } } else if (!env->pil_in && (env->interrupt_index & ~15) == TT_EXTINT) { - cs = env_cpu(env); trace_leon3_reset_irq(env->interrupt_index & 15); env->interrupt_index = 0; cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } } -static void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno) +static void leon3_start_cpu_async_work(CPUState *cpu, run_on_cpu_data data) +{ + cpu->halted = 0; +} + +static void leon3_start_cpu(void *opaque, int n, int level) { - leon3_irq_ack(irq_manager, intno); + DeviceState *cpu = opaque; + CPUState *cs = CPU(cpu); + + assert(level == 1); + async_run_on_cpu(cs, leon3_start_cpu_async_work, RUN_ON_CPU_NULL); +} + +static void leon3_irq_manager(CPUSPARCState *env, int intno) +{ + leon3_irq_ack(env, intno); leon3_cache_control_int(env); } @@ -233,17 +273,23 @@ static void leon3_generic_hw_init(MachineState *machine) AHBPnp *ahb_pnp; APBPnp *apb_pnp; - /* Init CPU */ - cpu = SPARC_CPU(cpu_create(machine->cpu_type)); - env = &cpu->env; + reset_info = g_malloc0(sizeof(ResetData)); + + for (i = 0; i < machine->smp.cpus; i++) { + /* Init CPU */ + cpu = SPARC_CPU(object_new(machine->cpu_type)); + qdev_init_gpio_in_named(DEVICE(cpu), leon3_start_cpu, "start_cpu", 1); + qdev_init_gpio_in_named(DEVICE(cpu), leon3_set_pil_in, "pil", 1); + qdev_realize(DEVICE(cpu), NULL, &error_fatal); + env = &cpu->env; - cpu_sparc_set_id(env, 0); + cpu_sparc_set_id(env, i); - /* Reset data */ - reset_info = g_new0(ResetData, 1); - reset_info->cpu = cpu; - reset_info->sp = LEON3_RAM_OFFSET + ram_size; - 
qemu_register_reset(main_cpu_reset, reset_info); + /* Reset data */ + reset_info->info[i].id = i; + reset_info->info[i].cpu = cpu; + qemu_register_reset(leon3_cpu_reset, &reset_info->info[i]); + } ahb_pnp = GRLIB_AHB_PNP(qdev_new(TYPE_GRLIB_AHB_PNP)); sysbus_realize_and_unref(SYS_BUS_DEVICE(ahb_pnp), &error_fatal); @@ -261,14 +307,24 @@ static void leon3_generic_hw_init(MachineState *machine) /* Allocate IRQ manager */ irqmpdev = qdev_new(TYPE_GRLIB_IRQMP); - qdev_init_gpio_in_named_with_opaque(DEVICE(cpu), leon3_set_pil_in, - env, "pil", 1); - qdev_connect_gpio_out_named(irqmpdev, "grlib-irq", 0, - qdev_get_gpio_in_named(DEVICE(cpu), "pil", 0)); + object_property_set_int(OBJECT(irqmpdev), "ncpus", machine->smp.cpus, + &error_fatal); sysbus_realize_and_unref(SYS_BUS_DEVICE(irqmpdev), &error_fatal); + + for (i = 0; i < machine->smp.cpus; i++) { + cpu = reset_info->info[i].cpu; + env = &cpu->env; + qdev_connect_gpio_out_named(irqmpdev, "grlib-start-cpu", i, + qdev_get_gpio_in_named(DEVICE(cpu), + "start_cpu", 0)); + qdev_connect_gpio_out_named(irqmpdev, "grlib-irq", i, + qdev_get_gpio_in_named(DEVICE(cpu), + "pil", 0)); + env->irq_manager = irqmpdev; + env->qemu_irq_ack = leon3_irq_manager; + } + sysbus_mmio_map(SYS_BUS_DEVICE(irqmpdev), 0, LEON3_IRQMP_OFFSET); - env->irq_manager = irqmpdev; - env->qemu_irq_ack = leon3_irq_manager; grlib_apb_pnp_add_entry(apb_pnp, LEON3_IRQMP_OFFSET, 0xFFF, GRLIB_VENDOR_GAISLER, GRLIB_IRQMP_DEV, 2, 0, GRLIB_APBIO_AREA); @@ -339,13 +395,12 @@ static void leon3_generic_hw_init(MachineState *machine) * the machine in an initialized state through a little * bootloader. */ - uint8_t *bootloader_entry; - - bootloader_entry = memory_region_get_ram_ptr(prom); - write_bootloader(env, bootloader_entry, entry); - env->pc = LEON3_PROM_OFFSET; - env->npc = LEON3_PROM_OFFSET + 4; + write_bootloader(memory_region_get_ram_ptr(prom), entry); reset_info->entry = LEON3_PROM_OFFSET; + for (i = 0; i < machine->smp.cpus; i++) { + reset_info->info[i].cpu->env.pc = LEON3_PROM_OFFSET; + reset_info->info[i].cpu->env.npc = LEON3_PROM_OFFSET + 4; + } } } @@ -384,6 +439,7 @@ static void leon3_generic_machine_init(MachineClass *mc) mc->init = leon3_generic_hw_init; mc->default_cpu_type = SPARC_CPU_TYPE_NAME("LEON3"); mc->default_ram_id = "leon3.ram"; + mc->max_cpus = MAX_CPUS; } DEFINE_MACHINE("leon3_generic", leon3_generic_machine_init) diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c index 17bf5f28791..d52e6a7213f 100644 --- a/hw/sparc/sun4m.c +++ b/hw/sparc/sun4m.c @@ -299,30 +299,42 @@ static void *iommu_init(hwaddr addr, uint32_t version, qemu_irq irq) static void *sparc32_dma_init(hwaddr dma_base, hwaddr esp_base, qemu_irq espdma_irq, - hwaddr le_base, qemu_irq ledma_irq, NICInfo *nd) + hwaddr le_base, qemu_irq ledma_irq, + MACAddr *mac) { DeviceState *dma; ESPDMADeviceState *espdma; LEDMADeviceState *ledma; SysBusESPState *esp; SysBusPCNetState *lance; + NICInfo *nd = qemu_find_nic_info("lance", true, NULL); dma = qdev_new(TYPE_SPARC32_DMA); espdma = SPARC32_ESPDMA_DEVICE(object_resolve_path_component( OBJECT(dma), "espdma")); - sysbus_connect_irq(SYS_BUS_DEVICE(espdma), 0, espdma_irq); esp = SYSBUS_ESP(object_resolve_path_component(OBJECT(espdma), "esp")); ledma = SPARC32_LEDMA_DEVICE(object_resolve_path_component( OBJECT(dma), "ledma")); - sysbus_connect_irq(SYS_BUS_DEVICE(ledma), 0, ledma_irq); lance = SYSBUS_PCNET(object_resolve_path_component( OBJECT(ledma), "lance")); - qdev_set_nic_properties(DEVICE(lance), nd); + + if (nd) { + qdev_set_nic_properties(DEVICE(lance), nd); + 
memcpy(mac->a, nd->macaddr.a, sizeof(mac->a)); + } else { + qemu_macaddr_default_if_unset(mac); + qdev_prop_set_macaddr(DEVICE(lance), "mac", mac->a); + } sysbus_realize_and_unref(SYS_BUS_DEVICE(dma), &error_fatal); + + sysbus_connect_irq(SYS_BUS_DEVICE(espdma), 0, espdma_irq); + + sysbus_connect_irq(SYS_BUS_DEVICE(ledma), 0, ledma_irq); + sysbus_mmio_map(SYS_BUS_DEVICE(dma), 0, dma_base); sysbus_mmio_map(SYS_BUS_DEVICE(esp), 0, esp_base); @@ -577,12 +589,9 @@ static void idreg_realize(DeviceState *ds, Error **errp) { IDRegState *s = MACIO_ID_REGISTER(ds); SysBusDevice *dev = SYS_BUS_DEVICE(ds); - Error *local_err = NULL; - memory_region_init_ram_nomigrate(&s->mem, OBJECT(ds), "sun4m.idreg", - sizeof(idreg_data), &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram_nomigrate(&s->mem, OBJECT(ds), "sun4m.idreg", + sizeof(idreg_data), errp)) { return; } @@ -631,12 +640,9 @@ static void afx_realize(DeviceState *ds, Error **errp) { AFXState *s = TCX_AFX(ds); SysBusDevice *dev = SYS_BUS_DEVICE(ds); - Error *local_err = NULL; - memory_region_init_ram_nomigrate(&s->mem, OBJECT(ds), "sun4m.afx", 4, - &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram_nomigrate(&s->mem, OBJECT(ds), "sun4m.afx", + 4, errp)) { return; } @@ -715,12 +721,9 @@ static void prom_realize(DeviceState *ds, Error **errp) { PROMState *s = OPENPROM(ds); SysBusDevice *dev = SYS_BUS_DEVICE(ds); - Error *local_err = NULL; - memory_region_init_ram_nomigrate(&s->prom, OBJECT(ds), "sun4m.prom", - PROM_SIZE_MAX, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram_nomigrate(&s->prom, OBJECT(ds), "sun4m.prom", + PROM_SIZE_MAX, errp)) { return; } @@ -804,7 +807,7 @@ static void cpu_devinit(const char *cpu_type, unsigned int id, qemu_register_reset(sun4m_cpu_reset, cpu); object_property_set_bool(OBJECT(cpu), "start-powered-off", id != 0, - &error_fatal); + &error_abort); qdev_realize_and_unref(DEVICE(cpu), NULL, &error_fatal); cpu_sparc_set_id(env, id); *cpu_irqs = qemu_allocate_irqs(cpu_set_irq, cpu, MAX_PILS); @@ -832,7 +835,7 @@ static void sun4m_hw_init(MachineState *machine) unsigned int smp_cpus = machine->smp.cpus; unsigned int max_cpus = machine->smp.max_cpus; HostMemoryBackend *ram_memdev = machine->memdev; - NICInfo *nd = &nd_table[0]; + MACAddr hostid; if (machine->ram_size > hwdef->max_mem) { error_report("Too much memory for this machine: %" PRId64 "," @@ -893,10 +896,9 @@ static void sun4m_hw_init(MachineState *machine) hwdef->iommu_pad_base, hwdef->iommu_pad_len); } - qemu_check_nic_model(nd, TYPE_LANCE); sparc32_dma_init(hwdef->dma_base, hwdef->esp_base, slavio_irq[18], - hwdef->le_base, slavio_irq[16], nd); + hwdef->le_base, slavio_irq[16], &hostid); if (graphic_depth != 8 && graphic_depth != 24) { error_report("Unsupported depth: %d", graphic_depth); @@ -1048,7 +1050,7 @@ static void sun4m_hw_init(MachineState *machine) machine->initrd_filename, machine->ram_size, &initrd_size); - nvram_init(nvram, (uint8_t *)&nd->macaddr, machine->kernel_cmdline, + nvram_init(nvram, hostid.a, machine->kernel_cmdline, machine->boot_config.order, machine->ram_size, kernel_size, graphic_width, graphic_height, graphic_depth, hwdef->nvram_machine_id, "Sun4m"); diff --git a/hw/sparc/sun4m_iommu.c b/hw/sparc/sun4m_iommu.c index eb40f9377c1..06703b1d96e 100644 --- a/hw/sparc/sun4m_iommu.c +++ b/hw/sparc/sun4m_iommu.c @@ -331,7 +331,7 @@ static const VMStateDescription vmstate_iommu = { .name = "iommu", 
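/*
 * A minimal sketch of the pattern the following hunks apply across the tree:
 * the VMStateField compound literals gain a "const" qualifier so that they
 * match the const-qualified "fields" pointer they initialize and may be
 * placed in read-only storage.  ExampleState and its "reg" member are
 * hypothetical names used only for illustration; migration/vmstate.h is
 * assumed to be included.
 */
typedef struct ExampleState { uint32_t reg; } ExampleState;

static const VMStateDescription vmstate_example = {
    .name = "example-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {      /* was: (VMStateField[]) */
        VMSTATE_UINT32(reg, ExampleState),
        VMSTATE_END_OF_LIST()
    }
};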
.version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IOMMUState, IOMMU_NREGS), VMSTATE_UINT64(iostart, IOMMUState), VMSTATE_END_OF_LIST() diff --git a/hw/sparc64/sparc64.c b/hw/sparc64/sparc64.c index 72f0849f50c..3091cde5862 100644 --- a/hw/sparc64/sparc64.c +++ b/hw/sparc64/sparc64.c @@ -24,6 +24,7 @@ #include "qemu/osdep.h" +#include "qapi/error.h" #include "cpu.h" #include "hw/boards.h" #include "hw/sparc/sparc64.h" @@ -271,9 +272,10 @@ SPARCCPU *sparc64_cpu_devinit(const char *cpu_type, uint64_t prom_addr) uint32_t stick_frequency = 100 * 1000000; uint32_t hstick_frequency = 100 * 1000000; - cpu = SPARC_CPU(cpu_create(cpu_type)); + cpu = SPARC_CPU(object_new(cpu_type)); qdev_init_gpio_in_named(DEVICE(cpu), sparc64_cpu_set_ivec_irq, "ivec-irq", IVEC_MAX); + qdev_realize(DEVICE(cpu), NULL, &error_fatal); env = &cpu->env; env->tick = cpu_timer_create("tick", cpu, tick_irq, diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c index c8711703784..cff6d5abafd 100644 --- a/hw/sparc64/sun4u.c +++ b/hw/sparc64/sun4u.c @@ -360,8 +360,13 @@ static void ebus_realize(PCIDevice *pci_dev, Error **errp) pci_dev->config[0x09] = 0x00; // programming i/f pci_dev->config[0x0D] = 0x0a; // latency_timer - memory_region_init_alias(&s->bar0, OBJECT(s), "bar0", - pci_address_space_io(pci_dev), 0, 0x1000000); + /* + * BAR0 is accessed by OpenBSD but not for ebus device access: allow any + * memory access to this region to succeed which allows the OpenBSD kernel + * to boot. + */ + memory_region_init_io(&s->bar0, OBJECT(s), &unassigned_io_ops, s, + "bar0", 0x1000000); pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar0); memory_region_init_alias(&s->bar1, OBJECT(s), "bar1", pci_address_space_io(pci_dev), 0, 0x8000); @@ -454,12 +459,9 @@ static void prom_realize(DeviceState *ds, Error **errp) { PROMState *s = OPENPROM(ds); SysBusDevice *dev = SYS_BUS_DEVICE(ds); - Error *local_err = NULL; - memory_region_init_ram_nomigrate(&s->prom, OBJECT(ds), "sun4u.prom", - PROM_SIZE_MAX, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram_nomigrate(&s->prom, OBJECT(ds), "sun4u.prom", + PROM_SIZE_MAX, errp)) { return; } @@ -642,29 +644,18 @@ static void sun4uv_init(MemoryRegion *address_space_mem, memset(&macaddr, 0, sizeof(MACAddr)); onboard_nic = false; - for (i = 0; i < nb_nics; i++) { - PCIBus *bus; - nd = &nd_table[i]; - - if (!nd->model || strcmp(nd->model, mc->default_nic) == 0) { - if (!onboard_nic) { - pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), mc->default_nic); - bus = pci_busA; - memcpy(&macaddr, &nd->macaddr.a, sizeof(MACAddr)); - onboard_nic = true; - } else { - pci_dev = pci_new(-1, mc->default_nic); - bus = pci_busB; - } - } else { - pci_dev = pci_new(-1, nd->model); - bus = pci_busB; - } + nd = qemu_find_nic_info(mc->default_nic, true, NULL); + if (nd) { + pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), mc->default_nic); dev = &pci_dev->qdev; qdev_set_nic_properties(dev, nd); - pci_realize_and_unref(pci_dev, bus, &error_fatal); + pci_realize_and_unref(pci_dev, pci_busA, &error_fatal); + + memcpy(&macaddr, &nd->macaddr.a, sizeof(MACAddr)); + onboard_nic = true; } + pci_init_nic_devices(pci_busB, mc->default_nic); /* If we don't have an onboard NIC, grab a default MAC address so that * we have a valid machine id */ diff --git a/hw/ssi/Kconfig b/hw/ssi/Kconfig index 7d90a02181e..83ee53c1d08 100644 --- a/hw/ssi/Kconfig +++ b/hw/ssi/Kconfig @@ -20,3 +20,7 @@ config 
XILINX_SPIPS config STM32F2XX_SPI bool select SSI + +config BCM2835_SPI + bool + select SSI diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c index 2a4001b774a..6e1a84c1971 100644 --- a/hw/ssi/aspeed_smc.c +++ b/hw/ssi/aspeed_smc.c @@ -23,6 +23,7 @@ */ #include "qemu/osdep.h" +#include "hw/block/flash.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qemu/log.h" @@ -695,6 +696,14 @@ static void aspeed_smc_reset(DeviceState *d) for (i = 0; i < asc->cs_num_max; i++) { DeviceState *dev = ssi_get_cs(s->spi, i); if (dev) { + Object *o = OBJECT(dev); + + if (!object_dynamic_cast(o, TYPE_M25P80)) { + warn_report("Aspeed SMC %s.%d : Invalid %s device type", + BUS(s->spi)->name, i, object_get_typename(o)); + continue; + } + qemu_irq cs_line = qdev_get_gpio_in_named(dev, SSI_GPIO_CS, 0); qdev_connect_gpio_out_named(DEVICE(s), "cs", i, cs_line); } @@ -1201,7 +1210,7 @@ static const VMStateDescription vmstate_aspeed_smc = { .name = "aspeed.smc", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedSMCState, ASPEED_SMC_R_MAX), VMSTATE_UINT8(snoop_index, AspeedSMCState), VMSTATE_UINT8(snoop_dummies, AspeedSMCState), diff --git a/hw/ssi/bcm2835_spi.c b/hw/ssi/bcm2835_spi.c new file mode 100644 index 00000000000..6ecb42d4e3b --- /dev/null +++ b/hw/ssi/bcm2835_spi.c @@ -0,0 +1,288 @@ +/* + * BCM2835 SPI Master Controller + * + * Copyright (c) 2024 Rayhan Faizel + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
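+ *
+ * (A brief orientation note on the model below, summarising its behaviour:
+ *  only the polled/interrupt path is implemented.  A guest transfer is
+ *  roughly:
+ *      1. write CS with TA set (optionally CLEAR_TX/CLEAR_RX first);
+ *      2. write bytes to FIFO -- the model shifts each byte out through
+ *         ssi_transfer() and pushes the returned byte into the RX FIFO;
+ *      3. poll CS for DONE/RXD and read the RX bytes back from FIFO.
+ *  DMA (DMAEN) and LoSSI (LEN) modes are reported via LOG_UNIMP.)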
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/fifo8.h" +#include "hw/ssi/bcm2835_spi.h" +#include "hw/irq.h" +#include "migration/vmstate.h" + +static void bcm2835_spi_update_int(BCM2835SPIState *s) +{ + int do_interrupt = 0; + + /* Interrupt on DONE */ + if (s->cs & BCM2835_SPI_CS_INTD && s->cs & BCM2835_SPI_CS_DONE) { + do_interrupt = 1; + } + /* Interrupt on RXR */ + if (s->cs & BCM2835_SPI_CS_INTR && s->cs & BCM2835_SPI_CS_RXR) { + do_interrupt = 1; + } + qemu_set_irq(s->irq, do_interrupt); +} + +static void bcm2835_spi_update_rx_flags(BCM2835SPIState *s) +{ + /* Set RXD if RX FIFO is non empty */ + if (!fifo8_is_empty(&s->rx_fifo)) { + s->cs |= BCM2835_SPI_CS_RXD; + } else { + s->cs &= ~BCM2835_SPI_CS_RXD; + } + + /* Set RXF if RX FIFO is full */ + if (fifo8_is_full(&s->rx_fifo)) { + s->cs |= BCM2835_SPI_CS_RXF; + } else { + s->cs &= ~BCM2835_SPI_CS_RXF; + } + + /* Set RXR if RX FIFO is 3/4th used or above */ + if (fifo8_num_used(&s->rx_fifo) >= FIFO_SIZE_3_4) { + s->cs |= BCM2835_SPI_CS_RXR; + } else { + s->cs &= ~BCM2835_SPI_CS_RXR; + } +} + +static void bcm2835_spi_update_tx_flags(BCM2835SPIState *s) +{ + /* Set TXD if TX FIFO is not full */ + if (fifo8_is_full(&s->tx_fifo)) { + s->cs &= ~BCM2835_SPI_CS_TXD; + } else { + s->cs |= BCM2835_SPI_CS_TXD; + } + + /* Set DONE if in TA mode and TX FIFO is empty */ + if (fifo8_is_empty(&s->tx_fifo) && s->cs & BCM2835_SPI_CS_TA) { + s->cs |= BCM2835_SPI_CS_DONE; + } else { + s->cs &= ~BCM2835_SPI_CS_DONE; + } +} + +static void bcm2835_spi_flush_tx_fifo(BCM2835SPIState *s) +{ + uint8_t tx_byte, rx_byte; + + while (!fifo8_is_empty(&s->tx_fifo) && !fifo8_is_full(&s->rx_fifo)) { + tx_byte = fifo8_pop(&s->tx_fifo); + rx_byte = ssi_transfer(s->bus, tx_byte); + fifo8_push(&s->rx_fifo, rx_byte); + } + + bcm2835_spi_update_tx_flags(s); + bcm2835_spi_update_rx_flags(s); +} + +static uint64_t bcm2835_spi_read(void *opaque, hwaddr addr, unsigned size) +{ + BCM2835SPIState *s = opaque; + uint32_t readval = 0; + + switch (addr) { + case BCM2835_SPI_CS: + readval = s->cs & 0xffffffff; + break; + case BCM2835_SPI_FIFO: + bcm2835_spi_flush_tx_fifo(s); + if (s->cs & BCM2835_SPI_CS_RXD) { + readval = fifo8_pop(&s->rx_fifo); + bcm2835_spi_update_rx_flags(s); + } + + bcm2835_spi_update_int(s); + break; + case BCM2835_SPI_CLK: + readval = s->clk & 0xffff; + break; + case BCM2835_SPI_DLEN: + readval = s->dlen & 0xffff; + break; + case BCM2835_SPI_LTOH: + readval = s->ltoh & 0xf; + break; + case BCM2835_SPI_DC: + readval = s->dc & 0xffffffff; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr); + } + return readval; +} + +static void bcm2835_spi_write(void *opaque, hwaddr addr, + uint64_t value, unsigned int size) +{ + BCM2835SPIState *s = opaque; + + switch (addr) { + case BCM2835_SPI_CS: + s->cs = (value & ~RO_MASK) | (s->cs & RO_MASK); + if (!(s->cs & BCM2835_SPI_CS_TA)) { + /* Clear DONE and RXR if TA is off */ + s->cs &= ~(BCM2835_SPI_CS_DONE); + s->cs &= ~(BCM2835_SPI_CS_RXR); + } + + /* Clear RX FIFO */ + if (s->cs & BCM2835_SPI_CLEAR_RX) { + fifo8_reset(&s->rx_fifo); + bcm2835_spi_update_rx_flags(s); + } + + /* Clear TX FIFO*/ + if (s->cs & BCM2835_SPI_CLEAR_TX) { + fifo8_reset(&s->tx_fifo); + bcm2835_spi_update_tx_flags(s); + } + + /* Set Transfer Active */ + if (s->cs & BCM2835_SPI_CS_TA) { + bcm2835_spi_update_tx_flags(s); + } + + if (s->cs & BCM2835_SPI_CS_DMAEN) { + qemu_log_mask(LOG_UNIMP, "%s: " \ + "DMA not supported\n", __func__); + } + + if (s->cs & 
BCM2835_SPI_CS_LEN) { + qemu_log_mask(LOG_UNIMP, "%s: " \ + "LoSSI not supported\n", __func__); + } + + bcm2835_spi_update_int(s); + break; + case BCM2835_SPI_FIFO: + /* + * According to documentation, writes to FIFO without TA controls + * CS and DLEN registers. This is supposed to be used in DMA mode + * which is currently unimplemented. Moreover, Linux does not make + * use of this and directly modifies the CS and DLEN registers. + */ + if (s->cs & BCM2835_SPI_CS_TA) { + if (s->cs & BCM2835_SPI_CS_TXD) { + fifo8_push(&s->tx_fifo, value & 0xff); + bcm2835_spi_update_tx_flags(s); + } + + bcm2835_spi_flush_tx_fifo(s); + bcm2835_spi_update_int(s); + } + break; + case BCM2835_SPI_CLK: + s->clk = value & 0xffff; + break; + case BCM2835_SPI_DLEN: + s->dlen = value & 0xffff; + break; + case BCM2835_SPI_LTOH: + s->ltoh = value & 0xf; + break; + case BCM2835_SPI_DC: + s->dc = value & 0xffffffff; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr); + } +} + +static const MemoryRegionOps bcm2835_spi_ops = { + .read = bcm2835_spi_read, + .write = bcm2835_spi_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void bcm2835_spi_realize(DeviceState *dev, Error **errp) +{ + BCM2835SPIState *s = BCM2835_SPI(dev); + s->bus = ssi_create_bus(dev, "spi"); + + memory_region_init_io(&s->iomem, OBJECT(dev), &bcm2835_spi_ops, s, + TYPE_BCM2835_SPI, 0x18); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); + + fifo8_create(&s->tx_fifo, FIFO_SIZE); + fifo8_create(&s->rx_fifo, FIFO_SIZE); +} +static void bcm2835_spi_reset(DeviceState *dev) +{ + BCM2835SPIState *s = BCM2835_SPI(dev); + + fifo8_reset(&s->tx_fifo); + fifo8_reset(&s->rx_fifo); + + /* Reset values according to BCM2835 Peripheral Documentation */ + s->cs = BCM2835_SPI_CS_TXD | BCM2835_SPI_CS_REN; + s->clk = 0; + s->dlen = 0; + s->ltoh = 0x1; + s->dc = 0x30201020; +} + +static const VMStateDescription vmstate_bcm2835_spi = { + .name = TYPE_BCM2835_SPI, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_FIFO8(tx_fifo, BCM2835SPIState), + VMSTATE_FIFO8(rx_fifo, BCM2835SPIState), + VMSTATE_UINT32(cs, BCM2835SPIState), + VMSTATE_UINT32(clk, BCM2835SPIState), + VMSTATE_UINT32(dlen, BCM2835SPIState), + VMSTATE_UINT32(ltoh, BCM2835SPIState), + VMSTATE_UINT32(dc, BCM2835SPIState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_spi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bcm2835_spi_reset; + dc->realize = bcm2835_spi_realize; + dc->vmsd = &vmstate_bcm2835_spi; +} + +static const TypeInfo bcm2835_spi_info = { + .name = TYPE_BCM2835_SPI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835SPIState), + .class_init = bcm2835_spi_class_init, +}; + +static void bcm2835_spi_register_types(void) +{ + type_register_static(&bcm2835_spi_info); +} + +type_init(bcm2835_spi_register_types) diff --git a/hw/ssi/ibex_spi_host.c b/hw/ssi/ibex_spi_host.c index c300ec294d8..863b5fd60e9 100644 --- a/hw/ssi/ibex_spi_host.c +++ b/hw/ssi/ibex_spi_host.c @@ -570,7 +570,7 @@ static const VMStateDescription vmstate_ibex = { .name = TYPE_IBEX_SPI_HOST, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IbexSPIHostState, IBEX_SPI_HOST_MAX_REGS), VMSTATE_VARRAY_UINT32(config_opts, IbexSPIHostState, num_cs, 0, vmstate_info_uint32, uint32_t), diff --git a/hw/ssi/imx_spi.c 
b/hw/ssi/imx_spi.c index 189423bb3a5..d8a7583ff34 100644 --- a/hw/ssi/imx_spi.c +++ b/hw/ssi/imx_spi.c @@ -62,7 +62,7 @@ static const VMStateDescription vmstate_imx_spi = { .name = TYPE_IMX_SPI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO32(tx_fifo, IMXSPIState), VMSTATE_FIFO32(rx_fifo, IMXSPIState), VMSTATE_INT16(burst_length, IMXSPIState), diff --git a/hw/ssi/meson.build b/hw/ssi/meson.build index 0aebcdd6142..b999aeb027c 100644 --- a/hw/ssi/meson.build +++ b/hw/ssi/meson.build @@ -11,3 +11,4 @@ system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-ospi.c')) system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_spi.c')) system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_spi.c')) system_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_spi_host.c')) +system_ss.add(when: 'CONFIG_BCM2835_SPI', if_true: files('bcm2835_spi.c')) diff --git a/hw/ssi/mss-spi.c b/hw/ssi/mss-spi.c index b2432c5a132..1d25ba23aa5 100644 --- a/hw/ssi/mss-spi.c +++ b/hw/ssi/mss-spi.c @@ -390,7 +390,7 @@ static const VMStateDescription vmstate_mss_spi = { .name = TYPE_MSS_SPI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO32(tx_fifo, MSSSpiState), VMSTATE_FIFO32(rx_fifo, MSSSpiState), VMSTATE_UINT32_ARRAY(regs, MSSSpiState, R_SPI_MAX), diff --git a/hw/ssi/npcm7xx_fiu.c b/hw/ssi/npcm7xx_fiu.c index 4eedb2927e7..81dd972ee8c 100644 --- a/hw/ssi/npcm7xx_fiu.c +++ b/hw/ssi/npcm7xx_fiu.c @@ -534,7 +534,7 @@ static const VMStateDescription vmstate_npcm7xx_fiu = { .name = "npcm7xx-fiu", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(active_cs, NPCM7xxFIUState), VMSTATE_UINT32_ARRAY(regs, NPCM7xxFIUState, NPCM7XX_FIU_NR_REGS), VMSTATE_END_OF_LIST(), diff --git a/hw/ssi/npcm_pspi.c b/hw/ssi/npcm_pspi.c index 3fb935043ab..41a53235303 100644 --- a/hw/ssi/npcm_pspi.c +++ b/hw/ssi/npcm_pspi.c @@ -192,7 +192,7 @@ static const VMStateDescription vmstate_npcm_pspi = { .name = "npcm-pspi", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16_ARRAY(regs, NPCMPSPIState, NPCM_PSPI_NR_REGS), VMSTATE_END_OF_LIST(), }, diff --git a/hw/ssi/pl022.c b/hw/ssi/pl022.c index 8954ffebb1f..b8be8ddf0ea 100644 --- a/hw/ssi/pl022.c +++ b/hw/ssi/pl022.c @@ -249,7 +249,7 @@ static const VMStateDescription vmstate_pl022 = { .version_id = 1, .minimum_version_id = 1, .post_load = pl022_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cr0, PL022State), VMSTATE_UINT32(cr1, PL022State), VMSTATE_UINT32(bitmask, PL022State), diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c index 1f3e540ab8a..3f357e8f16a 100644 --- a/hw/ssi/ssi.c +++ b/hw/ssi/ssi.c @@ -172,7 +172,7 @@ const VMStateDescription vmstate_ssi_peripheral = { .name = "SSISlave", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(cs, SSIPeripheral), VMSTATE_END_OF_LIST() } diff --git a/hw/ssi/stm32f2xx_spi.c b/hw/ssi/stm32f2xx_spi.c index cd6e8443db3..a37139fe5ac 100644 --- a/hw/ssi/stm32f2xx_spi.c +++ b/hw/ssi/stm32f2xx_spi.c @@ -174,7 +174,7 @@ static const VMStateDescription vmstate_stm32f2xx_spi = { .name = TYPE_STM32F2XX_SPI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(spi_cr1, 
STM32F2XXSPIState), VMSTATE_UINT32(spi_cr2, STM32F2XXSPIState), VMSTATE_UINT32(spi_sr, STM32F2XXSPIState), diff --git a/hw/ssi/xilinx_spi.c b/hw/ssi/xilinx_spi.c index d4de2e7aabc..2e0687ac907 100644 --- a/hw/ssi/xilinx_spi.c +++ b/hw/ssi/xilinx_spi.c @@ -353,7 +353,7 @@ static const VMStateDescription vmstate_xilinx_spi = { .name = "xilinx_spi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO8(tx_fifo, XilinxSPI), VMSTATE_FIFO8(rx_fifo, XilinxSPI), VMSTATE_UINT32_ARRAY(regs, XilinxSPI, R_MAX), diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c index 0bdfad7e2e5..71952a410d8 100644 --- a/hw/ssi/xilinx_spips.c +++ b/hw/ssi/xilinx_spips.c @@ -1369,7 +1369,7 @@ static const VMStateDescription vmstate_xilinx_spips = { .version_id = 2, .minimum_version_id = 2, .post_load = xilinx_spips_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO8(tx_fifo, XilinxSPIPS), VMSTATE_FIFO8(rx_fifo, XilinxSPIPS), VMSTATE_UINT32_ARRAY(regs, XilinxSPIPS, XLNX_SPIPS_R_MAX), @@ -1395,7 +1395,7 @@ static const VMStateDescription vmstate_xilinx_qspips = { .name = "xilinx_qspips", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, XilinxQSPIPS, 0, vmstate_xilinx_spips, XilinxSPIPS), VMSTATE_END_OF_LIST() @@ -1407,7 +1407,7 @@ static const VMStateDescription vmstate_xlnx_zynqmp_qspips = { .version_id = 1, .minimum_version_id = 1, .post_load = xlnx_zynqmp_qspips_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, XlnxZynqMPQSPIPS, 0, vmstate_xilinx_qspips, XilinxQSPIPS), VMSTATE_FIFO8(tx_fifo_g, XlnxZynqMPQSPIPS), diff --git a/hw/ssi/xlnx-versal-ospi.c b/hw/ssi/xlnx-versal-ospi.c index 1a61679c2fe..c479138ec1c 100644 --- a/hw/ssi/xlnx-versal-ospi.c +++ b/hw/ssi/xlnx-versal-ospi.c @@ -1772,6 +1772,12 @@ static void xlnx_versal_ospi_init(Object *obj) memory_region_init_io(&s->iomem_dac, obj, &ospi_dac_ops, s, TYPE_XILINX_VERSAL_OSPI "-dac", 0x20000000); sysbus_init_mmio(sbd, &s->iomem_dac); + /* + * The OSPI DMA reads flash data through the OSPI linear address space (the + * iomem_dac region), because of this the reentrancy guard needs to be + * disabled. 
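+     * (Background: the guard in question is QEMU's MemoryRegion re-entrancy
+     *  check, which rejects an access that re-enters an MMIO region owned by
+     *  a device that is already in the middle of handling an access.  The
+     *  OSPI DMA legitimately issues such reads against its own iomem_dac
+     *  region, so the check has to be opted out of here.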
+ */ + s->iomem_dac.disable_reentrancy_guard = true; sysbus_init_irq(sbd, &s->irq); @@ -1787,7 +1793,7 @@ static const VMStateDescription vmstate_ind_op = { .name = "OSPIIndOp", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(flash_addr, IndOp), VMSTATE_UINT32(num_bytes, IndOp), VMSTATE_UINT32(done_bytes, IndOp), @@ -1800,7 +1806,7 @@ static const VMStateDescription vmstate_xlnx_versal_ospi = { .name = TYPE_XILINX_VERSAL_OSPI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO8(rx_fifo, XlnxVersalOspi), VMSTATE_FIFO8(tx_fifo, XlnxVersalOspi), VMSTATE_FIFO8(rx_sram, XlnxVersalOspi), diff --git a/hw/timer/a9gtimer.c b/hw/timer/a9gtimer.c index 5e959b6d09f..a2ac5bdfb99 100644 --- a/hw/timer/a9gtimer.c +++ b/hw/timer/a9gtimer.c @@ -328,7 +328,7 @@ static const VMStateDescription vmstate_a9_gtimer_per_cpu = { .name = "arm.cortex-a9-global-timer.percpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, A9GTimerPerCPU), VMSTATE_UINT64(compare, A9GTimerPerCPU), VMSTATE_UINT32(status, A9GTimerPerCPU), @@ -342,7 +342,7 @@ static const VMStateDescription vmstate_a9_gtimer_control = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_a9_gtimer_control_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, A9GTimerState), VMSTATE_END_OF_LIST() } @@ -352,7 +352,7 @@ static const VMStateDescription vmstate_a9_gtimer = { .name = "arm.cortex-a9-global-timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, A9GTimerState), VMSTATE_UINT64(counter, A9GTimerState), VMSTATE_UINT64(ref_counter, A9GTimerState), @@ -362,7 +362,7 @@ static const VMStateDescription vmstate_a9_gtimer = { A9GTimerPerCPU), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_a9_gtimer_control, NULL } diff --git a/hw/timer/allwinner-a10-pit.c b/hw/timer/allwinner-a10-pit.c index 971f78462ab..a524de13817 100644 --- a/hw/timer/allwinner-a10-pit.c +++ b/hw/timer/allwinner-a10-pit.c @@ -200,7 +200,7 @@ static const VMStateDescription vmstate_a10_pit = { .name = "a10.pit", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(irq_enable, AwA10PITState), VMSTATE_UINT32(irq_status, AwA10PITState), VMSTATE_UINT32_ARRAY(control, AwA10PITState, AW_A10_PIT_TIMER_NR), diff --git a/hw/timer/arm_mptimer.c b/hw/timer/arm_mptimer.c index cdfca3000be..bca4cee0e4e 100644 --- a/hw/timer/arm_mptimer.c +++ b/hw/timer/arm_mptimer.c @@ -281,7 +281,7 @@ static const VMStateDescription vmstate_timerblock = { .name = "arm_mptimer_timerblock", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, TimerBlock), VMSTATE_UINT32(status, TimerBlock), VMSTATE_PTIMER(timer, TimerBlock), @@ -293,7 +293,7 @@ static const VMStateDescription vmstate_arm_mptimer = { .name = "arm_mptimer", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_UINT32(timerblock, ARMMPTimerState, num_cpu, 3, vmstate_timerblock, TimerBlock), VMSTATE_END_OF_LIST() diff --git a/hw/timer/arm_timer.c 
b/hw/timer/arm_timer.c index 9afe8da831f..0940e03f1d9 100644 --- a/hw/timer/arm_timer.c +++ b/hw/timer/arm_timer.c @@ -163,7 +163,7 @@ static const VMStateDescription vmstate_arm_timer = { .name = "arm_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, arm_timer_state), VMSTATE_UINT32(limit, arm_timer_state), VMSTATE_INT32(int_level, arm_timer_state), @@ -282,7 +282,7 @@ static const VMStateDescription vmstate_sp804 = { .name = "sp804", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_ARRAY(level, SP804State, 2), VMSTATE_END_OF_LIST() } diff --git a/hw/timer/armv7m_systick.c b/hw/timer/armv7m_systick.c index 5dfe39afe36..f6b1acef271 100644 --- a/hw/timer/armv7m_systick.c +++ b/hw/timer/armv7m_systick.c @@ -275,7 +275,7 @@ static const VMStateDescription vmstate_systick = { .name = "armv7m_systick", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(refclk, SysTickState), VMSTATE_CLOCK(cpuclk, SysTickState), VMSTATE_UINT32(control, SysTickState), diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c index 72161f07bbe..fc5c94bdf36 100644 --- a/hw/timer/aspeed_timer.c +++ b/hw/timer/aspeed_timer.c @@ -645,7 +645,7 @@ static const VMStateDescription vmstate_aspeed_timer = { .name = "aspeed.timer", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(id, AspeedTimer), VMSTATE_INT32(level, AspeedTimer), VMSTATE_TIMER(timer, AspeedTimer), @@ -659,7 +659,7 @@ static const VMStateDescription vmstate_aspeed_timer_state = { .name = "aspeed.timerctrl", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctrl, AspeedTimerCtrlState), VMSTATE_UINT32(ctrl2, AspeedTimerCtrlState), VMSTATE_UINT32(ctrl3, AspeedTimerCtrlState), diff --git a/hw/timer/bcm2835_systmr.c b/hw/timer/bcm2835_systmr.c index 67669a57ff3..3ec64604ee5 100644 --- a/hw/timer/bcm2835_systmr.c +++ b/hw/timer/bcm2835_systmr.c @@ -146,7 +146,7 @@ static const VMStateDescription bcm2835_systmr_vmstate = { .name = "bcm2835_sys_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(reg.ctrl_status, BCM2835SystemTimerState), VMSTATE_UINT32_ARRAY(reg.compare, BCM2835SystemTimerState, BCM2835_SYSTIMER_COUNT), diff --git a/hw/timer/cadence_ttc.c b/hw/timer/cadence_ttc.c index e57a0f5f09f..54dbd4c5646 100644 --- a/hw/timer/cadence_ttc.c +++ b/hw/timer/cadence_ttc.c @@ -425,7 +425,7 @@ static const VMStateDescription vmstate_cadence_timer = { .minimum_version_id = 1, .pre_save = cadence_timer_pre_save, .post_load = cadence_timer_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(reg_clock, CadenceTimerState), VMSTATE_UINT32(reg_count, CadenceTimerState), VMSTATE_UINT32(reg_value, CadenceTimerState), @@ -443,7 +443,7 @@ static const VMStateDescription vmstate_cadence_ttc = { .name = "cadence_TTC", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(timer, CadenceTTCState, 3, 0, vmstate_cadence_timer, CadenceTimerState), diff --git a/hw/timer/cmsdk-apb-dualtimer.c b/hw/timer/cmsdk-apb-dualtimer.c index d4a509c798e..ddf9070c3c0 100644 --- a/hw/timer/cmsdk-apb-dualtimer.c 
+++ b/hw/timer/cmsdk-apb-dualtimer.c @@ -508,7 +508,7 @@ static const VMStateDescription cmsdk_dualtimermod_vmstate = { .name = "cmsdk-apb-dualtimer-module", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(timer, CMSDKAPBDualTimerModule), VMSTATE_UINT32(load, CMSDKAPBDualTimerModule), VMSTATE_UINT32(value, CMSDKAPBDualTimerModule), @@ -522,7 +522,7 @@ static const VMStateDescription cmsdk_apb_dualtimer_vmstate = { .name = "cmsdk-apb-dualtimer", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(timclk, CMSDKAPBDualTimer), VMSTATE_STRUCT_ARRAY(timermod, CMSDKAPBDualTimer, CMSDK_APB_DUALTIMER_NUM_MODULES, diff --git a/hw/timer/cmsdk-apb-timer.c b/hw/timer/cmsdk-apb-timer.c index 68aa1a76360..814545c7832 100644 --- a/hw/timer/cmsdk-apb-timer.c +++ b/hw/timer/cmsdk-apb-timer.c @@ -250,7 +250,7 @@ static const VMStateDescription cmsdk_apb_timer_vmstate = { .name = "cmsdk-apb-timer", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(timer, CMSDKAPBTimer), VMSTATE_CLOCK(pclk, CMSDKAPBTimer), VMSTATE_UINT32(ctrl, CMSDKAPBTimer), diff --git a/hw/timer/digic-timer.c b/hw/timer/digic-timer.c index 973eab4386e..9fc5c1d8a46 100644 --- a/hw/timer/digic-timer.c +++ b/hw/timer/digic-timer.c @@ -39,7 +39,7 @@ static const VMStateDescription vmstate_digic_timer = { .name = "digic.timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(ptimer, DigicTimerState), VMSTATE_UINT32(control, DigicTimerState), VMSTATE_UINT32(relvalue, DigicTimerState), diff --git a/hw/timer/etraxfs_timer.c b/hw/timer/etraxfs_timer.c index f035b745601..da7c946af52 100644 --- a/hw/timer/etraxfs_timer.c +++ b/hw/timer/etraxfs_timer.c @@ -88,7 +88,7 @@ static const VMStateDescription vmstate_etraxfs = { .name = "etraxfs", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(ptimer_t0, ETRAXTimerState), VMSTATE_PTIMER(ptimer_t1, ETRAXTimerState), VMSTATE_PTIMER(ptimer_wd, ETRAXTimerState), diff --git a/hw/timer/exynos4210_mct.c b/hw/timer/exynos4210_mct.c index 446bbd2b96c..75098cdb555 100644 --- a/hw/timer/exynos4210_mct.c +++ b/hw/timer/exynos4210_mct.c @@ -264,7 +264,7 @@ static const VMStateDescription vmstate_tick_timer = { .name = "exynos4210.mct.tick_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cnt_run, struct tick_timer), VMSTATE_UINT32(int_run, struct tick_timer), VMSTATE_UINT32(last_icnto, struct tick_timer), @@ -283,7 +283,7 @@ static const VMStateDescription vmstate_lregs = { .name = "exynos4210.mct.lregs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(cnt, struct lregs, L_REG_CNT_AMOUNT), VMSTATE_UINT32(tcon, struct lregs), VMSTATE_UINT32(int_cstat, struct lregs), @@ -297,7 +297,7 @@ static const VMStateDescription vmstate_exynos4210_mct_lt = { .name = "exynos4210.mct.lt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(id, Exynos4210MCTLT), VMSTATE_STRUCT(tick_timer, Exynos4210MCTLT, 0, vmstate_tick_timer, @@ -314,7 +314,7 @@ static const VMStateDescription vmstate_gregs = { .name = "exynos4210.mct.lregs", 
.version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(cnt, struct gregs), VMSTATE_UINT32(cnt_wstat, struct gregs), VMSTATE_UINT32(tcon, struct gregs), @@ -332,7 +332,7 @@ static const VMStateDescription vmstate_exynos4210_mct_gt = { .name = "exynos4210.mct.lt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(reg, Exynos4210MCTGT, 0, vmstate_gregs, struct gregs), VMSTATE_UINT64(count, Exynos4210MCTGT), @@ -346,7 +346,7 @@ static const VMStateDescription vmstate_exynos4210_mct_state = { .name = "exynos4210.mct", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(reg_mct_cfg, Exynos4210MCTState), VMSTATE_STRUCT_ARRAY(l_timer, Exynos4210MCTState, 2, 0, vmstate_exynos4210_mct_lt, Exynos4210MCTLT), diff --git a/hw/timer/exynos4210_pwm.c b/hw/timer/exynos4210_pwm.c index 3528d0f33ab..ca330e9446c 100644 --- a/hw/timer/exynos4210_pwm.c +++ b/hw/timer/exynos4210_pwm.c @@ -123,7 +123,7 @@ static const VMStateDescription vmstate_exynos4210_pwm = { .name = "exynos4210.pwm.pwm", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, Exynos4210PWM), VMSTATE_UINT32(freq, Exynos4210PWM), VMSTATE_PTIMER(ptimer, Exynos4210PWM), @@ -137,7 +137,7 @@ static const VMStateDescription vmstate_exynos4210_pwm_state = { .name = "exynos4210.pwm", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg_tcfg, Exynos4210PWMState, 2), VMSTATE_UINT32(reg_tcon, Exynos4210PWMState), VMSTATE_UINT32(reg_tint_cstat, Exynos4210PWMState), diff --git a/hw/timer/grlib_gptimer.c b/hw/timer/grlib_gptimer.c index 5c4923c1e09..49908854510 100644 --- a/hw/timer/grlib_gptimer.c +++ b/hw/timer/grlib_gptimer.c @@ -1,7 +1,9 @@ /* * QEMU GRLIB GPTimer Emulator * - * Copyright (c) 2010-2019 AdaCore + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2010-2024 AdaCore * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -23,7 +25,7 @@ */ #include "qemu/osdep.h" -#include "hw/sparc/grlib.h" +#include "hw/timer/grlib_gptimer.h" #include "hw/sysbus.h" #include "qemu/timer.h" #include "hw/irq.h" diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c index 6998094233a..01efe4885db 100644 --- a/hw/timer/hpet.c +++ b/hw/timer/hpet.c @@ -25,7 +25,6 @@ */ #include "qemu/osdep.h" -#include "hw/i386/pc.h" #include "hw/irq.h" #include "qapi/error.h" #include "qemu/error-report.h" @@ -39,13 +38,7 @@ #include "hw/timer/i8254.h" #include "exec/address-spaces.h" #include "qom/object.h" - -//#define HPET_DEBUG -#ifdef HPET_DEBUG -#define DPRINTF printf -#else -#define DPRINTF(...) 
-#endif +#include "trace.h" #define HPET_MSI_SUPPORT 0 @@ -296,7 +289,7 @@ static const VMStateDescription vmstate_hpet_rtc_irq_level = { .version_id = 1, .minimum_version_id = 1, .needed = hpet_rtc_irq_level_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(rtc_irq_level, HPETState), VMSTATE_END_OF_LIST() } @@ -307,7 +300,7 @@ static const VMStateDescription vmstate_hpet_offset = { .version_id = 1, .minimum_version_id = 1, .needed = hpet_offset_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(hpet_offset, HPETState), VMSTATE_END_OF_LIST() } @@ -317,7 +310,7 @@ static const VMStateDescription vmstate_hpet_timer = { .name = "hpet_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(tn, HPETTimer), VMSTATE_UINT64(config, HPETTimer), VMSTATE_UINT64(cmp, HPETTimer), @@ -336,7 +329,7 @@ static const VMStateDescription vmstate_hpet = { .pre_save = hpet_pre_save, .pre_load = hpet_pre_load, .post_load = hpet_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(config, HPETState), VMSTATE_UINT64(isr, HPETState), VMSTATE_UINT64(hpet_counter, HPETState), @@ -346,7 +339,7 @@ static const VMStateDescription vmstate_hpet = { vmstate_hpet_timer, HPETTimer), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_hpet_rtc_irq_level, &vmstate_hpet_offset, NULL @@ -431,7 +424,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr, HPETState *s = opaque; uint64_t cur_tick, index; - DPRINTF("qemu: Enter hpet_ram_readl at %" PRIx64 "\n", addr); + trace_hpet_ram_read(addr); index = addr; /*address range of all TN regs*/ if (index >= 0x100 && index <= 0x3ff) { @@ -439,7 +432,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr, HPETTimer *timer = &s->timer[timer_id]; if (timer_id > s->num_timers) { - DPRINTF("qemu: timer id out of range\n"); + trace_hpet_timer_id_out_of_range(timer_id); return 0; } @@ -457,7 +450,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr, case HPET_TN_ROUTE + 4: return timer->fsb >> 32; default: - DPRINTF("qemu: invalid hpet_ram_readl\n"); + trace_hpet_ram_read_invalid(); break; } } else { @@ -469,7 +462,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr, case HPET_CFG: return s->config; case HPET_CFG + 4: - DPRINTF("qemu: invalid HPET_CFG + 4 hpet_ram_readl\n"); + trace_hpet_invalid_hpet_cfg(4); return 0; case HPET_COUNTER: if (hpet_enabled(s)) { @@ -477,7 +470,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr, } else { cur_tick = s->hpet_counter; } - DPRINTF("qemu: reading counter = %" PRIx64 "\n", cur_tick); + trace_hpet_ram_read_reading_counter(0, cur_tick); return cur_tick; case HPET_COUNTER + 4: if (hpet_enabled(s)) { @@ -485,12 +478,12 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr, } else { cur_tick = s->hpet_counter; } - DPRINTF("qemu: reading counter + 4 = %" PRIx64 "\n", cur_tick); + trace_hpet_ram_read_reading_counter(4, cur_tick); return cur_tick >> 32; case HPET_STATUS: return s->isr; default: - DPRINTF("qemu: invalid hpet_ram_readl\n"); + trace_hpet_ram_read_invalid(); break; } } @@ -504,8 +497,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr, HPETState *s = opaque; uint64_t old_val, new_val, val, index; - DPRINTF("qemu: Enter hpet_ram_writel at %" PRIx64 " = 0x%" PRIx64 "\n", - addr, value); + trace_hpet_ram_write(addr, 
value); index = addr; old_val = hpet_ram_read(opaque, addr, 4); new_val = value; @@ -515,14 +507,14 @@ static void hpet_ram_write(void *opaque, hwaddr addr, uint8_t timer_id = (addr - 0x100) / 0x20; HPETTimer *timer = &s->timer[timer_id]; - DPRINTF("qemu: hpet_ram_writel timer_id = 0x%x\n", timer_id); + trace_hpet_ram_write_timer_id(timer_id); if (timer_id > s->num_timers) { - DPRINTF("qemu: timer id out of range\n"); + trace_hpet_timer_id_out_of_range(timer_id); return; } switch ((addr - 0x100) % 0x20) { case HPET_TN_CFG: - DPRINTF("qemu: hpet_ram_writel HPET_TN_CFG\n"); + trace_hpet_ram_write_tn_cfg(); if (activating_bit(old_val, new_val, HPET_TN_FSB_ENABLE)) { update_irq(timer, 0); } @@ -540,10 +532,10 @@ static void hpet_ram_write(void *opaque, hwaddr addr, } break; case HPET_TN_CFG + 4: // Interrupt capabilities - DPRINTF("qemu: invalid HPET_TN_CFG+4 write\n"); + trace_hpet_ram_write_invalid_tn_cfg(4); break; case HPET_TN_CMP: // comparator register - DPRINTF("qemu: hpet_ram_writel HPET_TN_CMP\n"); + trace_hpet_ram_write_tn_cmp(0); if (timer->config & HPET_TN_32BIT) { new_val = (uint32_t)new_val; } @@ -566,7 +558,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr, } break; case HPET_TN_CMP + 4: // comparator register high order - DPRINTF("qemu: hpet_ram_writel HPET_TN_CMP + 4\n"); + trace_hpet_ram_write_tn_cmp(4); if (!timer_is_periodic(timer) || (timer->config & HPET_TN_SETVAL)) { timer->cmp = (timer->cmp & 0xffffffffULL) | new_val << 32; @@ -591,7 +583,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr, timer->fsb = (new_val << 32) | (timer->fsb & 0xffffffff); break; default: - DPRINTF("qemu: invalid hpet_ram_writel\n"); + trace_hpet_ram_write_invalid(); break; } return; @@ -631,7 +623,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr, } break; case HPET_CFG + 4: - DPRINTF("qemu: invalid HPET_CFG+4 write\n"); + trace_hpet_invalid_hpet_cfg(4); break; case HPET_STATUS: val = new_val & s->isr; @@ -643,24 +635,20 @@ static void hpet_ram_write(void *opaque, hwaddr addr, break; case HPET_COUNTER: if (hpet_enabled(s)) { - DPRINTF("qemu: Writing counter while HPET enabled!\n"); + trace_hpet_ram_write_counter_write_while_enabled(); } s->hpet_counter = (s->hpet_counter & 0xffffffff00000000ULL) | value; - DPRINTF("qemu: HPET counter written. ctr = 0x%" PRIx64 " -> " - "%" PRIx64 "\n", value, s->hpet_counter); + trace_hpet_ram_write_counter_written(0, value, s->hpet_counter); break; case HPET_COUNTER + 4: - if (hpet_enabled(s)) { - DPRINTF("qemu: Writing counter while HPET enabled!\n"); - } + trace_hpet_ram_write_counter_write_while_enabled(); s->hpet_counter = (s->hpet_counter & 0xffffffffULL) | (((uint64_t)value) << 32); - DPRINTF("qemu: HPET counter + 4 written. 
ctr = 0x%" PRIx64 " -> " - "%" PRIx64 "\n", value, s->hpet_counter); + trace_hpet_ram_write_counter_written(4, value, s->hpet_counter); break; default: - DPRINTF("qemu: invalid hpet_ram_writel\n"); + trace_hpet_ram_write_invalid(); break; } } diff --git a/hw/timer/i8254_common.c b/hw/timer/i8254_common.c index b25da448c86..28fdabc3218 100644 --- a/hw/timer/i8254_common.c +++ b/hw/timer/i8254_common.c @@ -180,7 +180,7 @@ static const VMStateDescription vmstate_pit_channel = { .name = "pit channel", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(count, PITChannelState), VMSTATE_UINT16(latched_count, PITChannelState), VMSTATE_UINT8(count_latched, PITChannelState), @@ -228,7 +228,7 @@ static const VMStateDescription vmstate_pit_common = { .minimum_version_id = 2, .pre_save = pit_dispatch_pre_save, .post_load = pit_dispatch_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_V(channels[0].irq_disabled, PITCommonState, 3), VMSTATE_STRUCT_ARRAY(channels, PITCommonState, 3, 2, vmstate_pit_channel, PITChannelState), diff --git a/hw/timer/ibex_timer.c b/hw/timer/ibex_timer.c index d8b8e4e1f60..4917388d45a 100644 --- a/hw/timer/ibex_timer.c +++ b/hw/timer/ibex_timer.c @@ -252,7 +252,7 @@ static const VMStateDescription vmstate_ibex_timer = { .version_id = 2, .minimum_version_id = 2, .post_load = ibex_timer_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(timer_ctrl, IbexTimerState), VMSTATE_UINT32(timer_cfg0, IbexTimerState), VMSTATE_UINT32(timer_compare_lower0, IbexTimerState), diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c index 640e4399c24..bd625203aaf 100644 --- a/hw/timer/imx_epit.c +++ b/hw/timer/imx_epit.c @@ -383,7 +383,7 @@ static const VMStateDescription vmstate_imx_timer_epit = { .name = TYPE_IMX_EPIT, .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cr, IMXEPITState), VMSTATE_UINT32(sr, IMXEPITState), VMSTATE_UINT32(lr, IMXEPITState), diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c index 7222b1b3874..a8edaec8673 100644 --- a/hw/timer/imx_gpt.c +++ b/hw/timer/imx_gpt.c @@ -63,7 +63,7 @@ static const VMStateDescription vmstate_imx_timer_gpt = { .name = TYPE_IMX_GPT, .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cr, IMXGPTState), VMSTATE_UINT32(pr, IMXGPTState), VMSTATE_UINT32(sr, IMXGPTState), diff --git a/hw/timer/mss-timer.c b/hw/timer/mss-timer.c index ee7438f1684..b66aed56ead 100644 --- a/hw/timer/mss-timer.c +++ b/hw/timer/mss-timer.c @@ -260,7 +260,7 @@ static const VMStateDescription vmstate_timers = { .name = "mss-timer-block", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(ptimer, struct Msf2Timer), VMSTATE_UINT32_ARRAY(regs, struct Msf2Timer, R_TIM1_MAX), VMSTATE_END_OF_LIST() @@ -271,7 +271,7 @@ static const VMStateDescription vmstate_mss_timer = { .name = TYPE_MSS_TIMER, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(freq_hz, MSSTimerState), VMSTATE_STRUCT_ARRAY(timers, MSSTimerState, NUM_TIMERS, 0, vmstate_timers, struct Msf2Timer), diff --git a/hw/timer/npcm7xx_timer.c b/hw/timer/npcm7xx_timer.c index a8bd93aeb2c..779c6049fab 100644 --- a/hw/timer/npcm7xx_timer.c +++ b/hw/timer/npcm7xx_timer.c @@ 
-637,7 +637,7 @@ static const VMStateDescription vmstate_npcm7xx_base_timer = { .name = "npcm7xx-base-timer", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER(qtimer, NPCM7xxBaseTimer), VMSTATE_INT64(expires_ns, NPCM7xxBaseTimer), VMSTATE_INT64(remaining_ns, NPCM7xxBaseTimer), @@ -649,7 +649,7 @@ static const VMStateDescription vmstate_npcm7xx_timer = { .name = "npcm7xx-timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(base_timer, NPCM7xxTimer, 0, vmstate_npcm7xx_base_timer, NPCM7xxBaseTimer), @@ -663,7 +663,7 @@ static const VMStateDescription vmstate_npcm7xx_watchdog_timer = { .name = "npcm7xx-watchdog-timer", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(base_timer, NPCM7xxWatchdogTimer, 0, vmstate_npcm7xx_base_timer, NPCM7xxBaseTimer), @@ -676,7 +676,7 @@ static const VMStateDescription vmstate_npcm7xx_timer_ctrl = { .name = "npcm7xx-timer-ctrl", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tisr, NPCM7xxTimerCtrlState), VMSTATE_CLOCK(clock, NPCM7xxTimerCtrlState), VMSTATE_STRUCT_ARRAY(timer, NPCM7xxTimerCtrlState, diff --git a/hw/timer/nrf51_timer.c b/hw/timer/nrf51_timer.c index 50c6772383e..a33166a8817 100644 --- a/hw/timer/nrf51_timer.c +++ b/hw/timer/nrf51_timer.c @@ -361,7 +361,7 @@ static const VMStateDescription vmstate_nrf51_timer = { .name = TYPE_NRF51_TIMER, .version_id = 1, .post_load = nrf51_timer_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER(timer, NRF51TimerState), VMSTATE_INT64(timer_start_ns, NRF51TimerState), VMSTATE_INT64(update_counter_ns, NRF51TimerState), diff --git a/hw/timer/pxa2xx_timer.c b/hw/timer/pxa2xx_timer.c index 2ae5ae32123..6479ab1a8b3 100644 --- a/hw/timer/pxa2xx_timer.c +++ b/hw/timer/pxa2xx_timer.c @@ -18,6 +18,7 @@ #include "qemu/log.h" #include "qemu/module.h" #include "qom/object.h" +#include "sysemu/watchdog.h" #define OSMR0 0x00 #define OSMR1 0x04 @@ -417,7 +418,7 @@ static void pxa2xx_timer_tick(void *opaque) if (t->num == 3) if (i->reset3 & 1) { i->reset3 = 0; - qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + watchdog_perform_action(); } } @@ -501,7 +502,7 @@ static const VMStateDescription vmstate_pxa2xx_timer0_regs = { .name = "pxa2xx_timer0", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(value, PXA2xxTimer0), VMSTATE_END_OF_LIST(), }, @@ -511,7 +512,7 @@ static const VMStateDescription vmstate_pxa2xx_timer4_regs = { .name = "pxa2xx_timer4", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(tm, PXA2xxTimer4, 1, vmstate_pxa2xx_timer0_regs, PXA2xxTimer0), VMSTATE_INT32(oldclock, PXA2xxTimer4), @@ -533,7 +534,7 @@ static const VMStateDescription vmstate_pxa2xx_timer_regs = { .version_id = 1, .minimum_version_id = 1, .post_load = pxa25x_timer_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(clock, PXA2xxTimerInfo), VMSTATE_INT32(oldclock, PXA2xxTimerInfo), VMSTATE_UINT64(lastload, PXA2xxTimerInfo), diff --git a/hw/timer/renesas_cmt.c b/hw/timer/renesas_cmt.c index 69eabc678a6..08832932d2a 100644 --- a/hw/timer/renesas_cmt.c +++ b/hw/timer/renesas_cmt.c @@ -242,7 +242,7 @@ static const 
VMStateDescription vmstate_rcmt = { .name = "rx-cmt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(cmstr, RCMTState), VMSTATE_UINT16_ARRAY(cmcr, RCMTState, CMT_CH), VMSTATE_UINT16_ARRAY(cmcnt, RCMTState, CMT_CH), diff --git a/hw/timer/renesas_tmr.c b/hw/timer/renesas_tmr.c index 43b31213bc9..1d47d0615a4 100644 --- a/hw/timer/renesas_tmr.c +++ b/hw/timer/renesas_tmr.c @@ -447,7 +447,7 @@ static const VMStateDescription vmstate_rtmr = { .name = "rx-tmr", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(tick, RTMRState), VMSTATE_UINT8_ARRAY(tcnt, RTMRState, TMR_CH), VMSTATE_UINT8_ARRAY(tcora, RTMRState, TMR_CH), diff --git a/hw/timer/sifive_pwm.c b/hw/timer/sifive_pwm.c index c664480ccf5..e8610c37dd3 100644 --- a/hw/timer/sifive_pwm.c +++ b/hw/timer/sifive_pwm.c @@ -395,7 +395,7 @@ static const VMStateDescription vmstate_sifive_pwm = { .name = TYPE_SIFIVE_PWM, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_ARRAY(timer, SiFivePwmState, 4), VMSTATE_UINT64(tick_offset, SiFivePwmState), VMSTATE_UINT32(pwmcfg, SiFivePwmState), diff --git a/hw/timer/slavio_timer.c b/hw/timer/slavio_timer.c index 8c4f6eb06b6..5507b0145b5 100644 --- a/hw/timer/slavio_timer.c +++ b/hw/timer/slavio_timer.c @@ -344,7 +344,7 @@ static const VMStateDescription vmstate_timer = { .name ="timer", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(limit, CPUTimerState), VMSTATE_UINT32(count, CPUTimerState), VMSTATE_UINT32(counthigh, CPUTimerState), @@ -359,7 +359,7 @@ static const VMStateDescription vmstate_slavio_timer = { .name ="slavio_timer", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(cputimer, SLAVIO_TIMERState, MAX_CPUS + 1, 3, vmstate_timer, CPUTimerState), VMSTATE_END_OF_LIST() diff --git a/hw/timer/sse-counter.c b/hw/timer/sse-counter.c index 16c0e8ad15d..daceedf964e 100644 --- a/hw/timer/sse-counter.c +++ b/hw/timer/sse-counter.c @@ -442,7 +442,7 @@ static const VMStateDescription sse_counter_vmstate = { .name = "sse-counter", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clk, SSECounter), VMSTATE_END_OF_LIST() } diff --git a/hw/timer/sse-timer.c b/hw/timer/sse-timer.c index e92e83747d2..cb20a9eb79e 100644 --- a/hw/timer/sse-timer.c +++ b/hw/timer/sse-timer.c @@ -428,7 +428,7 @@ static const VMStateDescription sse_timer_vmstate = { .name = "sse-timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER(timer, SSETimer), VMSTATE_UINT32(cntfrq, SSETimer), VMSTATE_UINT32(cntp_ctl, SSETimer), diff --git a/hw/timer/stellaris-gptm.c b/hw/timer/stellaris-gptm.c index fd71c79be48..f28958cefca 100644 --- a/hw/timer/stellaris-gptm.c +++ b/hw/timer/stellaris-gptm.c @@ -250,7 +250,7 @@ static const VMStateDescription vmstate_stellaris_gptm = { .name = "stellaris_gptm", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(config, gptm_state), VMSTATE_UINT32_ARRAY(mode, gptm_state, 2), VMSTATE_UINT32(control, gptm_state), diff --git a/hw/timer/stm32f2xx_timer.c b/hw/timer/stm32f2xx_timer.c index ba8694dcd34..de4208b1a61 
100644 --- a/hw/timer/stm32f2xx_timer.c +++ b/hw/timer/stm32f2xx_timer.c @@ -274,7 +274,7 @@ static const VMStateDescription vmstate_stm32f2xx_timer = { .name = TYPE_STM32F2XX_TIMER, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(tick_offset, STM32F2XXTimerState), VMSTATE_UINT32(tim_cr1, STM32F2XXTimerState), VMSTATE_UINT32(tim_cr2, STM32F2XXTimerState), diff --git a/hw/timer/trace-events b/hw/timer/trace-events index 3eccef83858..de769f4b716 100644 --- a/hw/timer/trace-events +++ b/hw/timer/trace-events @@ -35,7 +35,7 @@ aspeed_timer_read(uint64_t offset, unsigned size, uint64_t value) "From 0x%" PRI # armv7m_systick.c systick_reload(void) "systick reload" -systick_timer_tick(void) "systick reload" +systick_timer_tick(void) "systick tick" systick_read(uint64_t addr, uint32_t value, unsigned size) "systick read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" systick_write(uint64_t addr, uint32_t value, unsigned size) "systick write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" @@ -99,3 +99,18 @@ sifive_pwm_write(uint64_t data, uint64_t offset) "Write 0x%" PRIx64 " at address sh_timer_start_stop(int enable, int current) "%d (%d)" sh_timer_read(uint64_t offset) "tmu012_read 0x%" PRIx64 sh_timer_write(uint64_t offset, uint64_t value) "tmu012_write 0x%" PRIx64 " 0x%08" PRIx64 + +# hpet.c +hpet_timer_id_out_of_range(uint8_t timer_id) "timer id out of range: 0x%" PRIx8 +hpet_invalid_hpet_cfg(uint8_t reg_off) "invalid HPET_CFG + %u" PRIx8 +hpet_ram_read(uint64_t addr) "enter hpet_ram_readl at 0x%" PRIx64 +hpet_ram_read_reading_counter(uint8_t reg_off, uint64_t cur_tick) "reading counter + %" PRIu8 " = 0x%" PRIx64 +hpet_ram_read_invalid(void) "invalid hpet_ram_readl" +hpet_ram_write(uint64_t addr, uint64_t value) "enter hpet_ram_writel at 0x%" PRIx64 " = 0x%" PRIx64 +hpet_ram_write_timer_id(uint64_t timer_id) "hpet_ram_writel timer_id = 0x%" PRIx64 +hpet_ram_write_tn_cfg(void) "hpet_ram_writel HPET_TN_CFG" +hpet_ram_write_invalid_tn_cfg(uint8_t reg_off) "invalid HPET_TN_CFG + %" PRIu8 " write" +hpet_ram_write_tn_cmp(uint8_t reg_off) "hpet_ram_writel HPET_TN_CMP + %" PRIu8 +hpet_ram_write_invalid(void) "invalid hpet_ram_writel" +hpet_ram_write_counter_write_while_enabled(void) "Writing counter while HPET enabled!" +hpet_ram_write_counter_written(uint8_t reg_off, uint64_t value, uint64_t counter) "HPET counter + %" PRIu8 "written. 
crt = 0x%" PRIx64 " -> 0x%" PRIx64 diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c index ea930da545a..5cd5a2533b8 100644 --- a/hw/tpm/tpm_crb.c +++ b/hw/tpm/tpm_crb.c @@ -220,7 +220,7 @@ static int tpm_crb_pre_save(void *opaque) static const VMStateDescription vmstate_tpm_crb = { .name = "tpm-crb", .pre_save = tpm_crb_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, CRBState, TPM_CRB_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/tpm/tpm_ppi.c b/hw/tpm/tpm_ppi.c index 7f74e26ec6c..f27ed6c35ed 100644 --- a/hw/tpm/tpm_ppi.c +++ b/hw/tpm/tpm_ppi.c @@ -47,8 +47,10 @@ void tpm_ppi_reset(TPMPPI *tpmppi) void tpm_ppi_init(TPMPPI *tpmppi, MemoryRegion *m, hwaddr addr, Object *obj) { - tpmppi->buf = qemu_memalign(qemu_real_host_page_size(), - HOST_PAGE_ALIGN(TPM_PPI_ADDR_SIZE)); + size_t host_page_size = qemu_real_host_page_size(); + + tpmppi->buf = qemu_memalign(host_page_size, + ROUND_UP(TPM_PPI_ADDR_SIZE, host_page_size)); memory_region_init_ram_device_ptr(&tpmppi->ram, obj, "tpm-ppi", TPM_PPI_ADDR_SIZE, tpmppi->buf); vmstate_register_ram(&tpmppi->ram, DEVICE(obj)); diff --git a/hw/tpm/tpm_spapr.c b/hw/tpm/tpm_spapr.c index dea7b1333b8..e084e987e6e 100644 --- a/hw/tpm/tpm_spapr.c +++ b/hw/tpm/tpm_spapr.c @@ -353,7 +353,7 @@ static const VMStateDescription vmstate_spapr_vtpm = { .name = "tpm-spapr", .pre_save = tpm_spapr_pre_save, .post_load = tpm_spapr_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SPAPR_VIO(vdev, SpaprTpmState), VMSTATE_UINT8(state, SpaprTpmState), diff --git a/hw/tpm/tpm_tis_common.c b/hw/tpm/tpm_tis_common.c index 279ce436b54..1bfa28bfd95 100644 --- a/hw/tpm/tpm_tis_common.c +++ b/hw/tpm/tpm_tis_common.c @@ -879,7 +879,7 @@ int tpm_tis_pre_save(TPMState *s) const VMStateDescription vmstate_locty = { .name = "tpm-tis/locty", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(state, TPMLocality), VMSTATE_UINT32(inte, TPMLocality), VMSTATE_UINT32(ints, TPMLocality), diff --git a/hw/tpm/tpm_tis_i2c.c b/hw/tpm/tpm_tis_i2c.c index 4ecea7fa3e9..4bb09655b40 100644 --- a/hw/tpm/tpm_tis_i2c.c +++ b/hw/tpm/tpm_tis_i2c.c @@ -115,7 +115,7 @@ static const VMStateDescription vmstate_tpm_tis_i2c = { .version_id = 0, .pre_save = tpm_tis_i2c_pre_save, .post_load = tpm_tis_i2c_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(state.buffer, TPMStateI2C), VMSTATE_UINT16(state.rw_offset, TPMStateI2C), VMSTATE_UINT8(state.active_locty, TPMStateI2C), diff --git a/hw/tpm/tpm_tis_isa.c b/hw/tpm/tpm_tis_isa.c index 0367401586e..8887b3c9c49 100644 --- a/hw/tpm/tpm_tis_isa.c +++ b/hw/tpm/tpm_tis_isa.c @@ -53,7 +53,7 @@ static const VMStateDescription vmstate_tpm_tis_isa = { .name = "tpm-tis", .version_id = 0, .pre_save = tpm_tis_pre_save_isa, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(state.buffer, TPMStateISA), VMSTATE_UINT16(state.rw_offset, TPMStateISA), VMSTATE_UINT8(state.active_locty, TPMStateISA), diff --git a/hw/tpm/tpm_tis_sysbus.c b/hw/tpm/tpm_tis_sysbus.c index 2fc550f1197..941f7f7f62c 100644 --- a/hw/tpm/tpm_tis_sysbus.c +++ b/hw/tpm/tpm_tis_sysbus.c @@ -52,7 +52,7 @@ static const VMStateDescription vmstate_tpm_tis_sysbus = { .name = "tpm-tis", .version_id = 0, .pre_save = tpm_tis_pre_save_sysbus, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(state.buffer, TPMStateSysBus), VMSTATE_UINT16(state.rw_offset, TPMStateSysBus), 
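Editor's note: the hw/tpm/tpm_ppi.c hunk above sizes the PPI buffer with ROUND_UP(TPM_PPI_ADDR_SIZE, host_page_size) instead of HOST_PAGE_ALIGN(), i.e. it rounds the region size up to a whole number of host pages obtained at run time. The fragment below is a minimal standalone sketch of that rounding arithmetic only; DEMO_ROUND_UP is a local stand-in written for the demo (QEMU has its own ROUND_UP helper), and the 0x100 size and 4 KiB page are made-up values for illustration.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Local stand-in for the rounding helper: round n up to the next
 * multiple of d. Works for any positive d, not just powers of two. */
#define DEMO_ROUND_UP(n, d) ((((n) + (d) - 1) / (d)) * (d))

int main(void)
{
    /* Made-up values: a small register window and a 4 KiB host page. */
    uint64_t ppi_size  = 0x100;
    uint64_t page_size = 4096;

    /* Size that would be handed to a page-aligned allocator. */
    uint64_t alloc_size = DEMO_ROUND_UP(ppi_size, page_size);

    printf("request 0x%" PRIx64 ", page 0x%" PRIx64 " -> allocate 0x%" PRIx64 "\n",
           ppi_size, page_size, alloc_size);   /* 0x100, 0x1000 -> 0x1000 */
    return 0;
}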
VMSTATE_UINT8(state.active_locty, TPMStateSysBus), diff --git a/hw/tricore/tricore_testboard.c b/hw/tricore/tricore_testboard.c index b6810e3be05..c29db8b451c 100644 --- a/hw/tricore/tricore_testboard.c +++ b/hw/tricore/tricore_testboard.c @@ -89,9 +89,7 @@ static void tricore_testboard_init(MachineState *machine, int board_id) memory_region_add_subregion(sysmem, 0xf0050000, pcp_data); memory_region_add_subregion(sysmem, 0xf0060000, pcp_text); - test_dev = g_new(TriCoreTestDeviceState, 1); - object_initialize(test_dev, sizeof(TriCoreTestDeviceState), - TYPE_TRICORE_TESTDEVICE); + test_dev = TRICORE_TESTDEVICE(qdev_new(TYPE_TRICORE_TESTDEVICE)); memory_region_add_subregion(sysmem, 0xf0000000, &test_dev->iomem); diff --git a/hw/usb/Kconfig b/hw/usb/Kconfig index 0f486764ed6..f569ed7eeaa 100644 --- a/hw/usb/Kconfig +++ b/hw/usb/Kconfig @@ -11,6 +11,10 @@ config USB_OHCI bool select USB +config USB_OHCI_SYSBUS + bool + select USB_OHCI + config USB_OHCI_PCI bool default y if PCI_DEVICES diff --git a/hw/usb/bus.c b/hw/usb/bus.c index 4d4c6719136..bfab2807d75 100644 --- a/hw/usb/bus.c +++ b/hw/usb/bus.c @@ -69,7 +69,7 @@ const VMStateDescription vmstate_usb_device = { .version_id = 1, .minimum_version_id = 1, .post_load = usb_device_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(addr, USBDevice), VMSTATE_INT32(state, USBDevice), VMSTATE_INT32(remote_wakeup, USBDevice), @@ -100,19 +100,6 @@ void usb_bus_release(USBBus *bus) QTAILQ_REMOVE(&busses, bus, next); } -USBBus *usb_bus_find(int busnr) -{ - USBBus *bus; - - if (-1 == busnr) - return QTAILQ_FIRST(&busses); - QTAILQ_FOREACH(bus, &busses, next) { - if (bus->busnr == busnr) - return bus; - } - return NULL; -} - static void usb_device_realize(USBDevice *dev, Error **errp) { USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); @@ -330,29 +317,6 @@ void usb_legacy_register(const char *typename, const char *usbdevice_name, } } -USBDevice *usb_new(const char *name) -{ - return USB_DEVICE(qdev_new(name)); -} - -static USBDevice *usb_try_new(const char *name) -{ - return USB_DEVICE(qdev_try_new(name)); -} - -bool usb_realize_and_unref(USBDevice *dev, USBBus *bus, Error **errp) -{ - return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); -} - -USBDevice *usb_create_simple(USBBus *bus, const char *name) -{ - USBDevice *dev = usb_new(name); - - usb_realize_and_unref(dev, bus, &error_abort); - return dev; -} - static void usb_fill_port(USBPort *port, void *opaque, int index, USBPortOps *ops, int speedmask) { @@ -667,7 +631,7 @@ HumanReadableText *qmp_x_query_usb(Error **errp) /* handle legacy -usbdevice cmd line option */ USBDevice *usbdevice_create(const char *driver) { - USBBus *bus = usb_bus_find(-1 /* any */); + USBBus *bus = QTAILQ_FIRST(&busses); LegacyUSBFactory *f = NULL; Error *err = NULL; GSList *i; diff --git a/hw/usb/ccid-card-passthru.c b/hw/usb/ccid-card-passthru.c index 07ee42f304f..a5157039042 100644 --- a/hw/usb/ccid-card-passthru.c +++ b/hw/usb/ccid-card-passthru.c @@ -378,7 +378,7 @@ static const VMStateDescription passthru_vmstate = { .name = "ccid-card-passthru", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(vscard_in_data, PassthruState), VMSTATE_UINT32(vscard_in_pos, PassthruState), VMSTATE_UINT32(vscard_in_hdr, PassthruState), diff --git a/hw/usb/dev-audio.c b/hw/usb/dev-audio.c index d5ac1f8962e..1897fff9e6f 100644 --- a/hw/usb/dev-audio.c +++ b/hw/usb/dev-audio.c @@ -124,7 +124,6 @@ static const 
USBDescIface desc_iface[] = { .bNumEndpoints = 0, .bInterfaceClass = USB_CLASS_AUDIO, .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL, - .bInterfaceProtocol = 0x04, .iInterface = STRING_USBAUDIO_CONTROL, .ndesc = 4, .descs = (USBDescOther[]) { @@ -282,7 +281,6 @@ static const USBDescIface desc_iface_multi[] = { .bNumEndpoints = 0, .bInterfaceClass = USB_CLASS_AUDIO, .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL, - .bInterfaceProtocol = 0x04, .iInterface = STRING_USBAUDIO_CONTROL, .ndesc = 4, .descs = (USBDescOther[]) { @@ -293,7 +291,7 @@ static const USBDescIface desc_iface_multi[] = { USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ DST_AC_HEADER, /* u8 bDescriptorSubtype */ U16(0x0100), /* u16 bcdADC */ - U16(0x38), /* u16 wTotalLength */ + U16(0x37), /* u16 wTotalLength */ 0x01, /* u8 bInCollection */ 0x01, /* u8 baInterfaceNr */ } diff --git a/hw/usb/dev-hid.c b/hw/usb/dev-hid.c index bdd6d1ffafe..9e358c934ef 100644 --- a/hw/usb/dev-hid.c +++ b/hw/usb/dev-hid.c @@ -756,7 +756,7 @@ static const VMStateDescription vmstate_usb_ptr = { .version_id = 1, .minimum_version_id = 1, .post_load = usb_ptr_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, USBHIDState), VMSTATE_HID_POINTER_DEVICE(hid, USBHIDState), VMSTATE_END_OF_LIST() @@ -767,7 +767,7 @@ static const VMStateDescription vmstate_usb_kbd = { .name = "usb-kbd", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, USBHIDState), VMSTATE_HID_KEYBOARD_DEVICE(hid, USBHIDState), VMSTATE_END_OF_LIST() diff --git a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c index 5703e0e826e..06e9537d035 100644 --- a/hw/usb/dev-hub.c +++ b/hw/usb/dev-hub.c @@ -623,7 +623,7 @@ static const VMStateDescription vmstate_usb_hub_port = { .name = "usb-hub-port", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(wPortStatus, USBHubPort), VMSTATE_UINT16(wPortChange, USBHubPort), VMSTATE_END_OF_LIST() @@ -642,7 +642,7 @@ static const VMStateDescription vmstate_usb_hub_port_timer = { .version_id = 1, .minimum_version_id = 1, .needed = usb_hub_port_timer_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(port_timer, USBHubState), VMSTATE_END_OF_LIST() }, @@ -652,13 +652,13 @@ static const VMStateDescription vmstate_usb_hub = { .name = "usb-hub", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, USBHubState), VMSTATE_STRUCT_ARRAY(ports, USBHubState, MAX_PORTS, 0, vmstate_usb_hub_port, USBHubPort), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_usb_hub_port_timer, NULL } diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c index 1cac1cd4350..7e4a0765ae6 100644 --- a/hw/usb/dev-mtp.c +++ b/hw/usb/dev-mtp.c @@ -2072,7 +2072,7 @@ static const VMStateDescription vmstate_usb_mtp = { .unmigratable = 1, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, MTPState), VMSTATE_END_OF_LIST() } diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c index be0a4fc3bc4..c0d63e04251 100644 --- a/hw/usb/dev-smartcard-reader.c +++ b/hw/usb/dev-smartcard-reader.c @@ -1367,7 +1367,7 @@ static const VMStateDescription bulk_in_vmstate = { .name = "CCID BulkIn state", .version_id = 
1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(data, BulkIn), VMSTATE_UINT32(len, BulkIn), VMSTATE_UINT32(pos, BulkIn), @@ -1379,7 +1379,7 @@ static const VMStateDescription answer_vmstate = { .name = "CCID Answer state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(slot, Answer), VMSTATE_UINT8(seq, Answer), VMSTATE_END_OF_LIST() @@ -1390,7 +1390,7 @@ static const VMStateDescription usb_device_vmstate = { .name = "usb_device", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(addr, USBDevice), VMSTATE_BUFFER(setup_buf, USBDevice), VMSTATE_BUFFER(data_buf, USBDevice), @@ -1404,7 +1404,7 @@ static const VMStateDescription ccid_vmstate = { .minimum_version_id = 1, .post_load = ccid_post_load, .pre_save = ccid_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(dev, USBCCIDState, 1, usb_device_vmstate, USBDevice), VMSTATE_UINT8(debug, USBCCIDState), VMSTATE_BUFFER(bulk_out_data, USBCCIDState), diff --git a/hw/usb/dev-storage-classic.c b/hw/usb/dev-storage-classic.c index 84d19752b55..6147387dc61 100644 --- a/hw/usb/dev-storage-classic.c +++ b/hw/usb/dev-storage-classic.c @@ -38,15 +38,6 @@ static void usb_msd_storage_realize(USBDevice *dev, Error **errp) return; } - if (!blkconf_blocksizes(&s->conf, errp)) { - return; - } - - if (!blkconf_apply_backend_options(&s->conf, !blk_supports_write_perm(blk), - true, errp)) { - return; - } - /* * Hack alert: this pretends to be a block device, but it's really * a SCSI bus that can serve only a single device, which it @@ -67,10 +58,7 @@ static void usb_msd_storage_realize(USBDevice *dev, Error **errp) scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &usb_msd_scsi_info_storage); scsi_dev = scsi_bus_legacy_add_drive(&s->bus, blk, 0, !!s->removable, - s->conf.bootindex, s->conf.share_rw, - s->conf.rerror, s->conf.werror, - dev->serial, - errp); + &s->conf, dev->serial, errp); blk_unref(blk); if (!scsi_dev) { return; diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c index a496c811a71..341e505bd0a 100644 --- a/hw/usb/dev-storage.c +++ b/hw/usb/dev-storage.c @@ -572,7 +572,7 @@ static const VMStateDescription vmstate_usb_msd = { .name = "usb-storage", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, MSDState), VMSTATE_UINT32(mode, MSDState), VMSTATE_UINT32(scsi_len, MSDState), diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c index f013ded91eb..1804cb67997 100644 --- a/hw/usb/dev-uas.c +++ b/hw/usb/dev-uas.c @@ -947,7 +947,7 @@ static void usb_uas_realize(USBDevice *dev, Error **errp) static const VMStateDescription vmstate_usb_uas = { .name = "usb-uas", .unmigratable = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, UASDevice), VMSTATE_END_OF_LIST() } diff --git a/hw/usb/hcd-dwc2.c b/hw/usb/hcd-dwc2.c index a0c4e782b2a..222eef82a55 100644 --- a/hw/usb/hcd-dwc2.c +++ b/hw/usb/hcd-dwc2.c @@ -1391,7 +1391,7 @@ static const VMStateDescription vmstate_dwc2_state_packet = { .name = "dwc2/packet", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(devadr, DWC2Packet), VMSTATE_UINT32(epnum, DWC2Packet), VMSTATE_UINT32(epdir, DWC2Packet), @@ -1411,7 +1411,7 @@ const VMStateDescription vmstate_dwc2_state 
= { .name = "dwc2", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(glbreg, DWC2State, DWC2_GLBREG_SIZE / sizeof(uint32_t)), VMSTATE_UINT32_ARRAY(fszreg, DWC2State, diff --git a/hw/usb/hcd-dwc3.c b/hw/usb/hcd-dwc3.c index 279263489e4..09d8e25b971 100644 --- a/hw/usb/hcd-dwc3.c +++ b/hw/usb/hcd-dwc3.c @@ -648,7 +648,7 @@ static void usb_dwc3_init(Object *obj) static const VMStateDescription vmstate_usb_dwc3 = { .name = "usb-dwc3", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, USBDWC3, USB_DWC3_R_MAX), VMSTATE_UINT8(cfg.mode, USBDWC3), VMSTATE_UINT32(cfg.dwc_usb3_user, USBDWC3), diff --git a/hw/usb/hcd-ehci-pci.c b/hw/usb/hcd-ehci-pci.c index 345444a5739..3ff54edf62a 100644 --- a/hw/usb/hcd-ehci-pci.c +++ b/hw/usb/hcd-ehci-pci.c @@ -83,7 +83,7 @@ static void usb_ehci_pci_init(Object *obj) s->capsbase = 0x00; s->opregbase = 0x20; s->portscbase = 0x44; - s->portnr = NB_PORTS; + s->portnr = EHCI_PORTS; if (!dc->hotpluggable) { s->companion_enable = true; @@ -144,7 +144,7 @@ static const VMStateDescription vmstate_ehci_pci = { .name = "ehci", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pcidev, EHCIPCIState), VMSTATE_STRUCT(ehci, EHCIPCIState, 2, vmstate_ehci, EHCIState), VMSTATE_END_OF_LIST() diff --git a/hw/usb/hcd-ehci-sysbus.c b/hw/usb/hcd-ehci-sysbus.c index a12e2188489..fe1dabd0bbe 100644 --- a/hw/usb/hcd-ehci-sysbus.c +++ b/hw/usb/hcd-ehci-sysbus.c @@ -25,7 +25,7 @@ static const VMStateDescription vmstate_ehci_sysbus = { .name = "ehci-sysbus", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(ehci, EHCISysBusState, 2, vmstate_ehci, EHCIState), VMSTATE_END_OF_LIST() } @@ -88,7 +88,7 @@ static void ehci_sysbus_class_init(ObjectClass *klass, void *data) SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(klass); sec->portscbase = 0x44; - sec->portnr = NB_PORTS; + sec->portnr = EHCI_PORTS; dc->realize = usb_ehci_sysbus_realize; dc->vmsd = &vmstate_ehci_sysbus; diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c index 19b4534c20c..01864d46499 100644 --- a/hw/usb/hcd-ehci.c +++ b/hw/usb/hcd-ehci.c @@ -783,9 +783,9 @@ static void ehci_register_companion(USBBus *bus, USBPort *ports[], EHCIState *s = container_of(bus, EHCIState, bus); uint32_t i; - if (firstport + portcount > NB_PORTS) { + if (firstport + portcount > EHCI_PORTS) { error_setg(errp, "firstport must be between 0 and %u", - NB_PORTS - portcount); + EHCI_PORTS - portcount); return; } @@ -831,7 +831,7 @@ static USBDevice *ehci_find_device(EHCIState *ehci, uint8_t addr) USBPort *port; int i; - for (i = 0; i < NB_PORTS; i++) { + for (i = 0; i < EHCI_PORTS; i++) { port = &ehci->ports[i]; if (!(ehci->portsc[i] & PORTSC_PED)) { DPRINTF("Port %d not enabled\n", i); @@ -850,7 +850,7 @@ void ehci_reset(void *opaque) { EHCIState *s = opaque; int i; - USBDevice *devs[NB_PORTS]; + USBDevice *devs[EHCI_PORTS]; trace_usb_ehci_reset(); @@ -858,7 +858,7 @@ void ehci_reset(void *opaque) * Do the detach before touching portsc, so that it correctly gets send to * us or to our companion based on PORTSC_POWNER before the reset. 
*/ - for(i = 0; i < NB_PORTS; i++) { + for(i = 0; i < EHCI_PORTS; i++) { devs[i] = s->ports[i].dev; if (devs[i] && devs[i]->attached) { usb_detach(&s->ports[i]); @@ -877,7 +877,7 @@ void ehci_reset(void *opaque) s->astate = EST_INACTIVE; s->pstate = EST_INACTIVE; - for(i = 0; i < NB_PORTS; i++) { + for(i = 0; i < EHCI_PORTS; i++) { if (s->companion_ports[i]) { s->portsc[i] = PORTSC_POWNER | PORTSC_PPOWER; } else { @@ -1086,8 +1086,9 @@ static void ehci_opreg_write(void *ptr, hwaddr addr, case CONFIGFLAG: val &= 0x1; if (val) { - for(i = 0; i < NB_PORTS; i++) + for (i = 0; i < EHCI_PORTS; i++) { handle_port_owner_write(s, i, 0); + } } break; @@ -2426,7 +2427,7 @@ static int usb_ehci_post_load(void *opaque, int version_id) EHCIState *s = opaque; int i; - for (i = 0; i < NB_PORTS; i++) { + for (i = 0; i < EHCI_PORTS; i++) { USBPort *companion = s->companion_ports[i]; if (companion == NULL) { continue; @@ -2451,7 +2452,7 @@ static void usb_ehci_vm_state_change(void *opaque, bool running, RunState state) * USB-devices which have async handled packages have a packet in the * ep queue to match the completion with. */ - if (state == RUN_STATE_RUNNING) { + if (running) { ehci_advance_async_state(ehci); } @@ -2473,7 +2474,7 @@ const VMStateDescription vmstate_ehci = { .minimum_version_id = 1, .pre_save = usb_ehci_pre_save, .post_load = usb_ehci_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* mmio registers */ VMSTATE_UINT32(usbcmd, EHCIState), VMSTATE_UINT32(usbsts, EHCIState), @@ -2508,9 +2509,9 @@ void usb_ehci_realize(EHCIState *s, DeviceState *dev, Error **errp) { int i; - if (s->portnr > NB_PORTS) { + if (s->portnr > EHCI_PORTS) { error_setg(errp, "Too many ports! Max. port number is %d.", - NB_PORTS); + EHCI_PORTS); return; } if (s->maxframes < 8 || s->maxframes > 512) { diff --git a/hw/usb/hcd-ehci.h b/hw/usb/hcd-ehci.h index 2cd821f49e4..56a1c09d1f3 100644 --- a/hw/usb/hcd-ehci.h +++ b/hw/usb/hcd-ehci.h @@ -37,7 +37,7 @@ #define MMIO_SIZE 0x1000 #define CAPA_SIZE 0x10 -#define NB_PORTS 6 /* Max. Number of downstream ports */ +#define EHCI_PORTS 6 /* Max. Number of downstream ports */ typedef struct EHCIPacket EHCIPacket; typedef struct EHCIQueue EHCIQueue; @@ -288,7 +288,7 @@ struct EHCIState { uint32_t configflag; }; }; - uint32_t portsc[NB_PORTS]; + uint32_t portsc[EHCI_PORTS]; /* * Internal states, shadow registers, etc @@ -298,8 +298,8 @@ struct EHCIState { bool working; uint32_t astate; /* Current state in asynchronous schedule */ uint32_t pstate; /* Current state in periodic schedule */ - USBPort ports[NB_PORTS]; - USBPort *companion_ports[NB_PORTS]; + USBPort ports[EHCI_PORTS]; + USBPort *companion_ports[EHCI_PORTS]; uint32_t usbsts_pending; uint32_t usbsts_frindex; EHCIQueueHead aqueues; diff --git a/hw/usb/hcd-ohci-pci.c b/hw/usb/hcd-ohci-pci.c index 6b630d35a7f..33ed9b6f5a5 100644 --- a/hw/usb/hcd-ohci-pci.c +++ b/hw/usb/hcd-ohci-pci.c @@ -120,7 +120,7 @@ static const VMStateDescription vmstate_ohci = { .name = "ohci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, OHCIPCIState), VMSTATE_STRUCT(state, OHCIPCIState, 1, vmstate_ohci_state, OHCIState), VMSTATE_END_OF_LIST() diff --git a/hw/usb/hcd-ohci-sysbus.c b/hw/usb/hcd-ohci-sysbus.c new file mode 100644 index 00000000000..6fba7f50f87 --- /dev/null +++ b/hw/usb/hcd-ohci-sysbus.c @@ -0,0 +1,88 @@ +/* + * QEMU USB OHCI Emulation + * Copyright (c) 2006 Openedhand Ltd. 
+ * Copyright (c) 2010 CodeSourcery + * Copyright (c) 2024 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/usb.h" +#include "migration/vmstate.h" +#include "hw/sysbus.h" +#include "hw/qdev-dma.h" +#include "hw/qdev-properties.h" +#include "trace.h" +#include "hcd-ohci.h" + + +static void ohci_sysbus_realize(DeviceState *dev, Error **errp) +{ + OHCISysBusState *s = SYSBUS_OHCI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + Error *err = NULL; + + usb_ohci_init(&s->ohci, dev, s->num_ports, s->dma_offset, + s->masterbus, s->firstport, + &address_space_memory, ohci_sysbus_die, &err); + if (err) { + error_propagate(errp, err); + return; + } + sysbus_init_irq(sbd, &s->ohci.irq); + sysbus_init_mmio(sbd, &s->ohci.mem); +} + +static void ohci_sysbus_reset(DeviceState *dev) +{ + OHCISysBusState *s = SYSBUS_OHCI(dev); + OHCIState *ohci = &s->ohci; + + ohci_hard_reset(ohci); +} + +static Property ohci_sysbus_properties[] = { + DEFINE_PROP_STRING("masterbus", OHCISysBusState, masterbus), + DEFINE_PROP_UINT32("num-ports", OHCISysBusState, num_ports, 3), + DEFINE_PROP_UINT32("firstport", OHCISysBusState, firstport, 0), + DEFINE_PROP_DMAADDR("dma-offset", OHCISysBusState, dma_offset, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ohci_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = ohci_sysbus_realize; + set_bit(DEVICE_CATEGORY_USB, dc->categories); + dc->desc = "OHCI USB Controller"; + device_class_set_props(dc, ohci_sysbus_properties); + dc->reset = ohci_sysbus_reset; +} + +static const TypeInfo ohci_sysbus_types[] = { + { + .name = TYPE_SYSBUS_OHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(OHCISysBusState), + .class_init = ohci_sysbus_class_init, + }, +}; + +DEFINE_TYPES(ohci_sysbus_types); diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 7ff1b65ced1..fc8fc91a1d1 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -1955,36 +1955,11 @@ void ohci_sysbus_die(struct OHCIState *ohci) ohci_bus_stop(ohci); } -static void ohci_realize_pxa(DeviceState *dev, Error **errp) -{ - OHCISysBusState *s = SYSBUS_OHCI(dev); - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - Error *err = NULL; - - usb_ohci_init(&s->ohci, dev, s->num_ports, s->dma_offset, - s->masterbus, s->firstport, - &address_space_memory, ohci_sysbus_die, &err); - if (err) { - error_propagate(errp, err); - return; - } - sysbus_init_irq(sbd, &s->ohci.irq); - sysbus_init_mmio(sbd, &s->ohci.mem); -} - -static void usb_ohci_reset_sysbus(DeviceState *dev) -{ - OHCISysBusState *s = SYSBUS_OHCI(dev); - OHCIState *ohci = &s->ohci; - - ohci_hard_reset(ohci); -} - static const VMStateDescription vmstate_ohci_state_port = { .name = "ohci-core/port", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) 
{ + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctrl, OHCIPort), VMSTATE_END_OF_LIST() }, @@ -2002,7 +1977,7 @@ static const VMStateDescription vmstate_ohci_eof_timer = { .version_id = 1, .minimum_version_id = 1, .needed = ohci_eof_timer_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(eof_timer, OHCIState), VMSTATE_END_OF_LIST() }, @@ -2012,7 +1987,7 @@ const VMStateDescription vmstate_ohci_state = { .name = "ohci-core", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(sof_time, OHCIState), VMSTATE_UINT32(ctl, OHCIState), VMSTATE_UINT32(status, OHCIState), @@ -2049,41 +2024,8 @@ const VMStateDescription vmstate_ohci_state = { VMSTATE_BOOL(async_complete, OHCIState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_ohci_eof_timer, NULL } }; - -static Property ohci_sysbus_properties[] = { - DEFINE_PROP_STRING("masterbus", OHCISysBusState, masterbus), - DEFINE_PROP_UINT32("num-ports", OHCISysBusState, num_ports, 3), - DEFINE_PROP_UINT32("firstport", OHCISysBusState, firstport, 0), - DEFINE_PROP_DMAADDR("dma-offset", OHCISysBusState, dma_offset, 0), - DEFINE_PROP_END_OF_LIST(), -}; - -static void ohci_sysbus_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = ohci_realize_pxa; - set_bit(DEVICE_CATEGORY_USB, dc->categories); - dc->desc = "OHCI USB Controller"; - device_class_set_props(dc, ohci_sysbus_properties); - dc->reset = usb_ohci_reset_sysbus; -} - -static const TypeInfo ohci_sysbus_info = { - .name = TYPE_SYSBUS_OHCI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(OHCISysBusState), - .class_init = ohci_sysbus_class_init, -}; - -static void ohci_register_types(void) -{ - type_register_static(&ohci_sysbus_info); -} - -type_init(ohci_register_types) diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c index 77baaa7a6b1..a03cf22e69f 100644 --- a/hw/usb/hcd-uhci.c +++ b/hw/usb/hcd-uhci.c @@ -322,7 +322,7 @@ static void uhci_reset(DeviceState *dev) s->fl_base_addr = 0; s->sof_timing = 64; - for(i = 0; i < NB_PORTS; i++) { + for(i = 0; i < UHCI_PORTS; i++) { port = &s->ports[i]; port->ctrl = 0x0080; if (port->port.dev && port->port.dev->attached) { @@ -339,7 +339,7 @@ static const VMStateDescription vmstate_uhci_port = { .name = "uhci port", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(ctrl, UHCIPort), VMSTATE_END_OF_LIST() } @@ -361,10 +361,10 @@ static const VMStateDescription vmstate_uhci = { .version_id = 3, .minimum_version_id = 1, .post_load = uhci_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, UHCIState), VMSTATE_UINT8_EQUAL(num_ports_vmstate, UHCIState, NULL), - VMSTATE_STRUCT_ARRAY(ports, UHCIState, NB_PORTS, 1, + VMSTATE_STRUCT_ARRAY(ports, UHCIState, UHCI_PORTS, 1, vmstate_uhci_port, UHCIPort), VMSTATE_UINT16(cmd, UHCIState), VMSTATE_UINT16(status, UHCIState), @@ -404,7 +404,7 @@ static void uhci_port_write(void *opaque, hwaddr addr, int i; /* send reset on the USB bus */ - for(i = 0; i < NB_PORTS; i++) { + for(i = 0; i < UHCI_PORTS; i++) { port = &s->ports[i]; usb_device_reset(port->port.dev); } @@ -457,8 +457,9 @@ static void uhci_port_write(void *opaque, hwaddr addr, int n; n = (addr >> 1) & 7; - if (n >= NB_PORTS) + if (n >= UHCI_PORTS) { return; + } port = &s->ports[n]; 
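Editor's note: the hcd-uhci.c hunks here rename NB_PORTS to UHCI_PORTS and put braces around the "n >= UHCI_PORTS" bounds checks in the port register accessors. The sketch below is a standalone illustration, not the QEMU code: the "(addr >> 1) & 7" decode shown in the hunk can yield indices 0..7, while only two root ports are modelled, so out-of-range indices must fall through to the default handling. The register offsets in main() are hypothetical.

#include <stdio.h>

#define UHCI_PORTS 2   /* same limit the patch introduces by name */

/* Toy decode of a PORTSC access offset into a port index, mirroring the
 * "(addr >> 1) & 7" arithmetic used by the register handlers. */
static int decode_port(unsigned addr)
{
    int n = (addr >> 1) & 7;
    if (n >= UHCI_PORTS) {
        return -1;              /* would take the "read_default" path */
    }
    return n;
}

int main(void)
{
    /* Hypothetical register offsets, stepping through the PORTSC window. */
    for (unsigned addr = 0x10; addr <= 0x1e; addr += 2) {
        printf("addr 0x%02x -> port %d\n", addr, decode_port(addr));
    }
    return 0;
}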
dev = port->port.dev; if (dev && dev->attached) { @@ -513,8 +514,9 @@ static uint64_t uhci_port_read(void *opaque, hwaddr addr, unsigned size) UHCIPort *port; int n; n = (addr >> 1) & 7; - if (n >= NB_PORTS) + if (n >= UHCI_PORTS) { goto read_default; + } port = &s->ports[n]; val = port->ctrl; } @@ -607,7 +609,7 @@ static USBDevice *uhci_find_device(UHCIState *s, uint8_t addr) USBDevice *dev; int i; - for (i = 0; i < NB_PORTS; i++) { + for (i = 0; i < UHCI_PORTS; i++) { UHCIPort *port = &s->ports[i]; if (!(port->ctrl & UHCI_PORT_EN)) { continue; @@ -1171,11 +1173,11 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp) s->irq = pci_allocate_irq(dev); if (s->masterbus) { - USBPort *ports[NB_PORTS]; - for(i = 0; i < NB_PORTS; i++) { + USBPort *ports[UHCI_PORTS]; + for(i = 0; i < UHCI_PORTS; i++) { ports[i] = &s->ports[i].port; } - usb_register_companion(s->masterbus, ports, NB_PORTS, + usb_register_companion(s->masterbus, ports, UHCI_PORTS, s->firstport, s, &uhci_port_ops, USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL, &err); @@ -1185,14 +1187,14 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp) } } else { usb_bus_new(&s->bus, sizeof(s->bus), &uhci_bus_ops, DEVICE(dev)); - for (i = 0; i < NB_PORTS; i++) { + for (i = 0; i < UHCI_PORTS; i++) { usb_register_port(&s->bus, &s->ports[i].port, s, i, &uhci_port_ops, USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL); } } s->bh = qemu_bh_new_guarded(uhci_bh, s, &DEVICE(dev)->mem_reentrancy_guard); s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, uhci_frame_timer, s); - s->num_ports_vmstate = NB_PORTS; + s->num_ports_vmstate = UHCI_PORTS; QTAILQ_INIT(&s->queues); memory_region_init_io(&s->io_bar, OBJECT(s), &uhci_ioport_ops, s, diff --git a/hw/usb/hcd-uhci.h b/hw/usb/hcd-uhci.h index 69f8b40c49c..6d26b94e929 100644 --- a/hw/usb/hcd-uhci.h +++ b/hw/usb/hcd-uhci.h @@ -35,7 +35,7 @@ typedef struct UHCIQueue UHCIQueue; -#define NB_PORTS 2 +#define UHCI_PORTS 2 typedef struct UHCIPort { USBPort port; @@ -59,7 +59,7 @@ typedef struct UHCIState { uint32_t frame_bytes; uint32_t frame_bandwidth; bool completions_only; - UHCIPort ports[NB_PORTS]; + UHCIPort ports[UHCI_PORTS]; qemu_irq irq; /* Interrupts that should be raised at the end of the current frame. 
*/ uint32_t pending_int_mask; diff --git a/hw/usb/hcd-xhci-pci.c b/hw/usb/hcd-xhci-pci.c index 643d4643e4d..4423983308a 100644 --- a/hw/usb/hcd-xhci-pci.c +++ b/hw/usb/hcd-xhci-pci.c @@ -178,7 +178,7 @@ static const VMStateDescription vmstate_xhci_pci = { .name = "xhci", .version_id = 1, .post_load = xhci_pci_vmstate_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, XHCIPciState), VMSTATE_MSIX(parent_obj, XHCIPciState), VMSTATE_STRUCT(xhci, XHCIPciState, 1, vmstate_xhci, XHCIState), diff --git a/hw/usb/hcd-xhci-sysbus.c b/hw/usb/hcd-xhci-sysbus.c index faf57b47975..d93bae31f93 100644 --- a/hw/usb/hcd-xhci-sysbus.c +++ b/hw/usb/hcd-xhci-sysbus.c @@ -91,7 +91,7 @@ static Property xhci_sysbus_props[] = { static const VMStateDescription vmstate_xhci_sysbus = { .name = "xhci-sysbus", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(xhci, XHCISysbusState, 1, vmstate_xhci, XHCIState), VMSTATE_END_OF_LIST() } diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c index 4b60114207b..ad40232eb69 100644 --- a/hw/usb/hcd-xhci.c +++ b/hw/usb/hcd-xhci.c @@ -3522,7 +3522,7 @@ static int usb_xhci_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_xhci_ring = { .name = "xhci-ring", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(dequeue, XHCIRing), VMSTATE_BOOL(ccs, XHCIRing), VMSTATE_END_OF_LIST() @@ -3532,7 +3532,7 @@ static const VMStateDescription vmstate_xhci_ring = { static const VMStateDescription vmstate_xhci_port = { .name = "xhci-port", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(portsc, XHCIPort), VMSTATE_END_OF_LIST() } @@ -3541,7 +3541,7 @@ static const VMStateDescription vmstate_xhci_port = { static const VMStateDescription vmstate_xhci_slot = { .name = "xhci-slot", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(enabled, XHCISlot), VMSTATE_BOOL(addressed, XHCISlot), VMSTATE_END_OF_LIST() @@ -3551,7 +3551,7 @@ static const VMStateDescription vmstate_xhci_slot = { static const VMStateDescription vmstate_xhci_event = { .name = "xhci-event", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(type, XHCIEvent), VMSTATE_UINT32(ccode, XHCIEvent), VMSTATE_UINT64(ptr, XHCIEvent), @@ -3571,7 +3571,7 @@ static bool xhci_er_full(void *opaque, int version_id) static const VMStateDescription vmstate_xhci_intr = { .name = "xhci-intr", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* registers */ VMSTATE_UINT32(iman, XHCIInterrupter), VMSTATE_UINT32(imod, XHCIInterrupter), @@ -3604,7 +3604,7 @@ const VMStateDescription vmstate_xhci = { .name = "xhci-core", .version_id = 1, .post_load = usb_xhci_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_UINT32(ports, XHCIState, numports, 1, vmstate_xhci_port, XHCIPort), VMSTATE_STRUCT_VARRAY_UINT32(slots, XHCIState, numslots, 1, diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c index d7060a42d57..80122b41259 100644 --- a/hw/usb/host-libusb.c +++ b/hw/usb/host-libusb.c @@ -1753,7 +1753,7 @@ static const VMStateDescription vmstate_usb_host = { .version_id = 1, .minimum_version_id = 1, .post_load = usb_host_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(parent_obj, USBHostDevice), 
VMSTATE_END_OF_LIST() } diff --git a/hw/usb/imx-usb-phy.c b/hw/usb/imx-usb-phy.c index 1a97b36a119..18917d7599e 100644 --- a/hw/usb/imx-usb-phy.c +++ b/hw/usb/imx-usb-phy.c @@ -20,7 +20,7 @@ static const VMStateDescription vmstate_imx_usbphy = { .name = TYPE_IMX_USBPHY, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(usbphy, IMXUSBPHYState, USBPHY_MAX), VMSTATE_END_OF_LIST() }, diff --git a/hw/usb/meson.build b/hw/usb/meson.build index e94149ebdeb..aac3bb35f27 100644 --- a/hw/usb/meson.build +++ b/hw/usb/meson.build @@ -15,9 +15,10 @@ system_ss.add(when: 'CONFIG_USB', if_true: files( system_ss.add(when: 'CONFIG_USB_UHCI', if_true: files('hcd-uhci.c')) system_ss.add(when: 'CONFIG_USB_OHCI', if_true: files('hcd-ohci.c')) system_ss.add(when: 'CONFIG_USB_OHCI_PCI', if_true: files('hcd-ohci-pci.c')) +system_ss.add(when: 'CONFIG_USB_OHCI_SYSBUS', if_true: files('hcd-ohci-sysbus.c')) system_ss.add(when: 'CONFIG_USB_EHCI', if_true: files('hcd-ehci.c')) system_ss.add(when: 'CONFIG_USB_EHCI_PCI', if_true: files('hcd-ehci-pci.c')) -system_ss.add(when: 'CONFIG_USB_EHCI_SYSBUS', if_true: files('hcd-ehci.c', 'hcd-ehci-sysbus.c')) +system_ss.add(when: 'CONFIG_USB_EHCI_SYSBUS', if_true: files('hcd-ehci-sysbus.c')) system_ss.add(when: 'CONFIG_USB_XHCI', if_true: files('hcd-xhci.c')) system_ss.add(when: 'CONFIG_USB_XHCI_PCI', if_true: files('hcd-xhci-pci.c')) system_ss.add(when: 'CONFIG_USB_XHCI_SYSBUS', if_true: files('hcd-xhci-sysbus.c')) @@ -44,7 +45,9 @@ system_ss.add(when: 'CONFIG_USB_STORAGE_UAS', if_true: files('dev-uas.c')) system_ss.add(when: 'CONFIG_USB_AUDIO', if_true: files('dev-audio.c')) system_ss.add(when: 'CONFIG_USB_SERIAL', if_true: files('dev-serial.c')) system_ss.add(when: 'CONFIG_USB_NETWORK', if_true: files('dev-network.c')) -system_ss.add(when: ['CONFIG_POSIX', 'CONFIG_USB_STORAGE_MTP'], if_true: files('dev-mtp.c')) +if host_os != 'windows' + system_ss.add(when: 'CONFIG_USB_STORAGE_MTP', if_true: files('dev-mtp.c')) +endif # smartcard system_ss.add(when: 'CONFIG_USB_SMARTCARD', if_true: files('dev-smartcard-reader.c')) @@ -58,7 +61,9 @@ endif # U2F system_ss.add(when: 'CONFIG_USB_U2F', if_true: files('u2f.c')) -system_ss.add(when: ['CONFIG_LINUX', 'CONFIG_USB_U2F'], if_true: [libudev, files('u2f-passthru.c')]) +if host_os == 'linux' + system_ss.add(when: 'CONFIG_USB_U2F', if_true: [libudev, files('u2f-passthru.c')]) +endif if u2f.found() system_ss.add(when: 'CONFIG_USB_U2F', if_true: [u2f, files('u2f-emulated.c')]) endif diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c index c9893df8677..0f2dd2e5040 100644 --- a/hw/usb/redirect.c +++ b/hw/usb/redirect.c @@ -1403,7 +1403,7 @@ static void usbredir_vm_state_change(void *priv, bool running, RunState state) { USBRedirDevice *dev = priv; - if (state == RUN_STATE_RUNNING && dev->parser != NULL) { + if (running && dev->parser != NULL) { usbredirparser_do_write(dev->parser); /* Flush any pending writes */ } } @@ -2373,7 +2373,7 @@ static const VMStateDescription usbredir_bulk_receiving_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = usbredir_bulk_receiving_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(bulk_receiving_started, struct endp_data), VMSTATE_END_OF_LIST() } @@ -2391,7 +2391,7 @@ static const VMStateDescription usbredir_stream_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = usbredir_stream_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
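Editor's note: most hunks in this stretch of the series repeat one mechanical change, turning the compound literals behind .fields into (const VMStateField[]) and .subsections into (const VMStateDescription * const []), so the descriptor tables can live in read-only storage. The fragment below is a minimal standalone sketch of that C idiom using stand-in types rather than QEMU's real VMStateField/VMStateDescription.

#include <stdio.h>
#include <stddef.h>

/* Stand-in types for the demo; the real VMState structures carry
 * much more information. */
typedef struct Field {
    const char *name;
    size_t offset;
} Field;

typedef struct Description {
    const char *name;
    int version_id;
    const Field *fields;                        /* terminated by a NULL name */
    const struct Description * const *subsections;
} Description;

typedef struct State { int ctrl; int status; } State;

/* Const-qualified compound literals: the initializer arrays are not
 * writable at run time, matching the "(const VMStateField[])" change. */
static const Description demo_desc = {
    .name = "demo-device",
    .version_id = 1,
    .fields = (const Field[]) {
        { "ctrl",   offsetof(State, ctrl) },
        { "status", offsetof(State, status) },
        { NULL, 0 },                            /* end-of-list marker */
    },
    .subsections = (const Description * const []) {
        NULL,
    },
};

int main(void)
{
    for (const Field *f = demo_desc.fields; f->name; f++) {
        printf("%s at offset %zu\n", f->name, f->offset);
    }
    return 0;
}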
VMSTATE_UINT32(max_streams, struct endp_data), VMSTATE_END_OF_LIST() } @@ -2401,7 +2401,7 @@ static const VMStateDescription usbredir_ep_vmstate = { .name = "usb-redir-ep", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(type, struct endp_data), VMSTATE_UINT8(interval, struct endp_data), VMSTATE_UINT8(interface, struct endp_data), @@ -2424,7 +2424,7 @@ static const VMStateDescription usbredir_ep_vmstate = { VMSTATE_INT32(bufpq_target_size, struct endp_data), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &usbredir_bulk_receiving_vmstate, &usbredir_stream_vmstate, NULL @@ -2481,7 +2481,7 @@ static const VMStateDescription usbredir_ep_packet_id_queue_vmstate = { .name = "usb-redir-packet-id-queue", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { { .name = "queue", .version_id = 0, @@ -2501,7 +2501,7 @@ static const VMStateDescription usbredir_device_info_vmstate = { .name = "usb-redir-device-info", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(speed, struct usb_redir_device_connect_header), VMSTATE_UINT8(device_class, struct usb_redir_device_connect_header), VMSTATE_UINT8(device_subclass, struct usb_redir_device_connect_header), @@ -2520,7 +2520,7 @@ static const VMStateDescription usbredir_interface_info_vmstate = { .name = "usb-redir-interface-info", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(interface_count, struct usb_redir_interface_info_header), VMSTATE_UINT8_ARRAY(interface, @@ -2543,7 +2543,7 @@ static const VMStateDescription usbredir_vmstate = { .minimum_version_id = 1, .pre_save = usbredir_pre_save, .post_load = usbredir_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, USBRedirDevice), VMSTATE_TIMER_PTR(attach_timer, USBRedirDevice), { diff --git a/hw/usb/u2f-passthru.c b/hw/usb/u2f-passthru.c index fc93429c9c0..b7025d303d0 100644 --- a/hw/usb/u2f-passthru.c +++ b/hw/usb/u2f-passthru.c @@ -512,7 +512,7 @@ static const VMStateDescription u2f_passthru_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = u2f_passthru_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_U2F_KEY(base, U2FPassthruState), VMSTATE_END_OF_LIST() } diff --git a/hw/usb/u2f.c b/hw/usb/u2f.c index 56001249a44..1fb59cf404f 100644 --- a/hw/usb/u2f.c +++ b/hw/usb/u2f.c @@ -305,7 +305,7 @@ const VMStateDescription vmstate_u2f_key = { .name = "u2f-key", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, U2FKeyState), VMSTATE_UINT8(idle, U2FKeyState), VMSTATE_UINT8_2DARRAY(pending_in, U2FKeyState, diff --git a/hw/usb/xlnx-versal-usb2-ctrl-regs.c b/hw/usb/xlnx-versal-usb2-ctrl-regs.c index 1c094aa1a63..6fc453817ea 100644 --- a/hw/usb/xlnx-versal-usb2-ctrl-regs.c +++ b/hw/usb/xlnx-versal-usb2-ctrl-regs.c @@ -196,7 +196,7 @@ static const VMStateDescription vmstate_usb2_ctrl_regs = { .name = TYPE_XILINX_VERSAL_USB2_CTRL_REGS, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, VersalUsb2CtrlRegs, USB2_REGS_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/vfio/ap.c b/hw/vfio/ap.c 
index bbf69ff55ae..7c4caa59386 100644 --- a/hw/vfio/ap.c +++ b/hw/vfio/ap.c @@ -11,10 +11,12 @@ */ #include "qemu/osdep.h" +#include CONFIG_DEVICES /* CONFIG_IOMMUFD */ #include #include #include "qapi/error.h" #include "hw/vfio/vfio-common.h" +#include "sysemu/iommufd.h" #include "hw/s390x/ap-device.h" #include "qemu/error-report.h" #include "qemu/event_notifier.h" @@ -153,23 +155,15 @@ static void vfio_ap_unregister_irq_notifier(VFIOAPDevice *vapdev, static void vfio_ap_realize(DeviceState *dev, Error **errp) { + ERRP_GUARD(); int ret; Error *err = NULL; VFIOAPDevice *vapdev = VFIO_AP_DEVICE(dev); VFIODevice *vbasedev = &vapdev->vdev; - vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); - vbasedev->ops = &vfio_ap_ops; - vbasedev->type = VFIO_DEVICE_TYPE_AP; - vbasedev->dev = dev; - - /* - * vfio-ap devices operate in a way compatible with discarding of - * memory in RAM blocks, as no pages are pinned in the host. - * This needs to be set before vfio_get_device() for vfio common to - * handle ram_block_discard_disable(). - */ - vapdev->vdev.ram_block_discard_allowed = true; + if (vfio_device_get_name(vbasedev, errp) < 0) { + return; + } ret = vfio_attach_device(vbasedev->name, vbasedev, &address_space_memory, errp); @@ -204,6 +198,10 @@ static void vfio_ap_unrealize(DeviceState *dev) static Property vfio_ap_properties[] = { DEFINE_PROP_STRING("sysfsdev", VFIOAPDevice, vdev.sysfsdev), +#ifdef CONFIG_IOMMUFD + DEFINE_PROP_LINK("iommufd", VFIOAPDevice, vdev.iommufd, + TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), +#endif DEFINE_PROP_END_OF_LIST(), }; @@ -224,11 +222,36 @@ static const VMStateDescription vfio_ap_vmstate = { .unmigratable = 1, }; +static void vfio_ap_instance_init(Object *obj) +{ + VFIOAPDevice *vapdev = VFIO_AP_DEVICE(obj); + VFIODevice *vbasedev = &vapdev->vdev; + + /* + * vfio-ap devices operate in a way compatible with discarding of + * memory in RAM blocks, as no pages are pinned in the host. + * This needs to be set before vfio_get_device() for vfio common to + * handle ram_block_discard_disable(). 
+ */ + vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_AP, &vfio_ap_ops, + DEVICE(vapdev), true); +} + +#ifdef CONFIG_IOMMUFD +static void vfio_ap_set_fd(Object *obj, const char *str, Error **errp) +{ + vfio_device_set_fd(&VFIO_AP_DEVICE(obj)->vdev, str, errp); +} +#endif + static void vfio_ap_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, vfio_ap_properties); +#ifdef CONFIG_IOMMUFD + object_class_property_add_str(klass, "fd", NULL, vfio_ap_set_fd); +#endif dc->vmsd = &vfio_ap_vmstate; dc->desc = "VFIO-based AP device assignment"; set_bit(DEVICE_CATEGORY_MISC, dc->categories); @@ -243,6 +266,7 @@ static const TypeInfo vfio_ap_info = { .name = TYPE_VFIO_AP_DEVICE, .parent = TYPE_AP_DEVICE, .instance_size = sizeof(VFIOAPDevice), + .instance_init = vfio_ap_instance_init, .class_init = vfio_ap_class_init, }; diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c index d857bb8d0fe..90e4a534371 100644 --- a/hw/vfio/ccw.c +++ b/hw/vfio/ccw.c @@ -15,12 +15,14 @@ */ #include "qemu/osdep.h" +#include CONFIG_DEVICES /* CONFIG_IOMMUFD */ #include #include #include #include "qapi/error.h" #include "hw/vfio/vfio-common.h" +#include "sysemu/iommufd.h" #include "hw/s390x/s390-ccw.h" #include "hw/s390x/vfio-ccw.h" #include "hw/qdev-properties.h" @@ -588,22 +590,9 @@ static void vfio_ccw_realize(DeviceState *dev, Error **errp) } } - vbasedev->ops = &vfio_ccw_ops; - vbasedev->type = VFIO_DEVICE_TYPE_CCW; - vbasedev->name = g_strdup_printf("%x.%x.%04x", vcdev->cdev.hostid.cssid, - vcdev->cdev.hostid.ssid, - vcdev->cdev.hostid.devid); - vbasedev->dev = dev; - - /* - * All vfio-ccw devices are believed to operate in a way compatible with - * discarding of memory in RAM blocks, ie. pages pinned in the host are - * in the current working set of the guest driver and therefore never - * overlap e.g., with pages available to the guest balloon driver. This - * needs to be set before vfio_get_device() for vfio common to handle - * ram_block_discard_disable(). - */ - vbasedev->ram_block_discard_allowed = true; + if (vfio_device_get_name(vbasedev, errp) < 0) { + return; + } ret = vfio_attach_device(cdev->mdevid, vbasedev, &address_space_memory, errp); @@ -677,6 +666,10 @@ static void vfio_ccw_unrealize(DeviceState *dev) static Property vfio_ccw_properties[] = { DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev), DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false), +#ifdef CONFIG_IOMMUFD + DEFINE_PROP_LINK("iommufd", VFIOCCWDevice, vdev.iommufd, + TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), +#endif DEFINE_PROP_END_OF_LIST(), }; @@ -685,12 +678,39 @@ static const VMStateDescription vfio_ccw_vmstate = { .unmigratable = 1, }; +static void vfio_ccw_instance_init(Object *obj) +{ + VFIOCCWDevice *vcdev = VFIO_CCW(obj); + VFIODevice *vbasedev = &vcdev->vdev; + + /* + * All vfio-ccw devices are believed to operate in a way compatible with + * discarding of memory in RAM blocks, ie. pages pinned in the host are + * in the current working set of the guest driver and therefore never + * overlap e.g., with pages available to the guest balloon driver. This + * needs to be set before vfio_get_device() for vfio common to handle + * ram_block_discard_disable(). 
+ */ + vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_CCW, &vfio_ccw_ops, + DEVICE(vcdev), true); +} + +#ifdef CONFIG_IOMMUFD +static void vfio_ccw_set_fd(Object *obj, const char *str, Error **errp) +{ + vfio_device_set_fd(&VFIO_CCW(obj)->vdev, str, errp); +} +#endif + static void vfio_ccw_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass); device_class_set_props(dc, vfio_ccw_properties); +#ifdef CONFIG_IOMMUFD + object_class_property_add_str(klass, "fd", NULL, vfio_ccw_set_fd); +#endif dc->vmsd = &vfio_ccw_vmstate; dc->desc = "VFIO-based subchannel assignment"; set_bit(DEVICE_CATEGORY_MISC, dc->categories); @@ -708,6 +728,7 @@ static const TypeInfo vfio_ccw_info = { .name = TYPE_VFIO_CCW, .parent = TYPE_S390_CCW, .instance_size = sizeof(VFIOCCWDevice), + .instance_init = vfio_ccw_instance_init, .class_init = vfio_ccw_class_init, }; diff --git a/hw/vfio/common.c b/hw/vfio/common.c index a5dfc2d27ee..011ceaab894 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -39,7 +39,6 @@ #include "sysemu/runstate.h" #include "trace.h" #include "qapi/error.h" -#include "migration/migration.h" #include "migration/misc.h" #include "migration/blocker.h" #include "migration/qemu-file.h" @@ -128,7 +127,7 @@ int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp) error_setg(&multiple_devices_migration_blocker, "Multiple VFIO devices migration is supported only if all of " "them support P2P migration"); - ret = migrate_add_blocker(&multiple_devices_migration_blocker, errp); + ret = migrate_add_blocker_normal(&multiple_devices_migration_blocker, errp); return ret; } @@ -145,19 +144,13 @@ void vfio_unblock_multiple_devices_migration(void) bool vfio_viommu_preset(VFIODevice *vbasedev) { - return vbasedev->container->space->as != &address_space_memory; + return vbasedev->bcontainer->space->as != &address_space_memory; } static void vfio_set_migration_error(int err) { - MigrationState *ms = migrate_get_current(); - - if (migration_is_setup_or_active(ms->state)) { - WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { - if (ms->to_dst_file) { - qemu_file_set_error(ms->to_dst_file, err); - } - } + if (migration_is_setup_or_active()) { + migration_file_set_error(err); } } @@ -177,17 +170,15 @@ bool vfio_device_state_is_precopy(VFIODevice *vbasedev) migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P; } -static bool vfio_devices_all_dirty_tracking(VFIOContainer *container) +static bool vfio_devices_all_dirty_tracking(VFIOContainerBase *bcontainer) { VFIODevice *vbasedev; - MigrationState *ms = migrate_get_current(); - if (ms->state != MIGRATION_STATUS_ACTIVE && - ms->state != MIGRATION_STATUS_DEVICE) { + if (!migration_is_active() && !migration_is_device()) { return false; } - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { VFIOMigration *migration = vbasedev->migration; if (!migration) { @@ -203,11 +194,11 @@ static bool vfio_devices_all_dirty_tracking(VFIOContainer *container) return true; } -bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container) +bool vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer) { VFIODevice *vbasedev; - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { if (!vbasedev->dirty_pages_supported) { return false; } @@ -220,15 +211,16 @@ bool 
vfio_devices_all_device_dirty_tracking(VFIOContainer *container) * Check if all VFIO devices are running and migration is active, which is * essentially equivalent to the migration being in pre-copy phase. */ -bool vfio_devices_all_running_and_mig_active(VFIOContainer *container) +bool +vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer) { VFIODevice *vbasedev; - if (!migration_is_active(migrate_get_current())) { + if (!migration_is_active()) { return false; } - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { VFIOMigration *migration = vbasedev->migration; if (!migration) { @@ -292,7 +284,7 @@ static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) { VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); - VFIOContainer *container = giommu->container; + VFIOContainerBase *bcontainer = giommu->bcontainer; hwaddr iova = iotlb->iova + giommu->iommu_offset; void *vaddr; int ret; @@ -322,21 +314,22 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) * of vaddr will always be there, even if the memory object is * destroyed and its backing memory munmap-ed. */ - ret = vfio_dma_map(container, iova, - iotlb->addr_mask + 1, vaddr, - read_only); + ret = vfio_container_dma_map(bcontainer, iova, + iotlb->addr_mask + 1, vaddr, + read_only); if (ret) { - error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", " + error_report("vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx", %p) = %d (%s)", - container, iova, + bcontainer, iova, iotlb->addr_mask + 1, vaddr, ret, strerror(-ret)); } } else { - ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb); + ret = vfio_container_dma_unmap(bcontainer, iova, + iotlb->addr_mask + 1, iotlb); if (ret) { - error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " + error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx") = %d (%s)", - container, iova, + bcontainer, iova, iotlb->addr_mask + 1, ret, strerror(-ret)); vfio_set_migration_error(ret); } @@ -350,14 +343,15 @@ static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl, { VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, listener); + VFIOContainerBase *bcontainer = vrdl->bcontainer; const hwaddr size = int128_get64(section->size); const hwaddr iova = section->offset_within_address_space; int ret; /* Unmap with a single call. 
*/ - ret = vfio_dma_unmap(vrdl->container, iova, size , NULL); + ret = vfio_container_dma_unmap(bcontainer, iova, size , NULL); if (ret) { - error_report("%s: vfio_dma_unmap() failed: %s", __func__, + error_report("%s: vfio_container_dma_unmap() failed: %s", __func__, strerror(-ret)); } } @@ -367,6 +361,7 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, { VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, listener); + VFIOContainerBase *bcontainer = vrdl->bcontainer; const hwaddr end = section->offset_within_region + int128_get64(section->size); hwaddr start, next, iova; @@ -385,8 +380,8 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, section->offset_within_address_space; vaddr = memory_region_get_ram_ptr(section->mr) + start; - ret = vfio_dma_map(vrdl->container, iova, next - start, - vaddr, section->readonly); + ret = vfio_container_dma_map(bcontainer, iova, next - start, + vaddr, section->readonly); if (ret) { /* Rollback */ vfio_ram_discard_notify_discard(rdl, section); @@ -396,7 +391,7 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, return 0; } -static void vfio_register_ram_discard_listener(VFIOContainer *container, +static void vfio_register_ram_discard_listener(VFIOContainerBase *bcontainer, MemoryRegionSection *section) { RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); @@ -409,7 +404,7 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container, g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE)); vrdl = g_new0(VFIORamDiscardListener, 1); - vrdl->container = container; + vrdl->bcontainer = bcontainer; vrdl->mr = section->mr; vrdl->offset_within_address_space = section->offset_within_address_space; vrdl->size = int128_get64(section->size); @@ -417,14 +412,14 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container, section->mr); g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity)); - g_assert(container->pgsizes && - vrdl->granularity >= 1ULL << ctz64(container->pgsizes)); + g_assert(bcontainer->pgsizes && + vrdl->granularity >= 1ULL << ctz64(bcontainer->pgsizes)); ram_discard_listener_init(&vrdl->listener, vfio_ram_discard_notify_populate, vfio_ram_discard_notify_discard, true); ram_discard_manager_register_listener(rdm, &vrdl->listener, section); - QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next); + QLIST_INSERT_HEAD(&bcontainer->vrdl_list, vrdl, next); /* * Sanity-check if we have a theoretically problematic setup where we could @@ -439,7 +434,7 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container, * number of sections in the address space we could have over time, * also consuming DMA mappings. */ - if (container->dma_max_mappings) { + if (bcontainer->dma_max_mappings) { unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512; #ifdef CONFIG_KVM @@ -448,7 +443,7 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container, } #endif - QLIST_FOREACH(vrdl, &container->vrdl_list, next) { + QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { hwaddr start, end; start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space, @@ -460,23 +455,23 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container, } if (vrdl_mappings + max_memslots - vrdl_count > - container->dma_max_mappings) { + bcontainer->dma_max_mappings) { warn_report("%s: possibly running out of DMA mappings. E.g., try" " increasing the 'block-size' of virtio-mem devies." 
" Maximum possible DMA mappings: %d, Maximum possible" - " memslots: %d", __func__, container->dma_max_mappings, + " memslots: %d", __func__, bcontainer->dma_max_mappings, max_memslots); } } } -static void vfio_unregister_ram_discard_listener(VFIOContainer *container, +static void vfio_unregister_ram_discard_listener(VFIOContainerBase *bcontainer, MemoryRegionSection *section) { RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); VFIORamDiscardListener *vrdl = NULL; - QLIST_FOREACH(vrdl, &container->vrdl_list, next) { + QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { if (vrdl->mr == section->mr && vrdl->offset_within_address_space == section->offset_within_address_space) { @@ -538,7 +533,7 @@ static bool vfio_listener_valid_section(MemoryRegionSection *section, return true; } -static bool vfio_get_section_iova_range(VFIOContainer *container, +static bool vfio_get_section_iova_range(VFIOContainerBase *bcontainer, MemoryRegionSection *section, hwaddr *out_iova, hwaddr *out_end, Int128 *out_llend) @@ -566,7 +561,8 @@ static bool vfio_get_section_iova_range(VFIOContainer *container, static void vfio_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, listener); + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); hwaddr iova, end; Int128 llend, llsize; void *vaddr; @@ -577,7 +573,8 @@ static void vfio_listener_region_add(MemoryListener *listener, return; } - if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) { + if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end, + &llend)) { if (memory_region_is_ram_device(section->mr)) { trace_vfio_listener_region_add_no_dma_map( memory_region_name(section->mr), @@ -588,7 +585,7 @@ static void vfio_listener_region_add(MemoryListener *listener, return; } - if (vfio_container_add_section_window(container, section, &err)) { + if (vfio_container_add_section_window(bcontainer, section, &err)) { goto fail; } @@ -610,7 +607,7 @@ static void vfio_listener_region_add(MemoryListener *listener, giommu->iommu_mr = iommu_mr; giommu->iommu_offset = section->offset_within_address_space - section->offset_within_region; - giommu->container = container; + giommu->bcontainer = bcontainer; llend = int128_add(int128_make64(section->offset_within_region), section->size); llend = int128_sub(llend, int128_one()); @@ -623,16 +620,17 @@ static void vfio_listener_region_add(MemoryListener *listener, iommu_idx); ret = memory_region_iommu_set_page_size_mask(giommu->iommu_mr, - container->pgsizes, + bcontainer->pgsizes, &err); if (ret) { g_free(giommu); goto fail; } - if (container->iova_ranges) { + if (bcontainer->iova_ranges) { ret = memory_region_iommu_set_iova_ranges(giommu->iommu_mr, - container->iova_ranges, &err); + bcontainer->iova_ranges, + &err); if (ret) { g_free(giommu); goto fail; @@ -645,7 +643,7 @@ static void vfio_listener_region_add(MemoryListener *listener, g_free(giommu); goto fail; } - QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next); + QLIST_INSERT_HEAD(&bcontainer->giommu_list, giommu, giommu_next); memory_region_iommu_replay(giommu->iommu_mr, &giommu->n); return; @@ -659,7 +657,7 @@ static void vfio_listener_region_add(MemoryListener *listener, * about changes. 
*/ if (memory_region_has_ram_discard_manager(section->mr)) { - vfio_register_ram_discard_listener(container, section); + vfio_register_ram_discard_listener(bcontainer, section); return; } @@ -672,7 +670,7 @@ static void vfio_listener_region_add(MemoryListener *listener, llsize = int128_sub(llend, int128_make64(iova)); if (memory_region_is_ram_device(section->mr)) { - hwaddr pgmask = (1ULL << ctz64(container->pgsizes)) - 1; + hwaddr pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1; if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) { trace_vfio_listener_region_add_no_dma_map( @@ -684,12 +682,12 @@ static void vfio_listener_region_add(MemoryListener *listener, } } - ret = vfio_dma_map(container, iova, int128_get64(llsize), - vaddr, section->readonly); + ret = vfio_container_dma_map(bcontainer, iova, int128_get64(llsize), + vaddr, section->readonly); if (ret) { - error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", " + error_setg(&err, "vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx", %p) = %d (%s)", - container, iova, int128_get64(llsize), vaddr, ret, + bcontainer, iova, int128_get64(llsize), vaddr, ret, strerror(-ret)); if (memory_region_is_ram_device(section->mr)) { /* Allow unexpected mappings not to be fatal for RAM devices */ @@ -711,9 +709,9 @@ static void vfio_listener_region_add(MemoryListener *listener, * can gracefully fail. Runtime, there's not much we can do other * than throw a hardware error. */ - if (!container->initialized) { - if (!container->error) { - error_propagate_prepend(&container->error, err, + if (!bcontainer->initialized) { + if (!bcontainer->error) { + error_propagate_prepend(&bcontainer->error, err, "Region %s: ", memory_region_name(section->mr)); } else { @@ -728,7 +726,8 @@ static void vfio_listener_region_add(MemoryListener *listener, static void vfio_listener_region_del(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, listener); + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); hwaddr iova, end; Int128 llend, llsize; int ret; @@ -741,7 +740,7 @@ static void vfio_listener_region_del(MemoryListener *listener, if (memory_region_is_iommu(section->mr)) { VFIOGuestIOMMU *giommu; - QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { + QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) { if (MEMORY_REGION(giommu->iommu_mr) == section->mr && giommu->n.start == section->offset_within_region) { memory_region_unregister_iommu_notifier(section->mr, @@ -761,7 +760,8 @@ static void vfio_listener_region_del(MemoryListener *listener, */ } - if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) { + if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end, + &llend)) { return; } @@ -772,10 +772,10 @@ static void vfio_listener_region_del(MemoryListener *listener, if (memory_region_is_ram_device(section->mr)) { hwaddr pgmask; - pgmask = (1ULL << ctz64(container->pgsizes)) - 1; + pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1; try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask)); } else if (memory_region_has_ram_discard_manager(section->mr)) { - vfio_unregister_ram_discard_listener(container, section); + vfio_unregister_ram_discard_listener(bcontainer, section); /* Unregistering will trigger an unmap. 
*/ try_unmap = false; } @@ -784,27 +784,29 @@ static void vfio_listener_region_del(MemoryListener *listener, if (int128_eq(llsize, int128_2_64())) { /* The unmap ioctl doesn't accept a full 64-bit span. */ llsize = int128_rshift(llsize, 1); - ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); + ret = vfio_container_dma_unmap(bcontainer, iova, + int128_get64(llsize), NULL); if (ret) { - error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " + error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx") = %d (%s)", - container, iova, int128_get64(llsize), ret, + bcontainer, iova, int128_get64(llsize), ret, strerror(-ret)); } iova += int128_get64(llsize); } - ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); + ret = vfio_container_dma_unmap(bcontainer, iova, + int128_get64(llsize), NULL); if (ret) { - error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " + error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx") = %d (%s)", - container, iova, int128_get64(llsize), ret, + bcontainer, iova, int128_get64(llsize), ret, strerror(-ret)); } } memory_region_unref(section->mr); - vfio_container_del_section_window(container, section); + vfio_container_del_section_window(bcontainer, section); } typedef struct VFIODirtyRanges { @@ -817,13 +819,13 @@ typedef struct VFIODirtyRanges { } VFIODirtyRanges; typedef struct VFIODirtyRangesListener { - VFIOContainer *container; + VFIOContainerBase *bcontainer; VFIODirtyRanges ranges; MemoryListener listener; } VFIODirtyRangesListener; static bool vfio_section_is_vfio_pci(MemoryRegionSection *section, - VFIOContainer *container) + VFIOContainerBase *bcontainer) { VFIOPCIDevice *pcidev; VFIODevice *vbasedev; @@ -831,7 +833,7 @@ static bool vfio_section_is_vfio_pci(MemoryRegionSection *section, owner = memory_region_owner(section->mr); - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { continue; } @@ -854,7 +856,7 @@ static void vfio_dirty_tracking_update(MemoryListener *listener, hwaddr iova, end, *min, *max; if (!vfio_listener_valid_section(section, "tracking_update") || - !vfio_get_section_iova_range(dirty->container, section, + !vfio_get_section_iova_range(dirty->bcontainer, section, &iova, &end, NULL)) { return; } @@ -878,7 +880,7 @@ static void vfio_dirty_tracking_update(MemoryListener *listener, * The alternative would be an IOVATree but that has a much bigger runtime * overhead and unnecessary complexity. 
*/ - if (vfio_section_is_vfio_pci(section, dirty->container) && + if (vfio_section_is_vfio_pci(section, dirty->bcontainer) && iova >= UINT32_MAX) { min = &range->minpci64; max = &range->maxpci64; @@ -902,7 +904,7 @@ static const MemoryListener vfio_dirty_tracking_listener = { .region_add = vfio_dirty_tracking_update, }; -static void vfio_dirty_tracking_init(VFIOContainer *container, +static void vfio_dirty_tracking_init(VFIOContainerBase *bcontainer, VFIODirtyRanges *ranges) { VFIODirtyRangesListener dirty; @@ -912,10 +914,10 @@ static void vfio_dirty_tracking_init(VFIOContainer *container, dirty.ranges.min64 = UINT64_MAX; dirty.ranges.minpci64 = UINT64_MAX; dirty.listener = vfio_dirty_tracking_listener; - dirty.container = container; + dirty.bcontainer = bcontainer; memory_listener_register(&dirty.listener, - container->space->as); + bcontainer->space->as); *ranges = dirty.ranges; @@ -927,7 +929,7 @@ static void vfio_dirty_tracking_init(VFIOContainer *container, memory_listener_unregister(&dirty.listener); } -static void vfio_devices_dma_logging_stop(VFIOContainer *container) +static void vfio_devices_dma_logging_stop(VFIOContainerBase *bcontainer) { uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature), sizeof(uint64_t))] = {}; @@ -938,7 +940,7 @@ static void vfio_devices_dma_logging_stop(VFIOContainer *container) feature->flags = VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP; - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { if (!vbasedev->dirty_tracking) { continue; } @@ -952,7 +954,7 @@ static void vfio_devices_dma_logging_stop(VFIOContainer *container) } static struct vfio_device_feature * -vfio_device_feature_dma_logging_start_create(VFIOContainer *container, +vfio_device_feature_dma_logging_start_create(VFIOContainerBase *bcontainer, VFIODirtyRanges *tracking) { struct vfio_device_feature *feature; @@ -989,7 +991,7 @@ vfio_device_feature_dma_logging_start_create(VFIOContainer *container, return NULL; } - control->ranges = (__u64)(uintptr_t)ranges; + control->ranges = (uintptr_t)ranges; if (tracking->max32) { ranges->iova = tracking->min32; ranges->length = (tracking->max32 - tracking->min32) + 1; @@ -1025,21 +1027,21 @@ static void vfio_device_feature_dma_logging_start_destroy( g_free(feature); } -static int vfio_devices_dma_logging_start(VFIOContainer *container) +static int vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer) { struct vfio_device_feature *feature; VFIODirtyRanges ranges; VFIODevice *vbasedev; int ret = 0; - vfio_dirty_tracking_init(container, &ranges); - feature = vfio_device_feature_dma_logging_start_create(container, + vfio_dirty_tracking_init(bcontainer, &ranges); + feature = vfio_device_feature_dma_logging_start_create(bcontainer, &ranges); if (!feature) { return -errno; } - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { if (vbasedev->dirty_tracking) { continue; } @@ -1056,7 +1058,7 @@ static int vfio_devices_dma_logging_start(VFIOContainer *container) out: if (ret) { - vfio_devices_dma_logging_stop(container); + vfio_devices_dma_logging_stop(bcontainer); } vfio_device_feature_dma_logging_start_destroy(feature); @@ -1066,13 +1068,14 @@ static int vfio_devices_dma_logging_start(VFIOContainer *container) static void vfio_listener_log_global_start(MemoryListener *listener) { - VFIOContainer *container = container_of(listener, VFIOContainer, listener); + 
VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); int ret; - if (vfio_devices_all_device_dirty_tracking(container)) { - ret = vfio_devices_dma_logging_start(container); + if (vfio_devices_all_device_dirty_tracking(bcontainer)) { + ret = vfio_devices_dma_logging_start(bcontainer); } else { - ret = vfio_set_dirty_page_tracking(container, true); + ret = vfio_container_set_dirty_page_tracking(bcontainer, true); } if (ret) { @@ -1084,13 +1087,14 @@ static void vfio_listener_log_global_start(MemoryListener *listener) static void vfio_listener_log_global_stop(MemoryListener *listener) { - VFIOContainer *container = container_of(listener, VFIOContainer, listener); + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); int ret = 0; - if (vfio_devices_all_device_dirty_tracking(container)) { - vfio_devices_dma_logging_stop(container); + if (vfio_devices_all_device_dirty_tracking(bcontainer)) { + vfio_devices_dma_logging_stop(bcontainer); } else { - ret = vfio_set_dirty_page_tracking(container, false); + ret = vfio_container_set_dirty_page_tracking(bcontainer, false); } if (ret) { @@ -1105,7 +1109,7 @@ static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova, { uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) + sizeof(struct vfio_device_feature_dma_logging_report), - sizeof(__u64))] = {}; + sizeof(uint64_t))] = {}; struct vfio_device_feature *feature = (struct vfio_device_feature *)buf; struct vfio_device_feature_dma_logging_report *report = (struct vfio_device_feature_dma_logging_report *)feature->data; @@ -1113,7 +1117,7 @@ static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova, report->iova = iova; report->length = size; report->page_size = qemu_real_host_page_size(); - report->bitmap = (__u64)(uintptr_t)bitmap; + report->bitmap = (uintptr_t)bitmap; feature->argsz = sizeof(buf); feature->flags = VFIO_DEVICE_FEATURE_GET | @@ -1126,14 +1130,14 @@ static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova, return 0; } -int vfio_devices_query_dirty_bitmap(VFIOContainer *container, +int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer, VFIOBitmap *vbmap, hwaddr iova, hwaddr size) { VFIODevice *vbasedev; int ret; - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { ret = vfio_device_dma_logging_report(vbasedev, iova, size, vbmap->bitmap); if (ret) { @@ -1149,16 +1153,16 @@ int vfio_devices_query_dirty_bitmap(VFIOContainer *container, return 0; } -int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, +int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova, uint64_t size, ram_addr_t ram_addr) { bool all_device_dirty_tracking = - vfio_devices_all_device_dirty_tracking(container); + vfio_devices_all_device_dirty_tracking(bcontainer); uint64_t dirty_pages; VFIOBitmap vbmap; int ret; - if (!container->dirty_pages_supported && !all_device_dirty_tracking) { + if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) { cpu_physical_memory_set_dirty_range(ram_addr, size, tcg_enabled() ? 
DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE); @@ -1171,9 +1175,9 @@ int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, } if (all_device_dirty_tracking) { - ret = vfio_devices_query_dirty_bitmap(container, &vbmap, iova, size); + ret = vfio_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size); } else { - ret = vfio_query_dirty_bitmap(container, &vbmap, iova, size); + ret = vfio_container_query_dirty_bitmap(bcontainer, &vbmap, iova, size); } if (ret) { @@ -1183,8 +1187,7 @@ int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr, vbmap.pages); - trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size, - ram_addr, dirty_pages); + trace_vfio_get_dirty_bitmap(iova, size, vbmap.size, ram_addr, dirty_pages); out: g_free(vbmap.bitmap); @@ -1201,7 +1204,7 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) vfio_giommu_dirty_notifier *gdn = container_of(n, vfio_giommu_dirty_notifier, n); VFIOGuestIOMMU *giommu = gdn->giommu; - VFIOContainer *container = giommu->container; + VFIOContainerBase *bcontainer = giommu->bcontainer; hwaddr iova = iotlb->iova + giommu->iommu_offset; ram_addr_t translated_addr; int ret = -EINVAL; @@ -1216,12 +1219,12 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) rcu_read_lock(); if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) { - ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1, + ret = vfio_get_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1, translated_addr); if (ret) { error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx") = %d (%s)", - container, iova, iotlb->addr_mask + 1, ret, + bcontainer, iova, iotlb->addr_mask + 1, ret, strerror(-ret)); } } @@ -1246,16 +1249,17 @@ static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section, * Sync the whole mapped region (spanning multiple individual mappings) * in one go. 
*/ - return vfio_get_dirty_bitmap(vrdl->container, iova, size, ram_addr); + return vfio_get_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr); } -static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container, - MemoryRegionSection *section) +static int +vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) { RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); VFIORamDiscardListener *vrdl = NULL; - QLIST_FOREACH(vrdl, &container->vrdl_list, next) { + QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { if (vrdl->mr == section->mr && vrdl->offset_within_address_space == section->offset_within_address_space) { @@ -1276,7 +1280,7 @@ static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container, &vrdl); } -static int vfio_sync_dirty_bitmap(VFIOContainer *container, +static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer, MemoryRegionSection *section) { ram_addr_t ram_addr; @@ -1284,7 +1288,7 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container, if (memory_region_is_iommu(section->mr)) { VFIOGuestIOMMU *giommu; - QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { + QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) { if (MEMORY_REGION(giommu->iommu_mr) == section->mr && giommu->n.start == section->offset_within_region) { Int128 llend; @@ -1308,13 +1312,13 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container, } return 0; } else if (memory_region_has_ram_discard_manager(section->mr)) { - return vfio_sync_ram_discard_listener_dirty_bitmap(container, section); + return vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section); } ram_addr = memory_region_get_ram_addr(section->mr) + section->offset_within_region; - return vfio_get_dirty_bitmap(container, + return vfio_get_dirty_bitmap(bcontainer, REAL_HOST_PAGE_ALIGN(section->offset_within_address_space), int128_get64(section->size), ram_addr); } @@ -1322,15 +1326,16 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container, static void vfio_listener_log_sync(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, listener); + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); int ret; if (vfio_listener_skipped_section(section)) { return; } - if (vfio_devices_all_dirty_tracking(container)) { - ret = vfio_sync_dirty_bitmap(container, section); + if (vfio_devices_all_dirty_tracking(bcontainer)) { + ret = vfio_sync_dirty_bitmap(bcontainer, section); if (ret) { error_report("vfio: Failed to sync dirty bitmap, err: %d (%s)", ret, strerror(-ret)); @@ -1449,10 +1454,13 @@ VFIOAddressSpace *vfio_get_address_space(AddressSpace *as) void vfio_put_address_space(VFIOAddressSpace *space) { - if (QLIST_EMPTY(&space->containers)) { - QLIST_REMOVE(space, list); - g_free(space); + if (!QLIST_EMPTY(&space->containers)) { + return; } + + QLIST_REMOVE(space, list); + g_free(space); + if (QLIST_EMPTY(&vfio_address_spaces)) { qemu_unregister_reset(vfio_reset_handler, NULL); } @@ -1481,3 +1489,26 @@ struct vfio_device_info *vfio_get_device_info(int fd) return info; } + +int vfio_attach_device(char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp) +{ + const VFIOIOMMUClass *ops = + VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_LEGACY)); + + if (vbasedev->iommufd) { + ops = VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD)); + } + + assert(ops); + + return 
ops->attach_device(name, vbasedev, as, errp); +} + +void vfio_detach_device(VFIODevice *vbasedev) +{ + if (!vbasedev->bcontainer) { + return; + } + vbasedev->bcontainer->ops->detach_device(vbasedev); +} diff --git a/hw/vfio/container-base.c b/hw/vfio/container-base.c new file mode 100644 index 00000000000..913ae49077c --- /dev/null +++ b/hw/vfio/container-base.c @@ -0,0 +1,111 @@ +/* + * VFIO BASE CONTAINER + * + * Copyright (C) 2023 Intel Corporation. + * Copyright Red Hat, Inc. 2023 + * + * Authors: Yi Liu + * Eric Auger + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/vfio/vfio-container-base.h" + +int vfio_container_dma_map(VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + void *vaddr, bool readonly) +{ + g_assert(bcontainer->ops->dma_map); + return bcontainer->ops->dma_map(bcontainer, iova, size, vaddr, readonly); +} + +int vfio_container_dma_unmap(VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb) +{ + g_assert(bcontainer->ops->dma_unmap); + return bcontainer->ops->dma_unmap(bcontainer, iova, size, iotlb); +} + +int vfio_container_add_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, + Error **errp) +{ + if (!bcontainer->ops->add_window) { + return 0; + } + + return bcontainer->ops->add_window(bcontainer, section, errp); +} + +void vfio_container_del_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) +{ + if (!bcontainer->ops->del_window) { + return; + } + + return bcontainer->ops->del_window(bcontainer, section); +} + +int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer, + bool start) +{ + if (!bcontainer->dirty_pages_supported) { + return 0; + } + + g_assert(bcontainer->ops->set_dirty_page_tracking); + return bcontainer->ops->set_dirty_page_tracking(bcontainer, start); +} + +int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer, + VFIOBitmap *vbmap, + hwaddr iova, hwaddr size) +{ + g_assert(bcontainer->ops->query_dirty_bitmap); + return bcontainer->ops->query_dirty_bitmap(bcontainer, vbmap, iova, size); +} + +void vfio_container_init(VFIOContainerBase *bcontainer, VFIOAddressSpace *space, + const VFIOIOMMUClass *ops) +{ + bcontainer->ops = ops; + bcontainer->space = space; + bcontainer->error = NULL; + bcontainer->dirty_pages_supported = false; + bcontainer->dma_max_mappings = 0; + bcontainer->iova_ranges = NULL; + QLIST_INIT(&bcontainer->giommu_list); + QLIST_INIT(&bcontainer->vrdl_list); +} + +void vfio_container_destroy(VFIOContainerBase *bcontainer) +{ + VFIOGuestIOMMU *giommu, *tmp; + + QLIST_REMOVE(bcontainer, next); + + QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) { + memory_region_unregister_iommu_notifier( + MEMORY_REGION(giommu->iommu_mr), &giommu->n); + QLIST_REMOVE(giommu, giommu_next); + g_free(giommu); + } + + g_list_free_full(bcontainer->iova_ranges, g_free); +} + +static const TypeInfo types[] = { + { + .name = TYPE_VFIO_IOMMU, + .parent = TYPE_INTERFACE, + .class_size = sizeof(VFIOIOMMUClass), + }, +}; + +DEFINE_TYPES(types) diff --git a/hw/vfio/container.c b/hw/vfio/container.c index adc3005beb7..77bdec276ec 100644 --- a/hw/vfio/container.c +++ b/hw/vfio/container.c @@ -32,7 +32,7 @@ #include "sysemu/reset.h" #include "trace.h" #include "qapi/error.h" -#include "migration/migration.h" +#include "pci.h" VFIOGroupList vfio_group_list = QLIST_HEAD_INITIALIZER(vfio_group_list); @@ -60,10 +60,11 
@@ static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state) } } -static int vfio_dma_unmap_bitmap(VFIOContainer *container, +static int vfio_dma_unmap_bitmap(const VFIOContainer *container, hwaddr iova, ram_addr_t size, IOMMUTLBEntry *iotlb) { + const VFIOContainerBase *bcontainer = &container->bcontainer; struct vfio_iommu_type1_dma_unmap *unmap; struct vfio_bitmap *bitmap; VFIOBitmap vbmap; @@ -91,7 +92,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container, bitmap->size = vbmap.size; bitmap->data = (__u64 *)vbmap.bitmap; - if (vbmap.size > container->max_dirty_bitmap_size) { + if (vbmap.size > bcontainer->max_dirty_bitmap_size) { error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size); ret = -E2BIG; goto unmap_exit; @@ -115,9 +116,12 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container, /* * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 */ -int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, - ram_addr_t size, IOMMUTLBEntry *iotlb) +static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb) { + const VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); struct vfio_iommu_type1_dma_unmap unmap = { .argsz = sizeof(unmap), .flags = 0, @@ -127,9 +131,9 @@ int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, bool need_dirty_sync = false; int ret; - if (iotlb && vfio_devices_all_running_and_mig_active(container)) { - if (!vfio_devices_all_device_dirty_tracking(container) && - container->dirty_pages_supported) { + if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) { + if (!vfio_devices_all_device_dirty_tracking(bcontainer) && + bcontainer->dirty_pages_supported) { return vfio_dma_unmap_bitmap(container, iova, size, iotlb); } @@ -151,8 +155,8 @@ int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, */ if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) && container->iommu_type == VFIO_TYPE1v2_IOMMU) { - trace_vfio_dma_unmap_overflow_workaround(); - unmap.size -= 1ULL << ctz64(container->pgsizes); + trace_vfio_legacy_dma_unmap_overflow_workaround(); + unmap.size -= 1ULL << ctz64(bcontainer->pgsizes); continue; } error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno)); @@ -160,7 +164,7 @@ int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, } if (need_dirty_sync) { - ret = vfio_get_dirty_bitmap(container, iova, size, + ret = vfio_get_dirty_bitmap(bcontainer, iova, size, iotlb->translated_addr); if (ret) { return ret; @@ -170,9 +174,11 @@ int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, return 0; } -int vfio_dma_map(VFIOContainer *container, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly) +static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova, + ram_addr_t size, void *vaddr, bool readonly) { + const VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map), .flags = VFIO_DMA_MAP_FLAG_READ, @@ -191,7 +197,8 @@ int vfio_dma_map(VFIOContainer *container, hwaddr iova, * the VGA ROM space. 
*/ if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || - (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 && + (errno == EBUSY && + vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 && ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { return 0; } @@ -200,17 +207,17 @@ int vfio_dma_map(VFIOContainer *container, hwaddr iova, return -errno; } -int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start) +static int +vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer, + bool start) { + const VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); int ret; struct vfio_iommu_type1_dirty_bitmap dirty = { .argsz = sizeof(dirty), }; - if (!container->dirty_pages_supported) { - return 0; - } - if (start) { dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START; } else { @@ -227,9 +234,12 @@ int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start) return ret; } -int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap, - hwaddr iova, hwaddr size) +static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer, + VFIOBitmap *vbmap, + hwaddr iova, hwaddr size) { + const VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); struct vfio_iommu_type1_dirty_bitmap *dbitmap; struct vfio_iommu_type1_dirty_bitmap_get *range; int ret; @@ -296,7 +306,7 @@ bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info, } static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info, - VFIOContainer *container) + VFIOContainerBase *bcontainer) { struct vfio_info_cap_header *hdr; struct vfio_iommu_type1_info_cap_iova_range *cap; @@ -314,8 +324,8 @@ static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info, range_set_bounds(range, cap->iova_ranges[i].start, cap->iova_ranges[i].end); - container->iova_ranges = - range_list_insert(container->iova_ranges, range); + bcontainer->iova_ranges = + range_list_insert(bcontainer->iova_ranges, range); } return true; @@ -358,10 +368,34 @@ static int vfio_get_iommu_type(VFIOContainer *container, return -EINVAL; } -static int vfio_init_container(VFIOContainer *container, int group_fd, - Error **errp) +/* + * vfio_get_iommu_ops - get a VFIOIOMMUClass associated with a type + */ +static const VFIOIOMMUClass *vfio_get_iommu_class(int iommu_type, Error **errp) +{ + ObjectClass *klass = NULL; + + switch (iommu_type) { + case VFIO_TYPE1v2_IOMMU: + case VFIO_TYPE1_IOMMU: + klass = object_class_by_name(TYPE_VFIO_IOMMU_LEGACY); + break; + case VFIO_SPAPR_TCE_v2_IOMMU: + case VFIO_SPAPR_TCE_IOMMU: + klass = object_class_by_name(TYPE_VFIO_IOMMU_SPAPR); + break; + default: + g_assert_not_reached(); + }; + + return VFIO_IOMMU_CLASS(klass); +} + +static int vfio_set_iommu(VFIOContainer *container, int group_fd, + VFIOAddressSpace *space, Error **errp) { int iommu_type, ret; + const VFIOIOMMUClass *vioc; iommu_type = vfio_get_iommu_type(container, errp); if (iommu_type < 0) { @@ -390,6 +424,14 @@ static int vfio_init_container(VFIOContainer *container, int group_fd, } container->iommu_type = iommu_type; + + vioc = vfio_get_iommu_class(iommu_type, errp); + if (!vioc) { + error_setg(errp, "No available IOMMU models"); + return -EINVAL; + } + + vfio_container_init(&container->bcontainer, space, vioc); return 0; } @@ -442,6 +484,7 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container, { struct vfio_info_cap_header *hdr; struct vfio_iommu_type1_info_cap_migration *cap_mig; + VFIOContainerBase 
*bcontainer = &container->bcontainer; hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION); if (!hdr) { @@ -456,22 +499,46 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container, * qemu_real_host_page_size to mark those dirty. */ if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) { - container->dirty_pages_supported = true; - container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; - container->dirty_pgsizes = cap_mig->pgsize_bitmap; + bcontainer->dirty_pages_supported = true; + bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; + bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap; } } -static void vfio_free_container(VFIOContainer *container) +static int vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp) { - g_list_free_full(container->iova_ranges, g_free); - g_free(container); + VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); + g_autofree struct vfio_iommu_type1_info *info = NULL; + int ret; + + ret = vfio_get_iommu_info(container, &info); + if (ret) { + error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info"); + return ret; + } + + if (info->flags & VFIO_IOMMU_INFO_PGSIZES) { + bcontainer->pgsizes = info->iova_pgsizes; + } else { + bcontainer->pgsizes = qemu_real_host_page_size(); + } + + if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) { + bcontainer->dma_max_mappings = 65535; + } + + vfio_get_info_iova_range(info, bcontainer); + + vfio_get_iommu_info_migration(container, info); + return 0; } static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, Error **errp) { VFIOContainer *container; + VFIOContainerBase *bcontainer; int ret, fd; VFIOAddressSpace *space; @@ -508,7 +575,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, * details once we know which type of IOMMU we are using. 
*/ - QLIST_FOREACH(container, &space->containers, next) { + QLIST_FOREACH(bcontainer, &space->containers, next) { + container = container_of(bcontainer, VFIOContainer, bcontainer); if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { ret = vfio_ram_block_discard_disable(container, true); if (ret) { @@ -544,102 +612,70 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, } container = g_malloc0(sizeof(*container)); - container->space = space; container->fd = fd; - container->error = NULL; - container->dirty_pages_supported = false; - container->dma_max_mappings = 0; - container->iova_ranges = NULL; - QLIST_INIT(&container->giommu_list); - QLIST_INIT(&container->vrdl_list); - - ret = vfio_init_container(container, group->fd, errp); + bcontainer = &container->bcontainer; + + ret = vfio_set_iommu(container, group->fd, space, errp); if (ret) { goto free_container_exit; } - ret = vfio_ram_block_discard_disable(container, true); + ret = vfio_cpr_register_container(bcontainer, errp); if (ret) { - error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); goto free_container_exit; } - switch (container->iommu_type) { - case VFIO_TYPE1v2_IOMMU: - case VFIO_TYPE1_IOMMU: - { - struct vfio_iommu_type1_info *info; - - ret = vfio_get_iommu_info(container, &info); - if (ret) { - error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info"); - goto enable_discards_exit; - } - - if (info->flags & VFIO_IOMMU_INFO_PGSIZES) { - container->pgsizes = info->iova_pgsizes; - } else { - container->pgsizes = qemu_real_host_page_size(); - } - - if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) { - container->dma_max_mappings = 65535; - } + ret = vfio_ram_block_discard_disable(container, true); + if (ret) { + error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); + goto unregister_container_exit; + } - vfio_get_info_iova_range(info, container); + assert(bcontainer->ops->setup); - vfio_get_iommu_info_migration(container, info); - g_free(info); - break; - } - case VFIO_SPAPR_TCE_v2_IOMMU: - case VFIO_SPAPR_TCE_IOMMU: - { - ret = vfio_spapr_container_init(container, errp); - if (ret) { - goto enable_discards_exit; - } - break; - } + ret = bcontainer->ops->setup(bcontainer, errp); + if (ret) { + goto enable_discards_exit; } vfio_kvm_device_add_group(group); QLIST_INIT(&container->group_list); - QLIST_INSERT_HEAD(&space->containers, container, next); + QLIST_INSERT_HEAD(&space->containers, bcontainer, next); group->container = container; QLIST_INSERT_HEAD(&container->group_list, group, container_next); - container->listener = vfio_memory_listener; - - memory_listener_register(&container->listener, container->space->as); + bcontainer->listener = vfio_memory_listener; + memory_listener_register(&bcontainer->listener, bcontainer->space->as); - if (container->error) { + if (bcontainer->error) { ret = -1; - error_propagate_prepend(errp, container->error, + error_propagate_prepend(errp, bcontainer->error, "memory listener initialization failed: "); goto listener_release_exit; } - container->initialized = true; + bcontainer->initialized = true; return 0; listener_release_exit: QLIST_REMOVE(group, container_next); - QLIST_REMOVE(container, next); + QLIST_REMOVE(bcontainer, next); vfio_kvm_device_del_group(group); - memory_listener_unregister(&container->listener); - if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU || - container->iommu_type == VFIO_SPAPR_TCE_IOMMU) { - vfio_spapr_container_deinit(container); + 
memory_listener_unregister(&bcontainer->listener); + if (bcontainer->ops->release) { + bcontainer->ops->release(bcontainer); } enable_discards_exit: vfio_ram_block_discard_disable(container, false); +unregister_container_exit: + vfio_cpr_unregister_container(bcontainer); + free_container_exit: - vfio_free_container(container); + g_free(container); close_fd_exit: close(fd); @@ -653,6 +689,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, static void vfio_disconnect_container(VFIOGroup *group) { VFIOContainer *container = group->container; + VFIOContainerBase *bcontainer = &container->bcontainer; QLIST_REMOVE(group, container_next); group->container = NULL; @@ -663,10 +700,9 @@ static void vfio_disconnect_container(VFIOGroup *group) * group. */ if (QLIST_EMPTY(&container->group_list)) { - memory_listener_unregister(&container->listener); - if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU || - container->iommu_type == VFIO_SPAPR_TCE_IOMMU) { - vfio_spapr_container_deinit(container); + memory_listener_unregister(&bcontainer->listener); + if (bcontainer->ops->release) { + bcontainer->ops->release(bcontainer); } } @@ -676,21 +712,14 @@ static void vfio_disconnect_container(VFIOGroup *group) } if (QLIST_EMPTY(&container->group_list)) { - VFIOAddressSpace *space = container->space; - VFIOGuestIOMMU *giommu, *tmp; + VFIOAddressSpace *space = bcontainer->space; - QLIST_REMOVE(container, next); - - QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) { - memory_region_unregister_iommu_notifier( - MEMORY_REGION(giommu->iommu_mr), &giommu->n); - QLIST_REMOVE(giommu, giommu_next); - g_free(giommu); - } + vfio_container_destroy(bcontainer); trace_vfio_disconnect_container(container->fd); + vfio_cpr_unregister_container(bcontainer); close(container->fd); - vfio_free_container(container); + g_free(container); vfio_put_address_space(space); } @@ -698,6 +727,7 @@ static void vfio_disconnect_container(VFIOGroup *group) static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) { + ERRP_GUARD(); VFIOGroup *group; char path[32]; struct vfio_group_status status = { .argsz = sizeof(status) }; @@ -705,7 +735,7 @@ static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) QLIST_FOREACH(group, &vfio_group_list, next) { if (group->groupid == groupid) { /* Found it. Now is it already in the right context? 
*/ - if (group->container->space->as == as) { + if (group->container->bcontainer.space->as == as) { return group; } else { error_setg(errp, "group %d used in multiple address spaces", @@ -878,13 +908,13 @@ static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp) * @name and @vbasedev->name are likely to be different depending * on the type of the device, hence the need for passing @name */ -int vfio_attach_device(char *name, VFIODevice *vbasedev, - AddressSpace *as, Error **errp) +static int vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp) { int groupid = vfio_device_groupid(vbasedev, errp); VFIODevice *vbasedev_iter; VFIOGroup *group; - VFIOContainer *container; + VFIOContainerBase *bcontainer; int ret; if (groupid < 0) { @@ -911,26 +941,214 @@ int vfio_attach_device(char *name, VFIODevice *vbasedev, return ret; } - container = group->container; - vbasedev->container = container; - QLIST_INSERT_HEAD(&container->device_list, vbasedev, container_next); + bcontainer = &group->container->bcontainer; + vbasedev->bcontainer = bcontainer; + QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next); QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next); return ret; } -void vfio_detach_device(VFIODevice *vbasedev) +static void vfio_legacy_detach_device(VFIODevice *vbasedev) { VFIOGroup *group = vbasedev->group; - if (!vbasedev->container) { - return; - } - QLIST_REMOVE(vbasedev, global_next); QLIST_REMOVE(vbasedev, container_next); - vbasedev->container = NULL; + vbasedev->bcontainer = NULL; trace_vfio_detach_device(vbasedev->name, group->groupid); vfio_put_base_device(vbasedev); vfio_put_group(group); } + +static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + VFIOGroup *group; + struct vfio_pci_hot_reset_info *info = NULL; + struct vfio_pci_dependent_device *devices; + struct vfio_pci_hot_reset *reset; + int32_t *fds; + int ret, i, count; + bool multi = false; + + trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi"); + + if (!single) { + vfio_pci_pre_reset(vdev); + } + vdev->vbasedev.needs_reset = false; + + ret = vfio_pci_get_pci_hot_reset_info(vdev, &info); + + if (ret) { + goto out_single; + } + devices = &info->devices[0]; + + trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); + + /* Verify that we have all the groups required */ + for (i = 0; i < info->count; i++) { + PCIHostDeviceAddress host; + VFIOPCIDevice *tmp; + VFIODevice *vbasedev_iter; + + host.domain = devices[i].segment; + host.bus = devices[i].bus; + host.slot = PCI_SLOT(devices[i].devfn); + host.function = PCI_FUNC(devices[i].devfn); + + trace_vfio_pci_hot_reset_dep_devices(host.domain, + host.bus, host.slot, host.function, devices[i].group_id); + + if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { + continue; + } + + QLIST_FOREACH(group, &vfio_group_list, next) { + if (group->groupid == devices[i].group_id) { + break; + } + } + + if (!group) { + if (!vdev->has_pm_reset) { + error_report("vfio: Cannot reset device %s, " + "depends on group %d which is not owned.", + vdev->vbasedev.name, devices[i].group_id); + } + ret = -EPERM; + goto out; + } + + /* Prep dependent devices for reset and clear our marker. 
*/ + QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { + if (!vbasedev_iter->dev->realized || + vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { + continue; + } + tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); + if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { + if (single) { + ret = -EINVAL; + goto out_single; + } + vfio_pci_pre_reset(tmp); + tmp->vbasedev.needs_reset = false; + multi = true; + break; + } + } + } + + if (!single && !multi) { + ret = -EINVAL; + goto out_single; + } + + /* Determine how many group fds need to be passed */ + count = 0; + QLIST_FOREACH(group, &vfio_group_list, next) { + for (i = 0; i < info->count; i++) { + if (group->groupid == devices[i].group_id) { + count++; + break; + } + } + } + + reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); + reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); + fds = &reset->group_fds[0]; + + /* Fill in group fds */ + QLIST_FOREACH(group, &vfio_group_list, next) { + for (i = 0; i < info->count; i++) { + if (group->groupid == devices[i].group_id) { + fds[reset->count++] = group->fd; + break; + } + } + } + + /* Bus reset! */ + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); + g_free(reset); + if (ret) { + ret = -errno; + } + + trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, + ret ? strerror(errno) : "Success"); + +out: + /* Re-enable INTx on affected devices */ + for (i = 0; i < info->count; i++) { + PCIHostDeviceAddress host; + VFIOPCIDevice *tmp; + VFIODevice *vbasedev_iter; + + host.domain = devices[i].segment; + host.bus = devices[i].bus; + host.slot = PCI_SLOT(devices[i].devfn); + host.function = PCI_FUNC(devices[i].devfn); + + if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { + continue; + } + + QLIST_FOREACH(group, &vfio_group_list, next) { + if (group->groupid == devices[i].group_id) { + break; + } + } + + if (!group) { + break; + } + + QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { + if (!vbasedev_iter->dev->realized || + vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { + continue; + } + tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); + if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { + vfio_pci_post_reset(tmp); + break; + } + } + } +out_single: + if (!single) { + vfio_pci_post_reset(vdev); + } + g_free(info); + + return ret; +} + +static void vfio_iommu_legacy_class_init(ObjectClass *klass, void *data) +{ + VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); + + vioc->setup = vfio_legacy_setup; + vioc->dma_map = vfio_legacy_dma_map; + vioc->dma_unmap = vfio_legacy_dma_unmap; + vioc->attach_device = vfio_legacy_attach_device; + vioc->detach_device = vfio_legacy_detach_device; + vioc->set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking; + vioc->query_dirty_bitmap = vfio_legacy_query_dirty_bitmap; + vioc->pci_hot_reset = vfio_legacy_pci_hot_reset; +}; + +static const TypeInfo types[] = { + { + .name = TYPE_VFIO_IOMMU_LEGACY, + .parent = TYPE_VFIO_IOMMU, + .class_init = vfio_iommu_legacy_class_init, + }, +}; + +DEFINE_TYPES(types) diff --git a/hw/vfio/cpr.c b/hw/vfio/cpr.c new file mode 100644 index 00000000000..392c2dd95d1 --- /dev/null +++ b/hw/vfio/cpr.c @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2021-2024 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/vfio/vfio-common.h" +#include "migration/misc.h" +#include "qapi/error.h" +#include "sysemu/runstate.h" + +static int vfio_cpr_reboot_notifier(NotifierWithReturn *notifier, + MigrationEvent *e, Error **errp) +{ + if (e->type == MIG_EVENT_PRECOPY_SETUP && + !runstate_check(RUN_STATE_SUSPENDED) && !vm_get_suspended()) { + + error_setg(errp, + "VFIO device only supports cpr-reboot for runstate suspended"); + + return -1; + } + return 0; +} + +int vfio_cpr_register_container(VFIOContainerBase *bcontainer, Error **errp) +{ + migration_add_notifier_mode(&bcontainer->cpr_reboot_notifier, + vfio_cpr_reboot_notifier, + MIG_MODE_CPR_REBOOT); + return 0; +} + +void vfio_cpr_unregister_container(VFIOContainerBase *bcontainer) +{ + migration_remove_notifier(&bcontainer->cpr_reboot_notifier); +} diff --git a/hw/vfio/display.c b/hw/vfio/display.c index 7a10fa8604a..1aa440c6634 100644 --- a/hw/vfio/display.c +++ b/hw/vfio/display.c @@ -560,7 +560,7 @@ const VMStateDescription vfio_display_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = migrate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER(ramfb, VFIODisplay, ramfb_vmstate, RAMFBState), VMSTATE_END_OF_LIST(), } diff --git a/hw/vfio/helpers.c b/hw/vfio/helpers.c index 168847e7c51..47b4096c05e 100644 --- a/hw/vfio/helpers.c +++ b/hw/vfio/helpers.c @@ -27,6 +27,7 @@ #include "trace.h" #include "qapi/error.h" #include "qemu/error-report.h" +#include "monitor/monitor.h" /* * Common VFIO interrupt disable @@ -109,6 +110,7 @@ static const char *index_to_str(VFIODevice *vbasedev, int index) int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex, int action, int fd, Error **errp) { + ERRP_GUARD(); struct vfio_irq_set *irq_set; int argsz, ret = 0; const char *name; @@ -609,3 +611,58 @@ bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type) return ret; } + +int vfio_device_get_name(VFIODevice *vbasedev, Error **errp) +{ + ERRP_GUARD(); + struct stat st; + + if (vbasedev->fd < 0) { + if (stat(vbasedev->sysfsdev, &st) < 0) { + error_setg_errno(errp, errno, "no such host device"); + error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev); + return -errno; + } + /* User may specify a name, e.g: VFIO platform device */ + if (!vbasedev->name) { + vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); + } + } else { + if (!vbasedev->iommufd) { + error_setg(errp, "Use FD passing only with iommufd backend"); + return -EINVAL; + } + /* + * Give a name with fd so any function printing out vbasedev->name + * will not break. + */ + if (!vbasedev->name) { + vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd); + } + } + + return 0; +} + +void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp) +{ + ERRP_GUARD(); + int fd = monitor_fd_param(monitor_cur(), str, errp); + + if (fd < 0) { + error_prepend(errp, "Could not parse remote object fd %s:", str); + return; + } + vbasedev->fd = fd; +} + +void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops, + DeviceState *dev, bool ram_discard) +{ + vbasedev->type = type; + vbasedev->ops = ops; + vbasedev->dev = dev; + vbasedev->fd = -1; + + vbasedev->ram_block_discard_allowed = ram_discard; +} diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c new file mode 100644 index 00000000000..8827ffe636e --- /dev/null +++ b/hw/vfio/iommufd.c @@ -0,0 +1,645 @@ +/* + * iommufd container backend + * + * Copyright (C) 2023 Intel Corporation. 
+ * Copyright Red Hat, Inc. 2023 + * + * Authors: Yi Liu + * Eric Auger + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include <sys/ioctl.h> +#include <linux/vfio.h> +#include <linux/iommufd.h> + +#include "hw/vfio/vfio-common.h" +#include "qemu/error-report.h" +#include "trace.h" +#include "qapi/error.h" +#include "sysemu/iommufd.h" +#include "hw/qdev-core.h" +#include "sysemu/reset.h" +#include "qemu/cutils.h" +#include "qemu/chardev_open.h" +#include "pci.h" + +static int iommufd_cdev_map(const VFIOContainerBase *bcontainer, hwaddr iova, + ram_addr_t size, void *vaddr, bool readonly) +{ + const VFIOIOMMUFDContainer *container = + container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + + return iommufd_backend_map_dma(container->be, + container->ioas_id, + iova, size, vaddr, readonly); +} + +static int iommufd_cdev_unmap(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb) +{ + const VFIOIOMMUFDContainer *container = + container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + + /* TODO: Handle dma_unmap_bitmap with iotlb args (migration) */ + return iommufd_backend_unmap_dma(container->be, + container->ioas_id, iova, size); +} + +static int iommufd_cdev_kvm_device_add(VFIODevice *vbasedev, Error **errp) +{ + return vfio_kvm_device_add_fd(vbasedev->fd, errp); +} + +static void iommufd_cdev_kvm_device_del(VFIODevice *vbasedev) +{ + Error *err = NULL; + + if (vfio_kvm_device_del_fd(vbasedev->fd, &err)) { + error_report_err(err); + } +} + +static int iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp) +{ + IOMMUFDBackend *iommufd = vbasedev->iommufd; + struct vfio_device_bind_iommufd bind = { + .argsz = sizeof(bind), + .flags = 0, + }; + int ret; + + ret = iommufd_backend_connect(iommufd, errp); + if (ret) { + return ret; + } + + /* + * Add device to kvm-vfio to be prepared for the tracking + * in KVM. Especially for some emulated devices, it requires + * to have kvm information in the device open. 
+ */ + ret = iommufd_cdev_kvm_device_add(vbasedev, errp); + if (ret) { + goto err_kvm_device_add; + } + + /* Bind device to iommufd */ + bind.iommufd = iommufd->fd; + ret = ioctl(vbasedev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind); + if (ret) { + error_setg_errno(errp, errno, "error bind device fd=%d to iommufd=%d", + vbasedev->fd, bind.iommufd); + goto err_bind; + } + + vbasedev->devid = bind.out_devid; + trace_iommufd_cdev_connect_and_bind(bind.iommufd, vbasedev->name, + vbasedev->fd, vbasedev->devid); + return ret; +err_bind: + iommufd_cdev_kvm_device_del(vbasedev); +err_kvm_device_add: + iommufd_backend_disconnect(iommufd); + return ret; +} + +static void iommufd_cdev_unbind_and_disconnect(VFIODevice *vbasedev) +{ + /* Unbind is automatically conducted when device fd is closed */ + iommufd_cdev_kvm_device_del(vbasedev); + iommufd_backend_disconnect(vbasedev->iommufd); +} + +static int iommufd_cdev_getfd(const char *sysfs_path, Error **errp) +{ + ERRP_GUARD(); + long int ret = -ENOTTY; + g_autofree char *path = NULL; + g_autofree char *vfio_dev_path = NULL; + g_autofree char *vfio_path = NULL; + DIR *dir = NULL; + struct dirent *dent; + g_autofree gchar *contents = NULL; + gsize length; + int major, minor; + dev_t vfio_devt; + + path = g_strdup_printf("%s/vfio-dev", sysfs_path); + dir = opendir(path); + if (!dir) { + error_setg_errno(errp, errno, "couldn't open directory %s", path); + goto out; + } + + while ((dent = readdir(dir))) { + if (!strncmp(dent->d_name, "vfio", 4)) { + vfio_dev_path = g_strdup_printf("%s/%s/dev", path, dent->d_name); + break; + } + } + + if (!vfio_dev_path) { + error_setg(errp, "failed to find vfio-dev/vfioX/dev"); + goto out_close_dir; + } + + if (!g_file_get_contents(vfio_dev_path, &contents, &length, NULL)) { + error_setg(errp, "failed to load \"%s\"", vfio_dev_path); + goto out_close_dir; + } + + if (sscanf(contents, "%d:%d", &major, &minor) != 2) { + error_setg(errp, "failed to get major:minor for \"%s\"", vfio_dev_path); + goto out_close_dir; + } + vfio_devt = makedev(major, minor); + + vfio_path = g_strdup_printf("/dev/vfio/devices/%s", dent->d_name); + ret = open_cdev(vfio_path, vfio_devt); + if (ret < 0) { + error_setg(errp, "Failed to open %s", vfio_path); + } + + trace_iommufd_cdev_getfd(vfio_path, ret); + +out_close_dir: + closedir(dir); +out: + if (*errp) { + error_prepend(errp, VFIO_MSG_PREFIX, path); + } + + return ret; +} + +static int iommufd_cdev_attach_ioas_hwpt(VFIODevice *vbasedev, uint32_t id, + Error **errp) +{ + int ret, iommufd = vbasedev->iommufd->fd; + struct vfio_device_attach_iommufd_pt attach_data = { + .argsz = sizeof(attach_data), + .flags = 0, + .pt_id = id, + }; + + /* Attach device to an IOAS or hwpt within iommufd */ + ret = ioctl(vbasedev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data); + if (ret) { + error_setg_errno(errp, errno, + "[iommufd=%d] error attach %s (%d) to id=%d", + iommufd, vbasedev->name, vbasedev->fd, id); + } else { + trace_iommufd_cdev_attach_ioas_hwpt(iommufd, vbasedev->name, + vbasedev->fd, id); + } + return ret; +} + +static int iommufd_cdev_detach_ioas_hwpt(VFIODevice *vbasedev, Error **errp) +{ + int ret, iommufd = vbasedev->iommufd->fd; + struct vfio_device_detach_iommufd_pt detach_data = { + .argsz = sizeof(detach_data), + .flags = 0, + }; + + ret = ioctl(vbasedev->fd, VFIO_DEVICE_DETACH_IOMMUFD_PT, &detach_data); + if (ret) { + error_setg_errno(errp, errno, "detach %s failed", vbasedev->name); + } else { + trace_iommufd_cdev_detach_ioas_hwpt(iommufd, vbasedev->name); + } + return ret; +} + +static int 
iommufd_cdev_attach_container(VFIODevice *vbasedev, + VFIOIOMMUFDContainer *container, + Error **errp) +{ + return iommufd_cdev_attach_ioas_hwpt(vbasedev, container->ioas_id, errp); +} + +static void iommufd_cdev_detach_container(VFIODevice *vbasedev, + VFIOIOMMUFDContainer *container) +{ + Error *err = NULL; + + if (iommufd_cdev_detach_ioas_hwpt(vbasedev, &err)) { + error_report_err(err); + } +} + +static void iommufd_cdev_container_destroy(VFIOIOMMUFDContainer *container) +{ + VFIOContainerBase *bcontainer = &container->bcontainer; + + if (!QLIST_EMPTY(&bcontainer->device_list)) { + return; + } + memory_listener_unregister(&bcontainer->listener); + vfio_container_destroy(bcontainer); + iommufd_backend_free_id(container->be, container->ioas_id); + g_free(container); +} + +static int iommufd_cdev_ram_block_discard_disable(bool state) +{ + /* + * We support coordinated discarding of RAM via the RamDiscardManager. + */ + return ram_block_uncoordinated_discard_disable(state); +} + +static int iommufd_cdev_get_info_iova_range(VFIOIOMMUFDContainer *container, + uint32_t ioas_id, Error **errp) +{ + VFIOContainerBase *bcontainer = &container->bcontainer; + struct iommu_ioas_iova_ranges *info; + struct iommu_iova_range *iova_ranges; + int ret, sz, fd = container->be->fd; + + info = g_malloc0(sizeof(*info)); + info->size = sizeof(*info); + info->ioas_id = ioas_id; + + ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info); + if (ret && errno != EMSGSIZE) { + goto error; + } + + sz = info->num_iovas * sizeof(struct iommu_iova_range); + info = g_realloc(info, sizeof(*info) + sz); + info->allowed_iovas = (uintptr_t)(info + 1); + + ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info); + if (ret) { + goto error; + } + + iova_ranges = (struct iommu_iova_range *)(uintptr_t)info->allowed_iovas; + + for (int i = 0; i < info->num_iovas; i++) { + Range *range = g_new(Range, 1); + + range_set_bounds(range, iova_ranges[i].start, iova_ranges[i].last); + bcontainer->iova_ranges = + range_list_insert(bcontainer->iova_ranges, range); + } + bcontainer->pgsizes = info->out_iova_alignment; + + g_free(info); + return 0; + +error: + ret = -errno; + g_free(info); + error_setg_errno(errp, errno, "Cannot get IOVA ranges"); + return ret; +} + +static int iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp) +{ + VFIOContainerBase *bcontainer; + VFIOIOMMUFDContainer *container; + VFIOAddressSpace *space; + struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; + int ret, devfd; + uint32_t ioas_id; + Error *err = NULL; + const VFIOIOMMUClass *iommufd_vioc = + VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD)); + + if (vbasedev->fd < 0) { + devfd = iommufd_cdev_getfd(vbasedev->sysfsdev, errp); + if (devfd < 0) { + return devfd; + } + vbasedev->fd = devfd; + } else { + devfd = vbasedev->fd; + } + + ret = iommufd_cdev_connect_and_bind(vbasedev, errp); + if (ret) { + goto err_connect_bind; + } + + space = vfio_get_address_space(as); + + /* try to attach to an existing container in this space */ + QLIST_FOREACH(bcontainer, &space->containers, next) { + container = container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + if (bcontainer->ops != iommufd_vioc || + vbasedev->iommufd != container->be) { + continue; + } + if (iommufd_cdev_attach_container(vbasedev, container, &err)) { + const char *msg = error_get_pretty(err); + + trace_iommufd_cdev_fail_attach_existing_container(msg); + error_free(err); + err = NULL; + } else { + ret = iommufd_cdev_ram_block_discard_disable(true); + 
if (ret) { + error_setg(errp, + "Cannot set discarding of RAM broken (%d)", ret); + goto err_discard_disable; + } + goto found_container; + } + } + + /* Need to allocate a new dedicated container */ + ret = iommufd_backend_alloc_ioas(vbasedev->iommufd, &ioas_id, errp); + if (ret < 0) { + goto err_alloc_ioas; + } + + trace_iommufd_cdev_alloc_ioas(vbasedev->iommufd->fd, ioas_id); + + container = g_malloc0(sizeof(*container)); + container->be = vbasedev->iommufd; + container->ioas_id = ioas_id; + + bcontainer = &container->bcontainer; + vfio_container_init(bcontainer, space, iommufd_vioc); + QLIST_INSERT_HEAD(&space->containers, bcontainer, next); + + ret = iommufd_cdev_attach_container(vbasedev, container, errp); + if (ret) { + goto err_attach_container; + } + + ret = iommufd_cdev_ram_block_discard_disable(true); + if (ret) { + goto err_discard_disable; + } + + ret = iommufd_cdev_get_info_iova_range(container, ioas_id, &err); + if (ret) { + error_append_hint(&err, + "Fallback to default 64bit IOVA range and 4K page size\n"); + warn_report_err(err); + err = NULL; + bcontainer->pgsizes = qemu_real_host_page_size(); + } + + bcontainer->listener = vfio_memory_listener; + memory_listener_register(&bcontainer->listener, bcontainer->space->as); + + if (bcontainer->error) { + ret = -1; + error_propagate_prepend(errp, bcontainer->error, + "memory listener initialization failed: "); + goto err_listener_register; + } + + bcontainer->initialized = true; + +found_container: + ret = ioctl(devfd, VFIO_DEVICE_GET_INFO, &dev_info); + if (ret) { + error_setg_errno(errp, errno, "error getting device info"); + goto err_listener_register; + } + + ret = vfio_cpr_register_container(bcontainer, errp); + if (ret) { + goto err_listener_register; + } + + /* + * TODO: examine RAM_BLOCK_DISCARD stuff, should we do group level + * for discarding incompatibility check as well? 
+ */ + if (vbasedev->ram_block_discard_allowed) { + iommufd_cdev_ram_block_discard_disable(false); + } + + vbasedev->group = 0; + vbasedev->num_irqs = dev_info.num_irqs; + vbasedev->num_regions = dev_info.num_regions; + vbasedev->flags = dev_info.flags; + vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET); + vbasedev->bcontainer = bcontainer; + QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next); + QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next); + + trace_iommufd_cdev_device_info(vbasedev->name, devfd, vbasedev->num_irqs, + vbasedev->num_regions, vbasedev->flags); + return 0; + +err_listener_register: + iommufd_cdev_ram_block_discard_disable(false); +err_discard_disable: + iommufd_cdev_detach_container(vbasedev, container); +err_attach_container: + iommufd_cdev_container_destroy(container); +err_alloc_ioas: + vfio_put_address_space(space); + iommufd_cdev_unbind_and_disconnect(vbasedev); +err_connect_bind: + close(vbasedev->fd); + return ret; +} + +static void iommufd_cdev_detach(VFIODevice *vbasedev) +{ + VFIOContainerBase *bcontainer = vbasedev->bcontainer; + VFIOAddressSpace *space = bcontainer->space; + VFIOIOMMUFDContainer *container = container_of(bcontainer, + VFIOIOMMUFDContainer, + bcontainer); + QLIST_REMOVE(vbasedev, global_next); + QLIST_REMOVE(vbasedev, container_next); + vbasedev->bcontainer = NULL; + + if (!vbasedev->ram_block_discard_allowed) { + iommufd_cdev_ram_block_discard_disable(false); + } + + vfio_cpr_unregister_container(bcontainer); + iommufd_cdev_detach_container(vbasedev, container); + iommufd_cdev_container_destroy(container); + vfio_put_address_space(space); + + iommufd_cdev_unbind_and_disconnect(vbasedev); + close(vbasedev->fd); +} + +static VFIODevice *iommufd_cdev_pci_find_by_devid(__u32 devid) +{ + VFIODevice *vbasedev_iter; + const VFIOIOMMUClass *iommufd_vioc = + VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD)); + + QLIST_FOREACH(vbasedev_iter, &vfio_device_list, global_next) { + if (vbasedev_iter->bcontainer->ops != iommufd_vioc) { + continue; + } + if (devid == vbasedev_iter->devid) { + return vbasedev_iter; + } + } + return NULL; +} + +static VFIOPCIDevice * +iommufd_cdev_dep_get_realized_vpdev(struct vfio_pci_dependent_device *dep_dev, + VFIODevice *reset_dev) +{ + VFIODevice *vbasedev_tmp; + + if (dep_dev->devid == reset_dev->devid || + dep_dev->devid == VFIO_PCI_DEVID_OWNED) { + return NULL; + } + + vbasedev_tmp = iommufd_cdev_pci_find_by_devid(dep_dev->devid); + if (!vbasedev_tmp || !vbasedev_tmp->dev->realized || + vbasedev_tmp->type != VFIO_DEVICE_TYPE_PCI) { + return NULL; + } + + return container_of(vbasedev_tmp, VFIOPCIDevice, vbasedev); +} + +static int iommufd_cdev_pci_hot_reset(VFIODevice *vbasedev, bool single) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + struct vfio_pci_hot_reset_info *info = NULL; + struct vfio_pci_dependent_device *devices; + struct vfio_pci_hot_reset *reset; + int ret, i; + bool multi = false; + + trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? 
"one" : "multi"); + + if (!single) { + vfio_pci_pre_reset(vdev); + } + vdev->vbasedev.needs_reset = false; + + ret = vfio_pci_get_pci_hot_reset_info(vdev, &info); + + if (ret) { + goto out_single; + } + + assert(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID); + + devices = &info->devices[0]; + + if (!(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED)) { + if (!vdev->has_pm_reset) { + for (i = 0; i < info->count; i++) { + if (devices[i].devid == VFIO_PCI_DEVID_NOT_OWNED) { + error_report("vfio: Cannot reset device %s, " + "depends on device %04x:%02x:%02x.%x " + "which is not owned.", + vdev->vbasedev.name, devices[i].segment, + devices[i].bus, PCI_SLOT(devices[i].devfn), + PCI_FUNC(devices[i].devfn)); + } + } + } + ret = -EPERM; + goto out_single; + } + + trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); + + for (i = 0; i < info->count; i++) { + VFIOPCIDevice *tmp; + + trace_iommufd_cdev_pci_hot_reset_dep_devices(devices[i].segment, + devices[i].bus, + PCI_SLOT(devices[i].devfn), + PCI_FUNC(devices[i].devfn), + devices[i].devid); + + /* + * If a VFIO cdev device is resettable, all the dependent devices + * are either bound to same iommufd or within same iommu_groups as + * one of the iommufd bound devices. + */ + assert(devices[i].devid != VFIO_PCI_DEVID_NOT_OWNED); + + tmp = iommufd_cdev_dep_get_realized_vpdev(&devices[i], &vdev->vbasedev); + if (!tmp) { + continue; + } + + if (single) { + ret = -EINVAL; + goto out_single; + } + vfio_pci_pre_reset(tmp); + tmp->vbasedev.needs_reset = false; + multi = true; + } + + if (!single && !multi) { + ret = -EINVAL; + goto out_single; + } + + /* Use zero length array for hot reset with iommufd backend */ + reset = g_malloc0(sizeof(*reset)); + reset->argsz = sizeof(*reset); + + /* Bus reset! */ + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); + g_free(reset); + if (ret) { + ret = -errno; + } + + trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, + ret ? 
strerror(errno) : "Success"); + + /* Re-enable INTx on affected devices */ + for (i = 0; i < info->count; i++) { + VFIOPCIDevice *tmp; + + tmp = iommufd_cdev_dep_get_realized_vpdev(&devices[i], &vdev->vbasedev); + if (!tmp) { + continue; + } + vfio_pci_post_reset(tmp); + } +out_single: + if (!single) { + vfio_pci_post_reset(vdev); + } + g_free(info); + + return ret; +} + +static void vfio_iommu_iommufd_class_init(ObjectClass *klass, void *data) +{ + VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); + + vioc->dma_map = iommufd_cdev_map; + vioc->dma_unmap = iommufd_cdev_unmap; + vioc->attach_device = iommufd_cdev_attach; + vioc->detach_device = iommufd_cdev_detach; + vioc->pci_hot_reset = iommufd_cdev_pci_hot_reset; +}; + +static const TypeInfo types[] = { + { + .name = TYPE_VFIO_IOMMU_IOMMUFD, + .parent = TYPE_VFIO_IOMMU, + .class_init = vfio_iommu_iommufd_class_init, + }, +}; + +DEFINE_TYPES(types) diff --git a/hw/vfio/meson.build b/hw/vfio/meson.build index 2a6912c9402..bba776f75cc 100644 --- a/hw/vfio/meson.build +++ b/hw/vfio/meson.build @@ -2,9 +2,14 @@ vfio_ss = ss.source_set() vfio_ss.add(files( 'helpers.c', 'common.c', + 'container-base.c', 'container.c', - 'spapr.c', 'migration.c', + 'cpr.c', +)) +vfio_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr.c')) +vfio_ss.add(when: 'CONFIG_IOMMUFD', if_true: files( + 'iommufd.c', )) vfio_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files( 'display.c', diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c index 28d422b39f9..1149c6b3740 100644 --- a/hw/vfio/migration.c +++ b/hw/vfio/migration.c @@ -17,14 +17,12 @@ #include "sysemu/runstate.h" #include "hw/vfio/vfio-common.h" -#include "migration/migration.h" -#include "migration/options.h" +#include "migration/misc.h" #include "migration/savevm.h" #include "migration/vmstate.h" #include "migration/qemu-file.h" #include "migration/register.h" #include "migration/blocker.h" -#include "migration/misc.h" #include "qapi/error.h" #include "exec/ramlist.h" #include "exec/ram_addr.h" @@ -163,6 +161,19 @@ static int vfio_migration_set_state(VFIODevice *vbasedev, return ret; } +/* + * Some device state transitions require resetting the device if they fail. + * This function sets the device in new_state and resets the device if that + * fails. Reset is done by using ERROR as the recover state. + */ +static int +vfio_migration_set_state_or_reset(VFIODevice *vbasedev, + enum vfio_device_mig_state new_state) +{ + return vfio_migration_set_state(vbasedev, new_state, + VFIO_DEVICE_STATE_ERROR); +} + static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev, uint64_t data_size) { @@ -422,12 +433,7 @@ static void vfio_save_cleanup(void *opaque) * after migration has completed, so it won't increase downtime. */ if (migration->device_state == VFIO_DEVICE_STATE_STOP_COPY) { - /* - * If setting the device in STOP state fails, the device should be - * reset. To do so, use ERROR state as a recover state. - */ - vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_STOP, - VFIO_DEVICE_STATE_ERROR); + vfio_migration_set_state_or_reset(vbasedev, VFIO_DEVICE_STATE_STOP); } g_free(migration->data_buffer); @@ -497,6 +503,12 @@ static bool vfio_is_active_iterate(void *opaque) return vfio_device_state_is_precopy(vbasedev); } +/* + * Note about migration rate limiting: VFIO migration buffer size is currently + * limited to 1MB, so there is no need to check if migration rate exceeded (as + * in the worst case it will exceed by 1MB). 
However, if the buffer size is + * later changed to a bigger value, migration rate should be enforced here. + */ static int vfio_save_iterate(QEMUFile *f, void *opaque) { VFIODevice *vbasedev = opaque; @@ -521,11 +533,7 @@ static int vfio_save_iterate(QEMUFile *f, void *opaque) trace_vfio_save_iterate(vbasedev->name, migration->precopy_init_size, migration->precopy_dirty_size); - /* - * A VFIO device's pre-copy dirty_bytes is not guaranteed to reach zero. - * Return 1 so following handlers will not be potentially blocked. - */ - return 1; + return !migration->precopy_init_size && !migration->precopy_dirty_size; } static int vfio_save_complete_precopy(QEMUFile *f, void *opaque) @@ -699,20 +707,13 @@ static void vfio_vmstate_change_prepare(void *opaque, bool running, VFIO_DEVICE_STATE_PRE_COPY_P2P : VFIO_DEVICE_STATE_RUNNING_P2P; - /* - * If setting the device in new_state fails, the device should be reset. - * To do so, use ERROR state as a recover state. - */ - ret = vfio_migration_set_state(vbasedev, new_state, - VFIO_DEVICE_STATE_ERROR); + ret = vfio_migration_set_state_or_reset(vbasedev, new_state); if (ret) { /* * Migration should be aborted in this case, but vm_state_notify() * currently does not support reporting failures. */ - if (migrate_get_current()->to_dst_file) { - qemu_file_set_error(migrate_get_current()->to_dst_file, ret); - } + migration_file_set_error(ret); } trace_vfio_vmstate_change_prepare(vbasedev->name, running, @@ -736,47 +737,32 @@ static void vfio_vmstate_change(void *opaque, bool running, RunState state) VFIO_DEVICE_STATE_STOP; } - /* - * If setting the device in new_state fails, the device should be reset. - * To do so, use ERROR state as a recover state. - */ - ret = vfio_migration_set_state(vbasedev, new_state, - VFIO_DEVICE_STATE_ERROR); + ret = vfio_migration_set_state_or_reset(vbasedev, new_state); if (ret) { /* * Migration should be aborted in this case, but vm_state_notify() * currently does not support reporting failures. */ - if (migrate_get_current()->to_dst_file) { - qemu_file_set_error(migrate_get_current()->to_dst_file, ret); - } + migration_file_set_error(ret); } trace_vfio_vmstate_change(vbasedev->name, running, RunState_str(state), mig_state_to_str(new_state)); } -static void vfio_migration_state_notifier(Notifier *notifier, void *data) +static int vfio_migration_state_notifier(NotifierWithReturn *notifier, + MigrationEvent *e, Error **errp) { - MigrationState *s = data; VFIOMigration *migration = container_of(notifier, VFIOMigration, migration_state); VFIODevice *vbasedev = migration->vbasedev; - trace_vfio_migration_state_notifier(vbasedev->name, - MigrationStatus_str(s->state)); + trace_vfio_migration_state_notifier(vbasedev->name, e->type); - switch (s->state) { - case MIGRATION_STATUS_CANCELLING: - case MIGRATION_STATUS_CANCELLED: - case MIGRATION_STATUS_FAILED: - /* - * If setting the device in RUNNING state fails, the device should - * be reset. To do so, use ERROR state as a recover state. 
- */ - vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RUNNING, - VFIO_DEVICE_STATE_ERROR); + if (e->type == MIG_EVENT_PRECOPY_FAILED) { + vfio_migration_set_state_or_reset(vbasedev, VFIO_DEVICE_STATE_RUNNING); } + return 0; } static void vfio_migration_free(VFIODevice *vbasedev) @@ -899,7 +885,7 @@ static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp) vbasedev->migration_blocker = error_copy(err); error_free(err); - return migrate_add_blocker(&vbasedev->migration_blocker, errp); + return migrate_add_blocker_normal(&vbasedev->migration_blocker, errp); } /* ---------------------------------------------------------------------- */ diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c index 84b1a7b9485..496fd1ee86b 100644 --- a/hw/vfio/pci-quirks.c +++ b/hw/vfio/pci-quirks.c @@ -1538,6 +1538,7 @@ static bool is_valid_std_cap_offset(uint8_t pos) static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp) { + ERRP_GUARD(); PCIDevice *pdev = &vdev->pdev; int ret, pos; bool c8_conflict = false, d4_conflict = false; @@ -1630,6 +1631,7 @@ static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp) #define VMD_SHADOW_CAP_LEN 24 static int vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp) { + ERRP_GUARD(); uint8_t membar_phys[16]; int ret, pos = 0xE8; diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index e167bef2ad1..64780d1b793 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include CONFIG_DEVICES /* CONFIG_IOMMUFD */ #include #include @@ -42,6 +43,7 @@ #include "qapi/error.h" #include "migration/blocker.h" #include "migration/qemu-file.h" +#include "sysemu/iommufd.h" #define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug" @@ -2134,6 +2136,7 @@ static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos) static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp) { + ERRP_GUARD(); PCIDevice *pdev = &vdev->pdev; uint8_t cap_id, next, size; int ret; @@ -2376,7 +2379,7 @@ static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp) return 0; } -static void vfio_pci_pre_reset(VFIOPCIDevice *vdev) +void vfio_pci_pre_reset(VFIOPCIDevice *vdev) { PCIDevice *pdev = &vdev->pdev; uint16_t cmd; @@ -2413,7 +2416,7 @@ static void vfio_pci_pre_reset(VFIOPCIDevice *vdev) vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2); } -static void vfio_pci_post_reset(VFIOPCIDevice *vdev) +void vfio_pci_post_reset(VFIOPCIDevice *vdev) { Error *err = NULL; int nr; @@ -2437,7 +2440,7 @@ static void vfio_pci_post_reset(VFIOPCIDevice *vdev) vfio_quirk_reset(vdev); } -static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name) +bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name) { char tmp[13]; @@ -2447,22 +2450,13 @@ static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name) return (strcmp(tmp, name) == 0); } -static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) +int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev, + struct vfio_pci_hot_reset_info **info_p) { - VFIOGroup *group; struct vfio_pci_hot_reset_info *info; - struct vfio_pci_dependent_device *devices; - struct vfio_pci_hot_reset *reset; - int32_t *fds; - int ret, i, count; - bool multi = false; + int ret, count; - trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? 
"one" : "multi"); - - if (!single) { - vfio_pci_pre_reset(vdev); - } - vdev->vbasedev.needs_reset = false; + assert(info_p && !*info_p); info = g_malloc0(sizeof(*info)); info->argsz = sizeof(*info); @@ -2470,163 +2464,36 @@ static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); if (ret && errno != ENOSPC) { ret = -errno; + g_free(info); if (!vdev->has_pm_reset) { error_report("vfio: Cannot reset device %s, " "no available reset mechanism.", vdev->vbasedev.name); } - goto out_single; + return ret; } count = info->count; - info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices))); - info->argsz = sizeof(*info) + (count * sizeof(*devices)); - devices = &info->devices[0]; + info = g_realloc(info, sizeof(*info) + (count * sizeof(info->devices[0]))); + info->argsz = sizeof(*info) + (count * sizeof(info->devices[0])); ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); if (ret) { ret = -errno; + g_free(info); error_report("vfio: hot reset info failed: %m"); - goto out_single; - } - - trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); - - /* Verify that we have all the groups required */ - for (i = 0; i < info->count; i++) { - PCIHostDeviceAddress host; - VFIOPCIDevice *tmp; - VFIODevice *vbasedev_iter; - - host.domain = devices[i].segment; - host.bus = devices[i].bus; - host.slot = PCI_SLOT(devices[i].devfn); - host.function = PCI_FUNC(devices[i].devfn); - - trace_vfio_pci_hot_reset_dep_devices(host.domain, - host.bus, host.slot, host.function, devices[i].group_id); - - if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { - continue; - } - - QLIST_FOREACH(group, &vfio_group_list, next) { - if (group->groupid == devices[i].group_id) { - break; - } - } - - if (!group) { - if (!vdev->has_pm_reset) { - error_report("vfio: Cannot reset device %s, " - "depends on group %d which is not owned.", - vdev->vbasedev.name, devices[i].group_id); - } - ret = -EPERM; - goto out; - } - - /* Prep dependent devices for reset and clear our marker. */ - QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { - if (!vbasedev_iter->dev->realized || - vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { - continue; - } - tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); - if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { - if (single) { - ret = -EINVAL; - goto out_single; - } - vfio_pci_pre_reset(tmp); - tmp->vbasedev.needs_reset = false; - multi = true; - break; - } - } - } - - if (!single && !multi) { - ret = -EINVAL; - goto out_single; - } - - /* Determine how many group fds need to be passed */ - count = 0; - QLIST_FOREACH(group, &vfio_group_list, next) { - for (i = 0; i < info->count; i++) { - if (group->groupid == devices[i].group_id) { - count++; - break; - } - } - } - - reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); - reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); - fds = &reset->group_fds[0]; - - /* Fill in group fds */ - QLIST_FOREACH(group, &vfio_group_list, next) { - for (i = 0; i < info->count; i++) { - if (group->groupid == devices[i].group_id) { - fds[reset->count++] = group->fd; - break; - } - } + return ret; } - /* Bus reset! */ - ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); - g_free(reset); - - trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, - ret ? 
strerror(errno) : "Success"); - -out: - /* Re-enable INTx on affected devices */ - for (i = 0; i < info->count; i++) { - PCIHostDeviceAddress host; - VFIOPCIDevice *tmp; - VFIODevice *vbasedev_iter; - - host.domain = devices[i].segment; - host.bus = devices[i].bus; - host.slot = PCI_SLOT(devices[i].devfn); - host.function = PCI_FUNC(devices[i].devfn); - - if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { - continue; - } - - QLIST_FOREACH(group, &vfio_group_list, next) { - if (group->groupid == devices[i].group_id) { - break; - } - } - - if (!group) { - break; - } + *info_p = info; + return 0; +} - QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { - if (!vbasedev_iter->dev->realized || - vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { - continue; - } - tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); - if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { - vfio_pci_post_reset(tmp); - break; - } - } - } -out_single: - if (!single) { - vfio_pci_post_reset(vdev); - } - g_free(info); +static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) +{ + VFIODevice *vbasedev = &vdev->vbasedev; + const VFIOIOMMUClass *ops = vbasedev->bcontainer->ops; - return ret; + return ops->pci_hot_reset(vbasedev, single); } /* @@ -2692,28 +2559,28 @@ static bool vfio_display_migration_needed(void *opaque) (vdev->ramfb_migrate == ON_OFF_AUTO_AUTO && vdev->enable_ramfb); } -const VMStateDescription vmstate_vfio_display = { +static const VMStateDescription vmstate_vfio_display = { .name = "VFIOPCIDevice/VFIODisplay", .version_id = 1, .minimum_version_id = 1, .needed = vfio_display_migration_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_STRUCT_POINTER(dpy, VFIOPCIDevice, vfio_display_vmstate, VFIODisplay), VMSTATE_END_OF_LIST() } }; -const VMStateDescription vmstate_vfio_pci_config = { +static const VMStateDescription vmstate_vfio_pci_config = { .name = "VFIOPCIDevice", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice), VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_vfio_display, NULL } @@ -3076,21 +2943,24 @@ static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev) static void vfio_realize(PCIDevice *pdev, Error **errp) { + ERRP_GUARD(); VFIOPCIDevice *vdev = VFIO_PCI(pdev); VFIODevice *vbasedev = &vdev->vbasedev; char *tmp, *subsys; Error *err = NULL; - struct stat st; int i, ret; bool is_mdev; char uuid[UUID_STR_LEN]; char *name; - if (!vbasedev->sysfsdev) { + if (vbasedev->fd < 0 && !vbasedev->sysfsdev) { if (!(~vdev->host.domain || ~vdev->host.bus || ~vdev->host.slot || ~vdev->host.function)) { error_setg(errp, "No provided host device"); error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F " +#ifdef CONFIG_IOMMUFD + "or -device vfio-pci,fd=DEVICE_FD " +#endif "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n"); return; } @@ -3100,17 +2970,10 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) vdev->host.slot, vdev->host.function); } - if (stat(vbasedev->sysfsdev, &st) < 0) { - error_setg_errno(errp, errno, "no such host device"); - error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev); + if (vfio_device_get_name(vbasedev, errp) < 0) { return; } - vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); - vbasedev->ops = &vfio_pci_ops; - vbasedev->type = VFIO_DEVICE_TYPE_PCI; 
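The vfio_pci_get_pci_hot_reset_info() helper factored out above follows the usual VFIO grow-and-retry ioctl pattern: call once with no room for the trailing devices[] array so the kernel reports the count (failing with ENOSPC), then enlarge the buffer and call again. A condensed sketch of just that pattern, using plain libc instead of glib and with QEMU-specific error reporting trimmed:

    #include <errno.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Returns a malloc'ed info block with devices[] filled in, or NULL. */
    struct vfio_pci_hot_reset_info *hot_reset_info(int device_fd)
    {
        struct vfio_pci_hot_reset_info *info, *bigger;

        info = calloc(1, sizeof(*info));
        if (!info) {
            return NULL;
        }
        info->argsz = sizeof(*info);

        /* First pass: no space for devices[]; expect ENOSPC plus a valid count */
        if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info) &&
            errno != ENOSPC) {
            free(info);
            return NULL;
        }

        bigger = realloc(info, sizeof(*info) +
                         info->count * sizeof(info->devices[0]));
        if (!bigger) {
            free(info);
            return NULL;
        }
        info = bigger;
        info->argsz = sizeof(*info) + info->count * sizeof(info->devices[0]);

        /* Second pass: the kernel now fills in the dependent-device list */
        if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
            free(info);
            return NULL;
        }
        return info;
    }

The iommufd hot-reset path above then walks the returned devices[] by devid rather than by group id, which is why only a zero-length group_fds array needs to be passed to VFIO_DEVICE_PCI_HOT_RESET.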
- vbasedev->dev = DEVICE(vdev); - /* * Mediated devices *might* operate compatibly with discarding of RAM, but * we cannot know for certain, it depends on whether the mdev vendor driver @@ -3458,6 +3321,7 @@ static void vfio_instance_init(Object *obj) { PCIDevice *pci_dev = PCI_DEVICE(obj); VFIOPCIDevice *vdev = VFIO_PCI(obj); + VFIODevice *vbasedev = &vdev->vbasedev; device_add_bootindex_property(obj, &vdev->bootindex, "bootindex", NULL, @@ -3467,6 +3331,9 @@ static void vfio_instance_init(Object *obj) vdev->host.slot = ~0U; vdev->host.function = ~0U; + vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PCI, &vfio_pci_ops, + DEVICE(vdev), false); + vdev->nv_gpudirect_clique = 0xFF; /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command @@ -3519,14 +3386,20 @@ static Property vfio_pci_dev_properties[] = { qdev_prop_nv_gpudirect_clique, uint8_t), DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo, OFF_AUTOPCIBAR_OFF), - /* - * TODO - support passed fds... is this necessary? - * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name), - * DEFINE_PROP_STRING("vfiogroupfd, VFIOPCIDevice, vfiogroupfd_name), - */ +#ifdef CONFIG_IOMMUFD + DEFINE_PROP_LINK("iommufd", VFIOPCIDevice, vbasedev.iommufd, + TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), +#endif DEFINE_PROP_END_OF_LIST(), }; +#ifdef CONFIG_IOMMUFD +static void vfio_pci_set_fd(Object *obj, const char *str, Error **errp) +{ + vfio_device_set_fd(&VFIO_PCI(obj)->vbasedev, str, errp); +} +#endif + static void vfio_pci_dev_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -3534,6 +3407,9 @@ static void vfio_pci_dev_class_init(ObjectClass *klass, void *data) dc->reset = vfio_pci_reset; device_class_set_props(dc, vfio_pci_dev_properties); +#ifdef CONFIG_IOMMUFD + object_class_property_add_str(klass, "fd", NULL, vfio_pci_set_fd); +#endif dc->desc = "VFIO-based PCI device assignment"; set_bit(DEVICE_CATEGORY_MISC, dc->categories); pdc->realize = vfio_realize; diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h index fba8737ab2c..6e64a2654e6 100644 --- a/hw/vfio/pci.h +++ b/hw/vfio/pci.h @@ -218,6 +218,12 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr); extern const PropertyInfo qdev_prop_nv_gpudirect_clique; +void vfio_pci_pre_reset(VFIOPCIDevice *vdev); +void vfio_pci_post_reset(VFIOPCIDevice *vdev); +bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name); +int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev, + struct vfio_pci_hot_reset_info **info_p); + int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp); int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev, diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c index 8e3d4ac4582..dcd2365fb35 100644 --- a/hw/vfio/platform.c +++ b/hw/vfio/platform.c @@ -15,11 +15,13 @@ */ #include "qemu/osdep.h" +#include CONFIG_DEVICES /* CONFIG_IOMMUFD */ #include "qapi/error.h" #include #include #include "hw/vfio/vfio-platform.h" +#include "sysemu/iommufd.h" #include "migration/vmstate.h" #include "qemu/error-report.h" #include "qemu/lockable.h" @@ -529,14 +531,13 @@ static VFIODeviceOps vfio_platform_ops = { */ static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp) { - struct stat st; int ret; - /* @sysfsdev takes precedence over @host */ - if (vbasedev->sysfsdev) { + /* @fd takes precedence over @sysfsdev which takes precedence over @host */ + if (vbasedev->fd < 0 && vbasedev->sysfsdev) { g_free(vbasedev->name); vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); - } else { + } else if (vbasedev->fd 
< 0) { if (!vbasedev->name || strchr(vbasedev->name, '/')) { error_setg(errp, "wrong host device name"); return -EINVAL; @@ -546,10 +547,9 @@ static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp) vbasedev->name); } - if (stat(vbasedev->sysfsdev, &st) < 0) { - error_setg_errno(errp, errno, - "failed to get the sysfs host device file status"); - return -errno; + ret = vfio_device_get_name(vbasedev, errp); + if (ret) { + return ret; } ret = vfio_attach_device(vbasedev->name, vbasedev, @@ -576,15 +576,12 @@ static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp) */ static void vfio_platform_realize(DeviceState *dev, Error **errp) { + ERRP_GUARD(); VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev); SysBusDevice *sbdev = SYS_BUS_DEVICE(dev); VFIODevice *vbasedev = &vdev->vbasedev; int i, ret; - vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM; - vbasedev->dev = dev; - vbasedev->ops = &vfio_platform_ops; - qemu_mutex_init(&vdev->intp_mutex); trace_vfio_platform_realize(vbasedev->sysfsdev ? @@ -649,9 +646,29 @@ static Property vfio_platform_dev_properties[] = { DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice, mmap_timeout, 1100), DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true), +#ifdef CONFIG_IOMMUFD + DEFINE_PROP_LINK("iommufd", VFIOPlatformDevice, vbasedev.iommufd, + TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), +#endif DEFINE_PROP_END_OF_LIST(), }; +static void vfio_platform_instance_init(Object *obj) +{ + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(obj); + VFIODevice *vbasedev = &vdev->vbasedev; + + vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PLATFORM, &vfio_platform_ops, + DEVICE(vdev), false); +} + +#ifdef CONFIG_IOMMUFD +static void vfio_platform_set_fd(Object *obj, const char *str, Error **errp) +{ + vfio_device_set_fd(&VFIO_PLATFORM_DEVICE(obj)->vbasedev, str, errp); +} +#endif + static void vfio_platform_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -659,6 +676,9 @@ static void vfio_platform_class_init(ObjectClass *klass, void *data) dc->realize = vfio_platform_realize; device_class_set_props(dc, vfio_platform_dev_properties); +#ifdef CONFIG_IOMMUFD + object_class_property_add_str(klass, "fd", NULL, vfio_platform_set_fd); +#endif dc->vmsd = &vfio_platform_vmstate; dc->desc = "VFIO-based platform device assignment"; sbc->connect_irq_notifier = vfio_start_irqfd_injection; @@ -671,6 +691,7 @@ static const TypeInfo vfio_platform_dev_info = { .name = TYPE_VFIO_PLATFORM, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(VFIOPlatformDevice), + .instance_init = vfio_platform_instance_init, .class_init = vfio_platform_class_init, .class_size = sizeof(VFIOPlatformDeviceClass), }; diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c index 83da2f7ec21..0d949bb7282 100644 --- a/hw/vfio/spapr.c +++ b/hw/vfio/spapr.c @@ -24,6 +24,12 @@ #include "qapi/error.h" #include "trace.h" +typedef struct VFIOSpaprContainer { + VFIOContainer container; + MemoryListener prereg_listener; + QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list; +} VFIOSpaprContainer; + static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section) { if (memory_region_is_iommu(section->mr)) { @@ -44,8 +50,10 @@ static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa) static void vfio_prereg_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, - prereg_listener); + VFIOSpaprContainer *scontainer = container_of(listener, 
VFIOSpaprContainer, + prereg_listener); + VFIOContainer *container = &scontainer->container; + VFIOContainerBase *bcontainer = &container->bcontainer; const hwaddr gpa = section->offset_within_address_space; hwaddr end; int ret; @@ -88,9 +96,9 @@ static void vfio_prereg_listener_region_add(MemoryListener *listener, * can gracefully fail. Runtime, there's not much we can do other * than throw a hardware error. */ - if (!container->initialized) { - if (!container->error) { - error_setg_errno(&container->error, -ret, + if (!bcontainer->initialized) { + if (!bcontainer->error) { + error_setg_errno(&bcontainer->error, -ret, "Memory registering failed"); } } else { @@ -102,8 +110,9 @@ static void vfio_prereg_listener_region_add(MemoryListener *listener, static void vfio_prereg_listener_region_del(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, - prereg_listener); + VFIOSpaprContainer *scontainer = container_of(listener, VFIOSpaprContainer, + prereg_listener); + VFIOContainer *container = &scontainer->container; const hwaddr gpa = section->offset_within_address_space; hwaddr end; int ret; @@ -146,12 +155,12 @@ static const MemoryListener vfio_prereg_listener = { .region_del = vfio_prereg_listener_region_del, }; -static void vfio_host_win_add(VFIOContainer *container, hwaddr min_iova, +static void vfio_host_win_add(VFIOSpaprContainer *scontainer, hwaddr min_iova, hwaddr max_iova, uint64_t iova_pgsizes) { VFIOHostDMAWindow *hostwin; - QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) { if (ranges_overlap(hostwin->min_iova, hostwin->max_iova - hostwin->min_iova + 1, min_iova, @@ -165,15 +174,15 @@ static void vfio_host_win_add(VFIOContainer *container, hwaddr min_iova, hostwin->min_iova = min_iova; hostwin->max_iova = max_iova; hostwin->iova_pgsizes = iova_pgsizes; - QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next); + QLIST_INSERT_HEAD(&scontainer->hostwin_list, hostwin, hostwin_next); } -static int vfio_host_win_del(VFIOContainer *container, +static int vfio_host_win_del(VFIOSpaprContainer *scontainer, hwaddr min_iova, hwaddr max_iova) { VFIOHostDMAWindow *hostwin; - QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) { if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) { QLIST_REMOVE(hostwin, hostwin_next); g_free(hostwin); @@ -184,7 +193,7 @@ static int vfio_host_win_del(VFIOContainer *container, return -1; } -static VFIOHostDMAWindow *vfio_find_hostwin(VFIOContainer *container, +static VFIOHostDMAWindow *vfio_find_hostwin(VFIOSpaprContainer *container, hwaddr iova, hwaddr end) { VFIOHostDMAWindow *hostwin; @@ -226,6 +235,7 @@ static int vfio_spapr_create_window(VFIOContainer *container, hwaddr *pgsize) { int ret = 0; + VFIOContainerBase *bcontainer = &container->bcontainer; IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr), pgmask; unsigned entries, bits_total, bits_per_level, max_levels; @@ -239,13 +249,13 @@ static int vfio_spapr_create_window(VFIOContainer *container, if (pagesize > rampagesize) { pagesize = rampagesize; } - pgmask = container->pgsizes & (pagesize | (pagesize - 1)); + pgmask = bcontainer->pgsizes & (pagesize | (pagesize - 1)); pagesize = pgmask ? 
(1ULL << (63 - clz64(pgmask))) : 0; if (!pagesize) { error_report("Host doesn't support page size 0x%"PRIx64 ", the supported mask is 0x%lx", memory_region_iommu_get_min_page_size(iommu_mr), - container->pgsizes); + bcontainer->pgsizes); return -EINVAL; } @@ -313,10 +323,15 @@ static int vfio_spapr_create_window(VFIOContainer *container, return 0; } -int vfio_container_add_section_window(VFIOContainer *container, - MemoryRegionSection *section, - Error **errp) +static int +vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, + Error **errp) { + VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); + VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, + container); VFIOHostDMAWindow *hostwin; hwaddr pgsize = 0; int ret; @@ -332,7 +347,7 @@ int vfio_container_add_section_window(VFIOContainer *container, iova = section->offset_within_address_space; end = iova + int128_get64(section->size) - 1; - if (!vfio_find_hostwin(container, iova, end)) { + if (!vfio_find_hostwin(scontainer, iova, end)) { error_setg(errp, "Container %p can't map guest IOVA region" " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end); @@ -346,7 +361,7 @@ int vfio_container_add_section_window(VFIOContainer *container, } /* For now intersections are not allowed, we may relax this later */ - QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) { if (ranges_overlap(hostwin->min_iova, hostwin->max_iova - hostwin->min_iova + 1, section->offset_within_address_space, @@ -368,7 +383,7 @@ int vfio_container_add_section_window(VFIOContainer *container, return ret; } - vfio_host_win_add(container, section->offset_within_address_space, + vfio_host_win_add(scontainer, section->offset_within_address_space, section->offset_within_address_space + int128_get64(section->size) - 1, pgsize); #ifdef CONFIG_KVM @@ -401,16 +416,22 @@ int vfio_container_add_section_window(VFIOContainer *container, return 0; } -void vfio_container_del_section_window(VFIOContainer *container, - MemoryRegionSection *section) +static void +vfio_spapr_container_del_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) { + VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); + VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, + container); + if (container->iommu_type != VFIO_SPAPR_TCE_v2_IOMMU) { return; } vfio_spapr_remove_window(container, section->offset_within_address_space); - if (vfio_host_win_del(container, + if (vfio_host_win_del(scontainer, section->offset_within_address_space, section->offset_within_address_space + int128_get64(section->size) - 1) < 0) { @@ -419,13 +440,36 @@ void vfio_container_del_section_window(VFIOContainer *container, } } -int vfio_spapr_container_init(VFIOContainer *container, Error **errp) +static void vfio_spapr_container_release(VFIOContainerBase *bcontainer) +{ + VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); + VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, + container); + VFIOHostDMAWindow *hostwin, *next; + + if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { + memory_listener_unregister(&scontainer->prereg_listener); + } + QLIST_FOREACH_SAFE(hostwin, &scontainer->hostwin_list, hostwin_next, + next) { + QLIST_REMOVE(hostwin, hostwin_next); + g_free(hostwin); + } +} + +static int 
vfio_spapr_container_setup(VFIOContainerBase *bcontainer, + Error **errp) { + VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); + VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, + container); struct vfio_iommu_spapr_tce_info info; bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU; int ret, fd = container->fd; - QLIST_INIT(&container->hostwin_list); + QLIST_INIT(&scontainer->hostwin_list); /* * The host kernel code implementing VFIO_IOMMU_DISABLE is called @@ -439,13 +483,13 @@ int vfio_spapr_container_init(VFIOContainer *container, Error **errp) return -errno; } } else { - container->prereg_listener = vfio_prereg_listener; + scontainer->prereg_listener = vfio_prereg_listener; - memory_listener_register(&container->prereg_listener, + memory_listener_register(&scontainer->prereg_listener, &address_space_memory); - if (container->error) { + if (bcontainer->error) { ret = -1; - error_propagate_prepend(errp, container->error, + error_propagate_prepend(errp, bcontainer->error, "RAM memory listener initialization failed: "); goto listener_unregister_exit; } @@ -461,7 +505,7 @@ int vfio_spapr_container_init(VFIOContainer *container, Error **errp) } if (v2) { - container->pgsizes = info.ddw.pgsizes; + bcontainer->pgsizes = info.ddw.pgsizes; /* * There is a default window in just created container. * To make region_add/del simpler, we better remove this @@ -476,8 +520,8 @@ int vfio_spapr_container_init(VFIOContainer *container, Error **errp) } } else { /* The default table uses 4K pages */ - container->pgsizes = 0x1000; - vfio_host_win_add(container, info.dma32_window_start, + bcontainer->pgsizes = 0x1000; + vfio_host_win_add(scontainer, info.dma32_window_start, info.dma32_window_start + info.dma32_window_size - 1, 0x1000); @@ -487,21 +531,27 @@ int vfio_spapr_container_init(VFIOContainer *container, Error **errp) listener_unregister_exit: if (v2) { - memory_listener_unregister(&container->prereg_listener); + memory_listener_unregister(&scontainer->prereg_listener); } return ret; } -void vfio_spapr_container_deinit(VFIOContainer *container) +static void vfio_iommu_spapr_class_init(ObjectClass *klass, void *data) { - VFIOHostDMAWindow *hostwin, *next; + VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); - if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { - memory_listener_unregister(&container->prereg_listener); - } - QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next, - next) { - QLIST_REMOVE(hostwin, hostwin_next); - g_free(hostwin); - } -} + vioc->add_window = vfio_spapr_container_add_section_window; + vioc->del_window = vfio_spapr_container_del_section_window; + vioc->release = vfio_spapr_container_release; + vioc->setup = vfio_spapr_container_setup; +}; + +static const TypeInfo types[] = { + { + .name = TYPE_VFIO_IOMMU_SPAPR, + .parent = TYPE_VFIO_IOMMU_LEGACY, + .class_init = vfio_iommu_spapr_class_init, + }, +}; + +DEFINE_TYPES(types) diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index 0eb2387cf24..f0474b244bf 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -116,8 +116,8 @@ vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Re vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries" vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]" vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, 
%08x/%08x" -vfio_dma_unmap_overflow_workaround(void) "" -vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64 +vfio_legacy_dma_unmap_overflow_workaround(void) "" +vfio_get_dirty_bitmap(uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64 vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64 # platform.c @@ -153,7 +153,7 @@ vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64 vfio_load_state_device_data(const char *name, uint64_t data_size, int ret) " (%s) size 0x%"PRIx64" ret %d" vfio_migration_realize(const char *name) " (%s)" vfio_migration_set_state(const char *name, const char *state) " (%s) state %s" -vfio_migration_state_notifier(const char *name, const char *state) " (%s) state %s" +vfio_migration_state_notifier(const char *name, int state) " (%s) state %d" vfio_save_block(const char *name, int data_size) " (%s) data_size %d" vfio_save_cleanup(const char *name) " (%s)" vfio_save_complete_precopy(const char *name, int ret) " (%s) ret %d" @@ -164,3 +164,14 @@ vfio_state_pending_estimate(const char *name, uint64_t precopy, uint64_t postcop vfio_state_pending_exact(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t stopcopy_size, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" stopcopy size 0x%"PRIx64" precopy initial size 0x%"PRIx64" precopy dirty size 0x%"PRIx64 vfio_vmstate_change(const char *name, int running, const char *reason, const char *dev_state) " (%s) running %d reason %s device state %s" vfio_vmstate_change_prepare(const char *name, int running, const char *reason, const char *dev_state) " (%s) running %d reason %s device state %s" + +#iommufd.c + +iommufd_cdev_connect_and_bind(int iommufd, const char *name, int devfd, int devid) " [iommufd=%d] Successfully bound device %s (fd=%d): output devid=%d" +iommufd_cdev_getfd(const char *dev, int devfd) " %s (fd=%d)" +iommufd_cdev_attach_ioas_hwpt(int iommufd, const char *name, int devfd, int id) " [iommufd=%d] Successfully attached device %s (%d) to id=%d" +iommufd_cdev_detach_ioas_hwpt(int iommufd, const char *name) " [iommufd=%d] Successfully detached %s" +iommufd_cdev_fail_attach_existing_container(const char *msg) " %s" +iommufd_cdev_alloc_ioas(int iommufd, int ioas_id) " [iommufd=%d] new IOMMUFD container with ioasid=%d" +iommufd_cdev_device_info(char *name, int devfd, int num_irqs, int num_regions, int flags) " %s (%d) num_irqs=%d num_regions=%d flags=%d" +iommufd_cdev_pci_hot_reset_dep_devices(int domain, int bus, int slot, int function, int dev_id) "\t%04x:%02x:%02x.%x devid %d" diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig index 92c9cf6c96c..aa63ff7fd41 100644 --- a/hw/virtio/Kconfig +++ b/hw/virtio/Kconfig @@ -101,6 +101,11 @@ config VHOST_VDPA_DEV default y depends on VIRTIO && VHOST_VDPA && LINUX +config VHOST_USER_SND + bool + default y + depends on VIRTIO && VHOST_USER + config VHOST_USER_SCMI bool default y diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build index c0055a78326..d7f18c96e60 100644 --- a/hw/virtio/meson.build +++ b/hw/virtio/meson.build @@ -17,8 +17,28 @@ if have_vhost if have_vhost_user # fixme - this really should be generic 
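The sPAPR conversion a few hunks above leans on the same embedding idiom as the iommufd code: VFIOSpaprContainer embeds a VFIOContainer, which embeds a VFIOContainerBase, and each outer layer is recovered from a pointer to its member with container_of(). A small stand-alone illustration of that round trip; the type names here are simplified stand-ins, not the QEMU definitions:

    #include <stddef.h>
    #include <stdio.h>

    /* Same idea as QEMU's container_of(): recover the enclosing struct
     * from a pointer to one of its members. */
    #define CONTAINER_OF(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    typedef struct Base   { int pgsizes; } Base;               /* like VFIOContainerBase  */
    typedef struct Legacy { Base base; int fd; } Legacy;       /* like VFIOContainer      */
    typedef struct Spapr  { Legacy legacy; int windows; } Spapr; /* like VFIOSpaprContainer */

    int main(void)
    {
        Spapr s = { .legacy = { .base = { .pgsizes = 4096 }, .fd = 7 },
                    .windows = 1 };
        Base *b = &s.legacy.base;   /* what the generic listener code is handed */

        /* Walk back out, one embedding level at a time, as spapr.c does. */
        Legacy *l = CONTAINER_OF(b, Legacy, base);
        Spapr *sc = CONTAINER_OF(l, Spapr, legacy);

        printf("pgsizes=%d fd=%d windows=%d\n", b->pgsizes, l->fd, sc->windows);
        return 0;
    }

Because the base struct is always the first member, the casts cost nothing at runtime; the macro only exists so the per-backend state stays invisible to the generic VFIOContainerBase code.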
specific_virtio_ss.add(files('vhost-user.c')) + system_virtio_ss.add(files('vhost-user-base.c')) + + # MMIO Stubs system_virtio_ss.add(files('vhost-user-device.c')) + system_virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c')) + system_virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c')) + system_virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c')) + system_virtio_ss.add(when: 'CONFIG_VHOST_USER_SND', if_true: files('vhost-user-snd.c')) + system_virtio_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input.c')) + + # PCI Stubs system_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('vhost-user-device-pci.c')) + system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], + if_true: files('vhost-user-gpio-pci.c')) + system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_I2C'], + if_true: files('vhost-user-i2c-pci.c')) + system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_RNG'], + if_true: files('vhost-user-rng-pci.c')) + system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SND'], + if_true: files('vhost-user-snd-pci.c')) + system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_INPUT'], + if_true: files('vhost-user-input-pci.c')) endif if have_vhost_vdpa system_virtio_ss.add(files('vhost-vdpa.c')) @@ -35,10 +55,6 @@ specific_virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c')) specific_virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c')) specific_virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c')) -specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c')) -specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c')) -specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c')) -specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], if_true: files('vhost-user-gpio-pci.c')) specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi.c')) specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SCMI'], if_true: files('vhost-user-scmi-pci.c')) @@ -46,9 +62,6 @@ virtio_pci_ss = ss.source_set() virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk-pci.c')) -virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c-pci.c')) -virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input-pci.c')) -virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_SCSI', if_true: files('vhost-user-scsi-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs-pci.c')) @@ -74,8 +87,7 @@ specific_virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss) system_ss.add_all(when: 'CONFIG_VIRTIO', if_true: system_virtio_ss) system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c')) system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('virtio-stub.c')) -system_ss.add(when: 'CONFIG_ALL', 
if_true: files('vhost-stub.c')) -system_ss.add(when: 'CONFIG_ALL', if_true: files('virtio-stub.c')) system_ss.add(files('virtio-hmp-cmds.c')) specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: specific_virtio_ss) +system_ss.add(when: 'CONFIG_ACPI', if_true: files('virtio-acpi.c')) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 637cac4edf0..96632fd0263 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -30,16 +30,17 @@ vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32"" vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p" # vhost-vdpa.c -vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8 -vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8 -vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 -vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 -vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64 +vhost_vdpa_skipped_memory_section(int is_ram, int is_iommu, int is_protected, int is_ram_device, uint64_t first, uint64_t last, int page_mask) "is_ram=%d, is_iommu=%d, is_protected=%d, is_ram_device=%d iova_min=0x%"PRIx64" iova_last=0x%"PRIx64" page_mask=0x%x" +vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8 +vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8 +vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 +vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 +vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64 vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d" -vhost_vdpa_listener_region_del_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64 +vhost_vdpa_listener_region_del_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64 vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64 
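The quoting in the trace format strings above relies on two plain C facts: adjacent string literals are concatenated at compile time, and PRIx64/PRIu64 from <inttypes.h> expand to the correct length modifier for 64-bit arguments. A minimal example of the same splicing, assuming nothing beyond standard C:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t iova = 0x100000000ULL;
        uint64_t llend = 0x1ffffffffULL;

        /* "iova 0x%" PRIx64 " llend 0x%" PRIx64 collapses into a single
         * format string, exactly like the trace-events lines above. */
        printf("vdpa: %p iova 0x%" PRIx64 " llend 0x%" PRIx64 "\n",
               (void *)0, iova, llend);
        return 0;
    }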
vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8 -vhost_vdpa_init(void *dev, void *vdpa) "dev: %p vdpa: %p" +vhost_vdpa_init(void *dev, void *s, void *vdpa) "dev: %p, common dev: %p vdpa: %p" vhost_vdpa_cleanup(void *dev, void *vdpa) "dev: %p vdpa: %p" vhost_vdpa_memslots_limit(void *dev, int ret) "dev: %p = 0x%x" vhost_vdpa_set_mem_table(void *dev, uint32_t nregions, uint32_t padding) "dev: %p nregions: %"PRIu32" padding: 0x%"PRIx32 @@ -48,7 +49,7 @@ vhost_vdpa_set_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRI vhost_vdpa_get_device_id(void *dev, uint32_t device_id) "dev: %p device_id %"PRIu32 vhost_vdpa_reset_device(void *dev) "dev: %p" vhost_vdpa_get_vq_index(void *dev, int idx, int vq_idx) "dev: %p idx: %d vq idx: %d" -vhost_vdpa_set_vring_ready(void *dev, unsigned i, int r) "dev: %p, idx: %u, r: %d" +vhost_vdpa_set_vring_enable_one(void *dev, unsigned i, int enable, int r) "dev: %p, idx: %u, enable: %u, r: %d" vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s" vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size, uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags: 0x%"PRIx32 vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len) "dev: %p config: %p config_len: %"PRIu32 @@ -57,8 +58,8 @@ vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d" vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size: %llu refcnt: %d fd: %d log: %p" vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags: 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64 vhost_vdpa_set_vring_num(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u" -vhost_vdpa_set_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u" -vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u" +vhost_vdpa_set_dev_vring_base(void *dev, unsigned int index, unsigned int num, bool svq) "dev: %p index: %u num: %u svq: %d" +vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int num, bool svq) "dev: %p index: %u num: %u svq: %d" vhost_vdpa_set_vring_kick(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d" vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d" vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64 @@ -111,7 +112,7 @@ virtio_iommu_device_reset(void) "reset!" virtio_iommu_system_reset(void) "system reset!" 
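The virtio_iommu_get_config format change just below switches the domain range fields from %d to %u; the difference only shows once an unsigned 32-bit id has its top bit set, at which point %d would render it as a negative number. A two-line illustration, assuming a platform where int is 32 bits:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t domain_end = 0xffffffffu;   /* e.g. UINT32_MAX as a domain id */

        /* Reinterpreted as a signed int this typically prints -1 (the exact
         * result of the cast is implementation-defined); %u keeps it intact. */
        printf("as %%d: %d\n", (int)domain_end);
        printf("as %%u: %u\n", domain_end);
        return 0;
    }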
virtio_iommu_get_features(uint64_t features) "device supports features=0x%"PRIx64 virtio_iommu_device_status(uint8_t status) "driver status = %d" -virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_start, uint32_t domain_end, uint32_t probe_size, uint8_t bypass) "page_size_mask=0x%"PRIx64" input range start=0x%"PRIx64" input range end=0x%"PRIx64" domain range start=%d domain range end=%d probe_size=0x%x bypass=0x%x" +virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_start, uint32_t domain_end, uint32_t probe_size, uint8_t bypass) "page_size_mask=0x%"PRIx64" input range start=0x%"PRIx64" input range end=0x%"PRIx64" domain range start=%u domain range end=%u probe_size=0x%x bypass=0x%x" virtio_iommu_set_config(uint8_t bypass) "bypass=0x%x" virtio_iommu_attach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d" virtio_iommu_detach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d" diff --git a/hw/virtio/vdpa-dev.c b/hw/virtio/vdpa-dev.c index f22d5d5bc0a..64b96b226c3 100644 --- a/hw/virtio/vdpa-dev.c +++ b/hw/virtio/vdpa-dev.c @@ -66,7 +66,6 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp) if (*errp) { return; } - v->vdpa.device_fd = v->vhostfd; v->vdev_id = vhost_vdpa_device_get_u32(v->vhostfd, VHOST_VDPA_GET_DEVICE_ID, errp); @@ -114,7 +113,9 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp) strerror(-ret)); goto free_vqs; } - v->vdpa.iova_range = iova_range; + v->vdpa.shared = g_new0(VhostVDPAShared, 1); + v->vdpa.shared->device_fd = v->vhostfd; + v->vdpa.shared->iova_range = iova_range; ret = vhost_dev_init(&v->dev, &v->vdpa, VHOST_BACKEND_TYPE_VDPA, 0, NULL); if (ret < 0) { @@ -162,6 +163,7 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp) vhost_dev_cleanup(&v->dev); free_vqs: g_free(vqs); + g_free(v->vdpa.shared); out: qemu_close(v->vhostfd); v->vhostfd = -1; @@ -184,6 +186,7 @@ static void vhost_vdpa_device_unrealize(DeviceState *dev) g_free(s->config); g_free(s->dev.vqs); vhost_dev_cleanup(&s->dev); + g_free(s->vdpa.shared); qemu_close(s->vhostfd); s->vhostfd = -1; } @@ -192,7 +195,14 @@ static void vhost_vdpa_device_get_config(VirtIODevice *vdev, uint8_t *config) { VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev); + int ret; + ret = vhost_dev_get_config(&s->dev, s->config, s->config_size, + NULL); + if (ret < 0) { + error_report("get device config space failed"); + return; + } memcpy(config, s->config, s->config_size); } @@ -250,14 +260,11 @@ static int vhost_vdpa_device_start(VirtIODevice *vdev, Error **errp) s->dev.acked_features = vdev->guest_features; - ret = vhost_dev_start(&s->dev, vdev, false); + ret = vhost_dev_start(&s->dev, vdev, true); if (ret < 0) { error_setg_errno(errp, -ret, "Error starting vhost"); goto err_guest_notifiers; } - for (i = 0; i < s->dev.nvqs; ++i) { - vhost_vdpa_set_vring_ready(&s->vdpa, i); - } s->started = true; /* @@ -341,7 +348,7 @@ static const VMStateDescription vmstate_vhost_vdpa_device = { .unmigratable = 1, .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c index 17f3fc6a082..833804dd40f 100644 --- a/hw/virtio/vhost-backend.c +++ b/hw/virtio/vhost-backend.c @@ -158,6 +158,30 @@ static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev, return vhost_kernel_call(dev, 
VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s); } +static int vhost_kernel_new_worker(struct vhost_dev *dev, + struct vhost_worker_state *worker) +{ + return vhost_kernel_call(dev, VHOST_NEW_WORKER, worker); +} + +static int vhost_kernel_free_worker(struct vhost_dev *dev, + struct vhost_worker_state *worker) +{ + return vhost_kernel_call(dev, VHOST_FREE_WORKER, worker); +} + +static int vhost_kernel_attach_vring_worker(struct vhost_dev *dev, + struct vhost_vring_worker *worker) +{ + return vhost_kernel_call(dev, VHOST_ATTACH_VRING_WORKER, worker); +} + +static int vhost_kernel_get_vring_worker(struct vhost_dev *dev, + struct vhost_vring_worker *worker) +{ + return vhost_kernel_call(dev, VHOST_GET_VRING_WORKER, worker); +} + static int vhost_kernel_set_features(struct vhost_dev *dev, uint64_t features) { @@ -313,6 +337,10 @@ const VhostOps kernel_ops = { .vhost_set_vring_err = vhost_kernel_set_vring_err, .vhost_set_vring_busyloop_timeout = vhost_kernel_set_vring_busyloop_timeout, + .vhost_get_vring_worker = vhost_kernel_get_vring_worker, + .vhost_attach_vring_worker = vhost_kernel_attach_vring_worker, + .vhost_new_worker = vhost_kernel_new_worker, + .vhost_free_worker = vhost_kernel_free_worker, .vhost_set_features = vhost_kernel_set_features, .vhost_get_features = vhost_kernel_get_features, .vhost_set_backend_cap = vhost_kernel_set_backend_cap, diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c new file mode 100644 index 00000000000..a83167191ee --- /dev/null +++ b/hw/virtio/vhost-user-base.c @@ -0,0 +1,371 @@ +/* + * Base vhost-user-base implementation. This can be used to derive a + * more fully specified vhost-user backend either generically (see + * vhost-user-device) or via a specific stub for a device which + * encapsulates some fixed parameters. + * + * Copyright (c) 2023 Linaro Ltd + * Author: Alex Bennée + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/vhost-user-base.h" +#include "qemu/error-report.h" + +static void vub_start(VirtIODevice *vdev) +{ + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + VHostUserBase *vub = VHOST_USER_BASE(vdev); + int ret, i; + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return; + } + + ret = vhost_dev_enable_notifiers(&vub->vhost_dev, vdev); + if (ret < 0) { + error_report("Error enabling host notifiers: %d", -ret); + return; + } + + ret = k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, true); + if (ret < 0) { + error_report("Error binding guest notifier: %d", -ret); + goto err_host_notifiers; + } + + vub->vhost_dev.acked_features = vdev->guest_features; + + ret = vhost_dev_start(&vub->vhost_dev, vdev, true); + if (ret < 0) { + error_report("Error starting vhost-user-base: %d", -ret); + goto err_guest_notifiers; + } + + /* + * guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. 
+ */ + for (i = 0; i < vub->vhost_dev.nvqs; i++) { + vhost_virtqueue_mask(&vub->vhost_dev, vdev, i, false); + } + + return; + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&vub->vhost_dev, vdev); +} + +static void vub_stop(VirtIODevice *vdev) +{ + VHostUserBase *vub = VHOST_USER_BASE(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + + if (!k->set_guest_notifiers) { + return; + } + + vhost_dev_stop(&vub->vhost_dev, vdev, true); + + ret = k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(&vub->vhost_dev, vdev); +} + +static void vub_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostUserBase *vub = VHOST_USER_BASE(vdev); + bool should_start = virtio_device_should_start(vdev, status); + + if (vhost_dev_is_started(&vub->vhost_dev) == should_start) { + return; + } + + if (should_start) { + vub_start(vdev); + } else { + vub_stop(vdev); + } +} + +/* + * For an implementation where everything is delegated to the backend + * we don't do anything other than return the full feature set offered + * by the daemon (module the reserved feature bit). + */ +static uint64_t vub_get_features(VirtIODevice *vdev, + uint64_t requested_features, Error **errp) +{ + VHostUserBase *vub = VHOST_USER_BASE(vdev); + /* This should be set when the vhost connection initialises */ + g_assert(vub->vhost_dev.features); + return vub->vhost_dev.features & ~(1ULL << VHOST_USER_F_PROTOCOL_FEATURES); +} + +/* + * To handle VirtIO config we need to know the size of the config + * space. We don't cache the config but re-fetch it from the guest + * every time in case something has changed. + */ +static void vub_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VHostUserBase *vub = VHOST_USER_BASE(vdev); + Error *local_err = NULL; + + /* + * There will have been a warning during vhost_dev_init, but lets + * assert here as nothing will go right now. + */ + g_assert(vub->config_size && vub->vhost_user.supports_config == true); + + if (vhost_dev_get_config(&vub->vhost_dev, config, + vub->config_size, &local_err)) { + error_report_err(local_err); + } +} + +static void vub_set_config(VirtIODevice *vdev, const uint8_t *config_data) +{ + VHostUserBase *vub = VHOST_USER_BASE(vdev); + int ret; + + g_assert(vub->config_size && vub->vhost_user.supports_config == true); + + ret = vhost_dev_set_config(&vub->vhost_dev, config_data, + 0, vub->config_size, + VHOST_SET_CONFIG_TYPE_FRONTEND); + if (ret) { + error_report("vhost guest set device config space failed: %d", ret); + return; + } +} + +/* + * When the daemon signals an update to the config we just need to + * signal the guest as we re-read the config on demand above. + */ +static int vub_config_notifier(struct vhost_dev *dev) +{ + virtio_notify_config(dev->vdev); + return 0; +} + +const VhostDevConfigOps vub_config_ops = { + .vhost_dev_config_notifier = vub_config_notifier, +}; + +static void vub_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + /* + * Not normally called; it's the daemon that handles the queue; + * however virtio's cleanup path can call this. 
+ */ +} + +static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserBase *vub) +{ + vhost_user_cleanup(&vub->vhost_user); + + for (int i = 0; i < vub->num_vqs; i++) { + VirtQueue *vq = g_ptr_array_index(vub->vqs, i); + virtio_delete_queue(vq); + } + + virtio_cleanup(vdev); +} + +static int vub_connect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBase *vub = VHOST_USER_BASE(vdev); + struct vhost_dev *vhost_dev = &vub->vhost_dev; + + if (vub->connected) { + return 0; + } + vub->connected = true; + + /* + * If we support VHOST_USER_GET_CONFIG we must enable the notifier + * so we can ping the guest when it updates. + */ + if (vub->vhost_user.supports_config) { + vhost_dev_set_config_notifier(vhost_dev, &vub_config_ops); + } + + /* restore vhost state */ + if (virtio_device_started(vdev, vdev->status)) { + vub_start(vdev); + } + + return 0; +} + +static void vub_event(void *opaque, QEMUChrEvent event); + +static void vub_disconnect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBase *vub = VHOST_USER_BASE(vdev); + + if (!vub->connected) { + return; + } + vub->connected = false; + + vub_stop(vdev); + vhost_dev_cleanup(&vub->vhost_dev); + + /* Re-instate the event handler for new connections */ + qemu_chr_fe_set_handlers(&vub->chardev, + NULL, NULL, vub_event, + NULL, dev, NULL, true); +} + +static void vub_event(void *opaque, QEMUChrEvent event) +{ + DeviceState *dev = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBase *vub = VHOST_USER_BASE(vdev); + + switch (event) { + case CHR_EVENT_OPENED: + if (vub_connect(dev) < 0) { + qemu_chr_fe_disconnect(&vub->chardev); + return; + } + break; + case CHR_EVENT_CLOSED: + /* defer close until later to avoid circular close */ + vhost_user_async_close(dev, &vub->chardev, &vub->vhost_dev, + vub_disconnect, vub_event); + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static void vub_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBase *vub = VHOST_USER_BASE(dev); + int ret; + + if (!vub->chardev.chr) { + error_setg(errp, "vhost-user-base: missing chardev"); + return; + } + + if (!vub->virtio_id) { + error_setg(errp, "vhost-user-base: need to define device id"); + return; + } + + if (!vub->num_vqs) { + vub->num_vqs = 1; /* reasonable default? */ + } + + if (!vub->vq_size) { + vub->vq_size = 64; + } + + /* + * We can't handle config requests unless we know the size of the + * config region, specialisations of the vhost-user-base will be + * able to set this. + */ + if (vub->config_size) { + vub->vhost_user.supports_config = true; + } + + if (!vhost_user_init(&vub->vhost_user, &vub->chardev, errp)) { + return; + } + + virtio_init(vdev, vub->virtio_id, vub->config_size); + + /* + * Disable guest notifiers, by default all notifications will be via the + * asynchronous vhost-user socket. 
+ */ + vdev->use_guest_notifier_mask = false; + + /* Allocate queues */ + vub->vqs = g_ptr_array_sized_new(vub->num_vqs); + for (int i = 0; i < vub->num_vqs; i++) { + g_ptr_array_add(vub->vqs, + virtio_add_queue(vdev, vub->vq_size, + vub_handle_output)); + } + + vub->vhost_dev.nvqs = vub->num_vqs; + vub->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vub->vhost_dev.nvqs); + + /* connect to backend */ + ret = vhost_dev_init(&vub->vhost_dev, &vub->vhost_user, + VHOST_BACKEND_TYPE_USER, 0, errp); + + if (ret < 0) { + do_vhost_user_cleanup(vdev, vub); + } + + qemu_chr_fe_set_handlers(&vub->chardev, NULL, NULL, vub_event, NULL, + dev, NULL, true); +} + +static void vub_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBase *vub = VHOST_USER_BASE(dev); + struct vhost_virtqueue *vhost_vqs = vub->vhost_dev.vqs; + + /* This will stop vhost backend if appropriate. */ + vub_set_status(vdev, 0); + vhost_dev_cleanup(&vub->vhost_dev); + g_free(vhost_vqs); + do_vhost_user_cleanup(vdev, vub); +} + +static void vub_class_init(ObjectClass *klass, void *data) +{ + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + vdc->realize = vub_device_realize; + vdc->unrealize = vub_device_unrealize; + vdc->get_features = vub_get_features; + vdc->get_config = vub_get_config; + vdc->set_config = vub_set_config; + vdc->set_status = vub_set_status; +} + +static const TypeInfo vub_types[] = { + { + .name = TYPE_VHOST_USER_BASE, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VHostUserBase), + .class_init = vub_class_init, + .class_size = sizeof(VHostUserBaseClass), + .abstract = true + } +}; + +DEFINE_TYPES(vub_types) diff --git a/hw/virtio/vhost-user-device-pci.c b/hw/virtio/vhost-user-device-pci.c index 41f9b7905b4..efaf55d3dd4 100644 --- a/hw/virtio/vhost-user-device-pci.c +++ b/hw/virtio/vhost-user-device-pci.c @@ -9,21 +9,18 @@ #include "qemu/osdep.h" #include "hw/qdev-properties.h" -#include "hw/virtio/vhost-user-device.h" +#include "hw/virtio/vhost-user-base.h" #include "hw/virtio/virtio-pci.h" struct VHostUserDevicePCI { VirtIOPCIProxy parent_obj; + VHostUserBase vub; }; -typedef struct VHostUserDevicePCI VHostUserDevicePCI; - #define TYPE_VHOST_USER_DEVICE_PCI "vhost-user-device-pci-base" -DECLARE_INSTANCE_CHECKER(VHostUserDevicePCI, - VHOST_USER_DEVICE_PCI, - TYPE_VHOST_USER_DEVICE_PCI) +OBJECT_DECLARE_SIMPLE_TYPE(VHostUserDevicePCI, VHOST_USER_DEVICE_PCI) static void vhost_user_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { @@ -39,6 +36,10 @@ static void vhost_user_device_pci_class_init(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + /* Reason: stop users confusing themselves */ + dc->user_creatable = false; + k->realize = vhost_user_device_pci_realize; set_bit(DEVICE_CATEGORY_INPUT, dc->categories); pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; diff --git a/hw/virtio/vhost-user-device.c b/hw/virtio/vhost-user-device.c index 2b028cae08d..67aa934710d 100644 --- a/hw/virtio/vhost-user-device.c +++ b/hw/virtio/vhost-user-device.c @@ -1,7 +1,10 @@ /* - * Generic vhost-user stub. This can be used to connect to any - * vhost-user backend. All configuration details must be handled by - * the vhost-user daemon itself + * Generic vhost-user-device implementation for any vhost-user-backend + * + * This is a concrete implementation of vhost-user-base which can be + * configured via properties. 
It is useful for development and + * prototyping. It expects configuration details (if any) to be + * handled by the vhost-user daemon itself. * * Copyright (c) 2023 Linaro Ltd * Author: Alex Bennée @@ -13,329 +16,9 @@ #include "qapi/error.h" #include "hw/qdev-properties.h" #include "hw/virtio/virtio-bus.h" -#include "hw/virtio/vhost-user-device.h" +#include "hw/virtio/vhost-user-base.h" #include "qemu/error-report.h" -static void vub_start(VirtIODevice *vdev) -{ - BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - VHostUserBase *vub = VHOST_USER_BASE(vdev); - int ret, i; - - if (!k->set_guest_notifiers) { - error_report("binding does not support guest notifiers"); - return; - } - - ret = vhost_dev_enable_notifiers(&vub->vhost_dev, vdev); - if (ret < 0) { - error_report("Error enabling host notifiers: %d", -ret); - return; - } - - ret = k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, true); - if (ret < 0) { - error_report("Error binding guest notifier: %d", -ret); - goto err_host_notifiers; - } - - vub->vhost_dev.acked_features = vdev->guest_features; - - ret = vhost_dev_start(&vub->vhost_dev, vdev, true); - if (ret < 0) { - error_report("Error starting vhost-user-device: %d", -ret); - goto err_guest_notifiers; - } - - /* - * guest_notifier_mask/pending not used yet, so just unmask - * everything here. virtio-pci will do the right thing by - * enabling/disabling irqfd. - */ - for (i = 0; i < vub->vhost_dev.nvqs; i++) { - vhost_virtqueue_mask(&vub->vhost_dev, vdev, i, false); - } - - return; - -err_guest_notifiers: - k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false); -err_host_notifiers: - vhost_dev_disable_notifiers(&vub->vhost_dev, vdev); -} - -static void vub_stop(VirtIODevice *vdev) -{ - VHostUserBase *vub = VHOST_USER_BASE(vdev); - BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - int ret; - - if (!k->set_guest_notifiers) { - return; - } - - vhost_dev_stop(&vub->vhost_dev, vdev, true); - - ret = k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false); - if (ret < 0) { - error_report("vhost guest notifier cleanup failed: %d", ret); - return; - } - - vhost_dev_disable_notifiers(&vub->vhost_dev, vdev); -} - -static void vub_set_status(VirtIODevice *vdev, uint8_t status) -{ - VHostUserBase *vub = VHOST_USER_BASE(vdev); - bool should_start = virtio_device_should_start(vdev, status); - - if (vhost_dev_is_started(&vub->vhost_dev) == should_start) { - return; - } - - if (should_start) { - vub_start(vdev); - } else { - vub_stop(vdev); - } -} - -/* - * For an implementation where everything is delegated to the backend - * we don't do anything other than return the full feature set offered - * by the daemon (module the reserved feature bit). - */ -static uint64_t vub_get_features(VirtIODevice *vdev, - uint64_t requested_features, Error **errp) -{ - VHostUserBase *vub = VHOST_USER_BASE(vdev); - /* This should be set when the vhost connection initialises */ - g_assert(vub->vhost_dev.features); - return vub->vhost_dev.features & ~(1ULL << VHOST_USER_F_PROTOCOL_FEATURES); -} - -/* - * To handle VirtIO config we need to know the size of the config - * space. We don't cache the config but re-fetch it from the guest - * every time in case something has changed. 
- */ -static void vub_get_config(VirtIODevice *vdev, uint8_t *config) -{ - VHostUserBase *vub = VHOST_USER_BASE(vdev); - Error *local_err = NULL; - - /* - * There will have been a warning during vhost_dev_init, but lets - * assert here as nothing will go right now. - */ - g_assert(vub->config_size && vub->vhost_user.supports_config == true); - - if (vhost_dev_get_config(&vub->vhost_dev, config, - vub->config_size, &local_err)) { - error_report_err(local_err); - } -} - -/* - * When the daemon signals an update to the config we just need to - * signal the guest as we re-read the config on demand above. - */ -static int vub_config_notifier(struct vhost_dev *dev) -{ - virtio_notify_config(dev->vdev); - return 0; -} - -const VhostDevConfigOps vub_config_ops = { - .vhost_dev_config_notifier = vub_config_notifier, -}; - -static void vub_handle_output(VirtIODevice *vdev, VirtQueue *vq) -{ - /* - * Not normally called; it's the daemon that handles the queue; - * however virtio's cleanup path can call this. - */ -} - -static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserBase *vub) -{ - vhost_user_cleanup(&vub->vhost_user); - - for (int i = 0; i < vub->num_vqs; i++) { - VirtQueue *vq = g_ptr_array_index(vub->vqs, i); - virtio_delete_queue(vq); - } - - virtio_cleanup(vdev); -} - -static int vub_connect(DeviceState *dev) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserBase *vub = VHOST_USER_BASE(vdev); - struct vhost_dev *vhost_dev = &vub->vhost_dev; - - if (vub->connected) { - return 0; - } - vub->connected = true; - - /* - * If we support VHOST_USER_GET_CONFIG we must enable the notifier - * so we can ping the guest when it updates. - */ - if (vub->vhost_user.supports_config) { - vhost_dev_set_config_notifier(vhost_dev, &vub_config_ops); - } - - /* restore vhost state */ - if (virtio_device_started(vdev, vdev->status)) { - vub_start(vdev); - } - - return 0; -} - -static void vub_disconnect(DeviceState *dev) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserBase *vub = VHOST_USER_BASE(vdev); - - if (!vub->connected) { - return; - } - vub->connected = false; - - if (vhost_dev_is_started(&vub->vhost_dev)) { - vub_stop(vdev); - } -} - -static void vub_event(void *opaque, QEMUChrEvent event) -{ - DeviceState *dev = opaque; - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserBase *vub = VHOST_USER_BASE(vdev); - - switch (event) { - case CHR_EVENT_OPENED: - if (vub_connect(dev) < 0) { - qemu_chr_fe_disconnect(&vub->chardev); - return; - } - break; - case CHR_EVENT_CLOSED: - vub_disconnect(dev); - break; - case CHR_EVENT_BREAK: - case CHR_EVENT_MUX_IN: - case CHR_EVENT_MUX_OUT: - /* Ignore */ - break; - } -} - -static void vub_device_realize(DeviceState *dev, Error **errp) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserBase *vub = VHOST_USER_BASE(dev); - int ret; - - if (!vub->chardev.chr) { - error_setg(errp, "vhost-user-device: missing chardev"); - return; - } - - if (!vub->virtio_id) { - error_setg(errp, "vhost-user-device: need to define device id"); - return; - } - - if (!vub->num_vqs) { - vub->num_vqs = 1; /* reasonable default? */ - } - - /* - * We can't handle config requests unless we know the size of the - * config region, specialisations of the vhost-user-device will be - * able to set this. 
- */ - if (vub->config_size) { - vub->vhost_user.supports_config = true; - } - - if (!vhost_user_init(&vub->vhost_user, &vub->chardev, errp)) { - return; - } - - virtio_init(vdev, vub->virtio_id, vub->config_size); - - /* - * Disable guest notifiers, by default all notifications will be via the - * asynchronous vhost-user socket. - */ - vdev->use_guest_notifier_mask = false; - - /* Allocate queues */ - vub->vqs = g_ptr_array_sized_new(vub->num_vqs); - for (int i = 0; i < vub->num_vqs; i++) { - g_ptr_array_add(vub->vqs, - virtio_add_queue(vdev, 4, vub_handle_output)); - } - - vub->vhost_dev.nvqs = vub->num_vqs; - vub->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vub->vhost_dev.nvqs); - - /* connect to backend */ - ret = vhost_dev_init(&vub->vhost_dev, &vub->vhost_user, - VHOST_BACKEND_TYPE_USER, 0, errp); - - if (ret < 0) { - do_vhost_user_cleanup(vdev, vub); - } - - qemu_chr_fe_set_handlers(&vub->chardev, NULL, NULL, vub_event, NULL, - dev, NULL, true); -} - -static void vub_device_unrealize(DeviceState *dev) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserBase *vub = VHOST_USER_BASE(dev); - struct vhost_virtqueue *vhost_vqs = vub->vhost_dev.vqs; - - /* This will stop vhost backend if appropriate. */ - vub_set_status(vdev, 0); - vhost_dev_cleanup(&vub->vhost_dev); - g_free(vhost_vqs); - do_vhost_user_cleanup(vdev, vub); -} - -static void vub_class_init(ObjectClass *klass, void *data) -{ - VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); - - vdc->realize = vub_device_realize; - vdc->unrealize = vub_device_unrealize; - vdc->get_features = vub_get_features; - vdc->get_config = vub_get_config; - vdc->set_status = vub_set_status; -} - -static const TypeInfo vub_info = { - .name = TYPE_VHOST_USER_BASE, - .parent = TYPE_VIRTIO_DEVICE, - .instance_size = sizeof(VHostUserBase), - .class_init = vub_class_init, - .class_size = sizeof(VHostUserBaseClass), - .abstract = true -}; - - /* * The following is a concrete implementation of the base class which * allows the user to define the key parameters via the command line. 
@@ -349,6 +32,7 @@ static const VMStateDescription vud_vmstate = { static Property vud_properties[] = { DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), DEFINE_PROP_UINT16("virtio-id", VHostUserBase, virtio_id, 0), + DEFINE_PROP_UINT32("vq_size", VHostUserBase, vq_size, 64), DEFINE_PROP_UINT32("num_vqs", VHostUserBase, num_vqs, 1), DEFINE_PROP_UINT32("config_size", VHostUserBase, config_size, 0), DEFINE_PROP_END_OF_LIST(), @@ -358,6 +42,9 @@ static void vud_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + /* Reason: stop inexperienced users confusing themselves */ + dc->user_creatable = false; + device_class_set_props(dc, vud_properties); dc->vmsd = &vud_vmstate; set_bit(DEVICE_CATEGORY_INPUT, dc->categories); @@ -366,14 +53,11 @@ static void vud_class_init(ObjectClass *klass, void *data) static const TypeInfo vud_info = { .name = TYPE_VHOST_USER_DEVICE, .parent = TYPE_VHOST_USER_BASE, - .instance_size = sizeof(VHostUserBase), .class_init = vud_class_init, - .class_size = sizeof(VHostUserBaseClass), }; static void vu_register_types(void) { - type_register_static(&vub_info); type_register_static(&vud_info); } diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c index eb91723855b..cca2cd41be2 100644 --- a/hw/virtio/vhost-user-fs.c +++ b/hw/virtio/vhost-user-fs.c @@ -373,11 +373,11 @@ static const VMStateDescription vuf_backend_vmstate; static const VMStateDescription vuf_vmstate = { .name = "vhost-user-fs", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vuf_backend_vmstate, NULL, } @@ -389,7 +389,7 @@ static const VMStateDescription vuf_backend_vmstate = { .needed = vuf_is_internal_migration, .pre_load = vuf_check_migration_support, .pre_save = vuf_check_migration_support, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { { .name = "back-end", .info = &(const VMStateInfo) { diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c index a83437a5da3..9f37c254159 100644 --- a/hw/virtio/vhost-user-gpio.c +++ b/hw/virtio/vhost-user-gpio.c @@ -11,388 +11,25 @@ #include "hw/qdev-properties.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/vhost-user-gpio.h" -#include "qemu/error-report.h" #include "standard-headers/linux/virtio_ids.h" -#include "trace.h" +#include "standard-headers/linux/virtio_gpio.h" -#define VHOST_NVQS 2 - -/* Features required from VirtIO */ -static const int feature_bits[] = { - VIRTIO_F_VERSION_1, - VIRTIO_F_NOTIFY_ON_EMPTY, - VIRTIO_RING_F_INDIRECT_DESC, - VIRTIO_RING_F_EVENT_IDX, - VIRTIO_GPIO_F_IRQ, - VIRTIO_F_RING_RESET, - VHOST_INVALID_FEATURE_BIT +static Property vgpio_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), + DEFINE_PROP_END_OF_LIST(), }; -static void vu_gpio_get_config(VirtIODevice *vdev, uint8_t *config) +static void vgpio_realize(DeviceState *dev, Error **errp) { - VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + VHostUserBase *vub = VHOST_USER_BASE(dev); + VHostUserBaseClass *vubc = VHOST_USER_BASE_GET_CLASS(dev); - memcpy(config, &gpio->config, sizeof(gpio->config)); -} + /* Fixed for GPIO */ + vub->virtio_id = VIRTIO_ID_GPIO; + vub->num_vqs = 2; + vub->config_size = sizeof(struct virtio_gpio_config); -static int vu_gpio_config_notifier(struct vhost_dev *dev) -{ - VHostUserGPIO *gpio = VHOST_USER_GPIO(dev->vdev); - - memcpy(dev->vdev->config, 
&gpio->config, sizeof(gpio->config)); - virtio_notify_config(dev->vdev); - - return 0; -} - -const VhostDevConfigOps gpio_ops = { - .vhost_dev_config_notifier = vu_gpio_config_notifier, -}; - -static int vu_gpio_start(VirtIODevice *vdev) -{ - BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); - struct vhost_dev *vhost_dev = &gpio->vhost_dev; - int ret, i; - - if (!k->set_guest_notifiers) { - error_report("binding does not support guest notifiers"); - return -ENOSYS; - } - - ret = vhost_dev_enable_notifiers(vhost_dev, vdev); - if (ret < 0) { - error_report("Error enabling host notifiers: %d", ret); - return ret; - } - - ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, true); - if (ret < 0) { - error_report("Error binding guest notifier: %d", ret); - goto err_host_notifiers; - } - - /* - * Before we start up we need to ensure we have the final feature - * set needed for the vhost configuration. The backend may also - * apply backend_features when the feature set is sent. - */ - vhost_ack_features(&gpio->vhost_dev, feature_bits, vdev->guest_features); - - ret = vhost_dev_start(&gpio->vhost_dev, vdev, false); - if (ret < 0) { - error_report("Error starting vhost-user-gpio: %d", ret); - goto err_guest_notifiers; - } - gpio->started_vu = true; - - /* - * guest_notifier_mask/pending not used yet, so just unmask - * everything here. virtio-pci will do the right thing by - * enabling/disabling irqfd. - */ - for (i = 0; i < gpio->vhost_dev.nvqs; i++) { - vhost_virtqueue_mask(&gpio->vhost_dev, vdev, i, false); - } - - /* - * As we must have VHOST_USER_F_PROTOCOL_FEATURES (because - * VHOST_USER_GET_CONFIG requires it) we need to explicitly enable - * the vrings. 
- */ - g_assert(vhost_dev->vhost_ops && - vhost_dev->vhost_ops->vhost_set_vring_enable); - ret = vhost_dev->vhost_ops->vhost_set_vring_enable(vhost_dev, true); - if (ret == 0) { - return 0; - } - - error_report("Failed to start vrings for vhost-user-gpio: %d", ret); - -err_guest_notifiers: - k->set_guest_notifiers(qbus->parent, gpio->vhost_dev.nvqs, false); -err_host_notifiers: - vhost_dev_disable_notifiers(&gpio->vhost_dev, vdev); - - return ret; -} - -static void vu_gpio_stop(VirtIODevice *vdev) -{ - VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); - BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - struct vhost_dev *vhost_dev = &gpio->vhost_dev; - int ret; - - if (!gpio->started_vu) { - return; - } - gpio->started_vu = false; - - if (!k->set_guest_notifiers) { - return; - } - - vhost_dev_stop(vhost_dev, vdev, false); - - ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false); - if (ret < 0) { - error_report("vhost guest notifier cleanup failed: %d", ret); - return; - } - - vhost_dev_disable_notifiers(vhost_dev, vdev); -} - -static void vu_gpio_set_status(VirtIODevice *vdev, uint8_t status) -{ - VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); - bool should_start = virtio_device_should_start(vdev, status); - - trace_virtio_gpio_set_status(status); - - if (!gpio->connected) { - return; - } - - if (vhost_dev_is_started(&gpio->vhost_dev) == should_start) { - return; - } - - if (should_start) { - if (vu_gpio_start(vdev)) { - qemu_chr_fe_disconnect(&gpio->chardev); - } - } else { - vu_gpio_stop(vdev); - } -} - -static uint64_t vu_gpio_get_features(VirtIODevice *vdev, uint64_t features, - Error **errp) -{ - VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); - - return vhost_get_features(&gpio->vhost_dev, feature_bits, features); -} - -static void vu_gpio_handle_output(VirtIODevice *vdev, VirtQueue *vq) -{ - /* - * Not normally called; it's the daemon that handles the queue; - * however virtio's cleanup path can call this. 
- */ -} - -static void vu_gpio_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) -{ - VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); - - /* - * Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1 - * as the macro of configure interrupt's IDX, If this driver does not - * support, the function will return - */ - - if (idx == VIRTIO_CONFIG_IRQ_IDX) { - return; - } - - vhost_virtqueue_mask(&gpio->vhost_dev, vdev, idx, mask); -} - -static struct vhost_dev *vu_gpio_get_vhost(VirtIODevice *vdev) -{ - VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); - return &gpio->vhost_dev; -} - -static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserGPIO *gpio) -{ - virtio_delete_queue(gpio->command_vq); - virtio_delete_queue(gpio->interrupt_vq); - g_free(gpio->vhost_vqs); - virtio_cleanup(vdev); - vhost_user_cleanup(&gpio->vhost_user); -} - -static int vu_gpio_connect(DeviceState *dev, Error **errp) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); - struct vhost_dev *vhost_dev = &gpio->vhost_dev; - int ret; - - if (gpio->connected) { - return 0; - } - - vhost_dev_set_config_notifier(vhost_dev, &gpio_ops); - gpio->vhost_user.supports_config = true; - - gpio->vhost_dev.nvqs = VHOST_NVQS; - gpio->vhost_dev.vqs = gpio->vhost_vqs; - - ret = vhost_dev_init(vhost_dev, &gpio->vhost_user, - VHOST_BACKEND_TYPE_USER, 0, errp); - if (ret < 0) { - return ret; - } - - gpio->connected = true; - - /* restore vhost state */ - if (virtio_device_started(vdev, vdev->status)) { - vu_gpio_start(vdev); - } - - return 0; -} - -static void vu_gpio_event(void *opaque, QEMUChrEvent event); - -static void vu_gpio_disconnect(DeviceState *dev) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); - - if (!gpio->connected) { - return; - } - gpio->connected = false; - - vu_gpio_stop(vdev); - vhost_dev_cleanup(&gpio->vhost_dev); - - /* Re-instate the event handler for new connections */ - qemu_chr_fe_set_handlers(&gpio->chardev, - NULL, NULL, vu_gpio_event, - NULL, dev, NULL, true); -} - -static void vu_gpio_event(void *opaque, QEMUChrEvent event) -{ - DeviceState *dev = opaque; - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); - Error *local_err = NULL; - - switch (event) { - case CHR_EVENT_OPENED: - if (vu_gpio_connect(dev, &local_err) < 0) { - qemu_chr_fe_disconnect(&gpio->chardev); - return; - } - break; - case CHR_EVENT_CLOSED: - /* defer close until later to avoid circular close */ - vhost_user_async_close(dev, &gpio->chardev, &gpio->vhost_dev, - vu_gpio_disconnect, vu_gpio_event); - break; - case CHR_EVENT_BREAK: - case CHR_EVENT_MUX_IN: - case CHR_EVENT_MUX_OUT: - /* Ignore */ - break; - } -} - -static int vu_gpio_realize_connect(VHostUserGPIO *gpio, Error **errp) -{ - VirtIODevice *vdev = &gpio->parent_obj; - DeviceState *dev = &vdev->parent_obj; - struct vhost_dev *vhost_dev = &gpio->vhost_dev; - int ret; - - ret = qemu_chr_fe_wait_connected(&gpio->chardev, errp); - if (ret < 0) { - return ret; - } - - /* - * vu_gpio_connect() may have already connected (via the event - * callback) in which case it will just report success. 
- */ - ret = vu_gpio_connect(dev, errp); - if (ret < 0) { - qemu_chr_fe_disconnect(&gpio->chardev); - return ret; - } - g_assert(gpio->connected); - - ret = vhost_dev_get_config(vhost_dev, (uint8_t *)&gpio->config, - sizeof(gpio->config), errp); - - if (ret < 0) { - error_report("vhost-user-gpio: get config failed"); - - qemu_chr_fe_disconnect(&gpio->chardev); - vhost_dev_cleanup(vhost_dev); - return ret; - } - - return 0; -} - -static void vu_gpio_device_realize(DeviceState *dev, Error **errp) -{ - ERRP_GUARD(); - - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserGPIO *gpio = VHOST_USER_GPIO(dev); - int retries, ret; - - if (!gpio->chardev.chr) { - error_setg(errp, "vhost-user-gpio: chardev is mandatory"); - return; - } - - if (!vhost_user_init(&gpio->vhost_user, &gpio->chardev, errp)) { - return; - } - - virtio_init(vdev, VIRTIO_ID_GPIO, sizeof(gpio->config)); - - gpio->command_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); - gpio->interrupt_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); - gpio->vhost_vqs = g_new0(struct vhost_virtqueue, VHOST_NVQS); - - gpio->connected = false; - - qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, vu_gpio_event, NULL, - dev, NULL, true); - - retries = VU_REALIZE_CONN_RETRIES; - g_assert(!*errp); - do { - if (*errp) { - error_prepend(errp, "Reconnecting after error: "); - error_report_err(*errp); - *errp = NULL; - } - ret = vu_gpio_realize_connect(gpio, errp); - } while (ret < 0 && retries--); - - if (ret < 0) { - do_vhost_user_cleanup(vdev, gpio); - } - - return; -} - -static void vu_gpio_device_unrealize(DeviceState *dev) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserGPIO *gpio = VHOST_USER_GPIO(dev); - - vu_gpio_set_status(vdev, 0); - qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, NULL, NULL, NULL, NULL, - false); - vhost_dev_cleanup(&gpio->vhost_dev); - do_vhost_user_cleanup(vdev, gpio); + vubc->parent_realize(dev, errp); } static const VMStateDescription vu_gpio_vmstate = { @@ -400,31 +37,21 @@ static const VMStateDescription vu_gpio_vmstate = { .unmigratable = 1, }; -static Property vu_gpio_properties[] = { - DEFINE_PROP_CHR("chardev", VHostUserGPIO, chardev), - DEFINE_PROP_END_OF_LIST(), -}; - static void vu_gpio_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass); - device_class_set_props(dc, vu_gpio_properties); dc->vmsd = &vu_gpio_vmstate; + device_class_set_props(dc, vgpio_properties); + device_class_set_parent_realize(dc, vgpio_realize, + &vubc->parent_realize); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); - vdc->realize = vu_gpio_device_realize; - vdc->unrealize = vu_gpio_device_unrealize; - vdc->get_features = vu_gpio_get_features; - vdc->get_config = vu_gpio_get_config; - vdc->set_status = vu_gpio_set_status; - vdc->guest_notifier_mask = vu_gpio_guest_notifier_mask; - vdc->get_vhost = vu_gpio_get_vhost; } static const TypeInfo vu_gpio_info = { .name = TYPE_VHOST_USER_GPIO, - .parent = TYPE_VIRTIO_DEVICE, + .parent = TYPE_VHOST_USER_BASE, .instance_size = sizeof(VHostUserGPIO), .class_init = vu_gpio_class_init, }; diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c index 4eef3f06337..a464f5e0399 100644 --- a/hw/virtio/vhost-user-i2c.c +++ b/hw/virtio/vhost-user-i2c.c @@ -14,253 +14,22 @@ #include "qemu/error-report.h" #include "standard-headers/linux/virtio_ids.h" -static const int feature_bits[] = { - VIRTIO_I2C_F_ZERO_LENGTH_REQUEST, 
- VIRTIO_F_RING_RESET, - VHOST_INVALID_FEATURE_BIT +static Property vi2c_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), + DEFINE_PROP_END_OF_LIST(), }; -static void vu_i2c_start(VirtIODevice *vdev) -{ - BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - VHostUserI2C *i2c = VHOST_USER_I2C(vdev); - int ret, i; - - if (!k->set_guest_notifiers) { - error_report("binding does not support guest notifiers"); - return; - } - - ret = vhost_dev_enable_notifiers(&i2c->vhost_dev, vdev); - if (ret < 0) { - error_report("Error enabling host notifiers: %d", -ret); - return; - } - - ret = k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, true); - if (ret < 0) { - error_report("Error binding guest notifier: %d", -ret); - goto err_host_notifiers; - } - - i2c->vhost_dev.acked_features = vdev->guest_features; - - ret = vhost_dev_start(&i2c->vhost_dev, vdev, true); - if (ret < 0) { - error_report("Error starting vhost-user-i2c: %d", -ret); - goto err_guest_notifiers; - } - - /* - * guest_notifier_mask/pending not used yet, so just unmask - * everything here. virtio-pci will do the right thing by - * enabling/disabling irqfd. - */ - for (i = 0; i < i2c->vhost_dev.nvqs; i++) { - vhost_virtqueue_mask(&i2c->vhost_dev, vdev, i, false); - } - - return; - -err_guest_notifiers: - k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, false); -err_host_notifiers: - vhost_dev_disable_notifiers(&i2c->vhost_dev, vdev); -} - -static void vu_i2c_stop(VirtIODevice *vdev) -{ - VHostUserI2C *i2c = VHOST_USER_I2C(vdev); - BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - int ret; - - if (!k->set_guest_notifiers) { - return; - } - - vhost_dev_stop(&i2c->vhost_dev, vdev, true); - - ret = k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, false); - if (ret < 0) { - error_report("vhost guest notifier cleanup failed: %d", ret); - return; - } - - vhost_dev_disable_notifiers(&i2c->vhost_dev, vdev); -} - -static void vu_i2c_set_status(VirtIODevice *vdev, uint8_t status) -{ - VHostUserI2C *i2c = VHOST_USER_I2C(vdev); - bool should_start = virtio_device_should_start(vdev, status); - - if (vhost_dev_is_started(&i2c->vhost_dev) == should_start) { - return; - } - - if (should_start) { - vu_i2c_start(vdev); - } else { - vu_i2c_stop(vdev); - } -} - -static uint64_t vu_i2c_get_features(VirtIODevice *vdev, - uint64_t requested_features, Error **errp) -{ - VHostUserI2C *i2c = VHOST_USER_I2C(vdev); - - virtio_add_feature(&requested_features, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST); - return vhost_get_features(&i2c->vhost_dev, feature_bits, requested_features); -} - -static void vu_i2c_handle_output(VirtIODevice *vdev, VirtQueue *vq) -{ - /* - * Not normally called; it's the daemon that handles the queue; - * however virtio's cleanup path can call this. - */ -} - -static void vu_i2c_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) -{ - VHostUserI2C *i2c = VHOST_USER_I2C(vdev); - - /* - * We don't support interrupts, return early if index is set to - * VIRTIO_CONFIG_IRQ_IDX. - */ - if (idx == VIRTIO_CONFIG_IRQ_IDX) { - return; - } - - vhost_virtqueue_mask(&i2c->vhost_dev, vdev, idx, mask); -} - -static bool vu_i2c_guest_notifier_pending(VirtIODevice *vdev, int idx) -{ - VHostUserI2C *i2c = VHOST_USER_I2C(vdev); - - /* - * We don't support interrupts, return early if index is set to - * VIRTIO_CONFIG_IRQ_IDX. 
- */ - if (idx == VIRTIO_CONFIG_IRQ_IDX) { - return false; - } - - return vhost_virtqueue_pending(&i2c->vhost_dev, idx); -} - -static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserI2C *i2c) -{ - vhost_user_cleanup(&i2c->vhost_user); - virtio_delete_queue(i2c->vq); - virtio_cleanup(vdev); -} - -static int vu_i2c_connect(DeviceState *dev) +static void vi2c_realize(DeviceState *dev, Error **errp) { - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + VHostUserBase *vub = VHOST_USER_BASE(dev); + VHostUserBaseClass *vubc = VHOST_USER_BASE_GET_CLASS(dev); - if (i2c->connected) { - return 0; - } - i2c->connected = true; + /* Fixed for I2C */ + vub->virtio_id = VIRTIO_ID_I2C_ADAPTER; + vub->num_vqs = 1; + vub->vq_size = 4; - /* restore vhost state */ - if (virtio_device_started(vdev, vdev->status)) { - vu_i2c_start(vdev); - } - - return 0; -} - -static void vu_i2c_disconnect(DeviceState *dev) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserI2C *i2c = VHOST_USER_I2C(vdev); - - if (!i2c->connected) { - return; - } - i2c->connected = false; - - if (vhost_dev_is_started(&i2c->vhost_dev)) { - vu_i2c_stop(vdev); - } -} - -static void vu_i2c_event(void *opaque, QEMUChrEvent event) -{ - DeviceState *dev = opaque; - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserI2C *i2c = VHOST_USER_I2C(vdev); - - switch (event) { - case CHR_EVENT_OPENED: - if (vu_i2c_connect(dev) < 0) { - qemu_chr_fe_disconnect(&i2c->chardev); - return; - } - break; - case CHR_EVENT_CLOSED: - vu_i2c_disconnect(dev); - break; - case CHR_EVENT_BREAK: - case CHR_EVENT_MUX_IN: - case CHR_EVENT_MUX_OUT: - /* Ignore */ - break; - } -} - -static void vu_i2c_device_realize(DeviceState *dev, Error **errp) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserI2C *i2c = VHOST_USER_I2C(dev); - int ret; - - if (!i2c->chardev.chr) { - error_setg(errp, "vhost-user-i2c: missing chardev"); - return; - } - - if (!vhost_user_init(&i2c->vhost_user, &i2c->chardev, errp)) { - return; - } - - virtio_init(vdev, VIRTIO_ID_I2C_ADAPTER, 0); - - i2c->vhost_dev.nvqs = 1; - i2c->vq = virtio_add_queue(vdev, 4, vu_i2c_handle_output); - i2c->vhost_dev.vqs = g_new0(struct vhost_virtqueue, i2c->vhost_dev.nvqs); - - ret = vhost_dev_init(&i2c->vhost_dev, &i2c->vhost_user, - VHOST_BACKEND_TYPE_USER, 0, errp); - if (ret < 0) { - g_free(i2c->vhost_dev.vqs); - do_vhost_user_cleanup(vdev, i2c); - } - - qemu_chr_fe_set_handlers(&i2c->chardev, NULL, NULL, vu_i2c_event, NULL, - dev, NULL, true); -} - -static void vu_i2c_device_unrealize(DeviceState *dev) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserI2C *i2c = VHOST_USER_I2C(dev); - struct vhost_virtqueue *vhost_vqs = i2c->vhost_dev.vqs; - - /* This will stop vhost backend if appropriate. 
*/ - vu_i2c_set_status(vdev, 0); - vhost_dev_cleanup(&i2c->vhost_dev); - g_free(vhost_vqs); - do_vhost_user_cleanup(vdev, i2c); + vubc->parent_realize(dev, errp); } static const VMStateDescription vu_i2c_vmstate = { @@ -268,30 +37,21 @@ static const VMStateDescription vu_i2c_vmstate = { .unmigratable = 1, }; -static Property vu_i2c_properties[] = { - DEFINE_PROP_CHR("chardev", VHostUserI2C, chardev), - DEFINE_PROP_END_OF_LIST(), -}; - static void vu_i2c_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass); - device_class_set_props(dc, vu_i2c_properties); dc->vmsd = &vu_i2c_vmstate; + device_class_set_props(dc, vi2c_properties); + device_class_set_parent_realize(dc, vi2c_realize, + &vubc->parent_realize); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); - vdc->realize = vu_i2c_device_realize; - vdc->unrealize = vu_i2c_device_unrealize; - vdc->get_features = vu_i2c_get_features; - vdc->set_status = vu_i2c_set_status; - vdc->guest_notifier_mask = vu_i2c_guest_notifier_mask; - vdc->guest_notifier_pending = vu_i2c_guest_notifier_pending; } static const TypeInfo vu_i2c_info = { .name = TYPE_VHOST_USER_I2C, - .parent = TYPE_VIRTIO_DEVICE, + .parent = TYPE_VHOST_USER_BASE, .instance_size = sizeof(VHostUserI2C), .class_init = vu_i2c_class_init, }; diff --git a/hw/virtio/vhost-user-input-pci.c b/hw/virtio/vhost-user-input-pci.c index b858898a363..3f4761ce88a 100644 --- a/hw/virtio/vhost-user-input-pci.c +++ b/hw/virtio/vhost-user-input-pci.c @@ -30,9 +30,6 @@ static void vhost_user_input_pci_instance_init(Object *obj) virtio_instance_init_common(obj, &dev->vhi, sizeof(dev->vhi), TYPE_VHOST_USER_INPUT); - - object_property_add_alias(obj, "chardev", - OBJECT(&dev->vhi), "chardev"); } static const VirtioPCIDeviceTypeInfo vhost_user_input_pci_info = { diff --git a/hw/virtio/vhost-user-input.c b/hw/virtio/vhost-user-input.c new file mode 100644 index 00000000000..bedec0468c3 --- /dev/null +++ b/hw/virtio/vhost-user-input.c @@ -0,0 +1,58 @@ +/* + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/virtio/virtio-input.h" + +static Property vinput_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vinput_realize(DeviceState *dev, Error **errp) +{ + VHostUserBase *vub = VHOST_USER_BASE(dev); + VHostUserBaseClass *vubc = VHOST_USER_BASE_GET_CLASS(dev); + + /* Fixed for input device */ + vub->virtio_id = VIRTIO_ID_INPUT; + vub->num_vqs = 2; + vub->vq_size = 4; + vub->config_size = sizeof(virtio_input_config); + + vubc->parent_realize(dev, errp); +} + +static const VMStateDescription vmstate_vhost_input = { + .name = "vhost-user-input", + .unmigratable = 1, +}; + +static void vhost_input_class_init(ObjectClass *klass, void *data) +{ + VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_vhost_input; + device_class_set_props(dc, vinput_properties); + device_class_set_parent_realize(dc, vinput_realize, + &vubc->parent_realize); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo vhost_input_info = { + .name = TYPE_VHOST_USER_INPUT, + .parent = TYPE_VHOST_USER_BASE, + .instance_size = sizeof(VHostUserInput), + .class_init = vhost_input_class_init, +}; + +static void vhost_input_register_types(void) +{ + type_register_static(&vhost_input_info); +} + +type_init(vhost_input_register_types) diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c index efc54cd3fb1..01879c863df 100644 --- a/hw/virtio/vhost-user-rng.c +++ b/hw/virtio/vhost-user-rng.c @@ -3,7 +3,7 @@ * * Copyright (c) 2021 Mathieu Poirier * - * Implementation seriously tailored on vhost-user-i2c.c + * Simple wrapper of the generic vhost-user-device. * * SPDX-License-Identifier: GPL-2.0-or-later */ @@ -13,281 +13,47 @@ #include "hw/qdev-properties.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/vhost-user-rng.h" -#include "qemu/error-report.h" #include "standard-headers/linux/virtio_ids.h" -static const int feature_bits[] = { - VIRTIO_F_RING_RESET, - VHOST_INVALID_FEATURE_BIT -}; - -static void vu_rng_start(VirtIODevice *vdev) -{ - VHostUserRNG *rng = VHOST_USER_RNG(vdev); - BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - int ret; - int i; - - if (!k->set_guest_notifiers) { - error_report("binding does not support guest notifiers"); - return; - } - - ret = vhost_dev_enable_notifiers(&rng->vhost_dev, vdev); - if (ret < 0) { - error_report("Error enabling host notifiers: %d", -ret); - return; - } - - ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, true); - if (ret < 0) { - error_report("Error binding guest notifier: %d", -ret); - goto err_host_notifiers; - } - - rng->vhost_dev.acked_features = vdev->guest_features; - ret = vhost_dev_start(&rng->vhost_dev, vdev, true); - if (ret < 0) { - error_report("Error starting vhost-user-rng: %d", -ret); - goto err_guest_notifiers; - } - - /* - * guest_notifier_mask/pending not used yet, so just unmask - * everything here. virtio-pci will do the right thing by - * enabling/disabling irqfd. 
- */ - for (i = 0; i < rng->vhost_dev.nvqs; i++) { - vhost_virtqueue_mask(&rng->vhost_dev, vdev, i, false); - } - - return; - -err_guest_notifiers: - k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false); -err_host_notifiers: - vhost_dev_disable_notifiers(&rng->vhost_dev, vdev); -} - -static void vu_rng_stop(VirtIODevice *vdev) -{ - VHostUserRNG *rng = VHOST_USER_RNG(vdev); - BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); - VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - int ret; - - if (!k->set_guest_notifiers) { - return; - } - - vhost_dev_stop(&rng->vhost_dev, vdev, true); - - ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false); - if (ret < 0) { - error_report("vhost guest notifier cleanup failed: %d", ret); - return; - } - - vhost_dev_disable_notifiers(&rng->vhost_dev, vdev); -} - -static void vu_rng_set_status(VirtIODevice *vdev, uint8_t status) -{ - VHostUserRNG *rng = VHOST_USER_RNG(vdev); - bool should_start = virtio_device_should_start(vdev, status); - - if (vhost_dev_is_started(&rng->vhost_dev) == should_start) { - return; - } - - if (should_start) { - vu_rng_start(vdev); - } else { - vu_rng_stop(vdev); - } -} - -static uint64_t vu_rng_get_features(VirtIODevice *vdev, - uint64_t requested_features, Error **errp) -{ - VHostUserRNG *rng = VHOST_USER_RNG(vdev); - - return vhost_get_features(&rng->vhost_dev, feature_bits, - requested_features); -} - -static void vu_rng_handle_output(VirtIODevice *vdev, VirtQueue *vq) -{ - /* - * Not normally called; it's the daemon that handles the queue; - * however virtio's cleanup path can call this. - */ -} - -static void vu_rng_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) -{ - VHostUserRNG *rng = VHOST_USER_RNG(vdev); - - vhost_virtqueue_mask(&rng->vhost_dev, vdev, idx, mask); -} - -static bool vu_rng_guest_notifier_pending(VirtIODevice *vdev, int idx) -{ - VHostUserRNG *rng = VHOST_USER_RNG(vdev); - - return vhost_virtqueue_pending(&rng->vhost_dev, idx); -} - -static void vu_rng_connect(DeviceState *dev) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserRNG *rng = VHOST_USER_RNG(vdev); - - if (rng->connected) { - return; - } - - rng->connected = true; - - /* restore vhost state */ - if (virtio_device_started(vdev, vdev->status)) { - vu_rng_start(vdev); - } -} - -static void vu_rng_disconnect(DeviceState *dev) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserRNG *rng = VHOST_USER_RNG(vdev); - - if (!rng->connected) { - return; - } - - rng->connected = false; - - if (vhost_dev_is_started(&rng->vhost_dev)) { - vu_rng_stop(vdev); - } -} - -static void vu_rng_event(void *opaque, QEMUChrEvent event) -{ - DeviceState *dev = opaque; - - switch (event) { - case CHR_EVENT_OPENED: - vu_rng_connect(dev); - break; - case CHR_EVENT_CLOSED: - vu_rng_disconnect(dev); - break; - case CHR_EVENT_BREAK: - case CHR_EVENT_MUX_IN: - case CHR_EVENT_MUX_OUT: - /* Ignore */ - break; - } -} - -static void vu_rng_device_realize(DeviceState *dev, Error **errp) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserRNG *rng = VHOST_USER_RNG(dev); - int ret; - - if (!rng->chardev.chr) { - error_setg(errp, "missing chardev"); - return; - } - - if (!vhost_user_init(&rng->vhost_user, &rng->chardev, errp)) { - return; - } - - virtio_init(vdev, VIRTIO_ID_RNG, 0); - - rng->req_vq = virtio_add_queue(vdev, 4, vu_rng_handle_output); - if (!rng->req_vq) { - error_setg_errno(errp, -1, "virtio_add_queue() failed"); - goto virtio_add_queue_failed; - } - - rng->vhost_dev.nvqs = 1; - rng->vhost_dev.vqs = 
g_new0(struct vhost_virtqueue, rng->vhost_dev.nvqs); - ret = vhost_dev_init(&rng->vhost_dev, &rng->vhost_user, - VHOST_BACKEND_TYPE_USER, 0, errp); - if (ret < 0) { - error_setg_errno(errp, -ret, "vhost_dev_init() failed"); - goto vhost_dev_init_failed; - } - - qemu_chr_fe_set_handlers(&rng->chardev, NULL, NULL, vu_rng_event, NULL, - dev, NULL, true); - - return; - -vhost_dev_init_failed: - g_free(rng->vhost_dev.vqs); - virtio_delete_queue(rng->req_vq); -virtio_add_queue_failed: - virtio_cleanup(vdev); - vhost_user_cleanup(&rng->vhost_user); -} - -static void vu_rng_device_unrealize(DeviceState *dev) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(dev); - VHostUserRNG *rng = VHOST_USER_RNG(dev); - struct vhost_virtqueue *vhost_vqs = rng->vhost_dev.vqs; - - vu_rng_set_status(vdev, 0); - - vhost_dev_cleanup(&rng->vhost_dev); - g_free(vhost_vqs); - virtio_delete_queue(rng->req_vq); - virtio_cleanup(vdev); - vhost_user_cleanup(&rng->vhost_user); -} - -static struct vhost_dev *vu_rng_get_vhost(VirtIODevice *vdev) -{ - VHostUserRNG *rng = VHOST_USER_RNG(vdev); - return &rng->vhost_dev; -} - static const VMStateDescription vu_rng_vmstate = { .name = "vhost-user-rng", .unmigratable = 1, }; -static Property vu_rng_properties[] = { - DEFINE_PROP_CHR("chardev", VHostUserRNG, chardev), +static Property vrng_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), DEFINE_PROP_END_OF_LIST(), }; +static void vu_rng_base_realize(DeviceState *dev, Error **errp) +{ + VHostUserBase *vub = VHOST_USER_BASE(dev); + VHostUserBaseClass *vubs = VHOST_USER_BASE_GET_CLASS(dev); + + /* Fixed for RNG */ + vub->virtio_id = VIRTIO_ID_RNG; + vub->num_vqs = 1; + vub->vq_size = 4; + + vubs->parent_realize(dev, errp); +} + static void vu_rng_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass); - device_class_set_props(dc, vu_rng_properties); dc->vmsd = &vu_rng_vmstate; - set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + device_class_set_props(dc, vrng_properties); + device_class_set_parent_realize(dc, vu_rng_base_realize, + &vubc->parent_realize); - vdc->realize = vu_rng_device_realize; - vdc->unrealize = vu_rng_device_unrealize; - vdc->get_features = vu_rng_get_features; - vdc->set_status = vu_rng_set_status; - vdc->guest_notifier_mask = vu_rng_guest_notifier_mask; - vdc->guest_notifier_pending = vu_rng_guest_notifier_pending; - vdc->get_vhost = vu_rng_get_vhost; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo vu_rng_info = { .name = TYPE_VHOST_USER_RNG, - .parent = TYPE_VIRTIO_DEVICE, + .parent = TYPE_VHOST_USER_BASE, .instance_size = sizeof(VHostUserRNG), .class_init = vu_rng_class_init, }; diff --git a/hw/virtio/vhost-user-scmi.c b/hw/virtio/vhost-user-scmi.c index 918bb7dcf7d..300847e6726 100644 --- a/hw/virtio/vhost-user-scmi.c +++ b/hw/virtio/vhost-user-scmi.c @@ -56,9 +56,9 @@ static int vu_scmi_start(VirtIODevice *vdev) goto err_host_notifiers; } - vhost_ack_features(&scmi->vhost_dev, feature_bits, vdev->guest_features); + vhost_ack_features(vhost_dev, feature_bits, vdev->guest_features); - ret = vhost_dev_start(&scmi->vhost_dev, vdev, true); + ret = vhost_dev_start(vhost_dev, vdev, true); if (ret < 0) { error_report("Error starting vhost-user-scmi: %d", ret); goto err_guest_notifiers; @@ -71,7 +71,7 @@ static int vu_scmi_start(VirtIODevice *vdev) * enabling/disabling irqfd. 
*/ for (i = 0; i < scmi->vhost_dev.nvqs; i++) { - vhost_virtqueue_mask(&scmi->vhost_dev, vdev, i, false); + vhost_virtqueue_mask(vhost_dev, vdev, i, false); } return 0; diff --git a/hw/virtio/vhost-user-snd-pci.c b/hw/virtio/vhost-user-snd-pci.c new file mode 100644 index 00000000000..d61cfdae631 --- /dev/null +++ b/hw/virtio/vhost-user-snd-pci.c @@ -0,0 +1,75 @@ +/* + * Vhost-user Sound virtio device PCI glue + * + * Copyright (c) 2023 Manos Pitsidianakis + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-user-snd.h" +#include "hw/virtio/virtio-pci.h" + +struct VHostUserSoundPCI { + VirtIOPCIProxy parent_obj; + VHostUserSound vdev; +}; + +typedef struct VHostUserSoundPCI VHostUserSoundPCI; + +#define TYPE_VHOST_USER_SND_PCI "vhost-user-snd-pci-base" + +DECLARE_INSTANCE_CHECKER(VHostUserSoundPCI, VHOST_USER_SND_PCI, + TYPE_VHOST_USER_SND_PCI) + +static Property vhost_user_snd_pci_properties[] = { + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_user_snd_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserSoundPCI *dev = VHOST_USER_SND_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + vpci_dev->nvectors = 1; + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_snd_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_user_snd_pci_realize; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + device_class_set_props(dc, vhost_user_snd_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */ + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_MULTIMEDIA_AUDIO; +} + +static void vhost_user_snd_pci_instance_init(Object *obj) +{ + VHostUserSoundPCI *dev = VHOST_USER_SND_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_SND); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_snd_pci_info = { + .base_name = TYPE_VHOST_USER_SND_PCI, + .non_transitional_name = "vhost-user-snd-pci", + .instance_size = sizeof(VHostUserSoundPCI), + .instance_init = vhost_user_snd_pci_instance_init, + .class_init = vhost_user_snd_pci_class_init, +}; + +static void vhost_user_snd_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_snd_pci_info); +} + +type_init(vhost_user_snd_pci_register); diff --git a/hw/virtio/vhost-user-snd.c b/hw/virtio/vhost-user-snd.c new file mode 100644 index 00000000000..9a217543f85 --- /dev/null +++ b/hw/virtio/vhost-user-snd.c @@ -0,0 +1,67 @@ +/* + * Vhost-user snd virtio device + * + * Copyright (c) 2023 Manos Pitsidianakis + * + * Simple wrapper of the generic vhost-user-device. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/vhost-user-snd.h" +#include "standard-headers/linux/virtio_ids.h" +#include "standard-headers/linux/virtio_snd.h" + +static const VMStateDescription vu_snd_vmstate = { + .name = "vhost-user-snd", + .unmigratable = 1, +}; + +static Property vsnd_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vu_snd_base_realize(DeviceState *dev, Error **errp) +{ + VHostUserBase *vub = VHOST_USER_BASE(dev); + VHostUserBaseClass *vubs = VHOST_USER_BASE_GET_CLASS(dev); + + vub->virtio_id = VIRTIO_ID_SOUND; + vub->num_vqs = 4; + vub->config_size = sizeof(struct virtio_snd_config); + vub->vq_size = 64; + + vubs->parent_realize(dev, errp); +} + +static void vu_snd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass); + + dc->vmsd = &vu_snd_vmstate; + device_class_set_props(dc, vsnd_properties); + device_class_set_parent_realize(dc, vu_snd_base_realize, + &vubc->parent_realize); + + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); +} + +static const TypeInfo vu_snd_info = { + .name = TYPE_VHOST_USER_SND, + .parent = TYPE_VHOST_USER_BASE, + .instance_size = sizeof(VHostUserSound), + .class_init = vu_snd_class_init, +}; + +static void vu_snd_register_types(void) +{ + type_register_static(&vu_snd_info); +} + +type_init(vu_snd_register_types) diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index f214df804b2..cdf9af4a4bd 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -26,7 +26,6 @@ #include "qemu/sockets.h" #include "sysemu/runstate.h" #include "sysemu/cryptodev.h" -#include "migration/migration.h" #include "migration/postcopy-ram.h" #include "trace.h" #include "exec/ramblock.h" @@ -1611,11 +1610,27 @@ vhost_user_backend_handle_shared_object_add(struct vhost_dev *dev, } static int -vhost_user_backend_handle_shared_object_remove(VhostUserShared *object) +vhost_user_backend_handle_shared_object_remove(struct vhost_dev *dev, + VhostUserShared *object) { QemuUUID uuid; memcpy(uuid.data, object->uuid, sizeof(object->uuid)); + switch (virtio_object_type(&uuid)) { + case TYPE_VHOST_DEV: + { + struct vhost_dev *owner = virtio_lookup_vhost_device(&uuid); + if (dev != owner) { + /* Not allowed to remove non-owned entries */ + return 0; + } + break; + } + default: + /* Not allowed to remove non-owned entries */ + return 0; + } + return virtio_remove_resource(&uuid); } @@ -1794,7 +1809,8 @@ static gboolean backend_read(QIOChannel *ioc, GIOCondition condition, ret = vhost_user_backend_handle_shared_object_add(dev, &payload.object); break; case VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE: - ret = vhost_user_backend_handle_shared_object_remove(&payload.object); + ret = vhost_user_backend_handle_shared_object_remove(dev, + &payload.object); break; case VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP: ret = vhost_user_backend_handle_shared_object_lookup(dev->opaque, ioc, @@ -2084,7 +2100,7 @@ static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp) } static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier, - void *opaque) + void *opaque, Error **errp) { struct PostcopyNotifyData *pnd = opaque; struct vhost_user *u = container_of(notifier, struct vhost_user, @@ -2096,20 +2112,20 @@ static int 
vhost_user_postcopy_notifier(NotifierWithReturn *notifier, if (!virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_PAGEFAULT)) { /* TODO: Get the device name into this error somehow */ - error_setg(pnd->errp, + error_setg(errp, "vhost-user backend not capable of postcopy"); return -ENOENT; } break; case POSTCOPY_NOTIFY_INBOUND_ADVISE: - return vhost_user_postcopy_advise(dev, pnd->errp); + return vhost_user_postcopy_advise(dev, errp); case POSTCOPY_NOTIFY_INBOUND_LISTEN: - return vhost_user_postcopy_listen(dev, pnd->errp); + return vhost_user_postcopy_listen(dev, errp); case POSTCOPY_NOTIFY_INBOUND_END: - return vhost_user_postcopy_end(dev, pnd->errp); + return vhost_user_postcopy_end(dev, errp); default: /* We ignore notifications we don't know */ diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 819b2d811af..e827b9175fc 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -47,12 +47,17 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, int page_mask) { Int128 llend; + bool is_ram = memory_region_is_ram(section->mr); + bool is_iommu = memory_region_is_iommu(section->mr); + bool is_protected = memory_region_is_protected(section->mr); - if ((!memory_region_is_ram(section->mr) && - !memory_region_is_iommu(section->mr)) || - memory_region_is_protected(section->mr) || - /* vhost-vDPA doesn't allow MMIO to be mapped */ - memory_region_is_ram_device(section->mr)) { + /* vhost-vDPA doesn't allow MMIO to be mapped */ + bool is_ram_device = memory_region_is_ram_device(section->mr); + + if ((!is_ram && !is_iommu) || is_protected || is_ram_device) { + trace_vhost_vdpa_skipped_memory_section(is_ram, is_iommu, is_protected, + is_ram_device, iova_min, + iova_max, page_mask); return true; } @@ -69,7 +74,7 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, * size that maps to the kernel */ - if (!memory_region_is_iommu(section->mr)) { + if (!is_iommu) { llend = vhost_vdpa_section_end(section, page_mask); if (int128_gt(llend, int128_make64(iova_max))) { error_report("RAM section out of device range (max=0x%" PRIx64 @@ -86,14 +91,14 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, * The caller must set asid = 0 if the device does not support asid. * This is not an ABI break since it is set to 0 by the initializer anyway. */ -int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, +int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova, hwaddr size, void *vaddr, bool readonly) { struct vhost_msg_v2 msg = {}; - int fd = v->device_fd; + int fd = s->device_fd; int ret = 0; - msg.type = v->msg_type; + msg.type = VHOST_IOTLB_MSG_V2; msg.asid = asid; msg.iotlb.iova = iova; msg.iotlb.size = size; @@ -101,7 +106,7 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW; msg.iotlb.type = VHOST_IOTLB_UPDATE; - trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova, + trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova, msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type); @@ -118,20 +123,20 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, * The caller must set asid = 0 if the device does not support asid. * This is not an ABI break since it is set to 0 by the initializer anyway. 
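For anyone adding another postcopy notifier, the conversion that finishes above changes the NotifierWithReturn callback contract: the Error ** now arrives as a third argument instead of being carried in the notify data. A minimal sketch of a callback under the new signature (the `reason` field is assumed to be what the original code switches on, and my_backend_supports_postcopy() is a placeholder):

```c
static int my_postcopy_notifier(NotifierWithReturn *notifier,
                                void *opaque, Error **errp)
{
    struct PostcopyNotifyData *pnd = opaque;

    if (pnd->reason == POSTCOPY_NOTIFY_INBOUND_ADVISE &&
        !my_backend_supports_postcopy()) {
        /* errors go straight into errp, no more pnd->errp */
        error_setg(errp, "backend not capable of postcopy");
        return -ENOENT;
    }
    return 0;
}
```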
*/ -int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, +int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova, hwaddr size) { struct vhost_msg_v2 msg = {}; - int fd = v->device_fd; + int fd = s->device_fd; int ret = 0; - msg.type = v->msg_type; + msg.type = VHOST_IOTLB_MSG_V2; msg.asid = asid; msg.iotlb.iova = iova; msg.iotlb.size = size; msg.iotlb.type = VHOST_IOTLB_INVALIDATE; - trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova, + trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova, msg.iotlb.size, msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { @@ -143,56 +148,55 @@ int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, return ret; } -static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v) +static void vhost_vdpa_listener_begin_batch(VhostVDPAShared *s) { - int fd = v->device_fd; + int fd = s->device_fd; struct vhost_msg_v2 msg = { - .type = v->msg_type, + .type = VHOST_IOTLB_MSG_V2, .iotlb.type = VHOST_IOTLB_BATCH_BEGIN, }; - trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type); + trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { error_report("failed to write, fd=%d, errno=%d (%s)", fd, errno, strerror(errno)); } } -static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v) +static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s) { - if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) && - !v->iotlb_batch_begin_sent) { - vhost_vdpa_listener_begin_batch(v); + if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) && + !s->iotlb_batch_begin_sent) { + vhost_vdpa_listener_begin_batch(s); } - v->iotlb_batch_begin_sent = true; + s->iotlb_batch_begin_sent = true; } static void vhost_vdpa_listener_commit(MemoryListener *listener) { - struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); - struct vhost_dev *dev = v->dev; + VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener); struct vhost_msg_v2 msg = {}; - int fd = v->device_fd; + int fd = s->device_fd; - if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) { + if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) { return; } - if (!v->iotlb_batch_begin_sent) { + if (!s->iotlb_batch_begin_sent) { return; } - msg.type = v->msg_type; + msg.type = VHOST_IOTLB_MSG_V2; msg.iotlb.type = VHOST_IOTLB_BATCH_END; - trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type); + trace_vhost_vdpa_listener_commit(s, fd, msg.type, msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { error_report("failed to write, fd=%d, errno=%d (%s)", fd, errno, strerror(errno)); } - v->iotlb_batch_begin_sent = false; + s->iotlb_batch_begin_sent = false; } static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) @@ -200,7 +204,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n); hwaddr iova = iotlb->iova + iommu->iommu_offset; - struct vhost_vdpa *v = iommu->dev; + VhostVDPAShared *s = iommu->dev_shared; void *vaddr; int ret; Int128 llend; @@ -213,10 +217,10 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) RCU_READ_LOCK_GUARD(); /* check if RAM section out of device range */ llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova)); - if (int128_gt(llend, 
int128_make64(v->iova_range.last))) { + if (int128_gt(llend, int128_make64(s->iova_range.last))) { error_report("RAM section out of device range (max=0x%" PRIx64 ", end addr=0x%" PRIx64 ")", - v->iova_range.last, int128_get64(llend)); + s->iova_range.last, int128_get64(llend)); return; } @@ -226,20 +230,20 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) { return; } - ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, + ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova, iotlb->addr_mask + 1, vaddr, read_only); if (ret) { error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", " "0x%" HWADDR_PRIx ", %p) = %d (%m)", - v, iova, iotlb->addr_mask + 1, vaddr, ret); + s, iova, iotlb->addr_mask + 1, vaddr, ret); } } else { - ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, + ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova, iotlb->addr_mask + 1); if (ret) { error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " "0x%" HWADDR_PRIx ") = %d (%m)", - v, iova, iotlb->addr_mask + 1, ret); + s, iova, iotlb->addr_mask + 1, ret); } } } @@ -247,7 +251,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) static void vhost_vdpa_iommu_region_add(MemoryListener *listener, MemoryRegionSection *section) { - struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); + VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener); struct vdpa_iommu *iommu; Int128 end; @@ -271,7 +275,7 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener, iommu_idx); iommu->iommu_offset = section->offset_within_address_space - section->offset_within_region; - iommu->dev = v; + iommu->dev_shared = s; ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL); if (ret) { @@ -279,7 +283,7 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener, return; } - QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next); + QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next); memory_region_iommu_replay(iommu->iommu_mr, &iommu->n); return; @@ -288,11 +292,11 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener, static void vhost_vdpa_iommu_region_del(MemoryListener *listener, MemoryRegionSection *section) { - struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); + VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener); struct vdpa_iommu *iommu; - QLIST_FOREACH(iommu, &v->iommu_list, iommu_next) + QLIST_FOREACH(iommu, &s->iommu_list, iommu_next) { if (MEMORY_REGION(iommu->iommu_mr) == section->mr && iommu->n.start == section->offset_within_region) { @@ -308,7 +312,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { DMAMap mem_region = {}; - struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); + VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener); hwaddr iova; Int128 llend, llsize; void *vaddr; @@ -316,8 +320,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, int page_size = qemu_target_page_size(); int page_mask = -page_size; - if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first, - v->iova_range.last, page_mask)) { + if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first, + s->iova_range.last, page_mask)) { return; } if (memory_region_is_iommu(section->mr)) { @@ -327,7 +331,7 @@ static void 
vhost_vdpa_listener_region_add(MemoryListener *listener, if (unlikely((section->offset_within_address_space & ~page_mask) != (section->offset_within_region & ~page_mask))) { - trace_vhost_vdpa_listener_region_add_unaligned(v, section->mr->name, + trace_vhost_vdpa_listener_region_add_unaligned(s, section->mr->name, section->offset_within_address_space & ~page_mask, section->offset_within_region & ~page_mask); return; @@ -347,18 +351,18 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, section->offset_within_region + (iova - section->offset_within_address_space); - trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend), + trace_vhost_vdpa_listener_region_add(s, iova, int128_get64(llend), vaddr, section->readonly); llsize = int128_sub(llend, int128_make64(iova)); - if (v->shadow_data) { + if (s->shadow_data) { int r; mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr, mem_region.size = int128_get64(llsize) - 1, mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly), - r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region); + r = vhost_iova_tree_map_alloc(s->iova_tree, &mem_region); if (unlikely(r != IOVA_OK)) { error_report("Can't allocate a mapping (%d)", r); goto fail; @@ -367,8 +371,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, iova = mem_region.iova; } - vhost_vdpa_iotlb_batch_begin_once(v); - ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, + vhost_vdpa_iotlb_batch_begin_once(s); + ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova, int128_get64(llsize), vaddr, section->readonly); if (ret) { error_report("vhost vdpa map fail!"); @@ -378,8 +382,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, return; fail_map: - if (v->shadow_data) { - vhost_iova_tree_remove(v->iova_tree, mem_region); + if (s->shadow_data) { + vhost_iova_tree_remove(s->iova_tree, mem_region); } fail: @@ -396,15 +400,15 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, static void vhost_vdpa_listener_region_del(MemoryListener *listener, MemoryRegionSection *section) { - struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); + VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener); hwaddr iova; Int128 llend, llsize; int ret; int page_size = qemu_target_page_size(); int page_mask = -page_size; - if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first, - v->iova_range.last, page_mask)) { + if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first, + s->iova_range.last, page_mask)) { return; } if (memory_region_is_iommu(section->mr)) { @@ -413,7 +417,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, if (unlikely((section->offset_within_address_space & ~page_mask) != (section->offset_within_region & ~page_mask))) { - trace_vhost_vdpa_listener_region_del_unaligned(v, section->mr->name, + trace_vhost_vdpa_listener_region_del_unaligned(s, section->mr->name, section->offset_within_address_space & ~page_mask, section->offset_within_region & ~page_mask); return; @@ -422,7 +426,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, iova = ROUND_UP(section->offset_within_address_space, page_size); llend = vhost_vdpa_section_end(section, page_mask); - trace_vhost_vdpa_listener_region_del(v, iova, + trace_vhost_vdpa_listener_region_del(s, iova, int128_get64(int128_sub(llend, int128_one()))); if (int128_ge(int128_make64(iova), llend)) { @@ -431,7 +435,7 @@ static void 
vhost_vdpa_listener_region_del(MemoryListener *listener, llsize = int128_sub(llend, int128_make64(iova)); - if (v->shadow_data) { + if (s->shadow_data) { const DMAMap *result; const void *vaddr = memory_region_get_ram_ptr(section->mr) + section->offset_within_region + @@ -441,37 +445,37 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, .size = int128_get64(llsize) - 1, }; - result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region); + result = vhost_iova_tree_find_iova(s->iova_tree, &mem_region); if (!result) { /* The memory listener map wasn't mapped */ return; } iova = result->iova; - vhost_iova_tree_remove(v->iova_tree, *result); + vhost_iova_tree_remove(s->iova_tree, *result); } - vhost_vdpa_iotlb_batch_begin_once(v); + vhost_vdpa_iotlb_batch_begin_once(s); /* * The unmap ioctl doesn't accept a full 64-bit. need to check it */ if (int128_eq(llsize, int128_2_64())) { llsize = int128_rshift(llsize, 1); - ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, + ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova, int128_get64(llsize)); if (ret) { error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " "0x%" HWADDR_PRIx ") = %d (%m)", - v, iova, int128_get64(llsize), ret); + s, iova, int128_get64(llsize), ret); } iova += int128_get64(llsize); } - ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, + ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova, int128_get64(llsize)); if (ret) { error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " "0x%" HWADDR_PRIx ") = %d (%m)", - v, iova, int128_get64(llsize), ret); + s, iova, int128_get64(llsize), ret); } memory_region_unref(section->mr); @@ -492,7 +496,7 @@ static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request, void *arg) { struct vhost_vdpa *v = dev->opaque; - int fd = v->device_fd; + int fd = v->shared->device_fd; int ret; assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); @@ -511,6 +515,10 @@ static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status) if (ret < 0) { return ret; } + if ((s & status) == status) { + /* Don't set bits already set */ + return 0; + } s |= status; @@ -552,6 +560,11 @@ static bool vhost_vdpa_first_dev(struct vhost_dev *dev) return v->index == 0; } +static bool vhost_vdpa_last_dev(struct vhost_dev *dev) +{ + return dev->vq_index + dev->nvqs == dev->vq_index_end; +} + static int vhost_vdpa_get_dev_features(struct vhost_dev *dev, uint64_t *features) { @@ -579,16 +592,14 @@ static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v) static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) { - struct vhost_vdpa *v; + struct vhost_vdpa *v = opaque; assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); - trace_vhost_vdpa_init(dev, opaque); + trace_vhost_vdpa_init(dev, v->shared, opaque); int ret; - v = opaque; v->dev = dev; dev->opaque = opaque ; - v->listener = vhost_vdpa_memory_listener; - v->msg_type = VHOST_IOTLB_MSG_V2; + v->shared->listener = vhost_vdpa_memory_listener; vhost_vdpa_init_svq(dev, v); error_propagate(&dev->migration_blocker, v->migration_blocker); @@ -651,7 +662,7 @@ static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index) struct vhost_vdpa *v = dev->opaque; VirtIODevice *vdev = dev->vdev; VhostVDPAHostNotifier *n; - int fd = v->device_fd; + int fd = v->shared->device_fd; void *addr; char *name; @@ -748,10 +759,10 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev) trace_vhost_vdpa_cleanup(dev, v); if 
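Most of the mechanical churn in vhost-vdpa.c comes from moving state that used to live in each struct vhost_vdpa into a structure shared by all vhost devices backed by the same vhost-vdpa file descriptor. The real definition lives in the vhost-vdpa header, which is not part of this hunk; the sketch below is only what can be inferred from the usages in this file, so field types, ordering and any members not referenced here are guesses:

```c
/* Rough reconstruction from usage in this file -- not the real header. */
typedef struct VhostVDPAShared {
    int device_fd;                     /* shared vhost-vdpa character device fd */
    MemoryListener listener;           /* one listener per device, not per vq group */
    struct vhost_vdpa_iova_range iova_range;
    uint64_t backend_cap;              /* mirrored from dev->backend_cap */
    bool iotlb_batch_begin_sent;
    bool shadow_data;                  /* SVQ is translating guest memory */
    VhostIOVATree *iova_tree;          /* IOVA allocations for SVQ mappings */
    QLIST_HEAD(, vdpa_iommu) iommu_list;
    /* plus at least the svq_switching state used later in this file */
} VhostVDPAShared;
```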
(vhost_vdpa_first_dev(dev)) { ram_block_discard_disable(false); + memory_listener_unregister(&v->shared->listener); } vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); - memory_listener_unregister(&v->listener); vhost_vdpa_svq_cleanup(dev); dev->opaque = NULL; @@ -828,6 +839,8 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev, static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) { + struct vhost_vdpa *v = dev->opaque; + uint64_t features; uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 | 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH | @@ -849,6 +862,7 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) } dev->backend_cap = features; + v->shared->backend_cap = features; return 0; } @@ -882,19 +896,41 @@ static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx) return idx; } -int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx) +static int vhost_vdpa_set_vring_enable_one(struct vhost_vdpa *v, unsigned idx, + int enable) { struct vhost_dev *dev = v->dev; struct vhost_vring_state state = { .index = idx, - .num = 1, + .num = enable, }; int r = vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state); - trace_vhost_vdpa_set_vring_ready(dev, idx, r); + trace_vhost_vdpa_set_vring_enable_one(dev, idx, enable, r); return r; } +static int vhost_vdpa_set_vring_enable(struct vhost_dev *dev, int enable) +{ + struct vhost_vdpa *v = dev->opaque; + unsigned int i; + int ret; + + for (i = 0; i < dev->nvqs; ++i) { + ret = vhost_vdpa_set_vring_enable_one(v, i, enable); + if (ret < 0) { + return ret; + } + } + + return 0; +} + +int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx) +{ + return vhost_vdpa_set_vring_enable_one(v, idx, 1); +} + static int vhost_vdpa_set_config_call(struct vhost_dev *dev, int fd) { @@ -961,7 +997,10 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config, static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev, struct vhost_vring_state *ring) { - trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num); + struct vhost_vdpa *v = dev->opaque; + + trace_vhost_vdpa_set_dev_vring_base(dev, ring->index, ring->num, + v->shadow_vqs_enabled); return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring); } @@ -1059,7 +1098,8 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr) const DMAMap needle = { .translated_addr = addr, }; - const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle); + const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree, + &needle); hwaddr size; int r; @@ -1069,13 +1109,14 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr) } size = ROUND_UP(result->size, qemu_real_host_page_size()); - r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size); + r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova, + size); if (unlikely(r < 0)) { error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r); return; } - vhost_iova_tree_remove(v->iova_tree, *result); + vhost_iova_tree_remove(v->shared->iova_tree, *result); } static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev, @@ -1103,19 +1144,19 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle, { int r; - r = vhost_iova_tree_map_alloc(v->iova_tree, needle); + r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle); if (unlikely(r != IOVA_OK)) { error_setg(errp, "Cannot allocate iova (%d)", r); return false; } - r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova, + r = 
vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova, needle->size + 1, (void *)(uintptr_t)needle->translated_addr, needle->perm == IOMMU_RO); if (unlikely(r != 0)) { error_setg_errno(errp, -r, "Cannot map region to device"); - vhost_iova_tree_remove(v->iova_tree, *needle); + vhost_iova_tree_remove(v->shared->iova_tree, *needle); } return r == 0; @@ -1216,7 +1257,7 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev) goto err; } - vhost_svq_start(svq, dev->vdev, vq, v->iova_tree); + vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree); ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err); if (unlikely(!ok)) { goto err_map; @@ -1279,7 +1320,7 @@ static void vhost_vdpa_suspend(struct vhost_dev *dev) if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) { trace_vhost_vdpa_suspend(dev); - r = ioctl(v->device_fd, VHOST_VDPA_SUSPEND); + r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND); if (unlikely(r)) { error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno); } else { @@ -1309,7 +1350,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); } - if (dev->vq_index + dev->nvqs != dev->vq_index_end) { + if (!vhost_vdpa_last_dev(dev)) { return 0; } @@ -1319,7 +1360,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) "IOMMU and try again"); return -1; } - memory_listener_register(&v->listener, dev->vdev->dma_as); + memory_listener_register(&v->shared->listener, dev->vdev->dma_as); return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); } @@ -1331,14 +1372,14 @@ static void vhost_vdpa_reset_status(struct vhost_dev *dev) { struct vhost_vdpa *v = dev->opaque; - if (dev->vq_index + dev->nvqs != dev->vq_index_end) { + if (!vhost_vdpa_last_dev(dev)) { return; } vhost_vdpa_reset_device(dev); vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER); - memory_listener_unregister(&v->listener); + memory_listener_unregister(&v->shared->listener); } static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base, @@ -1401,6 +1442,7 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev, if (v->shadow_vqs_enabled) { ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index); + trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, true); return 0; } @@ -1413,7 +1455,7 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev, } ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring); - trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num); + trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, false); return ret; } @@ -1441,7 +1483,15 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev *dev, /* Remember last call fd because we can switch to SVQ anytime. */ vhost_svq_set_svq_call_fd(svq, file->fd); - if (v->shadow_vqs_enabled) { + /* + * When SVQ is transitioning to off, shadow_vqs_enabled has + * not been set back to false yet, but the underlying call fd + * will have to switch back to the guest notifier to signal the + * passthrough virtqueues. In other situations, SVQ's own call + * fd shall be used to signal the device model. 
+ */ + if (v->shadow_vqs_enabled && + v->shared->svq_switching != SVQ_TSTATE_DISABLING) { return 0; } @@ -1508,6 +1558,7 @@ const VhostOps vdpa_ops = { .vhost_set_features = vhost_vdpa_set_features, .vhost_reset_device = vhost_vdpa_reset_device, .vhost_get_vq_index = vhost_vdpa_get_vq_index, + .vhost_set_vring_enable = vhost_vdpa_set_vring_enable, .vhost_get_config = vhost_vdpa_get_config, .vhost_set_config = vhost_vdpa_set_config, .vhost_requires_shm_log = NULL, diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c index aa16d584eed..3d4a5a97f48 100644 --- a/hw/virtio/vhost-vsock.c +++ b/hw/virtio/vhost-vsock.c @@ -111,7 +111,7 @@ static const VMStateDescription vmstate_virtio_vhost_vsock = { .name = "virtio-vhost_vsock", .minimum_version_id = VHOST_VSOCK_SAVEVM_VERSION, .version_id = VHOST_VSOCK_SAVEVM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, @@ -121,6 +121,7 @@ static const VMStateDescription vmstate_virtio_vhost_vsock = { static void vhost_vsock_device_realize(DeviceState *dev, Error **errp) { + ERRP_GUARD(); VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(dev); VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostVSock *vsock = VHOST_VSOCK(dev); diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 2c9ac794680..f50180e60e5 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -1984,7 +1984,13 @@ static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable) return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable); } -/* Host notifiers must be enabled at this point. */ +/* + * Host notifiers must be enabled at this point. + * + * If @vrings is true, this function will enable all vrings before starting the + * device. If it is false, the vring initialization is left to be done by the + * caller. 
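The ERRP_GUARD() added to vhost_vsock_device_realize() above (and to the vhost backend-state save/load helpers just below) follows the usual qapi/error.h convention: it is needed whenever a function wants to look at *errp after a call, or pass errp to error_prepend()/error_append_hint(), because the caller may legitimately have passed NULL, &error_fatal or &error_abort. A minimal illustration, with do_step() standing in for any call that can fail:

```c
static void foo(Error **errp)
{
    ERRP_GUARD();           /* makes *errp safe to dereference below */

    do_step(errp);          /* placeholder for a call that may set an error */
    if (*errp) {
        error_prepend(errp, "foo: ");
        return;
    }
}
```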
+ */ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) { int i, r; @@ -2199,6 +2205,7 @@ int vhost_check_device_state(struct vhost_dev *dev, Error **errp) int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp) { + ERRP_GUARD(); /* Maximum chunk size in which to transfer the state */ const size_t chunk_size = 1 * 1024 * 1024; g_autofree void *transfer_buf = NULL; @@ -2291,6 +2298,7 @@ int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp) int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp) { + ERRP_GUARD(); size_t transfer_buf_size = 0; g_autofree void *transfer_buf = NULL; g_autoptr(GError) g_err = NULL; diff --git a/hw/virtio/virtio-acpi.c b/hw/virtio/virtio-acpi.c new file mode 100644 index 00000000000..230a6695001 --- /dev/null +++ b/hw/virtio/virtio-acpi.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * virtio ACPI Support + * + */ + +#include "qemu/osdep.h" +#include "hw/virtio/virtio-acpi.h" +#include "hw/acpi/aml-build.h" + +void virtio_acpi_dsdt_add(Aml *scope, const hwaddr base, const hwaddr size, + uint32_t mmio_irq, long int start_index, int num) +{ + hwaddr virtio_base = base; + uint32_t irq = mmio_irq; + long int i; + + for (i = start_index; i < start_index + num; i++) { + Aml *dev = aml_device("VR%02u", (unsigned)i); + aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005"))); + aml_append(dev, aml_name_decl("_UID", aml_int(i))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(virtio_base, size, AML_READ_WRITE)); + aml_append(crs, + aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &irq, 1)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + virtio_base += size; + irq++; + } +} diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index d004cf29d26..609e39a821f 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -31,8 +31,6 @@ #include "trace.h" #include "qemu/error-report.h" #include "migration/misc.h" -#include "migration/migration.h" -#include "migration/options.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" @@ -633,7 +631,8 @@ static void virtio_balloon_free_page_done(VirtIOBalloon *s) } static int -virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data) +virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data, + Error **errp) { VirtIOBalloon *dev = container_of(n, VirtIOBalloon, free_page_hint_notify); VirtIODevice *vdev = VIRTIO_DEVICE(dev); @@ -817,7 +816,7 @@ static const VMStateDescription vmstate_virtio_balloon_free_page_hint = { .version_id = 1, .minimum_version_id = 1, .needed = virtio_balloon_free_page_support, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(free_page_hint_cmd_id, VirtIOBalloon), VMSTATE_UINT32(free_page_hint_status, VirtIOBalloon), VMSTATE_END_OF_LIST() @@ -829,7 +828,7 @@ static const VMStateDescription vmstate_virtio_balloon_page_poison = { .version_id = 1, .minimum_version_id = 1, .needed = virtio_balloon_page_poison_support, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(poison_val, VirtIOBalloon), VMSTATE_END_OF_LIST() } @@ -840,12 +839,12 @@ static const VMStateDescription vmstate_virtio_balloon_device = { .version_id = 1, .minimum_version_id = 1, .post_load = virtio_balloon_post_load_device, - .fields = (VMStateField[]) { + 
.fields = (const VMStateField[]) { VMSTATE_UINT32(num_pages, VirtIOBalloon), VMSTATE_UINT32(actual, VirtIOBalloon), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_balloon_free_page_hint, &vmstate_virtio_balloon_page_poison, NULL @@ -996,7 +995,7 @@ static const VMStateDescription vmstate_virtio_balloon = { .name = "virtio-balloon", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c index 0e2cc8d5a89..bbe8aa4b99c 100644 --- a/hw/virtio/virtio-crypto.c +++ b/hw/virtio/virtio-crypto.c @@ -1080,8 +1080,8 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp) vcrypto->vqs[i].dataq = virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh); vcrypto->vqs[i].dataq_bh = - qemu_bh_new_guarded(virtio_crypto_dataq_bh, &vcrypto->vqs[i], - &dev->mem_reentrancy_guard); + virtio_bh_new_guarded(dev, virtio_crypto_dataq_bh, + &vcrypto->vqs[i]); vcrypto->vqs[i].vcrypto = vcrypto; } @@ -1122,7 +1122,7 @@ static const VMStateDescription vmstate_virtio_crypto = { .unmigratable = 1, .minimum_version_id = VIRTIO_CRYPTO_VM_VERSION, .version_id = VIRTIO_CRYPTO_VM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 90a7ca2d25f..1326c6ec417 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -29,6 +29,7 @@ #include "sysemu/reset.h" #include "sysemu/sysemu.h" #include "qemu/reserved-region.h" +#include "qemu/units.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "trace.h" @@ -1115,8 +1116,8 @@ static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr, } /* - * The default mask (TARGET_PAGE_MASK) is the smallest supported guest granule, - * for example 0xfffffffffffff000. When an assigned device has page size + * The default mask depends on the "granule" property. For example, with + * 4k granule, it is -(4 * KiB). When an assigned device has page size * restrictions due to the hardware IOMMU configuration, apply this restriction * to the mask. */ @@ -1313,8 +1314,32 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp) * in vfio realize */ s->config.bypass = s->boot_bypass; - s->config.page_size_mask = qemu_target_page_mask(); - s->config.input_range.end = UINT64_MAX; + if (s->aw_bits < 32 || s->aw_bits > 64) { + error_setg(errp, "aw-bits must be within [32,64]"); + return; + } + s->config.input_range.end = + s->aw_bits == 64 ? 
UINT64_MAX : BIT_ULL(s->aw_bits) - 1; + + switch (s->granule_mode) { + case GRANULE_MODE_4K: + s->config.page_size_mask = -(4 * KiB); + break; + case GRANULE_MODE_8K: + s->config.page_size_mask = -(8 * KiB); + break; + case GRANULE_MODE_16K: + s->config.page_size_mask = -(16 * KiB); + break; + case GRANULE_MODE_64K: + s->config.page_size_mask = -(64 * KiB); + break; + case GRANULE_MODE_HOST: + s->config.page_size_mask = qemu_real_host_page_mask(); + break; + default: + error_setg(errp, "Unsupported granule mode"); + } s->config.domain_range.end = UINT32_MAX; s->config.probe_size = VIOMMU_PROBE_SIZE; @@ -1399,7 +1424,7 @@ static void virtio_iommu_instance_init(Object *obj) .name = "interval", \ .version_id = 1, \ .minimum_version_id = 1, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT64(low, VirtIOIOMMUInterval), \ VMSTATE_UINT64(high, VirtIOIOMMUInterval), \ VMSTATE_END_OF_LIST() \ @@ -1411,7 +1436,7 @@ static void virtio_iommu_instance_init(Object *obj) .name = "mapping", \ .version_id = 1, \ .minimum_version_id = 1, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT64(phys_addr, VirtIOIOMMUMapping),\ VMSTATE_UINT32(flags, VirtIOIOMMUMapping), \ VMSTATE_END_OF_LIST() \ @@ -1436,7 +1461,7 @@ static const VMStateDescription vmstate_endpoint = { .name = "endpoint", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, VirtIOIOMMUEndpoint), VMSTATE_END_OF_LIST() } @@ -1447,7 +1472,7 @@ static const VMStateDescription vmstate_domain = { .version_id = 2, .minimum_version_id = 2, .pre_load = domain_preload, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, VirtIOIOMMUDomain), VMSTATE_GTREE_V(mappings, VirtIOIOMMUDomain, 1, vmstate_interval_mapping, @@ -1499,7 +1524,7 @@ static const VMStateDescription vmstate_virtio_iommu_device = { .minimum_version_id = 2, .version_id = 2, .post_load = iommu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 2, &vmstate_domain, VirtIOIOMMUDomain), VMSTATE_UINT8_V(config.bypass, VirtIOIOMMU, 2), @@ -1512,7 +1537,7 @@ static const VMStateDescription vmstate_virtio_iommu = { .minimum_version_id = 2, .priority = MIG_PRI_IOMMU, .version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, @@ -1522,6 +1547,9 @@ static Property virtio_iommu_properties[] = { DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus, TYPE_PCI_BUS, PCIBus *), DEFINE_PROP_BOOL("boot-bypass", VirtIOIOMMU, boot_bypass, true), + DEFINE_PROP_GRANULE_MODE("granule", VirtIOIOMMU, granule_mode, + GRANULE_MODE_HOST), + DEFINE_PROP_UINT8("aw-bits", VirtIOIOMMU, aw_bits, 64), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index 75ee38aa46b..ffd119ebacb 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -605,8 +605,7 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa, int fd = memory_region_get_fd(&vmem->memdev->mr); Error *local_err = NULL; - qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err); - if (local_err) { + if (!qemu_prealloc_mem(fd, area, size, 1, NULL, false, &local_err)) { static bool warned; /* @@ -1249,8 +1248,7 @@ static int virtio_mem_prealloc_range_cb(VirtIOMEM *vmem, void *arg, int fd = memory_region_get_fd(&vmem->memdev->mr); Error *local_err = NULL; - 
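To make the two new virtio-iommu knobs concrete: granule=16k advertises page_size_mask = -(16 * KiB) = 0xffffffffffffc000 (the low 14 bits clear), and aw-bits=48 caps the input range at BIT_ULL(48) - 1 = 0x0000ffffffffffff; the defaults are granule=host and aw-bits=64, i.e. qemu_real_host_page_mask() and a full 64-bit input range. On the command line this would look something like -device virtio-iommu-pci,granule=16k,aw-bits=48, assuming the property values follow the GranuleMode enum spellings; the exact syntax here is an illustration rather than documentation.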
qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err); - if (local_err) { + if (!qemu_prealloc_mem(fd, area, size, 1, NULL, false, &local_err)) { error_report_err(local_err); return -ENOMEM; } @@ -1370,7 +1368,7 @@ static const VMStateDescription vmstate_virtio_mem_sanity_checks = { .name = "virtio-mem-device/sanity-checks", .pre_save = virtio_mem_mig_sanity_checks_pre_save, .post_load = virtio_mem_mig_sanity_checks_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(addr, VirtIOMEMMigSanityChecks), VMSTATE_UINT64(region_size, VirtIOMEMMigSanityChecks), VMSTATE_UINT64(block_size, VirtIOMEMMigSanityChecks), @@ -1393,7 +1391,7 @@ static const VMStateDescription vmstate_virtio_mem_device = { .version_id = 1, .priority = MIG_PRI_VIRTIO_MEM, .post_load = virtio_mem_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP_TEST(VirtIOMEM, virtio_mem_vmstate_field_exists, VirtIOMEMMigSanityChecks, vmstate_virtio_mem_sanity_checks), @@ -1423,7 +1421,7 @@ static const VMStateDescription vmstate_virtio_mem_device_early = { .version_id = 1, .early_setup = true, .post_load = virtio_mem_post_load_early, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP(VirtIOMEM, VirtIOMEMMigSanityChecks, vmstate_virtio_mem_sanity_checks), VMSTATE_UINT64(size, VirtIOMEM), @@ -1436,7 +1434,7 @@ static const VMStateDescription vmstate_virtio_mem = { .name = "virtio-mem", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index 22f15e1e02f..22f9fbcf5a4 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -565,7 +565,7 @@ static const VMStateDescription vmstate_virtio_mmio_queue_state = { .name = "virtio_mmio/queue_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(num, VirtIOMMIOQueue), VMSTATE_BOOL(enabled, VirtIOMMIOQueue), VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2), @@ -579,7 +579,7 @@ static const VMStateDescription vmstate_virtio_mmio_state_sub = { .name = "virtio_mmio/state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2), VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0, vmstate_virtio_mmio_queue_state, @@ -592,10 +592,10 @@ static const VMStateDescription vmstate_virtio_mmio = { .name = "virtio_mmio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_mmio_state_sub, NULL } diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index e4338795423..cb159fd0785 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -97,7 +97,7 @@ static const VMStateDescription vmstate_virtio_pci_modern_queue_state = { .name = "virtio_pci/modern_queue_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(num, VirtIOPCIQueue), VMSTATE_UNUSED(1), /* enabled was stored as be16 */ VMSTATE_BOOL(enabled, VirtIOPCIQueue), @@ -120,7 +120,7 @@ static const VMStateDescription vmstate_virtio_pci_modern_state_sub = { .version_id = 
1, .minimum_version_id = 1, .needed = &virtio_pci_modern_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(dfselect, VirtIOPCIProxy), VMSTATE_UINT32(gfselect, VirtIOPCIProxy), VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2), @@ -135,10 +135,10 @@ static const VMStateDescription vmstate_virtio_pci = { .name = "virtio_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_pci_modern_state_sub, NULL } @@ -1424,6 +1424,38 @@ static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy, return offset; } +static void virtio_pci_set_vector(VirtIODevice *vdev, + VirtIOPCIProxy *proxy, + int queue_no, uint16_t old_vector, + uint16_t new_vector) +{ + bool kvm_irqfd = (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) && + msix_enabled(&proxy->pci_dev) && kvm_msi_via_irqfd_enabled(); + + if (new_vector == old_vector) { + return; + } + + /* + * If the device uses irqfd and the vector changes after DRIVER_OK is + * set, we need to release the old vector and set up the new one. + * Otherwise just need to set the new vector on the device. + */ + if (kvm_irqfd && old_vector != VIRTIO_NO_VECTOR) { + kvm_virtio_pci_vector_release_one(proxy, queue_no); + } + /* Set the new vector on the device. */ + if (queue_no == VIRTIO_CONFIG_IRQ_IDX) { + vdev->config_vector = new_vector; + } else { + virtio_queue_set_vector(vdev, queue_no, new_vector); + } + /* If the new vector changed need to set it up. */ + if (kvm_irqfd && new_vector != VIRTIO_NO_VECTOR) { + kvm_virtio_pci_vector_use_one(proxy, queue_no); + } +} + int virtio_pci_add_shm_cap(VirtIOPCIProxy *proxy, uint8_t bar, uint64_t offset, uint64_t length, uint8_t id) @@ -1570,7 +1602,8 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr, } else { val = VIRTIO_NO_VECTOR; } - vdev->config_vector = val; + virtio_pci_set_vector(vdev, proxy, VIRTIO_CONFIG_IRQ_IDX, + vdev->config_vector, val); break; case VIRTIO_PCI_COMMON_STATUS: if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { @@ -1610,7 +1643,7 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr, } else { val = VIRTIO_NO_VECTOR; } - virtio_queue_set_vector(vdev, vdev->queue_sel, val); + virtio_pci_set_vector(vdev, proxy, vdev->queue_sel, vector, val); break; case VIRTIO_PCI_COMMON_Q_ENABLE: if (val == 1) { @@ -1929,7 +1962,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp) bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; uint8_t *config; uint32_t size; - VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtIODevice *vdev = virtio_bus_get_device(bus); /* * Virtio capabilities present without diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c index 7e12fc03bfc..f74efffef7e 100644 --- a/hw/virtio/virtio-rng.c +++ b/hw/virtio/virtio-rng.c @@ -242,7 +242,7 @@ static const VMStateDescription vmstate_virtio_rng = { .name = "virtio-rng", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 356d690cc97..871674f9bef 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -957,12 +957,20 @@ static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count) return; } + /* + * For indirect element's 'ndescs' is 1. 
+ * For all other elemment's 'ndescs' is the + * number of descriptors chained by NEXT (as set in virtqueue_packed_pop). + * So When the 'elem' be filled into the descriptor ring, + * The 'idx' of this 'elem' shall be + * the value of 'vq->used_idx' plus the 'ndescs'. + */ + ndescs += vq->used_elems[0].ndescs; for (i = 1; i < count; i++) { - virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false); + virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false); ndescs += vq->used_elems[i].ndescs; } virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true); - ndescs += vq->used_elems[0].ndescs; vq->inuse -= ndescs; vq->used_idx += ndescs; @@ -2594,7 +2602,7 @@ static const VMStateDescription vmstate_virtqueue = { .name = "virtqueue_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(vring.avail, struct VirtQueue), VMSTATE_UINT64(vring.used, struct VirtQueue), VMSTATE_END_OF_LIST() @@ -2605,7 +2613,7 @@ static const VMStateDescription vmstate_packed_virtqueue = { .name = "packed_virtqueue_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(last_avail_idx, struct VirtQueue), VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue), VMSTATE_UINT16(used_idx, struct VirtQueue), @@ -2620,7 +2628,7 @@ static const VMStateDescription vmstate_virtio_virtqueues = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_virtqueue_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue), VMSTATE_END_OF_LIST() @@ -2632,7 +2640,7 @@ static const VMStateDescription vmstate_virtio_packed_virtqueues = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_packed_virtqueue_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue), VMSTATE_END_OF_LIST() @@ -2643,7 +2651,7 @@ static const VMStateDescription vmstate_ringsize = { .name = "ringsize_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(vring.num_default, struct VirtQueue), VMSTATE_END_OF_LIST() } @@ -2654,7 +2662,7 @@ static const VMStateDescription vmstate_virtio_ringsize = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_ringsize_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue), VMSTATE_END_OF_LIST() @@ -2697,7 +2705,7 @@ static const VMStateDescription vmstate_virtio_extra_state = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_extra_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { { .name = "extra_state", .version_id = 0, @@ -2716,7 +2724,7 @@ static const VMStateDescription vmstate_virtio_device_endian = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_device_endian_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(device_endian, VirtIODevice), VMSTATE_END_OF_LIST() } @@ -2727,7 +2735,7 @@ static const VMStateDescription vmstate_virtio_64bit_features = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_64bit_features_needed, - .fields = (VMStateField[]) { + .fields 
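A concrete example of the virtqueue_packed_flush() fix above: suppose count == 3 and the pending elements consumed 2, 1 and 3 descriptors respectively. ndescs now starts at used_elems[0].ndescs == 2, so used_elems[1] is written at offset 2 from used_idx and used_elems[2] at offset 3 (2 + 1); used_elems[0] is written last at offset 0 with strict ordering so the whole batch becomes visible at once, and used_idx then advances by 6. The old code placed used_elems[1] and used_elems[2] at offsets 1 and 2, i.e. inside the two descriptor slots still owned by used_elems[0]'s chain, which is exactly what the repositioned ndescs += vq->used_elems[0].ndescs corrects.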
= (const VMStateField[]) { VMSTATE_UINT64(guest_features, VirtIODevice), VMSTATE_END_OF_LIST() } @@ -2738,7 +2746,7 @@ static const VMStateDescription vmstate_virtio_broken = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_broken_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(broken, VirtIODevice), VMSTATE_END_OF_LIST() } @@ -2749,7 +2757,7 @@ static const VMStateDescription vmstate_virtio_started = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_started_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(started, VirtIODevice), VMSTATE_END_OF_LIST() } @@ -2760,7 +2768,7 @@ static const VMStateDescription vmstate_virtio_disabled = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_disabled_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(disabled, VirtIODevice), VMSTATE_END_OF_LIST() } @@ -2770,10 +2778,10 @@ static const VMStateDescription vmstate_virtio = { .name = "virtio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_device_endian, &vmstate_virtio_64bit_features, &vmstate_virtio_virtqueues, @@ -4137,3 +4145,13 @@ static void virtio_register_types(void) } type_init(virtio_register_types) + +QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev, + QEMUBHFunc *cb, void *opaque, + const char *name) +{ + DeviceState *transport = qdev_get_parent_bus(dev)->parent; + + return qemu_bh_new_full(cb, opaque, name, + &transport->mem_reentrancy_guard); +} diff --git a/hw/watchdog/allwinner-wdt.c b/hw/watchdog/allwinner-wdt.c index 6205765efec..d35711c7c5b 100644 --- a/hw/watchdog/allwinner-wdt.c +++ b/hw/watchdog/allwinner-wdt.c @@ -313,7 +313,7 @@ static const VMStateDescription allwinner_wdt_vmstate = { .name = "allwinner-wdt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(timer, AwWdtState), VMSTATE_UINT32_ARRAY(regs, AwWdtState, AW_WDT_REGS_NUM), VMSTATE_END_OF_LIST() diff --git a/hw/watchdog/cmsdk-apb-watchdog.c b/hw/watchdog/cmsdk-apb-watchdog.c index 5a2cd46eb76..3091e5c3d54 100644 --- a/hw/watchdog/cmsdk-apb-watchdog.c +++ b/hw/watchdog/cmsdk-apb-watchdog.c @@ -361,7 +361,7 @@ static const VMStateDescription cmsdk_apb_watchdog_vmstate = { .name = "cmsdk-apb-watchdog", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(wdogclk, CMSDKAPBWatchdog), VMSTATE_PTIMER(timer, CMSDKAPBWatchdog), VMSTATE_UINT32(control, CMSDKAPBWatchdog), diff --git a/hw/watchdog/sbsa_gwdt.c b/hw/watchdog/sbsa_gwdt.c index 7aa57a8c514..96895d76369 100644 --- a/hw/watchdog/sbsa_gwdt.c +++ b/hw/watchdog/sbsa_gwdt.c @@ -28,7 +28,7 @@ static const VMStateDescription vmstate_sbsa_gwdt = { .name = "sbsa-gwdt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, SBSA_GWDTState), VMSTATE_UINT32(wcs, SBSA_GWDTState), VMSTATE_UINT32(worl, SBSA_GWDTState), diff --git a/hw/watchdog/spapr_watchdog.c b/hw/watchdog/spapr_watchdog.c index 55ff1f03c1d..2bb1d3c5325 100644 --- a/hw/watchdog/spapr_watchdog.c +++ b/hw/watchdog/spapr_watchdog.c @@ -226,7 +226,7 @@ static const VMStateDescription vmstate_wdt = { .version_id = 1, .minimum_version_id = 
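The virtio_bh_new_guarded_full() helper added at the end of virtio.c above (used earlier in this diff by virtio-crypto through virtio_bh_new_guarded()) takes the re-entrancy guard from the transport device, i.e. qdev_get_parent_bus(dev)->parent, rather than from the VirtIODevice itself. Presumably the point is that the guard consulted during MMIO/DMA dispatch is the one on the device that owns the memory regions, which for a virtio device is the proxy/transport; a device model would now create its bottom halves as something like `s->bh = virtio_bh_new_guarded(DEVICE(vdev), my_bh_cb, s);` (names hypothetical) and pick that guard up automatically.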
1, .needed = watchdog_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER(timer, SpaprWatchdog), VMSTATE_UINT8(action, SpaprWatchdog), VMSTATE_UINT8(leave_others, SpaprWatchdog), diff --git a/hw/watchdog/wdt_aspeed.c b/hw/watchdog/wdt_aspeed.c index 273a49d3601..d70b656f8e7 100644 --- a/hw/watchdog/wdt_aspeed.c +++ b/hw/watchdog/wdt_aspeed.c @@ -218,7 +218,7 @@ static const VMStateDescription vmstate_aspeed_wdt = { .name = "vmstate_aspeed_wdt", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, AspeedWDTState), VMSTATE_UINT32_ARRAY(regs, AspeedWDTState, ASPEED_WDT_REGS_MAX), VMSTATE_END_OF_LIST() diff --git a/hw/watchdog/wdt_diag288.c b/hw/watchdog/wdt_diag288.c index 76d89fbf785..1b73b16fb35 100644 --- a/hw/watchdog/wdt_diag288.c +++ b/hw/watchdog/wdt_diag288.c @@ -23,7 +23,7 @@ static const VMStateDescription vmstate_diag288 = { .name = "vmstate_diag288", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, DIAG288State), VMSTATE_BOOL(enabled, DIAG288State), VMSTATE_END_OF_LIST() diff --git a/hw/watchdog/wdt_i6300esb.c b/hw/watchdog/wdt_i6300esb.c index 54c167cd358..8bce0509cd5 100644 --- a/hw/watchdog/wdt_i6300esb.c +++ b/hw/watchdog/wdt_i6300esb.c @@ -418,7 +418,7 @@ static const VMStateDescription vmstate_i6300esb = { */ .version_id = 10000, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, I6300State), VMSTATE_INT32(reboot_enabled, I6300State), VMSTATE_INT32(clock_scale, I6300State), diff --git a/hw/watchdog/wdt_ib700.c b/hw/watchdog/wdt_ib700.c index a1750a4957f..eea8da60596 100644 --- a/hw/watchdog/wdt_ib700.c +++ b/hw/watchdog/wdt_ib700.c @@ -95,7 +95,7 @@ static const VMStateDescription vmstate_ib700 = { .name = "ib700_wdt", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, IB700State), VMSTATE_END_OF_LIST() } diff --git a/hw/watchdog/wdt_imx2.c b/hw/watchdog/wdt_imx2.c index 891d7beb2a1..6452fc4721d 100644 --- a/hw/watchdog/wdt_imx2.c +++ b/hw/watchdog/wdt_imx2.c @@ -234,7 +234,7 @@ static const MemoryRegionOps imx2_wdt_ops = { static const VMStateDescription vmstate_imx2_wdt = { .name = "imx2.wdt", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(timer, IMX2WdtState), VMSTATE_PTIMER(itimer, IMX2WdtState), VMSTATE_BOOL(wicr_locked, IMX2WdtState), diff --git a/hw/xen/trace-events b/hw/xen/trace-events index 67a6c419260..d1b27f6c11b 100644 --- a/hw/xen/trace-events +++ b/hw/xen/trace-events @@ -42,7 +42,7 @@ xs_node_vscanf(char *path, char *value) "%s %s" xs_node_watch(char *path) "%s" xs_node_unwatch(char *path) "%s" -# xen-hvm.c +# xen-hvm-common.c xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: 0x%lx, size 0x%lx" xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "0x%"PRIx64" size 0x%lx, log_dirty %i" handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" @@ -55,8 +55,27 @@ cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint6 xen_map_resource_ioreq(uint32_t id, void *addr) "id: %u addr: %p" cpu_ioreq_config_read(void *req, uint32_t sbdf, 
uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x" cpu_ioreq_config_write(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x" +cpu_get_ioreq_from_shared_memory_req_not_ready(int state, int data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O request not ready: 0x%x, ptr: 0x%x, port: 0x%"PRIx64", data: 0x%"PRIx64", count: %u, size: %u" +xen_main_loop_prepare_init_cpu(int id, void *cpu) "cpu_by_vcpu_id[%d]=%p" +xen_map_ioreq_server_shared_page(long unsigned int ioreq_pfn) "shared page at pfn 0x%lx" +xen_map_ioreq_server_buffered_io_page(long unsigned int ioreq_pfn) "buffered io page at pfn 0x%lx" +xen_map_ioreq_server_buffered_io_evtchn(int bufioreq_evtchn) "buffered io evtchn is 0x%x" +destroy_hvm_domain_cannot_acquire_handle(void) "Cannot acquire xenctrl handle" +destroy_hvm_domain_failed_action(const char *action, int sts, char *errno_s) "xc_domain_shutdown failed to issue %s, sts %d, %s" +destroy_hvm_domain_action(int xen_domid, const char *action) "Issued domain %d %s" # xen-mapcache.c xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64 xen_remap_bucket(uint64_t index) "index 0x%"PRIx64 xen_map_cache_return(void* ptr) "%p" +xen_map_cache_init(uint64_t nr_buckets, uint64_t size) "nr_buckets = 0x%"PRIx64" size 0x%"PRIx64 +xen_replace_cache_entry_dummy(uint64_t old_phys_addr, uint64_t new_phys_addr) "Replacing a dummy mapcache entry for 0x%"PRIx64" with 0x%"PRIx64 +xen_invalidate_map_cache_entry_unlocked_not_found(void *p) "could not find %p" +xen_invalidate_map_cache_entry_unlocked_found(uint64_t addr, void *p) " 0x%"PRIx64" -> %p is present" +xen_invalidate_map_cache_entry_unlocked_miss(void *buffer) "Trying to unmap address %p that is not in the mapcache" +xen_replace_cache_entry_unlocked_could_not_update_entry(uint64_t old_phys_addr) "Unable to update a mapcache entry for 0x%"PRIx64 +xen_ram_addr_from_mapcache_not_found(void *p) "could not find %p" +xen_ram_addr_from_mapcache_found(uint64_t addr, void *p) " 0x%"PRIx64" -> %p is present" +xen_ram_addr_from_mapcache_not_in_cache(void *p) "Trying to find address %p that is not in the mapcache" +xen_replace_cache_entry_unlocked(uint64_t old_phys_addr) "Trying to update an entry for 0x%"PRIx64" that is not in the mapcache" +xen_invalidate_map_cache(uint64_t paddr_index, void *vaddr_req) "Locked DMA mapping while invalidating mapcache 0x%"PRIx64" -> %p is present" diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c index 4973e7d9c95..fb82cc33e48 100644 --- a/hw/xen/xen-bus.c +++ b/hw/xen/xen-bus.c @@ -19,6 +19,7 @@ #include "qapi/error.h" #include "qapi/qmp/qdict.h" #include "sysemu/sysemu.h" +#include "net/net.h" #include "trace.h" static char *xen_device_get_backend_path(XenDevice *xendev) @@ -1133,7 +1134,7 @@ static void xen_register_types(void) type_init(xen_register_types) -BusState *xen_bus_init(void) +void xen_bus_init(void) { DeviceState *dev = qdev_new(TYPE_XEN_BRIDGE); BusState *bus = qbus_new(TYPE_XEN_BUS, dev, NULL); @@ -1141,5 +1142,6 @@ BusState *xen_bus_init(void) sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); qbus_set_bus_hotplug_handler(bus); - return bus; + qemu_create_nic_bus_devices(bus, TYPE_XEN_DEVICE, "xen-net-device", + "xen", "xen-net-device"); } diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c index 565dc39c8f6..1627da73982 100644 --- a/hw/xen/xen-hvm-common.c +++ b/hw/xen/xen-hvm-common.c @@ -1,6 +1,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include 
"qapi/error.h" +#include "exec/target_page.h" #include "trace.h" #include "hw/pci/pci_host.h" @@ -9,34 +10,35 @@ #include "hw/boards.h" #include "hw/xen/arch_hvm.h" -MemoryRegion ram_memory; +MemoryRegion xen_memory; void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr, Error **errp) { + unsigned target_page_bits = qemu_target_page_bits(); unsigned long nr_pfn; xen_pfn_t *pfn_list; int i; if (runstate_check(RUN_STATE_INMIGRATE)) { /* RAM already populated in Xen */ - fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT - " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n", + warn_report("%s: do not alloc "RAM_ADDR_FMT + " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE", __func__, size, ram_addr); return; } - if (mr == &ram_memory) { + if (mr == &xen_memory) { return; } trace_xen_ram_alloc(ram_addr, size); - nr_pfn = size >> TARGET_PAGE_BITS; + nr_pfn = size >> target_page_bits; pfn_list = g_new(xen_pfn_t, nr_pfn); for (i = 0; i < nr_pfn; i++) { - pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i; + pfn_list[i] = (ram_addr >> target_page_bits) + i; } if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) { @@ -53,7 +55,7 @@ static void xen_set_memory(struct MemoryListener *listener, { XenIOState *state = container_of(listener, XenIOState, memory_listener); - if (section->mr == &ram_memory) { + if (section->mr == &xen_memory) { return; } else { if (add) { @@ -169,11 +171,12 @@ static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu) ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu); if (req->state != STATE_IOREQ_READY) { - DPRINTF("I/O request not ready: " - "%x, ptr: %x, port: %"PRIx64", " - "data: %"PRIx64", count: %u, size: %u\n", - req->state, req->data_is_ptr, req->addr, - req->data, req->count, req->size); + trace_cpu_get_ioreq_from_shared_memory_req_not_ready(req->state, + req->data_is_ptr, + req->addr, + req->data, + req->count, + req->size); return NULL; } @@ -551,9 +554,9 @@ static void cpu_handle_ioreq(void *opaque) req->data = copy.data; if (req->state != STATE_IOREQ_INPROCESS) { - fprintf(stderr, "Badness in I/O request ... not in service?!: " + warn_report("Badness in I/O request ... 
not in service?!: " "%x, ptr: %x, port: %"PRIx64", " - "data: %"PRIx64", count: %u, size: %u, type: %u\n", + "data: %"PRIx64", count: %u, size: %u, type: %u", req->state, req->data_is_ptr, req->addr, req->data, req->count, req->size, req->type); destroy_hvm_domain(false); @@ -601,10 +604,9 @@ static void xen_main_loop_prepare(XenIOState *state) if (evtchn_fd != -1) { CPUState *cpu_state; - DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__); CPU_FOREACH(cpu_state) { - DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n", - __func__, cpu_state->cpu_index, cpu_state); + trace_xen_main_loop_prepare_init_cpu(cpu_state->cpu_index, + cpu_state); state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state; } qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state); @@ -623,7 +625,7 @@ void xen_hvm_change_state_handler(void *opaque, bool running, xen_set_ioreq_server_state(xen_domid, state->ioservid, - (rstate == RUN_STATE_RUNNING)); + running); } void xen_exit_notifier(Notifier *n, void *data) @@ -681,7 +683,7 @@ static int xen_map_ioreq_server(XenIOState *state) } if (state->shared_page == NULL) { - DPRINTF("shared page at pfn %lx\n", ioreq_pfn); + trace_xen_map_ioreq_server_shared_page(ioreq_pfn); state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ | PROT_WRITE, @@ -693,7 +695,7 @@ static int xen_map_ioreq_server(XenIOState *state) } if (state->buffered_io_page == NULL) { - DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn); + trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn); state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ | PROT_WRITE, @@ -709,7 +711,7 @@ static int xen_map_ioreq_server(XenIOState *state) return -1; } - DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn); + trace_xen_map_ioreq_server_buffered_io_evtchn(bufioreq_evtchn); state->bufioreq_remote_port = bufioreq_evtchn; @@ -737,16 +739,17 @@ void destroy_hvm_domain(bool reboot) xc_handle = xc_interface_open(0, 0, 0); if (xc_handle == NULL) { - fprintf(stderr, "Cannot acquire xenctrl handle\n"); + trace_destroy_hvm_domain_cannot_acquire_handle(); } else { sts = xc_domain_shutdown(xc_handle, xen_domid, reason); if (sts != 0) { - fprintf(stderr, "xc_domain_shutdown failed to issue %s, " - "sts %d, %s\n", reboot ? "reboot" : "poweroff", - sts, strerror(errno)); + trace_destroy_hvm_domain_failed_action( + reboot ? "reboot" : "poweroff", sts, strerror(errno) + ); } else { - fprintf(stderr, "Issued domain %d %s\n", xen_domid, - reboot ? "reboot" : "poweroff"); + trace_destroy_hvm_domain_action( + xen_domid, reboot ? "reboot" : "poweroff" + ); } xc_interface_close(xc_handle); } @@ -757,9 +760,9 @@ void xen_shutdown_fatal_error(const char *fmt, ...) va_list ap; va_start(ap, fmt); - vfprintf(stderr, fmt, ap); + error_vreport(fmt, ap); va_end(ap); - fprintf(stderr, "Will destroy the domain.\n"); + error_report("Will destroy the domain."); /* destroy the domain */ qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR); } diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c index f7d974677d1..7f59080ba77 100644 --- a/hw/xen/xen-mapcache.c +++ b/hw/xen/xen-mapcache.c @@ -22,16 +22,6 @@ #include "trace.h" -//#define MAPCACHE_DEBUG - -#ifdef MAPCACHE_DEBUG -# define DPRINTF(fmt, ...) do { \ - fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \ -} while (0) -#else -# define DPRINTF(fmt, ...) 
do { } while (0) -#endif - #if HOST_LONG_BITS == 32 # define MCACHE_BUCKET_SHIFT 16 # define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */ @@ -145,8 +135,7 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque) size = mapcache->nr_buckets * sizeof (MapCacheEntry); size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1); - DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__, - mapcache->nr_buckets, size); + trace_xen_map_cache_init(mapcache->nr_buckets, size); mapcache->entry = g_malloc0(size); } @@ -286,7 +275,9 @@ static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size, test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, mapcache->last_entry->valid_mapping)) { - trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); + trace_xen_map_cache_return( + mapcache->last_entry->vaddr_base + address_offset + ); return mapcache->last_entry->vaddr_base + address_offset; } @@ -356,9 +347,8 @@ static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size, MapCacheRev *reventry = g_new0(MapCacheRev, 1); entry->lock++; if (entry->lock == 0) { - fprintf(stderr, - "mapcache entry lock overflow: "HWADDR_FMT_plx" -> %p\n", - entry->paddr_index, entry->vaddr_base); + error_report("mapcache entry lock overflow: "HWADDR_FMT_plx" -> %p", + entry->paddr_index, entry->vaddr_base); abort(); } reventry->dma = dma; @@ -368,7 +358,9 @@ static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size, QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next); } - trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); + trace_xen_map_cache_return( + mapcache->last_entry->vaddr_base + address_offset + ); return mapcache->last_entry->vaddr_base + address_offset; } @@ -402,10 +394,10 @@ ram_addr_t xen_ram_addr_from_mapcache(void *ptr) } } if (!found) { - fprintf(stderr, "%s, could not find %p\n", __func__, ptr); + trace_xen_ram_addr_from_mapcache_not_found(ptr); QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { - DPRINTF(" "HWADDR_FMT_plx" -> %p is present\n", reventry->paddr_index, - reventry->vaddr_req); + trace_xen_ram_addr_from_mapcache_found(reventry->paddr_index, + reventry->vaddr_req); } abort(); return 0; @@ -416,7 +408,7 @@ ram_addr_t xen_ram_addr_from_mapcache(void *ptr) entry = entry->next; } if (!entry) { - DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr); + trace_xen_ram_addr_from_mapcache_not_in_cache(ptr); raddr = 0; } else { raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) + @@ -443,9 +435,12 @@ static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer) } } if (!found) { - DPRINTF("%s, could not find %p\n", __func__, buffer); + trace_xen_invalidate_map_cache_entry_unlocked_not_found(buffer); QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { - DPRINTF(" "HWADDR_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req); + trace_xen_invalidate_map_cache_entry_unlocked_found( + reventry->paddr_index, + reventry->vaddr_req + ); } return; } @@ -463,7 +458,7 @@ static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer) entry = entry->next; } if (!entry) { - DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer); + trace_xen_invalidate_map_cache_entry_unlocked_miss(buffer); return; } entry->lock--; @@ -481,11 +476,37 @@ static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer) g_free(entry); } -void xen_invalidate_map_cache_entry(uint8_t *buffer) +typedef struct XenMapCacheData { + Coroutine *co; 
+ uint8_t *buffer; +} XenMapCacheData; + +static void xen_invalidate_map_cache_entry_bh(void *opaque) { + XenMapCacheData *data = opaque; + mapcache_lock(); - xen_invalidate_map_cache_entry_unlocked(buffer); + xen_invalidate_map_cache_entry_unlocked(data->buffer); mapcache_unlock(); + + aio_co_wake(data->co); +} + +void coroutine_mixed_fn xen_invalidate_map_cache_entry(uint8_t *buffer) +{ + if (qemu_in_coroutine()) { + XenMapCacheData data = { + .co = qemu_coroutine_self(), + .buffer = buffer, + }; + aio_bh_schedule_oneshot(qemu_get_current_aio_context(), + xen_invalidate_map_cache_entry_bh, &data); + qemu_coroutine_yield(); + } else { + mapcache_lock(); + xen_invalidate_map_cache_entry_unlocked(buffer); + mapcache_unlock(); + } } void xen_invalidate_map_cache(void) @@ -502,9 +523,8 @@ void xen_invalidate_map_cache(void) if (!reventry->dma) { continue; } - fprintf(stderr, "Locked DMA mapping while invalidating mapcache!" - " "HWADDR_FMT_plx" -> %p is present\n", - reventry->paddr_index, reventry->vaddr_req); + trace_xen_invalidate_map_cache(reventry->paddr_index, + reventry->vaddr_req); } for (i = 0; i < mapcache->nr_buckets; i++) { @@ -562,24 +582,23 @@ static uint8_t *xen_replace_cache_entry_unlocked(hwaddr old_phys_addr, entry = entry->next; } if (!entry) { - DPRINTF("Trying to update an entry for "HWADDR_FMT_plx \ - "that is not in the mapcache!\n", old_phys_addr); + trace_xen_replace_cache_entry_unlocked(old_phys_addr); return NULL; } address_index = new_phys_addr >> MCACHE_BUCKET_SHIFT; address_offset = new_phys_addr & (MCACHE_BUCKET_SIZE - 1); - fprintf(stderr, "Replacing a dummy mapcache entry for "HWADDR_FMT_plx \ - " with "HWADDR_FMT_plx"\n", old_phys_addr, new_phys_addr); + trace_xen_replace_cache_entry_dummy(old_phys_addr, new_phys_addr); xen_remap_bucket(entry, entry->vaddr_base, cache_size, address_index, false); if (!test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, entry->valid_mapping)) { - DPRINTF("Unable to update a mapcache entry for "HWADDR_FMT_plx"!\n", - old_phys_addr); + trace_xen_replace_cache_entry_unlocked_could_not_update_entry( + old_phys_addr + ); return NULL; } diff --git a/hw/xen/xen_devconfig.c b/hw/xen/xen_devconfig.c index 3f77c675c64..2150869f60c 100644 --- a/hw/xen/xen_devconfig.c +++ b/hw/xen/xen_devconfig.c @@ -46,31 +46,6 @@ static int xen_config_dev_all(char *fe, char *be) /* ------------------------------------------------------------- */ -int xen_config_dev_nic(NICInfo *nic) -{ - char fe[256], be[256]; - char mac[20]; - int vlan_id = -1; - - net_hub_id_for_client(nic->netdev, &vlan_id); - snprintf(mac, sizeof(mac), "%02x:%02x:%02x:%02x:%02x:%02x", - nic->macaddr.a[0], nic->macaddr.a[1], nic->macaddr.a[2], - nic->macaddr.a[3], nic->macaddr.a[4], nic->macaddr.a[5]); - xen_pv_printf(NULL, 1, "config nic %d: mac=\"%s\"\n", vlan_id, mac); - xen_config_dev_dirs("vif", "qnic", vlan_id, fe, be, sizeof(fe)); - - /* frontend */ - xenstore_write_int(fe, "handle", vlan_id); - xenstore_write_str(fe, "mac", mac); - - /* backend */ - xenstore_write_int(be, "handle", vlan_id); - xenstore_write_str(be, "mac", mac); - - /* common stuff */ - return xen_config_dev_all(fe, be); -} - int xen_config_dev_vfb(int vdev, const char *type) { char fe[256], be[256]; diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index 36e6f93c372..3635d1b39f7 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -59,7 +59,8 @@ #include "hw/pci/pci.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" -#include "xen_pt.h" +#include "hw/xen/xen_pt.h" 
+#include "hw/xen/xen_igd.h" #include "hw/xen/xen.h" #include "hw/xen/xen-legacy-backend.h" #include "qemu/range.h" @@ -710,7 +711,7 @@ static void xen_pt_destroy(PCIDevice *d) { uint8_t intx; int rc; - if (machine_irq && !xen_host_pci_device_closed(&s->real_device)) { + if (machine_irq && !xen_host_pci_device_closed(host_dev)) { intx = xen_pt_pci_intx(s); rc = xc_domain_unbind_pt_irq(xen_xc, xen_domid, machine_irq, PT_IRQ_TYPE_PCI, @@ -759,8 +760,8 @@ static void xen_pt_destroy(PCIDevice *d) { memory_listener_unregister(&s->io_listener); s->listener_set = false; } - if (!xen_host_pci_device_closed(&s->real_device)) { - xen_host_pci_device_put(&s->real_device); + if (!xen_host_pci_device_closed(host_dev)) { + xen_host_pci_device_put(host_dev); } } /* init */ diff --git a/hw/xen/xen_pt.h b/hw/xen/xen_pt.h index 31bcfdf7056..095a0f0365d 100644 --- a/hw/xen/xen_pt.h +++ b/hw/xen/xen_pt.h @@ -1,3 +1,13 @@ +/* + * Copyright (c) 2007, Neocleus Corporation. + * Copyright (c) 2007, Intel Corporation. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * Alex Novik + * Allen Kay + * Guy Zana + */ #ifndef XEN_PT_H #define XEN_PT_H @@ -5,9 +15,6 @@ #include "xen-host-pci-device.h" #include "qom/object.h" -bool xen_igd_gfx_pt_enabled(void); -void xen_igd_gfx_pt_set(bool value, Error **errp); - void xen_pt_log(const PCIDevice *d, const char *f, ...) G_GNUC_PRINTF(2, 3); #define XEN_PT_ERR(d, _f, _a...) xen_pt_log(d, "%s: Error: "_f, __func__, ##_a) @@ -52,12 +59,6 @@ typedef struct XenPTDeviceClass { XenPTQdevRealize pci_qdev_realize; } XenPTDeviceClass; -uint32_t igd_read_opregion(XenPCIPassthroughState *s); -void xen_igd_reserve_slot(PCIBus *pci_bus); -void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val); -void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s, - XenHostPCIDevice *dev); - /* function type for config reg */ typedef int (*xen_pt_conf_reg_init) (XenPCIPassthroughState *, XenPTRegInfo *, uint32_t real_offset, @@ -343,11 +344,6 @@ static inline bool xen_pt_has_msix_mapping(XenPCIPassthroughState *s, int bar) void *pci_assign_dev_load_option_rom(PCIDevice *dev, int *size, unsigned int domain, unsigned int bus, unsigned int slot, unsigned int function); -static inline bool is_igd_vga_passthrough(XenHostPCIDevice *dev) -{ - return (xen_igd_gfx_pt_enabled() - && ((dev->class_code >> 0x8) == PCI_CLASS_DISPLAY_VGA)); -} int xen_pt_register_vga_regions(XenHostPCIDevice *dev); int xen_pt_unregister_vga_regions(XenHostPCIDevice *dev); void xen_pt_setup_vga(XenPCIPassthroughState *s, XenHostPCIDevice *dev, diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c index 2b8680b112f..3edaeab1e34 100644 --- a/hw/xen/xen_pt_config_init.c +++ b/hw/xen/xen_pt_config_init.c @@ -15,7 +15,8 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/timer.h" -#include "xen_pt.h" +#include "hw/xen/xen_pt.h" +#include "hw/xen/xen_igd.h" #include "hw/xen/xen-legacy-backend.h" #define XEN_PT_MERGE_VALUE(value, data, val_mask) \ @@ -291,7 +292,10 @@ static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s, uint32_t *data) { /* read PCI_HEADER_TYPE */ - *data = reg->init_val | 0x80; + *data = reg->init_val; + if ((PCI_DEVICE(s)->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) { + *data |= PCI_HEADER_TYPE_MULTI_FUNCTION; + } return 0; } @@ -676,7 +680,7 @@ static XenPTRegInfo xen_pt_emu_reg_header0[] = { .size = 1, .init_val = 0x00, .ro_mask = 0xFF, - .emu_mask = 0x00, + .emu_mask = PCI_HEADER_TYPE_MULTI_FUNCTION, .init = xen_pt_header_type_reg_init, .u.b.read = 
xen_pt_byte_reg_read, .u.b.write = xen_pt_byte_reg_write, diff --git a/hw/xen/xen_pt_graphics.c b/hw/xen/xen_pt_graphics.c index 0aed3bb6fd6..6c2e3f4840f 100644 --- a/hw/xen/xen_pt_graphics.c +++ b/hw/xen/xen_pt_graphics.c @@ -3,7 +3,8 @@ */ #include "qemu/osdep.h" #include "qapi/error.h" -#include "xen_pt.h" +#include "hw/xen/xen_pt.h" +#include "hw/xen/xen_igd.h" #include "xen-host-pci-device.h" static unsigned long igd_guest_opregion; diff --git a/hw/xen/xen_pt_stub.c b/hw/xen/xen_pt_stub.c index 5c108446a86..72feebeb207 100644 --- a/hw/xen/xen_pt_stub.c +++ b/hw/xen/xen_pt_stub.c @@ -6,7 +6,7 @@ */ #include "qemu/osdep.h" -#include "hw/xen/xen_pt.h" +#include "hw/xen/xen_igd.h" #include "qapi/error.h" bool xen_igd_gfx_pt_enabled(void) diff --git a/hw/xenpv/xen_machine_pv.c b/hw/xenpv/xen_machine_pv.c index 9f9f137f99d..1130d1a1479 100644 --- a/hw/xenpv/xen_machine_pv.c +++ b/hw/xenpv/xen_machine_pv.c @@ -32,8 +32,6 @@ static void xen_init_pv(MachineState *machine) { - int i; - setup_xen_backend_ops(); /* Initialize backend core & drivers */ @@ -62,13 +60,6 @@ static void xen_init_pv(MachineState *machine) vga_interface_created = true; } - /* configure nics */ - for (i = 0; i < nb_nics; i++) { - if (!nd_table[i].model || 0 != strcmp(nd_table[i].model, "xen")) - continue; - xen_config_dev_nic(nd_table + i); - } - xen_bus_init(); /* config cleanup hook */ diff --git a/hw/xtensa/virt.c b/hw/xtensa/virt.c index a6cf646e997..5310a888613 100644 --- a/hw/xtensa/virt.c +++ b/hw/xtensa/virt.c @@ -102,9 +102,7 @@ static void create_pcie(MachineState *ms, CPUXtensaState *env, int irq_base, pci = PCI_HOST_BRIDGE(dev); if (pci->bus) { - for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci->bus, mc->default_nic, NULL); - } + pci_init_nic_devices(pci->bus, mc->default_nic); } } diff --git a/hw/xtensa/xtfpga.c b/hw/xtensa/xtfpga.c index fbad1c83a3f..f49e6591dc2 100644 --- a/hw/xtensa/xtfpga.c +++ b/hw/xtensa/xtfpga.c @@ -141,14 +141,16 @@ static void xtfpga_net_init(MemoryRegion *address_space, hwaddr base, hwaddr descriptors, hwaddr buffers, - qemu_irq irq, NICInfo *nd) + qemu_irq irq) { DeviceState *dev; SysBusDevice *s; MemoryRegion *ram; - dev = qdev_new("open_eth"); - qdev_set_nic_properties(dev, nd); + dev = qemu_create_nic_device("open_eth", true, NULL); + if (!dev) { + return; + } s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); @@ -301,10 +303,7 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine) memory_region_add_subregion(system_memory, board->io[1], io); } xtfpga_fpga_init(system_io, 0x0d020000, freq); - if (nd_table[0].used) { - xtfpga_net_init(system_io, 0x0d030000, 0x0d030400, 0x0d800000, - extints[1], nd_table); - } + xtfpga_net_init(system_io, 0x0d030000, 0x0d030400, 0x0d800000, extints[1]); serial_mm_init(system_io, 0x0d050020, 2, extints[0], 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN); diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h index 5449b6d7428..cf5e8bde1ca 100644 --- a/include/block/aio-wait.h +++ b/include/block/aio-wait.h @@ -63,9 +63,6 @@ extern AioWait global_aio_wait; * @ctx: the aio context, or NULL if multiple aio contexts (for which the * caller does not hold a lock) are involved in the polling condition. * @cond: wait while this conditional expression is true - * @unlock: whether to unlock and then lock again @ctx. This applies - * only when waiting for another AioContext from the main loop. - * Otherwise it's ignored. * * Wait while a condition is true. 
Use this to implement synchronous * operations that require event loop activity. @@ -78,7 +75,7 @@ extern AioWait global_aio_wait; * wait on conditions between two IOThreads since that could lead to deadlock, * go via the main loop instead. */ -#define AIO_WAIT_WHILE_INTERNAL(ctx, cond, unlock) ({ \ +#define AIO_WAIT_WHILE_INTERNAL(ctx, cond) ({ \ bool waited_ = false; \ AioWait *wait_ = &global_aio_wait; \ AioContext *ctx_ = (ctx); \ @@ -95,13 +92,7 @@ extern AioWait global_aio_wait; assert(qemu_get_current_aio_context() == \ qemu_get_aio_context()); \ while ((cond)) { \ - if (unlock && ctx_) { \ - aio_context_release(ctx_); \ - } \ aio_poll(qemu_get_aio_context(), true); \ - if (unlock && ctx_) { \ - aio_context_acquire(ctx_); \ - } \ waited_ = true; \ } \ } \ @@ -109,10 +100,11 @@ extern AioWait global_aio_wait; waited_; }) #define AIO_WAIT_WHILE(ctx, cond) \ - AIO_WAIT_WHILE_INTERNAL(ctx, cond, true) + AIO_WAIT_WHILE_INTERNAL(ctx, cond) +/* TODO replace this with AIO_WAIT_WHILE() in a future patch */ #define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \ - AIO_WAIT_WHILE_INTERNAL(ctx, cond, false) + AIO_WAIT_WHILE_INTERNAL(ctx, cond) /** * aio_wait_kick: @@ -151,7 +143,7 @@ static inline bool in_aio_context_home_thread(AioContext *ctx) } if (ctx == qemu_get_aio_context()) { - return qemu_mutex_iothread_locked(); + return bql_locked(); } else { return false; } diff --git a/include/block/aio.h b/include/block/aio.h index 795a375ff24..8378553eb9d 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -55,7 +55,7 @@ typedef void IOHandler(void *opaque); struct ThreadPool; struct LinuxAioState; -struct LuringState; +typedef struct LuringState LuringState; /* Is polling disabled? */ bool aio_poll_disabled(AioContext *ctx); @@ -212,7 +212,7 @@ struct AioContext { struct LinuxAioState *linux_aio; #endif #ifdef CONFIG_LINUX_IO_URING - struct LuringState *linux_io_uring; + LuringState *linux_io_uring; /* State for file descriptor monitoring using Linux io_uring */ struct io_uring fdmon_io_uring; @@ -278,23 +278,6 @@ void aio_context_ref(AioContext *ctx); */ void aio_context_unref(AioContext *ctx); -/* Take ownership of the AioContext. If the AioContext will be shared between - * threads, and a thread does not want to be interrupted, it will have to - * take ownership around calls to aio_poll(). Otherwise, aio_poll() - * automatically takes care of calling aio_context_acquire and - * aio_context_release. - * - * Note that this is separate from bdrv_drained_begin/bdrv_drained_end. A - * thread still has to call those to avoid being interrupted by the guest. - * - * Bottom halves, timers and callbacks can be created or removed without - * acquiring the AioContext. - */ -void aio_context_acquire(AioContext *ctx); - -/* Relinquish ownership of the AioContext. */ -void aio_context_release(AioContext *ctx); - /** * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will * run only once and as soon as possible. 
@@ -526,10 +509,10 @@ struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp); struct LinuxAioState *aio_get_linux_aio(AioContext *ctx); /* Setup the LuringState bound to this AioContext */ -struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp); +LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp); /* Return the LuringState bound to this AioContext */ -struct LuringState *aio_get_linux_io_uring(AioContext *ctx); +LuringState *aio_get_linux_io_uring(AioContext *ctx); /** * aio_timer_new_with_attrs: * @ctx: the aio context @@ -721,8 +704,7 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns, * @max_batch: maximum number of requests in a batch, 0 means that the * engine will use its default */ -void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch, - Error **errp); +void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch); /** * aio_context_set_thread_pool_params: diff --git a/include/block/block-common.h b/include/block/block-common.h index d7599564db7..a846023a098 100644 --- a/include/block/block-common.h +++ b/include/block/block-common.h @@ -70,9 +70,6 @@ * automatically takes the graph rdlock when calling the wrapped function. In * the same way, no_co_wrapper_bdrv_wrlock functions automatically take the * graph wrlock. - * - * If the first parameter of the function is a BlockDriverState, BdrvChild or - * BlockBackend pointer, the AioContext lock for it is taken in the wrapper. */ #define no_co_wrapper #define no_co_wrapper_bdrv_rdlock diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h index 6b21fbc73f7..bd7cecd1cf4 100644 --- a/include/block/block-global-state.h +++ b/include/block/block-global-state.h @@ -31,11 +31,10 @@ /* * Global state (GS) API. These functions run under the BQL. * - * If a function modifies the graph, it also uses drain and/or - * aio_context_acquire/release to be sure it has unique access. - * aio_context locking is needed together with BQL because of - * the thread-safe I/O API that concurrently runs and accesses - * the graph without the BQL. + * If a function modifies the graph, it also uses the graph lock to be sure it + * has unique access. The graph lock is needed together with BQL because of the + * thread-safe I/O API that concurrently runs and accesses the graph without + * the BQL. * * It is important to note that not all of these functions are * necessarily limited to running under the BQL, but they would @@ -145,7 +144,8 @@ int GRAPH_RDLOCK bdrv_make_empty(BdrvChild *c, Error **errp); void bdrv_register(BlockDriver *bdrv); int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, - const char *backing_file_str); + const char *backing_file_str, + bool backing_mask_protocol); BlockDriverState * GRAPH_RDLOCK bdrv_find_overlay(BlockDriverState *active, BlockDriverState *bs); @@ -268,20 +268,6 @@ int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag); int bdrv_debug_resume(BlockDriverState *bs, const char *tag); bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag); -/** - * Locks the AioContext of @bs if it's not the current AioContext. This avoids - * double locking which could lead to deadlocks: This is a coroutine_fn, so we - * know we already own the lock of the current AioContext. - * - * May only be called in the main thread. - */ -void coroutine_fn bdrv_co_lock(BlockDriverState *bs); - -/** - * Unlocks the AioContext of @bs if it's not the current AioContext. 
- */ -void coroutine_fn bdrv_co_unlock(BlockDriverState *bs); - bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, GHashTable *visited, Transaction *tran, Error **errp); diff --git a/include/block/block-io.h b/include/block/block-io.h index f8729ccc55c..b49e0537dd4 100644 --- a/include/block/block-io.h +++ b/include/block/block-io.h @@ -31,8 +31,7 @@ /* * I/O API functions. These functions are thread-safe, and therefore - * can run in any thread as long as the thread has called - * aio_context_acquire/release(). + * can run in any thread. * * These functions can only call functions from I/O and Common categories, * but can be invoked by GS, "I/O or GS" and I/O APIs. @@ -333,11 +332,10 @@ bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, * "I/O or GS" API functions. These functions can run without * the BQL, but only in one specific iothread/main loop. * - * More specifically, these functions use BDRV_POLL_WHILE(bs), which - * requires the caller to be either in the main thread and hold - * the BlockdriverState (bs) AioContext lock, or directly in the - * home thread that runs the bs AioContext. Calling them from - * another thread in another AioContext would cause deadlocks. + * More specifically, these functions use BDRV_POLL_WHILE(bs), which requires + * the caller to be either in the main thread or directly in the home thread + * that runs the bs AioContext. Calling them from another thread in another + * AioContext would cause deadlocks. * * Therefore, these functions are not proper I/O, because they * can't run in *any* iothreads, but only in a specific one. diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h index 4e31d161c57..761276127ed 100644 --- a/include/block/block_int-common.h +++ b/include/block/block_int-common.h @@ -985,7 +985,9 @@ struct BdrvChildClass { * can update its reference. */ int (*update_filename)(BdrvChild *child, BlockDriverState *new_base, - const char *filename, Error **errp); + const char *filename, + bool backing_mask_protocol, + Error **errp); bool (*change_aio_ctx)(BdrvChild *child, AioContext *ctx, GHashTable *visited, Transaction *tran, @@ -1192,8 +1194,6 @@ struct BlockDriverState { /* The error object in use for blocking operations on backing_hd */ Error *backing_blocker; - /* Protected by AioContext lock */ - /* * If we are reading a disk image, give its size in sectors. * Generally read-only; it is written to by load_snapshot and diff --git a/include/block/block_int-global-state.h b/include/block/block_int-global-state.h index ef31c58bb3e..d2201e27f4d 100644 --- a/include/block/block_int-global-state.h +++ b/include/block/block_int-global-state.h @@ -46,6 +46,8 @@ * flatten the whole backing file chain onto @bs. * @backing_file_str: The file name that will be written to @bs as the * the new backing file if the job completes. Ignored if @base is %NULL. + * @backing_mask_protocol: Replace potential protocol name with 'raw' in + * 'backing file format' header * @creation_flags: Flags that control the behavior of the Job lifetime. * See @BlockJobCreateFlags * @speed: The maximum speed, in bytes per second, or 0 for unlimited. 
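The @backing_mask_protocol flag documented just above is threaded through the stream_start() and commit_start() prototypes in the hunks that follow. As a rough illustration only (the job name, the bs/base_bs/top_bs nodes and local_err below are hypothetical placeholders, not part of this patch), a caller that wants @top's overlay to record "raw" rather than a protocol driver name such as "file" in its backing-format header would pass:

    /* Sketch of a commit_start() call using the new flag; all variables
     * other than the literal arguments are assumed caller context. */
    commit_start("commit0", bs, base_bs, top_bs,
                 JOB_DEFAULT, 0 /* speed: unlimited */,
                 BLOCKDEV_ON_ERROR_REPORT,
                 NULL /* backing_file_str: keep the current name */,
                 true /* backing_mask_protocol: store "raw" as the format */,
                 NULL /* filter_node_name: autogenerate */,
                 &local_err);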
@@ -64,6 +66,7 @@ */ void stream_start(const char *job_id, BlockDriverState *bs, BlockDriverState *base, const char *backing_file_str, + bool backing_mask_protocol, BlockDriverState *bottom, int creation_flags, int64_t speed, BlockdevOnError on_error, @@ -82,6 +85,8 @@ void stream_start(const char *job_id, BlockDriverState *bs, * @speed: The maximum speed, in bytes per second, or 0 for unlimited. * @on_error: The action to take upon error. * @backing_file_str: String to use as the backing file in @top's overlay + * @backing_mask_protocol: Replace potential protocol name with 'raw' in + * 'backing file format' header * @filter_node_name: The node name that should be assigned to the filter * driver that the commit job inserts into the graph above @top. NULL means * that a node name should be autogenerated. @@ -92,6 +97,7 @@ void commit_start(const char *job_id, BlockDriverState *bs, BlockDriverState *base, BlockDriverState *top, int creation_flags, int64_t speed, BlockdevOnError on_error, const char *backing_file_str, + bool backing_mask_protocol, const char *filter_node_name, Error **errp); /** * commit_active_start: diff --git a/include/block/blockjob.h b/include/block/blockjob.h index e594c10d231..7061ab7201a 100644 --- a/include/block/blockjob.h +++ b/include/block/blockjob.h @@ -54,7 +54,7 @@ typedef struct BlockJob { /** * Speed that was set with @block_job_set_speed. - * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). + * Always modified and read under the BQL (GLOBAL_STATE_CODE). */ int64_t speed; @@ -66,7 +66,7 @@ typedef struct BlockJob { /** * Block other operations when block job is running. - * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). + * Always modified and read under the BQL (GLOBAL_STATE_CODE). */ Error *blocker; @@ -89,7 +89,7 @@ typedef struct BlockJob { /** * BlockDriverStates that are involved in this block job. - * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). + * Always modified and read under the BQL (GLOBAL_STATE_CODE). */ GSList *nodes; } BlockJob; diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h index 22b5db1ed96..d7545e82d06 100644 --- a/include/block/graph-lock.h +++ b/include/block/graph-lock.h @@ -110,34 +110,17 @@ void unregister_aiocontext(AioContext *ctx); * * The wrlock can only be taken from the main loop, with BQL held, as only the * main loop is allowed to modify the graph. - * - * If @bs is non-NULL, its AioContext is temporarily released. - * - * This function polls. Callers must not hold the lock of any AioContext other - * than the current one and the one of @bs. */ void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA -bdrv_graph_wrlock(BlockDriverState *bs); +bdrv_graph_wrlock(void); /* * bdrv_graph_wrunlock: * Write finished, reset global has_writer to 0 and restart * all readers that are waiting. - * - * If @bs is non-NULL, its AioContext is temporarily released. - */ -void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA -bdrv_graph_wrunlock(BlockDriverState *bs); - -/* - * bdrv_graph_wrunlock_ctx: - * Write finished, reset global has_writer to 0 and restart - * all readers that are waiting. - * - * If @ctx is non-NULL, its lock is temporarily released. 
*/ void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA -bdrv_graph_wrunlock_ctx(AioContext *ctx); +bdrv_graph_wrunlock(void); /* * bdrv_graph_co_rdlock: diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h index 0f63c2800ce..20e000b8ef8 100644 --- a/include/block/raw-aio.h +++ b/include/block/raw-aio.h @@ -65,7 +65,6 @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context); #endif /* io_uring.c - Linux io_uring implementation */ #ifdef CONFIG_LINUX_IO_URING -typedef struct LuringState LuringState; LuringState *luring_init(Error **errp); void luring_cleanup(LuringState *s); diff --git a/include/block/snapshot.h b/include/block/snapshot.h index d49c5599d9b..304cc6ea61c 100644 --- a/include/block/snapshot.h +++ b/include/block/snapshot.h @@ -86,8 +86,6 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs, /* * Group operations. All block drivers are involved. - * These functions will properly handle dataplane (take aio_context_acquire - * when appropriate for appropriate block drivers */ bool bdrv_all_can_snapshot(bool has_devices, strList *devices, diff --git a/include/chardev/char-fe.h b/include/chardev/char-fe.h index 0ff6f875116..ecef1828355 100644 --- a/include/chardev/char-fe.h +++ b/include/chardev/char-fe.h @@ -7,8 +7,12 @@ typedef void IOEventHandler(void *opaque, QEMUChrEvent event); typedef int BackendChangeHandler(void *opaque); -/* This is the backend as seen by frontend, the actual backend is - * Chardev */ +/** + * struct CharBackend - back end as seen by front end + * @fe_is_open: the front end is ready for IO + * + * The actual backend is Chardev + */ struct CharBackend { Chardev *chr; IOEventHandler *chr_event; @@ -17,7 +21,7 @@ struct CharBackend { BackendChangeHandler *chr_be_change; void *opaque; int tag; - int fe_open; + bool fe_is_open; }; /** @@ -156,12 +160,13 @@ void qemu_chr_fe_set_echo(CharBackend *be, bool echo); /** * qemu_chr_fe_set_open: + * @be: a CharBackend + * @is_open: the front end open status * - * Set character frontend open status. This is an indication that the - * front end is ready (or not) to begin doing I/O. - * Without associated Chardev, do nothing. + * This is an indication that the front end is ready (or not) to begin + * doing I/O. Without associated Chardev, do nothing. */ -void qemu_chr_fe_set_open(CharBackend *be, int fe_open); +void qemu_chr_fe_set_open(CharBackend *be, bool is_open); /** * qemu_chr_fe_printf: diff --git a/include/crypto/block.h b/include/crypto/block.h index 4f63a378727..92e823c9f2f 100644 --- a/include/crypto/block.h +++ b/include/crypto/block.h @@ -66,6 +66,7 @@ bool qcrypto_block_has_format(QCryptoBlockFormat format, typedef enum { QCRYPTO_BLOCK_OPEN_NO_IO = (1 << 0), + QCRYPTO_BLOCK_OPEN_DETACHED = (1 << 1), } QCryptoBlockOpenFlags; /** @@ -95,6 +96,10 @@ typedef enum { * metadata such as the payload offset. There will be * no cipher or ivgen objects available. * + * If @flags contains QCRYPTO_BLOCK_OPEN_DETACHED then + * the open process will be optimized to skip the LUKS + * payload overlap check. + * * If any part of initializing the encryption context * fails an error will be returned. 
This could be due * to the volume being in the wrong format, a cipher @@ -111,6 +116,10 @@ QCryptoBlock *qcrypto_block_open(QCryptoBlockOpenOptions *options, size_t n_threads, Error **errp); +typedef enum { + QCRYPTO_BLOCK_CREATE_DETACHED = (1 << 0), +} QCryptoBlockCreateFlags; + /** * qcrypto_block_create: * @options: the encryption options @@ -118,6 +127,7 @@ QCryptoBlock *qcrypto_block_open(QCryptoBlockOpenOptions *options, * @initfunc: callback for initializing volume header * @writefunc: callback for writing data to the volume header * @opaque: data to pass to @initfunc and @writefunc + * @flags: bitmask of QCryptoBlockCreateFlags values * @errp: pointer to a NULL-initialized error object * * Create a new block encryption object for initializing @@ -129,6 +139,11 @@ QCryptoBlock *qcrypto_block_open(QCryptoBlockOpenOptions *options, * generating new master keys, etc as required. Any existing * data present on the volume will be irrevocably destroyed. * + * If @flags contains QCRYPTO_BLOCK_CREATE_DETACHED then + * the open process will set the payload_offset_sector to 0 + * to specify the starting point for the read/write of a + * detached LUKS header image. + * * If any part of initializing the encryption context * fails an error will be returned. This could be due * to the volume being in the wrong format, a cipher @@ -142,6 +157,7 @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options, QCryptoBlockInitFunc initfunc, QCryptoBlockWriteFunc writefunc, void *opaque, + unsigned int flags, Error **errp); /** diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h index 2324f6b1a46..b26867b6417 100644 --- a/include/disas/dis-asm.h +++ b/include/disas/dis-asm.h @@ -396,6 +396,14 @@ typedef struct disassemble_info { /* Command line options specific to the target disassembler. */ char * disassembler_options; + /* + * When true instruct the disassembler it may preface the + * disassembly with the opcodes values if it wants to. This is + * mainly for the benefit of the plugin interface which doesn't want + * that. + */ + bool show_opcodes; + /* Field intended to be used by targets in any way they deem suitable. */ void *target_info; diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 5340907cfd0..1a6510fd3bf 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -171,34 +171,10 @@ extern const TargetPageBits target_page; #define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE) -/* same as PROT_xxx */ -#define PAGE_READ 0x0001 -#define PAGE_WRITE 0x0002 -#define PAGE_EXEC 0x0004 -#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC) -#define PAGE_VALID 0x0008 -/* - * Original state of the write flag (used when tracking self-modifying code) - */ -#define PAGE_WRITE_ORG 0x0010 -/* - * Invalidate the TLB entry immediately, helpful for s390x - * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() - */ -#define PAGE_WRITE_INV 0x0020 -/* For use with page_set_flags: page is being replaced; target_data cleared. */ -#define PAGE_RESET 0x0040 -/* For linux-user, indicates that the page is MAP_ANON. */ -#define PAGE_ANON 0x0080 - #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) /* FIXME: Code that sets/uses this is broken and needs to go away. */ #define PAGE_RESERVED 0x0100 #endif -/* Target-specific bits that will be used via page_get_flags(). */ -#define PAGE_TARGET_1 0x0200 -#define PAGE_TARGET_2 0x0400 - /* * For linux-user, indicates that the page is mapped with the same semantics * in both guest and host. 
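For the detached-LUKS-header flags added to crypto/block.h earlier in this patch, the fragment below sketches how a caller might combine them with qcrypto_block_create() and qcrypto_block_open(). It is illustrative only: the option structs, callbacks, opaque pointer and the optprefix argument are assumed from the existing API and are not shown in these hunks.

    unsigned int cflags = 0;

    /* Creating a detached header image: the doc above says the create path
     * then forces payload_offset_sector to 0 for the header-only image. */
    cflags |= QCRYPTO_BLOCK_CREATE_DETACHED;
    crypto = qcrypto_block_create(create_opts, NULL /* optprefix */,
                                  luks_init_cb, luks_write_cb, opaque,
                                  cflags, errp);

    /* Reopening it later: skip the LUKS payload-overlap check. */
    crypto = qcrypto_block_open(open_opts, NULL /* optprefix */,
                                luks_read_cb, opaque,
                                QCRYPTO_BLOCK_OPEN_NO_IO |
                                QCRYPTO_BLOCK_OPEN_DETACHED,
                                1 /* n_threads */, errp);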
@@ -335,6 +311,10 @@ CPUArchState *cpu_copy(CPUArchState *env); #define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 2)) #define TLB_WATCHPOINT 0 +static inline int cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return MMU_USER_IDX; +} #else /* @@ -377,8 +357,10 @@ CPUArchState *cpu_copy(CPUArchState *env); #define TLB_BSWAP (1 << 0) /* Set if TLB entry contains a watchpoint. */ #define TLB_WATCHPOINT (1 << 1) +/* Set if TLB entry requires aligned accesses. */ +#define TLB_CHECK_ALIGNED (1 << 2) -#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT) +#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED) /* The two sets of flags must not overlap. */ QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK); @@ -408,33 +390,8 @@ static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr) #endif /* !CONFIG_USER_ONLY */ -/* accel/tcg/cpu-exec.c */ -int cpu_exec(CPUState *cpu); - /* Validate correct placement of CPUArchState. */ QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0); QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState)); -/** - * env_archcpu(env) - * @env: The architecture environment - * - * Return the ArchCPU associated with the environment. - */ -static inline ArchCPU *env_archcpu(CPUArchState *env) -{ - return (void *)env - sizeof(CPUState); -} - -/** - * env_cpu(env) - * @env: The architecture environment - * - * Return the CPUState associated with the environment. - */ -static inline CPUState *env_cpu(CPUArchState *env) -{ - return (void *)env - sizeof(CPUState); -} - #endif /* CPU_ALL_H */ diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h index 41115d89194..6346df17ce9 100644 --- a/include/exec/cpu-common.h +++ b/include/exec/cpu-common.h @@ -3,9 +3,12 @@ /* CPU interfaces that are target independent. */ +#include "exec/vaddr.h" #ifndef CONFIG_USER_ONLY #include "exec/hwaddr.h" #endif +#include "hw/core/cpu.h" +#include "tcg/debug-assert.h" #define EXCP_INTERRUPT 0x10000 /* async interruption */ #define EXCP_HLT 0x10001 /* hlt instruction reached */ @@ -14,28 +17,9 @@ #define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */ #define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */ -/** - * vaddr: - * Type wide enough to contain any #target_ulong virtual address. - */ -typedef uint64_t vaddr; -#define VADDR_PRId PRId64 -#define VADDR_PRIu PRIu64 -#define VADDR_PRIo PRIo64 -#define VADDR_PRIx PRIx64 -#define VADDR_PRIX PRIX64 -#define VADDR_MAX UINT64_MAX - void cpu_exec_init_all(void); void cpu_exec_step_atomic(CPUState *cpu); -/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even - * when intptr_t is 32-bit and we are aligning a long long. - */ -extern uintptr_t qemu_host_page_size; -extern intptr_t qemu_host_page_mask; - -#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size) #define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size()) /* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */ @@ -92,7 +76,7 @@ RAMBlock *qemu_ram_block_by_name(const char *name); * * By the time this function returns, the returned pointer is not protected * by RCU anymore. If the caller is not within an RCU critical section and - * does not hold the iothread lock, it must have other means of protecting the + * does not hold the BQL, it must have other means of protecting the * pointer, such as a reference to the memory region that owns the RAMBlock. 
*/ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, @@ -217,4 +201,79 @@ G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc); G_NORETURN void cpu_loop_exit(CPUState *cpu); G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc); +/* same as PROT_xxx */ +#define PAGE_READ 0x0001 +#define PAGE_WRITE 0x0002 +#define PAGE_EXEC 0x0004 +#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC) +#define PAGE_VALID 0x0008 +/* + * Original state of the write flag (used when tracking self-modifying code) + */ +#define PAGE_WRITE_ORG 0x0010 +/* + * Invalidate the TLB entry immediately, helpful for s390x + * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() + */ +#define PAGE_WRITE_INV 0x0020 +/* For use with page_set_flags: page is being replaced; target_data cleared. */ +#define PAGE_RESET 0x0040 +/* For linux-user, indicates that the page is MAP_ANON. */ +#define PAGE_ANON 0x0080 + +/* Target-specific bits that will be used via page_get_flags(). */ +#define PAGE_TARGET_1 0x0200 +#define PAGE_TARGET_2 0x0400 + +/* + * For linux-user, indicates that the page is mapped with the same semantics + * in both guest and host. + */ +#define PAGE_PASSTHROUGH 0x0800 + +/* accel/tcg/cpu-exec.c */ +int cpu_exec(CPUState *cpu); + +/** + * env_archcpu(env) + * @env: The architecture environment + * + * Return the ArchCPU associated with the environment. + */ +static inline ArchCPU *env_archcpu(CPUArchState *env) +{ + return (void *)env - sizeof(CPUState); +} + +/** + * env_cpu(env) + * @env: The architecture environment + * + * Return the CPUState associated with the environment. + */ +static inline CPUState *env_cpu(CPUArchState *env) +{ + return (void *)env - sizeof(CPUState); +} + +#ifndef CONFIG_USER_ONLY +/** + * cpu_mmu_index: + * @env: The cpu environment + * @ifetch: True for code access, false for data access. + * + * Return the core mmu index for the current translation regime. + * This function is used by generic TCG code paths. + * + * The user-only version of this function is inline in cpu-all.h, + * where it always returns MMU_USER_IDX. 
+ */ +static inline int cpu_mmu_index(CPUState *cs, bool ifetch) +{ + int ret = cs->cc->mmu_index(cs, ifetch); + tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES); + return ret; +} +#endif /* !CONFIG_USER_ONLY */ + #endif /* CPU_COMMON_H */ diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h index 6061e33ac95..eb8f3f05953 100644 --- a/include/exec/cpu_ldst.h +++ b/include/exec/cpu_ldst.h @@ -121,8 +121,8 @@ static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len) h2g_nocheck(x); \ }) #else -typedef target_ulong abi_ptr; -#define TARGET_ABI_FMT_ptr TARGET_FMT_lx +typedef vaddr abi_ptr; +#define TARGET_ABI_FMT_ptr VADDR_PRIx #endif uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr); diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index ee90ef122bc..3e535016915 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -459,12 +459,6 @@ int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size, #endif -/* Hide the qatomic_read to make code a little easier on the eyes */ -static inline uint32_t tb_cflags(const TranslationBlock *tb) -{ - return qatomic_read(&tb->cflags); -} - static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb) { #ifdef CONFIG_USER_ONLY @@ -518,11 +512,6 @@ static inline void tb_set_page_addr1(TranslationBlock *tb, uint32_t curr_cflags(CPUState *cpu); /* TranslationBlock invalidate API */ -#if defined(CONFIG_USER_ONLY) -void tb_invalidate_phys_addr(hwaddr addr); -#else -void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs); -#endif void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last); void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); @@ -666,6 +655,7 @@ static inline void mmap_unlock(void) {} void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length); void tlb_set_dirty(CPUState *cpu, vaddr addr); +void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length); MemoryRegionSection * address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h index d8a3c56fa2b..eb14b91139b 100644 --- a/include/exec/gdbstub.h +++ b/include/exec/gdbstub.h @@ -13,19 +13,28 @@ typedef struct GDBFeature { const char *xmlname; const char *xml; + const char *name; + const char * const *regs; int num_regs; } GDBFeature; typedef struct GDBFeatureBuilder { GDBFeature *feature; GPtrArray *xml; + GPtrArray *regs; int base_reg; } GDBFeatureBuilder; /* Get or set a register. Returns the size of the register. */ -typedef int (*gdb_get_reg_cb)(CPUArchState *env, GByteArray *buf, int reg); -typedef int (*gdb_set_reg_cb)(CPUArchState *env, uint8_t *buf, int reg); +typedef int (*gdb_get_reg_cb)(CPUState *cpu, GByteArray *buf, int reg); +typedef int (*gdb_set_reg_cb)(CPUState *cpu, uint8_t *buf, int reg); + +/** + * gdb_init_cpu(): Initialize the CPU for gdbstub. + * @cpu: The CPU to be initialized. 
+ */ +void gdb_init_cpu(CPUState *cpu); /** * gdb_register_coprocessor() - register a supplemental set of registers @@ -38,7 +47,7 @@ typedef int (*gdb_set_reg_cb)(CPUArchState *env, uint8_t *buf, int reg); */ void gdb_register_coprocessor(CPUState *cpu, gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg, - int num_regs, const char *xml, int g_pos); + const GDBFeature *feature, int g_pos); /** * gdbserver_start: start the gdb server @@ -102,6 +111,34 @@ void gdb_feature_builder_end(const GDBFeatureBuilder *builder); */ const GDBFeature *gdb_find_static_feature(const char *xmlname); +/** + * gdb_read_register() - Read a register associated with a CPU. + * @cpu: The CPU associated with the register. + * @buf: The buffer that the read register will be appended to. + * @reg: The register's number returned by gdb_find_feature_register(). + * + * Return: The number of read bytes. + */ +int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); + +/** + * typedef GDBRegDesc - a register description from gdbstub + */ +typedef struct { + int gdb_reg; + const char *name; + const char *feature_name; +} GDBRegDesc; + +/** + * gdb_get_register_list() - Return list of all registers for CPU + * @cpu: The CPU being searched + * + * Returns a GArray of GDBRegDesc, caller frees array but not the + * const strings. + */ +GArray *gdb_get_register_list(CPUState *cpu); + void gdb_set_stop_cpu(CPUState *cpu); /* in gdbstub-xml.c, generated by scripts/feature_to_c.py */ diff --git a/include/exec/ioport.h b/include/exec/ioport.h index e34f668998d..4397f12f932 100644 --- a/include/exec/ioport.h +++ b/include/exec/ioport.h @@ -35,7 +35,6 @@ typedef struct MemoryRegionPortio { unsigned size; uint32_t (*read)(void *opaque, uint32_t address); void (*write)(void *opaque, uint32_t address, uint32_t data); - uint32_t base; /* private field */ } MemoryRegionPortio; #define PORTIO_END_OF_LIST() { } @@ -55,6 +54,7 @@ typedef struct PortioList { const struct MemoryRegionPortio *ports; Object *owner; struct MemoryRegion *address_space; + uint32_t addr; unsigned nr; struct MemoryRegion **regions; void *opaque; @@ -71,5 +71,7 @@ void portio_list_add(PortioList *piolist, struct MemoryRegion *address_space, uint32_t addr); void portio_list_del(PortioList *piolist); +void portio_list_set_enabled(PortioList *piolist, bool enabled); +void portio_list_set_address(PortioList *piolist, uint32_t addr); #endif /* IOPORT_H */ diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h index d04170aa27a..14cdd8d5824 100644 --- a/include/exec/memattrs.h +++ b/include/exec/memattrs.h @@ -52,18 +52,6 @@ typedef struct MemTxAttrs { unsigned int memory:1; /* Requester ID (for MSI for example) */ unsigned int requester_id:16; - /* Invert endianness for this page */ - unsigned int byte_swap:1; - /* - * The following are target-specific page-table bits. These are not - * related to actual memory transactions at all. However, this structure - * is part of the tlb_fill interface, cached in the cputlb structure, - * and has unused bits. These fields will be read by target-specific - * helpers using env->iotlb[mmu_idx][tlb_index()].attrs.target_tlb_bitN. 
- */ - unsigned int target_tlb_bit0 : 1; - unsigned int target_tlb_bit1 : 1; - unsigned int target_tlb_bit2 : 1; } MemTxAttrs; /* Bus masters which don't specify any attributes will get this, diff --git a/include/exec/memory.h b/include/exec/memory.h index 831f7c996d9..8626a355b31 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -43,7 +43,7 @@ typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass; DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass, IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION) -#define TYPE_RAM_DISCARD_MANAGER "qemu:ram-discard-manager" +#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager" typedef struct RamDiscardManagerClass RamDiscardManagerClass; typedef struct RamDiscardManager RamDiscardManager; DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass, @@ -1288,8 +1288,10 @@ void memory_region_init_io(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram_nomigrate(MemoryRegion *mr, +bool memory_region_init_ram_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1310,8 +1312,10 @@ void memory_region_init_ram_nomigrate(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, +bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1338,8 +1342,10 @@ void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_resizeable_ram(MemoryRegion *mr, +bool memory_region_init_resizeable_ram(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1370,8 +1376,10 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram_from_file(MemoryRegion *mr, +bool memory_region_init_ram_from_file(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1398,8 +1406,10 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram_from_fd(MemoryRegion *mr, +bool memory_region_init_ram_from_fd(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1494,8 +1504,10 @@ void memory_region_init_alias(MemoryRegion *mr, * must be unique within any device * @size: size of the region. * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. 
*/ -void memory_region_init_rom_nomigrate(MemoryRegion *mr, +bool memory_region_init_rom_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1517,8 +1529,10 @@ void memory_region_init_rom_nomigrate(MemoryRegion *mr, * must be unique within any device * @size: size of the region. * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, +bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr, Object *owner, const MemoryRegionOps *ops, void *opaque, @@ -1576,8 +1590,10 @@ void memory_region_init_iommu(void *_iommu_mr, * give the RAM block a unique name for migration purposes. * We should lift this restriction and allow arbitrary Objects. * If you pass a non-NULL non-device @owner then we will assert. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram(MemoryRegion *mr, +bool memory_region_init_ram(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1603,8 +1619,10 @@ void memory_region_init_ram(MemoryRegion *mr, * must be unique within any device * @size: size of the region. * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_rom(MemoryRegion *mr, +bool memory_region_init_rom(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1634,8 +1652,10 @@ void memory_region_init_rom(MemoryRegion *mr, * must be unique within any device * @size: size of the region. * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_rom_device(MemoryRegion *mr, +bool memory_region_init_rom_device(MemoryRegion *mr, Object *owner, const MemoryRegionOps *ops, void *opaque, @@ -1962,7 +1982,7 @@ int memory_region_get_fd(MemoryRegion *mr); * * Use with care; by the time this function returns, the returned pointer is * not protected by RCU anymore. If the caller is not within an RCU critical - * section and does not hold the iothread lock, it must have other means of + * section and does not hold the BQL, it must have other means of * protecting the pointer, such as a reference to the region that includes * the incoming ram_addr_t. * @@ -1979,7 +1999,7 @@ MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset); * * Use with care; by the time this function returns, the returned pointer is * not protected by RCU anymore. If the caller is not within an RCU critical - * section and does not hold the iothread lock, it must have other means of + * section and does not hold the BQL, it must have other means of * protecting the pointer, such as a reference to the region that includes * the incoming ram_addr_t. * @@ -3126,7 +3146,7 @@ int ram_block_discard_require(bool state); /* * See ram_block_discard_require(): only inhibit technologies that disable - * uncoordinated discarding of pages in RAM blocks, allowing co-existance with + * uncoordinated discarding of pages in RAM blocks, allowing co-existence with * technologies that only inhibit uncoordinated discards (via the * RamDiscardManager). 
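A minimal sketch (mine, not the patch's) of how the new boolean return of the memory_region_init_*() family is meant to be consumed in a realize-style path; the region name and size are made up, and on failure the error is simply left in *errp as the updated comments describe.

    #include "qemu/osdep.h"
    #include "qemu/units.h"
    #include "qapi/error.h"
    #include "exec/memory.h"

    static void demo_init_sram(MemoryRegion *sram, Object *owner, Error **errp)
    {
        /* The result can now be tested directly instead of probing *errp. */
        if (!memory_region_init_ram(sram, owner, "demo.sram", 64 * KiB, errp)) {
            return;   /* *errp was set by memory_region_init_ram() */
        }
        /* ... map the region and continue realize ... */
    }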
*/ diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 90676093f5d..de45ba7bc96 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -25,6 +25,7 @@ #include "sysemu/tcg.h" #include "exec/ramlist.h" #include "exec/ramblock.h" +#include "exec/exec-all.h" extern uint64_t total_dirty_pages; @@ -443,6 +444,14 @@ uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, } #endif /* not _WIN32 */ +static inline void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start, + ram_addr_t length) +{ + if (tcg_enabled()) { + tlb_reset_dirty_range_all(start, length); + } + +} bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, ram_addr_t length, unsigned client); @@ -504,6 +513,9 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, idx++; } } + if (num_dirty) { + cpu_physical_memory_dirty_bits_cleared(start, length); + } if (rb->clear_bmap) { /* diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h index 69c6a539029..848915ea5bf 100644 --- a/include/exec/ramblock.h +++ b/include/exec/ramblock.h @@ -34,7 +34,7 @@ struct RAMBlock { ram_addr_t max_length; void (*resized)(const char*, uint64_t length, void *host); uint32_t flags; - /* Protected by iothread lock. */ + /* Protected by the BQL. */ char idstr[256]; /* RCU-enabled, writes protected by the ramlist lock */ QLIST_ENTRY(RAMBlock) next; @@ -44,6 +44,19 @@ struct RAMBlock { size_t page_size; /* dirty bitmap used during migration */ unsigned long *bmap; + + /* + * Below fields are only used by mapped-ram migration + */ + /* bitmap of pages present in the migration file */ + unsigned long *file_bmap; + /* + * offset in the file pages belonging to this ramblock are saved, + * used only during migration to a file. + */ + off_t bitmap_offset; + uint64_t pages_offset; + /* bitmap of already received pages in postcopy */ unsigned long *receivedmap; diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h index 88602ae8d83..85c9460c7ca 100644 --- a/include/exec/translate-all.h +++ b/include/exec/translate-all.h @@ -23,7 +23,6 @@ /* translate-all.c */ -void tb_invalidate_phys_page(tb_page_addr_t addr); void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr); #ifdef CONFIG_USER_ONLY diff --git a/include/exec/translation-block.h b/include/exec/translation-block.h index e2b26e16da1..48211c890a7 100644 --- a/include/exec/translation-block.h +++ b/include/exec/translation-block.h @@ -145,4 +145,10 @@ struct TranslationBlock { /* The alignment given to TranslationBlock during allocation. */ #define CODE_GEN_ALIGN 16 +/* Hide the qatomic_read to make code a little easier on the eyes */ +static inline uint32_t tb_cflags(const TranslationBlock *tb) +{ + return qatomic_read(&tb->cflags); +} + #endif /* EXEC_TRANSLATION_BLOCK_H */ diff --git a/include/exec/translator.h b/include/exec/translator.h index 6d3f59d0951..2c4fb818e71 100644 --- a/include/exec/translator.h +++ b/include/exec/translator.h @@ -33,7 +33,7 @@ * the target-specific DisasContext, and then invoke translator_loop. */ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc); + vaddr pc, void *host_pc); /** * DisasJumpType: @@ -74,19 +74,21 @@ typedef enum DisasJumpType { * @singlestep_enabled: "Hardware" single stepping enabled. * @saved_can_do_io: Known value of cpu->neg.can_do_io, or -1 for unknown. * @plugin_enabled: TCG plugin enabled in this TB. 
+ * @insn_start: The last op emitted by the insn_start hook, + * which is expected to be INDEX_op_insn_start. * * Architecture-agnostic disassembly context. */ typedef struct DisasContextBase { TranslationBlock *tb; - target_ulong pc_first; - target_ulong pc_next; + vaddr pc_first; + vaddr pc_next; DisasJumpType is_jmp; int num_insns; int max_insns; bool singlestep_enabled; - int8_t saved_can_do_io; bool plugin_enabled; + struct TCGOp *insn_start; void *host_addr[2]; } DisasContextBase; @@ -235,7 +237,7 @@ void translator_fake_ldb(uint8_t insn8, abi_ptr pc); * Translators can use this to enforce the rule that only single-insn * translation blocks are allowed to cross page boundaries. */ -static inline bool is_same_page(const DisasContextBase *db, target_ulong addr) +static inline bool is_same_page(const DisasContextBase *db, vaddr addr) { return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0; } diff --git a/include/exec/vaddr.h b/include/exec/vaddr.h new file mode 100644 index 00000000000..b9844afc773 --- /dev/null +++ b/include/exec/vaddr.h @@ -0,0 +1,18 @@ +/* Define vaddr. */ + +#ifndef VADDR_H +#define VADDR_H + +/** + * vaddr: + * Type wide enough to contain any #target_ulong virtual address. + */ +typedef uint64_t vaddr; +#define VADDR_PRId PRId64 +#define VADDR_PRIu PRIu64 +#define VADDR_PRIo PRIo64 +#define VADDR_PRIx PRIx64 +#define VADDR_PRIX PRIX64 +#define VADDR_MAX UINT64_MAX + +#endif diff --git a/include/gdbstub/user.h b/include/gdbstub/user.h index d392e510c59..3b8358e3dab 100644 --- a/include/gdbstub/user.h +++ b/include/gdbstub/user.h @@ -9,10 +9,15 @@ #ifndef GDBSTUB_USER_H #define GDBSTUB_USER_H +#define MAX_SIGINFO_LENGTH 128 + /** * gdb_handlesig() - yield control to gdb * @cpu: CPU * @sig: if non-zero, the signal number which caused us to stop + * @reason: stop reason for stop reply packet or NULL + * @siginfo: target-specific siginfo struct + * @siginfo_len: target-specific siginfo struct length * * This function yields control to gdb, when a user-mode-only target * needs to stop execution. If @sig is non-zero, then we will send a @@ -24,7 +29,7 @@ * or 0 if no signal should be delivered, ie the signal that caused * us to stop should be ignored. */ -int gdb_handlesig(CPUState *, int); +int gdb_handlesig(CPUState *, int, const char *, void *, int); /** * gdb_signalled() - inform remote gdb of sig exit @@ -34,10 +39,29 @@ int gdb_handlesig(CPUState *, int); void gdb_signalled(CPUArchState *as, int sig); /** - * gdbserver_fork() - disable gdb stub for child processes. 
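An illustrative call (a sketch under my reading of the comment above, not code from the patch) of the widened gdb_handlesig(): the stop reason is left NULL and the siginfo buffer is sized by the MAX_SIGINFO_LENGTH limit introduced here; filling the buffer is target specific and omitted.

    #include "qemu/osdep.h"
    #include "hw/core/cpu.h"
    #include "gdbstub/user.h"

    static int demo_stop_in_gdb(CPUState *cs, int sig)
    {
        uint8_t si[MAX_SIGINFO_LENGTH] = { 0 };   /* target siginfo bytes */

        /*
         * NULL reason lets the stub build a default stop reply; the return
         * value is the signal gdb wants delivered, or 0 to suppress it.
         */
        return gdb_handlesig(cs, sig, NULL, si, sizeof(si));
    }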
+ * gdbserver_fork_start() - inform gdb of the upcoming fork() + */ +void gdbserver_fork_start(void); + +/** + * gdbserver_fork_end() - inform gdb of the completed fork() * @cs: CPU + * @pid: 0 if in child process, -1 if fork failed, child process pid otherwise */ -void gdbserver_fork(CPUState *cs); +void gdbserver_fork_end(CPUState *cs, pid_t pid); +/** + * gdb_syscall_entry() - inform gdb of syscall entry and yield control to it + * @cs: CPU + * @num: syscall number + */ +void gdb_syscall_entry(CPUState *cs, int num); + +/** + * gdb_syscall_return() - inform gdb of syscall return and yield control to it + * @cs: CPU + * @num: syscall number + */ +void gdb_syscall_return(CPUState *cs, int num); #endif /* GDBSTUB_USER_H */ diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h index 2b42e4192bf..0e6e82b339f 100644 --- a/include/hw/acpi/acpi-defs.h +++ b/include/hw/acpi/acpi-defs.h @@ -90,6 +90,39 @@ typedef struct AcpiFadtData { unsigned *xdsdt_tbl_offset; } AcpiFadtData; +typedef struct AcpiGas { + uint8_t id; /* Address space ID */ + uint8_t width; /* Register bit width */ + uint8_t offset; /* Register bit offset */ + uint8_t size; /* Access size */ + uint64_t addr; /* Address */ +} AcpiGas; + +/* SPCR (Serial Port Console Redirection table) */ +typedef struct AcpiSpcrData { + uint8_t interface_type; + uint8_t reserved[3]; + struct AcpiGas base_addr; + uint8_t interrupt_type; + uint8_t pc_interrupt; + uint32_t interrupt; /* Global system interrupt */ + uint8_t baud_rate; + uint8_t parity; + uint8_t stop_bits; + uint8_t flow_control; + uint8_t terminal_type; + uint8_t language; + uint8_t reserved1; + uint16_t pci_device_id; /* Must be 0xffff if not PCI device */ + uint16_t pci_vendor_id; /* Must be 0xffff if not PCI device */ + uint8_t pci_bus; + uint8_t pci_device; + uint8_t pci_function; + uint32_t pci_flags; + uint8_t pci_segment; + uint32_t reserved2; +} AcpiSpcrData; + #define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0) #define ACPI_FADT_ARM_PSCI_USE_HVC (1 << 1) diff --git a/include/hw/acpi/acpi_generic_initiator.h b/include/hw/acpi/acpi_generic_initiator.h new file mode 100644 index 00000000000..a304bad73e0 --- /dev/null +++ b/include/hw/acpi/acpi_generic_initiator.h @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved + */ + +#ifndef ACPI_GENERIC_INITIATOR_H +#define ACPI_GENERIC_INITIATOR_H + +#include "qom/object_interfaces.h" + +#define TYPE_ACPI_GENERIC_INITIATOR "acpi-generic-initiator" + +typedef struct AcpiGenericInitiator { + /* private */ + Object parent; + + /* public */ + char *pci_dev; + uint16_t node; +} AcpiGenericInitiator; + +/* + * ACPI 6.3: + * Table 5-81 Flags – Generic Initiator Affinity Structure + */ +typedef enum { + /* + * If clear, the OSPM ignores the contents of the Generic + * Initiator/Port Affinity Structure. This allows system firmware + * to populate the SRAT with a static number of structures, but only + * enable them as necessary. 
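How the two fork hooks are meant to pair up, as I read the comments above (a sketch, not code from the patch): gdbserver_fork_start() runs before fork() and gdbserver_fork_end() runs on both sides, receiving fork()'s return value with the @pid convention documented here.

    #include "qemu/osdep.h"
    #include "hw/core/cpu.h"
    #include "gdbstub/user.h"

    static pid_t demo_fork_with_gdb(CPUState *cs)
    {
        pid_t pid;

        gdbserver_fork_start();     /* before the fork() itself */
        pid = fork();
        /* parent: pid > 0, child: pid == 0, failure: pid == -1 */
        gdbserver_fork_end(cs, pid);
        return pid;
    }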
+ */ + GEN_AFFINITY_ENABLED = (1 << 0), +} GenericAffinityFlags; + +/* + * ACPI 6.3: + * Table 5-80 Device Handle - PCI + */ +typedef struct PCIDeviceHandle { + uint16_t segment; + uint16_t bdf; +} PCIDeviceHandle; + +void build_srat_generic_pci_initiator(GArray *table_data); + +#endif diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h index ff2a310270a..a3784155cb3 100644 --- a/include/hw/acpi/aml-build.h +++ b/include/hw/acpi/aml-build.h @@ -497,4 +497,8 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog, const char *oem_id, const char *oem_table_id); + +void build_spcr(GArray *table_data, BIOSLinker *linker, + const AcpiSpcrData *f, const uint8_t rev, + const char *oem_id, const char *oem_table_id); #endif diff --git a/include/hw/acpi/cpu.h b/include/hw/acpi/cpu.h index bc901660fb6..e6e1a9ef594 100644 --- a/include/hw/acpi/cpu.h +++ b/include/hw/acpi/cpu.h @@ -12,6 +12,7 @@ #ifndef ACPI_CPU_H #define ACPI_CPU_H +#include "qapi/qapi-types-acpi.h" #include "hw/qdev-core.h" #include "hw/acpi/acpi.h" #include "hw/acpi/aml-build.h" @@ -19,7 +20,7 @@ #include "hw/hotplug.h" typedef struct AcpiCpuStatus { - struct CPUState *cpu; + CPUState *cpu; uint64_t arch_id; bool is_inserting; bool is_removing; diff --git a/include/hw/acpi/ich9_tco.h b/include/hw/acpi/ich9_tco.h index c4393caee0f..2562a7cf39b 100644 --- a/include/hw/acpi/ich9_tco.h +++ b/include/hw/acpi/ich9_tco.h @@ -11,6 +11,7 @@ #define HW_ACPI_TCO_H #include "exec/memory.h" +#include "migration/vmstate.h" /* As per ICH9 spec, the internal timer has an error of ~0.6s on every tick */ #define TCO_TICK_NSEC 600000000LL diff --git a/include/hw/acpi/memory_hotplug.h b/include/hw/acpi/memory_hotplug.h index dfe9cf3fde1..38841d7b065 100644 --- a/include/hw/acpi/memory_hotplug.h +++ b/include/hw/acpi/memory_hotplug.h @@ -1,6 +1,7 @@ #ifndef QEMU_HW_ACPI_MEMORY_HOTPLUG_H #define QEMU_HW_ACPI_MEMORY_HOTPLUG_H +#include "qapi/qapi-types-acpi.h" #include "hw/qdev-core.h" #include "hw/acpi/acpi.h" #include "hw/acpi/aml-build.h" diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h index 2eb83a17eae..67a9a17b862 100644 --- a/include/hw/arm/allwinner-a10.h +++ b/include/hw/arm/allwinner-a10.h @@ -5,7 +5,7 @@ #include "hw/intc/allwinner-a10-pic.h" #include "hw/net/allwinner_emac.h" #include "hw/sd/allwinner-sdhost.h" -#include "hw/ide/ahci.h" +#include "hw/ide/ahci-sysbus.h" #include "hw/usb/hcd-ohci.h" #include "hw/usb/hcd-ehci.h" #include "hw/rtc/allwinner-rtc.h" diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h index 6e1ac9d4c13..614e74b7ed4 100644 --- a/include/hw/arm/allwinner-r40.h +++ b/include/hw/arm/allwinner-r40.h @@ -22,6 +22,7 @@ #include "qom/object.h" #include "hw/timer/allwinner-a10-pit.h" +#include "hw/ide/ahci-sysbus.h" #include "hw/intc/arm_gic.h" #include "hw/sd/allwinner-sdhost.h" #include "hw/misc/allwinner-r40-ccu.h" @@ -30,6 +31,9 @@ #include "hw/i2c/allwinner-i2c.h" #include "hw/net/allwinner_emac.h" #include "hw/net/allwinner-sun8i-emac.h" +#include "hw/usb/hcd-ohci.h" +#include "hw/usb/hcd-ehci.h" +#include "hw/watchdog/allwinner-wdt.h" #include "target/arm/cpu.h" #include "sysemu/block-backend.h" @@ -44,8 +48,14 @@ enum { AW_R40_DEV_MMC1, AW_R40_DEV_MMC2, AW_R40_DEV_MMC3, + AW_R40_DEV_AHCI, + AW_R40_DEV_EHCI1, + AW_R40_DEV_OHCI1, + AW_R40_DEV_EHCI2, + AW_R40_DEV_OHCI2, AW_R40_DEV_CCU, AW_R40_DEV_PIT, + AW_R40_DEV_WDT, AW_R40_DEV_UART0, AW_R40_DEV_UART1, 
AW_R40_DEV_UART2, @@ -88,6 +98,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(AwR40State, AW_R40) * which are currently emulated by the R40 SoC code. */ #define AW_R40_NUM_MMCS 4 +#define AW_R40_NUM_USB 2 #define AW_R40_NUM_UARTS 8 struct AwR40State { @@ -105,7 +116,11 @@ struct AwR40State { const hwaddr *memmap; AwSRAMCState sramc; AwA10PITState timer; + AwWdtState wdt; + AllwinnerAHCIState sata; AwSdHostState mmc[AW_R40_NUM_MMCS]; + EHCISysBusState ehci[AW_R40_NUM_USB]; + OHCISysBusState ohci[AW_R40_NUM_USB]; AwR40ClockCtlState ccu; AwR40DramCtlState dramc; AWI2CState i2c0; diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h index e2cebbd15c0..5c057ab2ec9 100644 --- a/include/hw/arm/armv7m.h +++ b/include/hw/arm/armv7m.h @@ -43,6 +43,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MState, ARMV7M) * a qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET). * + Property "cpu-type": CPU type to instantiate * + Property "num-irq": number of external IRQ lines + * + Property "num-prio-bits": number of priority bits in the NVIC * + Property "memory": MemoryRegion defining the physical address space * that CPU accesses see. (The NVIC, bitbanding and other CPU-internal * devices will be automatically layered on top of this view.) diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h index cb832bc1ee1..c60fac900ac 100644 --- a/include/hw/arm/aspeed_soc.h +++ b/include/hw/arm/aspeed_soc.h @@ -36,6 +36,7 @@ #include "hw/misc/aspeed_lpc.h" #include "hw/misc/unimp.h" #include "hw/misc/aspeed_peci.h" +#include "hw/fsi/aspeed_apb2opb.h" #include "hw/char/serial.h" #define ASPEED_SPIS_NUM 2 @@ -90,6 +91,7 @@ struct AspeedSoCState { UnimplementedDeviceState udc; UnimplementedDeviceState sgpiom; UnimplementedDeviceState jtag[ASPEED_JTAG_NUM]; + AspeedAPB2OPBState fsi[2]; }; #define TYPE_ASPEED_SOC "aspeed-soc" @@ -128,7 +130,8 @@ struct AspeedSoCClass { DeviceClass parent_class; const char *name; - const char *cpu_type; + /** valid_cpu_types: NULL terminated array of a single CPU type. 
*/ + const char * const *valid_cpu_types; uint32_t silicon_rev; uint64_t sram_size; uint64_t secsram_size; @@ -137,16 +140,19 @@ struct AspeedSoCClass { int wdts_num; int macs_num; int uarts_num; + int uarts_base; const int *irqmap; const hwaddr *memmap; uint32_t num_cpus; qemu_irq (*get_irq)(AspeedSoCState *s, int dev); }; +const char *aspeed_soc_cpu_type(AspeedSoCClass *sc); enum { ASPEED_DEV_SPI_BOOT, ASPEED_DEV_IOMEM, + ASPEED_DEV_UART0, ASPEED_DEV_UART1, ASPEED_DEV_UART2, ASPEED_DEV_UART3, @@ -214,10 +220,10 @@ enum { ASPEED_DEV_SGPIOM, ASPEED_DEV_JTAG0, ASPEED_DEV_JTAG1, + ASPEED_DEV_FSI1, + ASPEED_DEV_FSI2, }; -#define ASPEED_SOC_SPI_BOOT_ADDR 0x0 - qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev); bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp); void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr); @@ -229,4 +235,19 @@ void aspeed_mmio_map_unimplemented(AspeedSoCState *s, SysBusDevice *dev, void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, unsigned int count, int unit0); +static inline int aspeed_uart_index(int uart_dev) +{ + return uart_dev - ASPEED_DEV_UART0; +} + +static inline int aspeed_uart_first(AspeedSoCClass *sc) +{ + return aspeed_uart_index(sc->uarts_base); +} + +static inline int aspeed_uart_last(AspeedSoCClass *sc) +{ + return aspeed_uart_first(sc) + sc->uarts_num - 1; +} + #endif /* ASPEED_SOC_H */ diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h index d724a2fc28a..636203baa5a 100644 --- a/include/hw/arm/bcm2835_peripherals.h +++ b/include/hw/arm/bcm2835_peripherals.h @@ -31,13 +31,18 @@ #include "hw/gpio/bcm2835_gpio.h" #include "hw/timer/bcm2835_systmr.h" #include "hw/usb/hcd-dwc2.h" +#include "hw/ssi/bcm2835_spi.h" +#include "hw/i2c/bcm2835_i2c.h" #include "hw/misc/unimp.h" #include "qom/object.h" +#define TYPE_BCM_SOC_PERIPHERALS_BASE "bcm-soc-peripherals-base" +OBJECT_DECLARE_TYPE(BCMSocPeripheralBaseState, BCMSocPeripheralBaseClass, + BCM_SOC_PERIPHERALS_BASE) #define TYPE_BCM2835_PERIPHERALS "bcm2835-peripherals" OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PeripheralState, BCM2835_PERIPHERALS) -struct BCM2835PeripheralState { +struct BCMSocPeripheralBaseState { /*< private >*/ SysBusDevice parent_obj; /*< public >*/ @@ -59,15 +64,13 @@ struct BCM2835PeripheralState { OrIRQState orgated_dma_irq; BCM2835ICState ic; BCM2835PropertyState property; - BCM2835RngState rng; BCM2835MboxState mboxes; SDHCIState sdhci; BCM2835SDHostState sdhost; - BCM2835GpioState gpio; - Bcm2835ThermalState thermal; UnimplementedDeviceState i2s; - UnimplementedDeviceState spi[1]; - UnimplementedDeviceState i2c[3]; + BCM2835SPIState spi[1]; + BCM2835I2CState i2c[3]; + OrIRQState orgated_i2c_irq; UnimplementedDeviceState otp; UnimplementedDeviceState dbus; UnimplementedDeviceState ave0; @@ -78,4 +81,25 @@ struct BCM2835PeripheralState { UnimplementedDeviceState sdramc; }; +struct BCMSocPeripheralBaseClass { + /*< private >*/ + SysBusDeviceClass parent_class; + /*< public >*/ + uint64_t peri_size; /* Peripheral range size */ +}; + +struct BCM2835PeripheralState { + /*< private >*/ + BCMSocPeripheralBaseState parent_obj; + /*< public >*/ + BCM2835RngState rng; + Bcm2835ThermalState thermal; + BCM2835GpioState gpio; +}; + +void create_unimp(BCMSocPeripheralBaseState *ps, + UnimplementedDeviceState *uds, + const char *name, hwaddr ofs, hwaddr size); +void bcm_soc_peripherals_common_realize(DeviceState *dev, Error **errp); + #endif /* BCM2835_PERIPHERALS_H */ diff --git a/include/hw/arm/bcm2836.h 
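A small sketch (not part of the patch) of what the new UART helpers above buy: with uarts_base carried in the class, per-SoC code can walk the UARTs by index instead of hard-coding ASPEED_DEV_UART1. The function name is hypothetical.

    #include "qemu/osdep.h"
    #include "hw/arm/aspeed_soc.h"

    static void demo_walk_uarts(AspeedSoCState *s, AspeedSoCClass *sc)
    {
        /* Indices are zero-based relative to ASPEED_DEV_UART0. */
        for (int i = aspeed_uart_first(sc); i <= aspeed_uart_last(sc); i++) {
            qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_DEV_UART0 + i);
            (void)irq;   /* wire the IRQ to the UART model here */
        }
    }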
b/include/hw/arm/bcm2836.h index 6f90cabfa3a..918fb3bf142 100644 --- a/include/hw/arm/bcm2836.h +++ b/include/hw/arm/bcm2836.h @@ -17,8 +17,10 @@ #include "target/arm/cpu.h" #include "qom/object.h" +#define TYPE_BCM283X_BASE "bcm283x-base" +OBJECT_DECLARE_TYPE(BCM283XBaseState, BCM283XBaseClass, BCM283X_BASE) #define TYPE_BCM283X "bcm283x" -OBJECT_DECLARE_TYPE(BCM283XState, BCM283XClass, BCM283X) +OBJECT_DECLARE_SIMPLE_TYPE(BCM283XState, BCM283X) #define BCM283X_NCPUS 4 @@ -30,7 +32,7 @@ OBJECT_DECLARE_TYPE(BCM283XState, BCM283XClass, BCM283X) #define TYPE_BCM2836 "bcm2836" #define TYPE_BCM2837 "bcm2837" -struct BCM283XState { +struct BCM283XBaseState { /*< private >*/ DeviceState parent_obj; /*< public >*/ @@ -41,7 +43,28 @@ struct BCM283XState { ARMCPU core; } cpu[BCM283X_NCPUS]; BCM2836ControlState control; +}; + +struct BCM283XBaseClass { + /*< private >*/ + DeviceClass parent_class; + /*< public >*/ + const char *name; + const char *cpu_type; + unsigned core_count; + hwaddr peri_base; /* Peripheral base address seen by the CPU */ + hwaddr ctrl_base; /* Interrupt controller and mailboxes etc. */ + int clusterid; +}; + +struct BCM283XState { + /*< private >*/ + BCM283XBaseState parent_obj; + /*< public >*/ BCM2835PeripheralState peripherals; }; +bool bcm283x_common_realize(DeviceState *dev, BCMSocPeripheralBaseState *ps, + Error **errp); + #endif /* BCM2836_H */ diff --git a/include/hw/arm/bcm2838.h b/include/hw/arm/bcm2838.h new file mode 100644 index 00000000000..e53c7bedf92 --- /dev/null +++ b/include/hw/arm/bcm2838.h @@ -0,0 +1,31 @@ +/* + * BCM2838 SoC emulation + * + * Copyright (C) 2022 Ovchinnikov Vitalii + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef BCM2838_H +#define BCM2838_H + +#include "hw/arm/bcm2836.h" +#include "hw/intc/arm_gic.h" +#include "hw/arm/bcm2838_peripherals.h" + +#define BCM2838_PERI_LOW_BASE 0xfc000000 +#define BCM2838_GIC_BASE 0x40000 + +#define TYPE_BCM2838 "bcm2838" + +OBJECT_DECLARE_TYPE(BCM2838State, BCM2838Class, BCM2838) + +struct BCM2838State { + /*< private >*/ + BCM283XBaseState parent_obj; + /*< public >*/ + BCM2838PeripheralState peripherals; + GICState gic; +}; + +#endif /* BCM2838_H */ diff --git a/include/hw/arm/bcm2838_peripherals.h b/include/hw/arm/bcm2838_peripherals.h new file mode 100644 index 00000000000..7ee1bd066fa --- /dev/null +++ b/include/hw/arm/bcm2838_peripherals.h @@ -0,0 +1,84 @@ +/* + * BCM2838 peripherals emulation + * + * Copyright (C) 2022 Ovchinnikov Vitalii + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef BCM2838_PERIPHERALS_H +#define BCM2838_PERIPHERALS_H + +#include "hw/arm/bcm2835_peripherals.h" +#include "hw/sd/sdhci.h" +#include "hw/gpio/bcm2838_gpio.h" + +/* SPI */ +#define GIC_SPI_INTERRUPT_MBOX 33 +#define GIC_SPI_INTERRUPT_MPHI 40 +#define GIC_SPI_INTERRUPT_DWC2 73 +#define GIC_SPI_INTERRUPT_DMA_0 80 +#define GIC_SPI_INTERRUPT_DMA_6 86 +#define GIC_SPI_INTERRUPT_DMA_7_8 87 +#define GIC_SPI_INTERRUPT_DMA_9_10 88 +#define GIC_SPI_INTERRUPT_AUX_UART1 93 +#define GIC_SPI_INTERRUPT_SDHOST 120 +#define GIC_SPI_INTERRUPT_UART0 121 +#define GIC_SPI_INTERRUPT_RNG200 125 +#define GIC_SPI_INTERRUPT_EMMC_EMMC2 126 +#define GIC_SPI_INTERRUPT_PCI_INT_A 143 +#define GIC_SPI_INTERRUPT_GENET_A 157 +#define GIC_SPI_INTERRUPT_GENET_B 158 + + +/* GPU (legacy) DMA interrupts */ +#define GPU_INTERRUPT_DMA0 16 +#define GPU_INTERRUPT_DMA1 17 +#define GPU_INTERRUPT_DMA2 18 +#define GPU_INTERRUPT_DMA3 19 +#define GPU_INTERRUPT_DMA4 20 +#define GPU_INTERRUPT_DMA5 21 +#define GPU_INTERRUPT_DMA6 22 
+#define GPU_INTERRUPT_DMA7_8 23 +#define GPU_INTERRUPT_DMA9_10 24 +#define GPU_INTERRUPT_DMA11 25 +#define GPU_INTERRUPT_DMA12 26 +#define GPU_INTERRUPT_DMA13 27 +#define GPU_INTERRUPT_DMA14 28 +#define GPU_INTERRUPT_DMA15 31 + +#define BCM2838_MPHI_OFFSET 0xb200 +#define BCM2838_MPHI_SIZE 0x200 + +#define TYPE_BCM2838_PERIPHERALS "bcm2838-peripherals" +OBJECT_DECLARE_TYPE(BCM2838PeripheralState, BCM2838PeripheralClass, + BCM2838_PERIPHERALS) + +struct BCM2838PeripheralState { + /*< private >*/ + BCMSocPeripheralBaseState parent_obj; + + /*< public >*/ + MemoryRegion peri_low_mr; + MemoryRegion peri_low_mr_alias; + MemoryRegion mphi_mr_alias; + + SDHCIState emmc2; + BCM2838GpioState gpio; + + OrIRQState mmc_irq_orgate; + OrIRQState dma_7_8_irq_orgate; + OrIRQState dma_9_10_irq_orgate; + + UnimplementedDeviceState asb; + UnimplementedDeviceState clkisp; +}; + +struct BCM2838PeripheralClass { + /*< private >*/ + BCMSocPeripheralBaseClass parent_class; + /*< public >*/ + uint64_t peri_low_size; /* Peripheral lower range size */ +}; + +#endif /* BCM2838_PERIPHERALS_H */ diff --git a/include/hw/arm/fsl-imx6.h b/include/hw/arm/fsl-imx6.h index 519b871014a..61c593ffd24 100644 --- a/include/hw/arm/fsl-imx6.h +++ b/include/hw/arm/fsl-imx6.h @@ -32,6 +32,7 @@ #include "hw/net/imx_fec.h" #include "hw/usb/chipidea.h" #include "hw/usb/imx-usb-phy.h" +#include "hw/pci-host/designware.h" #include "exec/memory.h" #include "cpu.h" #include "qom/object.h" @@ -55,27 +56,28 @@ struct FslIMX6State { DeviceState parent_obj; /*< public >*/ - ARMCPU cpu[FSL_IMX6_NUM_CPUS]; - A9MPPrivState a9mpcore; - IMX6CCMState ccm; - IMX6SRCState src; - IMX7SNVSState snvs; - IMXSerialState uart[FSL_IMX6_NUM_UARTS]; - IMXGPTState gpt; - IMXEPITState epit[FSL_IMX6_NUM_EPITS]; - IMXI2CState i2c[FSL_IMX6_NUM_I2CS]; - IMXGPIOState gpio[FSL_IMX6_NUM_GPIOS]; - SDHCIState esdhc[FSL_IMX6_NUM_ESDHCS]; - IMXSPIState spi[FSL_IMX6_NUM_ECSPIS]; - IMX2WdtState wdt[FSL_IMX6_NUM_WDTS]; - IMXUSBPHYState usbphy[FSL_IMX6_NUM_USB_PHYS]; - ChipideaState usb[FSL_IMX6_NUM_USBS]; - IMXFECState eth; - MemoryRegion rom; - MemoryRegion caam; - MemoryRegion ocram; - MemoryRegion ocram_alias; - uint32_t phy_num; + ARMCPU cpu[FSL_IMX6_NUM_CPUS]; + A9MPPrivState a9mpcore; + IMX6CCMState ccm; + IMX6SRCState src; + IMX7SNVSState snvs; + IMXSerialState uart[FSL_IMX6_NUM_UARTS]; + IMXGPTState gpt; + IMXEPITState epit[FSL_IMX6_NUM_EPITS]; + IMXI2CState i2c[FSL_IMX6_NUM_I2CS]; + IMXGPIOState gpio[FSL_IMX6_NUM_GPIOS]; + SDHCIState esdhc[FSL_IMX6_NUM_ESDHCS]; + IMXSPIState spi[FSL_IMX6_NUM_ECSPIS]; + IMX2WdtState wdt[FSL_IMX6_NUM_WDTS]; + IMXUSBPHYState usbphy[FSL_IMX6_NUM_USB_PHYS]; + ChipideaState usb[FSL_IMX6_NUM_USBS]; + IMXFECState eth; + DesignwarePCIEHost pcie; + MemoryRegion rom; + MemoryRegion caam; + MemoryRegion ocram; + MemoryRegion ocram_alias; + uint32_t phy_num; }; diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h index 14390f60144..8277b0e8b2c 100644 --- a/include/hw/arm/fsl-imx6ul.h +++ b/include/hw/arm/fsl-imx6ul.h @@ -182,6 +182,8 @@ enum FslIMX6ULMemoryMap { FSL_IMX6UL_ENET1_ADDR = 0x02188000, FSL_IMX6UL_USBO2_USBMISC_ADDR = 0x02184800, + FSL_IMX6UL_USBO2_USBMISC_SIZE = 0x200, + FSL_IMX6UL_USBO2_USB1_ADDR = 0x02184000, FSL_IMX6UL_USBO2_USB2_ADDR = 0x02184200, diff --git a/include/hw/arm/msf2-soc.h b/include/hw/arm/msf2-soc.h index ce417a6266a..9300664e8ea 100644 --- a/include/hw/arm/msf2-soc.h +++ b/include/hw/arm/msf2-soc.h @@ -47,13 +47,10 @@ OBJECT_DECLARE_SIMPLE_TYPE(MSF2State, MSF2_SOC) #define MSF2_NUM_TIMERS 2 struct 
MSF2State { - /*< private >*/ SysBusDevice parent_obj; - /*< public >*/ ARMv7MState armv7m; - char *cpu_type; char *part_name; uint64_t envm_size; uint64_t esram_size; diff --git a/include/hw/arm/npcm7xx.h b/include/hw/arm/npcm7xx.h index 72c77220964..4e0d2101885 100644 --- a/include/hw/arm/npcm7xx.h +++ b/include/hw/arm/npcm7xx.h @@ -29,6 +29,7 @@ #include "hw/misc/npcm7xx_pwm.h" #include "hw/misc/npcm7xx_rng.h" #include "hw/net/npcm7xx_emc.h" +#include "hw/net/npcm_gmac.h" #include "hw/nvram/npcm7xx_otp.h" #include "hw/timer/npcm7xx_timer.h" #include "hw/ssi/npcm7xx_fiu.h" @@ -104,6 +105,7 @@ struct NPCM7xxState { OHCISysBusState ohci; NPCM7xxFIUState fiu[2]; NPCM7xxEMCState emc[2]; + NPCMGMACState gmac[2]; NPCM7xxSDHCIState mmc; NPCMPSPIState pspi[2]; }; diff --git a/include/hw/arm/omap.h b/include/hw/arm/omap.h index 067e9419f7e..40ee8ea9e56 100644 --- a/include/hw/arm/omap.h +++ b/include/hw/arm/omap.h @@ -1008,7 +1008,8 @@ void omap_mpu_wakeup(void *opaque, int irq, int req); __func__, paddr) /* OMAP-specific Linux bootloader tags for the ATAG_BOARD area - (Board-specifc tags are not here) */ + * (Board-specific tags are not here) + */ #define OMAP_TAG_CLOCK 0x4f01 #define OMAP_TAG_MMC 0x4f02 #define OMAP_TAG_SERIAL_CONSOLE 0x4f03 diff --git a/include/hw/arm/raspberrypi-fw-defs.h b/include/hw/arm/raspberrypi-fw-defs.h index 4551fe7450d..8b404e05336 100644 --- a/include/hw/arm/raspberrypi-fw-defs.h +++ b/include/hw/arm/raspberrypi-fw-defs.h @@ -10,7 +10,6 @@ #ifndef INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_ #define INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_ -#include "qemu/osdep.h" enum rpi_firmware_property_tag { RPI_FWREQ_PROPERTY_END = 0, @@ -160,4 +159,15 @@ enum rpi_firmware_clk_id { RPI_FIRMWARE_NUM_CLK_ID, }; +struct rpi_firmware_property_tag_header { + uint32_t tag; + uint32_t buf_size; + uint32_t req_resp_size; +}; + +typedef struct rpi_firmware_prop_request { + struct rpi_firmware_property_tag_header hdr; + uint8_t payload[0]; +} rpi_firmware_prop_request_t; + #endif /* INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_ */ diff --git a/include/hw/arm/raspi_platform.h b/include/hw/arm/raspi_platform.h index ede98e63c33..7bc4807fa51 100644 --- a/include/hw/arm/raspi_platform.h +++ b/include/hw/arm/raspi_platform.h @@ -28,6 +28,42 @@ #ifndef HW_ARM_RASPI_PLATFORM_H #define HW_ARM_RASPI_PLATFORM_H +#include "hw/boards.h" +#include "hw/arm/boot.h" + +/* Registered machine type (matches RPi Foundation bootloader and U-Boot) */ +#define MACH_TYPE_BCM2708 3138 + +#define TYPE_RASPI_BASE_MACHINE MACHINE_TYPE_NAME("raspi-base") +OBJECT_DECLARE_TYPE(RaspiBaseMachineState, RaspiBaseMachineClass, + RASPI_BASE_MACHINE) + +struct RaspiBaseMachineState { + /*< private >*/ + MachineState parent_obj; + /*< public >*/ + struct arm_boot_info binfo; +}; + +struct RaspiBaseMachineClass { + /*< private >*/ + MachineClass parent_obj; + /*< public >*/ + uint32_t board_rev; +}; + +/* Common functions for raspberry pi machines */ +const char *board_soc_type(uint32_t board_rev); +void raspi_machine_init(MachineState *machine); + +typedef struct BCM283XBaseState BCM283XBaseState; +void raspi_base_machine_init(MachineState *machine, + BCM283XBaseState *soc); + +void raspi_machine_class_common_init(MachineClass *mc, + uint32_t board_rev); +uint64_t board_ram_size(uint32_t board_rev); + #define MSYNC_OFFSET 0x0000 /* Multicore Sync Block */ #define CCPT_OFFSET 0x1000 /* Compact Camera Port 2 TX */ #define INTE_OFFSET 0x2000 /* VC Interrupt controller */ @@ -37,7 +73,7 @@ #define MPHI_OFFSET 0x6000 /* Message-based Parallel 
Host Intf. */ #define DMA_OFFSET 0x7000 /* DMA controller, channels 0-14 */ #define ARBA_OFFSET 0x9000 -#define BRDG_OFFSET 0xa000 +#define BRDG_OFFSET 0xa000 /* RPiVid ASB for BCM2838 (BCM2711) */ #define ARM_OFFSET 0xB000 /* ARM control block */ #define ARMCTRL_OFFSET (ARM_OFFSET + 0x000) #define ARMCTRL_IC_OFFSET (ARM_OFFSET + 0x200) /* Interrupt controller */ diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index fd8d772da11..5ec2e6c1a43 100644 --- a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -92,6 +92,7 @@ typedef struct SMMUTransCfg { bool disabled; /* smmu is disabled */ bool bypassed; /* translation is bypassed */ bool aborted; /* translation is aborted */ + bool affd; /* AF fault disable */ uint32_t iotlb_hits; /* counts IOTLB hits */ uint32_t iotlb_misses; /* counts IOTLB misses*/ /* Used by stage-1 only. */ diff --git a/include/hw/arm/stm32l4x5_soc.h b/include/hw/arm/stm32l4x5_soc.h new file mode 100644 index 00000000000..ee5f3624055 --- /dev/null +++ b/include/hw/arm/stm32l4x5_soc.h @@ -0,0 +1,67 @@ +/* + * STM32L4x5 SoC family + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This work is heavily inspired by the stm32f405_soc by Alistair Francis. + * Original code is licensed under the MIT License: + * + * Copyright (c) 2014 Alistair Francis + */ + +/* + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. + * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html + */ + +#ifndef HW_ARM_STM32L4x5_SOC_H +#define HW_ARM_STM32L4x5_SOC_H + +#include "exec/memory.h" +#include "hw/arm/armv7m.h" +#include "hw/or-irq.h" +#include "hw/misc/stm32l4x5_syscfg.h" +#include "hw/misc/stm32l4x5_exti.h" +#include "hw/misc/stm32l4x5_rcc.h" +#include "hw/gpio/stm32l4x5_gpio.h" +#include "qom/object.h" + +#define TYPE_STM32L4X5_SOC "stm32l4x5-soc" +#define TYPE_STM32L4X5XC_SOC "stm32l4x5xc-soc" +#define TYPE_STM32L4X5XE_SOC "stm32l4x5xe-soc" +#define TYPE_STM32L4X5XG_SOC "stm32l4x5xg-soc" +OBJECT_DECLARE_TYPE(Stm32l4x5SocState, Stm32l4x5SocClass, STM32L4X5_SOC) + +#define NUM_EXTI_OR_GATES 4 + +struct Stm32l4x5SocState { + SysBusDevice parent_obj; + + ARMv7MState armv7m; + + Stm32l4x5ExtiState exti; + OrIRQState exti_or_gates[NUM_EXTI_OR_GATES]; + Stm32l4x5SyscfgState syscfg; + Stm32l4x5RccState rcc; + Stm32l4x5GpioState gpio[NUM_GPIOS]; + + MemoryRegion sram1; + MemoryRegion sram2; + MemoryRegion flash; + MemoryRegion flash_alias; +}; + +struct Stm32l4x5SocClass { + SysBusDeviceClass parent_class; + + size_t flash_size; +}; + +#endif diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index f69239850e6..bb486d36b14 100644 --- a/include/hw/arm/virt.h +++ b/include/hw/arm/virt.h @@ -130,6 +130,7 @@ struct VirtMachineClass { /* Machines < 6.2 have no support for describing cpu topology to guest */ bool no_cpu_topology; bool no_tcg_lpa2; + bool no_ns_el2_virt_timer_irq; }; struct VirtMachineState { @@ -173,6 +174,7 @@ struct VirtMachineState { PCIBus *bus; char *oem_id; char *oem_table_id; + bool ns_el2_virt_timer_irq; }; #define VIRT_ECAM_ID(high) (high ? 
VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM) diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h index b24fa64557f..025beb5532d 100644 --- a/include/hw/arm/xlnx-versal.h +++ b/include/hw/arm/xlnx-versal.h @@ -34,6 +34,7 @@ #include "hw/net/xlnx-versal-canfd.h" #include "hw/misc/xlnx-versal-cfu.h" #include "hw/misc/xlnx-versal-cframe-reg.h" +#include "target/arm/cpu.h" #define TYPE_XLNX_VERSAL "xlnx-versal" OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL) diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h index 96358d51ebb..48f79480921 100644 --- a/include/hw/arm/xlnx-zynqmp.h +++ b/include/hw/arm/xlnx-zynqmp.h @@ -22,7 +22,7 @@ #include "hw/net/cadence_gem.h" #include "hw/char/cadence_uart.h" #include "hw/net/xlnx-zynqmp-can.h" -#include "hw/ide/ahci.h" +#include "hw/ide/ahci-sysbus.h" #include "hw/sd/sdhci.h" #include "hw/ssi/xilinx_spips.h" #include "hw/dma/xlnx_dpdma.h" diff --git a/include/hw/audio/asc.h b/include/hw/audio/asc.h index 4741f92c461..04fac270b6a 100644 --- a/include/hw/audio/asc.h +++ b/include/hw/audio/asc.h @@ -13,7 +13,6 @@ #ifndef HW_AUDIO_ASC_H #define HW_AUDIO_ASC_H -#include "qemu/osdep.h" #include "hw/sysbus.h" #include "audio/audio.h" diff --git a/include/hw/audio/virtio-snd.h b/include/hw/audio/virtio-snd.h index c3767f442b2..8dafedb276d 100644 --- a/include/hw/audio/virtio-snd.h +++ b/include/hw/audio/virtio-snd.h @@ -151,7 +151,6 @@ struct VirtIOSoundPCMStream { QemuMutex queue_mutex; bool active; QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) queue; - QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) invalid; }; /* @@ -223,6 +222,21 @@ struct VirtIOSound { QemuMutex cmdq_mutex; QTAILQ_HEAD(, virtio_snd_ctrl_command) cmdq; bool processing_cmdq; + /* + * Convenience queue to keep track of invalid tx/rx queue messages inside + * the tx/rx callbacks. + * + * In the callbacks as a first step we are emptying the virtqueue to handle + * each message and we cannot add an invalid message back to the queue: we + * would re-process it in subsequent loop iterations. + * + * Instead, we add them to this queue and after finishing examining every + * virtqueue element, we inform the guest for each invalid message. + * + * This queue must be empty at all times except for inside the tx/rx + * callbacks. 
+ */ + QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) invalid; }; struct virtio_snd_ctrl_command { @@ -230,6 +244,7 @@ struct virtio_snd_ctrl_command { VirtQueue *vq; virtio_snd_hdr ctrl; virtio_snd_hdr resp; + size_t payload_size; QTAILQ_ENTRY(virtio_snd_ctrl_command) next; }; #endif diff --git a/include/hw/block/block.h b/include/hw/block/block.h index 15fff664350..de3946a5f1e 100644 --- a/include/hw/block/block.h +++ b/include/hw/block/block.h @@ -88,8 +88,8 @@ static inline unsigned int get_physical_block_exp(BlockConf *conf) /* Backend access helpers */ -bool blk_check_size_and_read_all(BlockBackend *blk, void *buf, hwaddr size, - Error **errp); +bool blk_check_size_and_read_all(BlockBackend *blk, DeviceState *dev, + void *buf, hwaddr size, Error **errp); /* Configuration helpers */ diff --git a/include/hw/block/fdc.h b/include/hw/block/fdc.h index 35248c08379..c367c5efeaf 100644 --- a/include/hw/block/fdc.h +++ b/include/hw/block/fdc.h @@ -14,6 +14,9 @@ void fdctrl_init_sysbus(qemu_irq irq, hwaddr mmio_base, DriveInfo **fds); void sun4m_fdctrl_init(qemu_irq irq, hwaddr io_base, DriveInfo **fds, qemu_irq *fdc_tc); +void isa_fdc_set_iobase(ISADevice *fdc, hwaddr iobase); +void isa_fdc_set_enabled(ISADevice *fdc, bool enabled); + FloppyDriveType isa_fdc_get_drive_type(ISADevice *fdc, int i); int cmos_get_fd_drive_type(FloppyDriveType fd0); diff --git a/include/hw/block/flash.h b/include/hw/block/flash.h index de93756cbe8..2b5ccd92f46 100644 --- a/include/hw/block/flash.h +++ b/include/hw/block/flash.h @@ -78,6 +78,8 @@ extern const VMStateDescription vmstate_ecc_state; /* m25p80.c */ +#define TYPE_M25P80 "m25p80-generic" + BlockBackend *m25p80_get_blk(DeviceState *dev); #endif diff --git a/include/hw/boards.h b/include/hw/boards.h index da85f86efb9..8b8f6d5c00d 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -24,6 +24,12 @@ OBJECT_DECLARE_TYPE(MachineState, MachineClass, MACHINE) extern MachineState *current_machine; +/** + * machine_class_default_cpu_type: Return the machine default CPU type. 
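One plausible use of the new floppy controller helpers (my sketch; the disable/move/enable sequence is an assumption modelled on how a super I/O device might reprogram its functions, not something this patch shows):

    #include "qemu/osdep.h"
    #include "exec/hwaddr.h"
    #include "hw/isa/isa.h"
    #include "hw/block/fdc.h"

    static void demo_relocate_fdc(ISADevice *fdc, hwaddr new_iobase)
    {
        /* Quiesce the I/O window before moving it, then re-enable it. */
        isa_fdc_set_enabled(fdc, false);
        isa_fdc_set_iobase(fdc, new_iobase);
        isa_fdc_set_enabled(fdc, true);
    }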
+ * @mc: Machine class + */ +const char *machine_class_default_cpu_type(MachineClass *mc); + void machine_add_audiodev_property(MachineClass *mc); void machine_run_board_init(MachineState *machine, const char *mem_path, Error **errp); bool machine_usb(MachineState *machine); @@ -114,7 +120,7 @@ typedef struct CPUArchId { uint64_t arch_id; int64_t vcpus_count; CpuInstanceProperties props; - Object *cpu; + CPUState *cpu; const char *type; } CPUArchId; @@ -419,6 +425,9 @@ struct MachineState { } \ type_init(machine_initfn##_register_types) +extern GlobalProperty hw_compat_8_2[]; +extern const size_t hw_compat_8_2_len; + extern GlobalProperty hw_compat_8_1[]; extern const size_t hw_compat_8_1_len; diff --git a/include/hw/char/grlib_uart.h b/include/hw/char/grlib_uart.h new file mode 100644 index 00000000000..7496f8fd5e9 --- /dev/null +++ b/include/hw/char/grlib_uart.h @@ -0,0 +1,32 @@ +/* + * QEMU GRLIB UART + * + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2024 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef GRLIB_UART_H +#define GRLIB_UART_H + +#define TYPE_GRLIB_APB_UART "grlib-apbuart" + +#endif diff --git a/include/hw/char/imx_serial.h b/include/hw/char/imx_serial.h index b823f945195..65f0e97c76b 100644 --- a/include/hw/char/imx_serial.h +++ b/include/hw/char/imx_serial.h @@ -21,12 +21,16 @@ #include "hw/sysbus.h" #include "chardev/char-fe.h" #include "qom/object.h" +#include "qemu/fifo32.h" #define TYPE_IMX_SERIAL "imx.serial" OBJECT_DECLARE_SIMPLE_TYPE(IMXSerialState, IMX_SERIAL) +#define FIFO_SIZE 32 + #define URXD_CHARRDY (1<<15) /* character read is valid */ #define URXD_ERR (1<<14) /* Character has error */ +#define URXD_OVRRUN (1<<13) /* 32nd character in RX FIFO */ #define URXD_FRMERR (1<<12) /* Character has frame error */ #define URXD_BRK (1<<11) /* Break received */ @@ -65,11 +69,13 @@ OBJECT_DECLARE_SIMPLE_TYPE(IMXSerialState, IMX_SERIAL) #define UCR1_TXMPTYEN (1<<6) /* Tx Empty Interrupt Enable */ #define UCR1_UARTEN (1<<0) /* UART Enable */ +#define UCR2_ATEN (1<<3) /* Ageing Timer Enable */ #define UCR2_TXEN (1<<2) /* Transmitter enable */ #define UCR2_RXEN (1<<1) /* Receiver enable */ #define UCR2_SRST (1<<0) /* Reset complete */ #define UCR4_DREN BIT(0) /* Receive Data Ready interrupt enable */ +#define UCR4_OREN BIT(1) /* Overrun interrupt enable */ #define UCR4_TCEN BIT(3) /* TX complete interrupt enable */ #define UCR4_WKEN BIT(7) /* WAKE interrupt enable */ @@ -78,13 +84,25 @@ OBJECT_DECLARE_SIMPLE_TYPE(IMXSerialState, IMX_SERIAL) #define UTS1_TXFULL (1<<4) #define UTS1_RXFULL (1<<3) +#define TL_MASK 0x3f + + /* Bit time in nanoseconds assuming maximum baud rate of 115200 */ +#define BIT_TIME_NS 8681 + +/* Assume 8 bits per character */ +#define NUM_BITS 8 + +/* Ageing timer triggers after 8 characters */ +#define AGE_DURATION_NS (8 * NUM_BITS * BIT_TIME_NS) + struct IMXSerialState { /*< private >*/ SysBusDevice parent_obj; /*< public >*/ MemoryRegion iomem; - int32_t readbuff; + QEMUTimer ageing_timer; + Fifo32 rx_fifo; uint32_t usr1; uint32_t usr2; diff --git a/include/hw/char/parallel-isa.h b/include/hw/char/parallel-isa.h index d24ccecf05c..5284b2ffecc 100644 --- a/include/hw/char/parallel-isa.h +++ b/include/hw/char/parallel-isa.h @@ -12,6 +12,7 @@ #include "parallel.h" +#include "exec/ioport.h" #include "hw/isa/isa.h" #include "qom/object.h" @@ -25,6 +26,10 @@ struct ISAParallelState { uint32_t iobase; uint32_t isairq; ParallelState state; + PortioList portio_list; }; +void isa_parallel_set_iobase(ISADevice *parallel, hwaddr iobase); +void isa_parallel_set_enabled(ISADevice *parallel, bool enabled); + #endif /* HW_PARALLEL_ISA_H */ diff --git a/include/hw/char/parallel.h b/include/hw/char/parallel.h index 7b5a309a039..cfb97cc7cc9 100644 --- a/include/hw/char/parallel.h +++ b/include/hw/char/parallel.h @@ -1,7 +1,6 @@ #ifndef HW_PARALLEL_H #define HW_PARALLEL_H -#include "exec/ioport.h" #include "exec/memory.h" #include "hw/isa/isa.h" #include "hw/irq.h" @@ -22,7 +21,6 @@ typedef struct ParallelState { uint32_t last_read_offset; /* For debugging */ /* Memory-mapped interface */ int it_shift; - PortioList portio_list; } ParallelState; void parallel_hds_isa_init(ISABus *bus, int n); diff --git a/include/hw/char/serial.h b/include/hw/char/serial.h index 8ba7eca3d67..6e14099ee7f 100644 --- a/include/hw/char/serial.h +++ b/include/hw/char/serial.h @@ -112,5 +112,7 @@ SerialMM *serial_mm_init(MemoryRegion *address_space, #define TYPE_ISA_SERIAL "isa-serial" void serial_hds_isa_init(ISABus *bus, int from, int to); +void isa_serial_set_iobase(ISADevice 
*serial, hwaddr iobase); +void isa_serial_set_enabled(ISADevice *serial, bool enabled); #endif diff --git a/include/hw/clock.h b/include/hw/clock.h index bb12117f67b..eb58599131c 100644 --- a/include/hw/clock.h +++ b/include/hw/clock.h @@ -357,6 +357,8 @@ char *clock_display_freq(Clock *clk); * @multiplier: multiplier value * @divider: divider value * + * @return: true if the clock is changed. + * * By default, a Clock's children will all run with the same period * as their parent. This function allows you to adjust the multiplier * and divider used to derive the child clock frequency. @@ -374,6 +376,6 @@ char *clock_display_freq(Clock *clk); * Note that this function does not call clock_propagate(); the * caller should do that if necessary. */ -void clock_set_mul_div(Clock *clk, uint32_t multiplier, uint32_t divider); +bool clock_set_mul_div(Clock *clk, uint32_t multiplier, uint32_t divider); #endif /* QEMU_HW_CLOCK_H */ diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index c0c8320413e..ec14f74ce5d 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -22,8 +22,8 @@ #include "hw/qdev-core.h" #include "disas/dis-asm.h" -#include "exec/cpu-common.h" #include "exec/hwaddr.h" +#include "exec/vaddr.h" #include "exec/memattrs.h" #include "exec/tlb-common.h" #include "qapi/qapi-types-run-state.h" @@ -31,7 +31,6 @@ #include "qemu/rcu_queue.h" #include "qemu/queue.h" #include "qemu/thread.h" -#include "qemu/plugin-event.h" #include "qom/object.h" typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size, @@ -90,9 +89,6 @@ typedef enum MMUAccessType { typedef struct CPUWatchpoint CPUWatchpoint; -/* see tcg-cpu-ops.h */ -struct TCGCPUOps; - /* see accel-cpu.h */ struct AccelCPUClass; @@ -106,6 +102,8 @@ struct SysemuCPUOps; * @parse_features: Callback to parse command line arguments. * @reset_dump_flags: #CPUDumpFlags to use for reset logging. * @has_work: Callback for checking if there is work to do. + * @mmu_index: Callback for choosing softmmu mmu index; + * may be used internally by memory_rw_debug without TCG. * @memory_rw_debug: Callback for GDB memory access. * @dump_state: Callback for dumping state. * @query_cpu_fast: @@ -127,15 +125,13 @@ struct SysemuCPUOps; * @gdb_adjust_breakpoint: Callback for adjusting the address of a * breakpoint. Used by AVR to handle a gdb mis-feature with * its Harvard architecture split code and data. - * @gdb_num_core_regs: Number of core registers accessible to GDB. + * @gdb_num_core_regs: Number of core registers accessible to GDB or 0 to infer + * from @gdb_core_xml_file. * @gdb_core_xml_file: File name for core registers GDB XML description. * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop * before the insn which triggers a watchpoint rather than after it. * @gdb_arch_name: Optional callback that returns the architecture name known * to GDB. The caller must free the returned string with g_free. - * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the - * gdb stub. Returns a pointer to the XML contents for the specified XML file - * or NULL if the CPU doesn't have a dynamically generated content for it. * @disas_set_info: Setup architecture specific components of disassembly info * @adjust_watchpoint_address: Perform a target-specific adjustment to an * address before attempting to match it against watchpoints. 
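Per the updated comment, clock_set_mul_div() now reports whether the ratio actually changed while still leaving propagation to the caller; a minimal sketch of the resulting idiom (the divider value is arbitrary):

    #include "qemu/osdep.h"
    #include "hw/clock.h"

    static void demo_update_divider(Clock *clk_out, uint32_t div)
    {
        /* Only propagate when the mul/div pair really changed. */
        if (clock_set_mul_div(clk_out, 1, div)) {
            clock_propagate(clk_out);
        }
    }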
@@ -153,6 +149,7 @@ struct CPUClass { void (*parse_features)(const char *typename, char *str, Error **errp); bool (*has_work)(CPUState *cpu); + int (*mmu_index)(CPUState *cpu, bool ifetch); int (*memory_rw_debug)(CPUState *cpu, vaddr addr, uint8_t *buf, int len, bool is_write); void (*dump_state)(CPUState *cpu, FILE *, int flags); @@ -166,7 +163,6 @@ struct CPUClass { const char *gdb_core_xml_file; const gchar * (*gdb_arch_name)(CPUState *cpu); - const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname); void (*disas_set_info)(CPUState *cpu, disassemble_info *info); @@ -177,7 +173,7 @@ struct CPUClass { const struct SysemuCPUOps *sysemu_ops; /* when TCG is not available, this pointer is NULL */ - const struct TCGCPUOps *tcg_ops; + const TCGCPUOps *tcg_ops; /* * if not NULL, this is called in order for the CPUClass to initialize @@ -234,6 +230,9 @@ typedef struct CPUTLBEntryFull { /* @lg_page_size contains the log2 of the page size. */ uint8_t lg_page_size; + /* Additional tlb flags requested by tlb_fill. */ + uint8_t tlb_fill_flags; + /* * Additional tlb flags for use by the slow path. If non-zero, * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW. @@ -430,17 +429,15 @@ struct qemu_work_item; * @gdb_regs: Additional GDB registers. * @gdb_num_regs: Number of total registers accessible to GDB. * @gdb_num_g_regs: Number of registers in GDB 'g' packets. - * @next_cpu: Next CPU sharing TB cache. + * @node: QTAILQ of CPUs sharing TB cache. * @opaque: User data. * @mem_io_pc: Host Program Counter at which the memory was accessed. * @accel: Pointer to accelerator specific state. * @kvm_fd: vCPU file descriptor for KVM. * @work_mutex: Lock to prevent multiple access to @work_list. * @work_list: List of pending asynchronous work. - * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes - * to @trace_dstate). - * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask). - * @plugin_mask: Plugin event bitmap. Modified only via async work. + * @plugin_mem_cbs: active plugin memory callbacks + * @plugin_state: per-CPU plugin state * @ignore_memory_transaction_failures: Cached copy of the MachineState * flag of the same name: allows the board to suppress calling of the * CPU do_transaction_failed hook function. @@ -532,10 +529,13 @@ struct CPUState { /* Use by accel-block: CPU is executing an ioctl() */ QemuLockCnt in_ioctl_lock; - DECLARE_BITMAP(plugin_mask, QEMU_PLUGIN_EV_MAX); - #ifdef CONFIG_PLUGIN + /* + * The callback pointer stays in the main CPUState as it is + * accessed via TCG (see gen_empty_mem_helper). + */ GArray *plugin_mem_cbs; + CPUPluginState *plugin_state; #endif /* TODO Move common fields from CPUArchState here. */ @@ -779,6 +779,19 @@ void cpu_reset(CPUState *cpu); */ ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model); +/** + * cpu_model_from_type: + * @typename: The CPU type name + * + * Extract the CPU model name from the CPU type name. The + * CPU type name is either the combination of the CPU model + * name and suffix, or the same as the CPU model name. + * + * Returns: CPU model name or NULL if the CPU class doesn't exist + * The user should g_free() the string once no longer needed. + */ +char *cpu_model_from_type(const char *typename); + /** + * cpu_create: + * @typename: The CPU type. 
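A sketch of the ownership rule documented for cpu_model_from_type(): the returned model string is newly allocated (or NULL if the class does not exist) and is released with g_free(). The printing is only for illustration.

    #include "qemu/osdep.h"
    #include "hw/core/cpu.h"

    static void demo_print_model(const char *typename)
    {
        char *model = cpu_model_from_type(typename);

        if (model) {
            printf("%s -> model %s\n", typename, model);
            g_free(model);   /* caller owns the returned string */
        } else {
            printf("%s: no such CPU class\n", typename);
        }
    }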
@@ -1169,8 +1182,6 @@ bool target_words_bigendian(void); const char *target_name(void); -void page_size_init(void); - #ifdef NEED_CPU_H #ifndef CONFIG_USER_ONLY diff --git a/include/hw/core/resetcontainer.h b/include/hw/core/resetcontainer.h new file mode 100644 index 00000000000..23db0c7a880 --- /dev/null +++ b/include/hw/core/resetcontainer.h @@ -0,0 +1,48 @@ +/* + * Reset container + * + * Copyright (c) 2024 Linaro, Ltd + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef HW_RESETCONTAINER_H +#define HW_RESETCONTAINER_H + +/* + * The "reset container" is an object which implements the Resettable + * interface. It contains a list of arbitrary other objects which also + * implement Resettable. Resetting the reset container resets all the + * objects in it. + */ + +#include "qom/object.h" + +#define TYPE_RESETTABLE_CONTAINER "resettable-container" +OBJECT_DECLARE_TYPE(ResettableContainer, ResettableContainerClass, RESETTABLE_CONTAINER) + +/** + * resettable_container_add: Add a resettable object to the container + * @rc: container + * @obj: object to add to the container + * + * Add @obj to the ResettableContainer @rc. @obj must implement the + * Resettable interface. + * + * When @rc is reset, it will reset every object that has been added + * to it, in the order they were added. + */ +void resettable_container_add(ResettableContainer *rc, Object *obj); + +/** + * resettable_container_remove: Remove an object from the container + * @rc: container + * @obj: object to remove from the container + * + * Remove @obj from the ResettableContainer @rc. @obj must have been + * previously added to this container. + */ +void resettable_container_remove(ResettableContainer *rc, Object *obj); + +#endif diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h index 479713a36e3..bf8ff8e3eec 100644 --- a/include/hw/core/tcg-cpu-ops.h +++ b/include/hw/core/tcg-cpu-ops.h @@ -50,7 +50,7 @@ struct TCGCPUOps { void (*debug_excp_handler)(CPUState *cpu); #ifdef NEED_CPU_H -#if defined(CONFIG_USER_ONLY) && defined(TARGET_I386) +#ifdef CONFIG_USER_ONLY /** * @fake_user_interrupt: Callback for 'fake exception' handling. * @@ -58,13 +58,7 @@ struct TCGCPUOps { * cpu execution loop (hack for x86 user mode). */ void (*fake_user_interrupt)(CPUState *cpu); -#else - /** - * @do_interrupt: Callback for interrupt handling. - */ - void (*do_interrupt)(CPUState *cpu); -#endif /* !CONFIG_USER_ONLY || !TARGET_I386 */ -#ifdef CONFIG_USER_ONLY + /** * record_sigsegv: * @cpu: cpu context @@ -114,8 +108,12 @@ struct TCGCPUOps { void (*record_sigbus)(CPUState *cpu, vaddr addr, MMUAccessType access_type, uintptr_t ra); #else + /** @do_interrupt: Callback for interrupt handling. */ + void (*do_interrupt)(CPUState *cpu); /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */ bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request); + /** @cpu_exec_halt: Callback for handling halt in cpu_exec */ + void (*cpu_exec_halt)(CPUState *cpu); /** * @tlb_fill: Handle a softmmu tlb miss * @@ -170,6 +168,11 @@ struct TCGCPUOps { */ bool (*io_recompile_replay_branch)(CPUState *cpu, const TranslationBlock *tb); + /** + * @need_replay_interrupt: Return %true if @interrupt_request + * needs to be recorded for replay purposes. 
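A sketch of how the new container might be driven (my construction, not from the patch): members are reset in insertion order when the container itself is reset through the Resettable interface.

    #include "qemu/osdep.h"
    #include "hw/core/resetcontainer.h"
    #include "hw/resettable.h"

    static void demo_reset_group(Object *dev_a, Object *dev_b)
    {
        ResettableContainer *rc =
            RESETTABLE_CONTAINER(object_new(TYPE_RESETTABLE_CONTAINER));

        resettable_container_add(rc, dev_a);
        resettable_container_add(rc, dev_b);

        /* Resets dev_a, then dev_b, in the order they were added. */
        resettable_reset(OBJECT(rc), RESET_TYPE_COLD);

        object_unref(OBJECT(rc));
    }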
+ */ + bool (*need_replay_interrupt)(int interrupt_request); #endif /* !CONFIG_USER_ONLY */ #endif /* NEED_CPU_H */ diff --git a/include/hw/cris/etraxfs.h b/include/hw/cris/etraxfs.h index 467b529dc06..012c4e99743 100644 --- a/include/hw/cris/etraxfs.h +++ b/include/hw/cris/etraxfs.h @@ -31,7 +31,7 @@ #include "hw/sysbus.h" #include "qapi/error.h" -DeviceState *etraxfs_eth_init(NICInfo *nd, hwaddr base, int phyaddr, +DeviceState *etraxfs_eth_init(hwaddr base, int phyaddr, struct etraxfs_dma_client *dma_out, struct etraxfs_dma_client *dma_in); diff --git a/include/hw/cxl/cxl_cdat.h b/include/hw/cxl/cxl_cdat.h index 7f676386859..17a09066dc3 100644 --- a/include/hw/cxl/cxl_cdat.h +++ b/include/hw/cxl/cxl_cdat.h @@ -16,17 +16,17 @@ /* * Reference: * Coherent Device Attribute Table (CDAT) Specification, Rev. 1.03, July. 2022 - * Compute Express Link (CXL) Specification, Rev. 3.0, Aug. 2022 + * Compute Express Link (CXL) Specification, Rev. 3.1, Aug. 2023 */ -/* Table Access DOE - CXL r3.0 8.1.11 */ +/* Table Access DOE - CXL r3.1 8.1.11 */ #define CXL_DOE_TABLE_ACCESS 2 #define CXL_DOE_PROTOCOL_CDAT ((CXL_DOE_TABLE_ACCESS << 16) | CXL_VENDOR_ID) -/* Read Entry - CXL r3.0 8.1.11.1 */ +/* Read Entry - CXL r3.1 8.1.11.1 */ #define CXL_DOE_TAB_TYPE_CDAT 0 #define CXL_DOE_TAB_ENT_MAX 0xFFFF -/* Read Entry Request - CXL r3.0 8.1.11.1 Table 8-13 */ +/* Read Entry Request - CXL r3.1 8.1.11.1 Table 8-13 */ #define CXL_DOE_TAB_REQ 0 typedef struct CDATReq { DOEHeader header; @@ -35,7 +35,7 @@ typedef struct CDATReq { uint16_t entry_handle; } QEMU_PACKED CDATReq; -/* Read Entry Response - CXL r3.0 8.1.11.1 Table 8-14 */ +/* Read Entry Response - CXL r3.1 8.1.11.1 Table 8-14 */ #define CXL_DOE_TAB_RSP 0 typedef struct CDATRsp { DOEHeader header; @@ -82,7 +82,8 @@ typedef struct CDATDsmas { uint16_t reserved; uint64_t DPA_base; uint64_t DPA_length; -} QEMU_PACKED CDATDsmas; +} CDATDsmas; +QEMU_BUILD_BUG_ON(sizeof(CDATDsmas) != 24); /* Device Scoped Latency and Bandwidth Information Structure - CDAT Table 5 */ typedef struct CDATDslbis { @@ -95,7 +96,8 @@ typedef struct CDATDslbis { uint64_t entry_base_unit; uint16_t entry[3]; uint16_t reserved2; -} QEMU_PACKED CDATDslbis; +} CDATDslbis; +QEMU_BUILD_BUG_ON(sizeof(CDATDslbis) != 24); /* Device Scoped Memory Side Cache Information Structure - CDAT Table 6 */ typedef struct CDATDsmscis { @@ -122,7 +124,8 @@ typedef struct CDATDsemts { uint16_t reserved; uint64_t DPA_offset; uint64_t DPA_length; -} QEMU_PACKED CDATDsemts; +} CDATDsemts; +QEMU_BUILD_BUG_ON(sizeof(CDATDsemts) != 24); /* Switch Scoped Latency and Bandwidth Information Structure - CDAT Table 9 */ typedef struct CDATSslbisHeader { @@ -130,7 +133,8 @@ typedef struct CDATSslbisHeader { uint8_t data_type; uint8_t reserved[3]; uint64_t entry_base_unit; -} QEMU_PACKED CDATSslbisHeader; +} CDATSslbisHeader; +QEMU_BUILD_BUG_ON(sizeof(CDATSslbisHeader) != 16); #define CDAT_PORT_ID_USP 0x100 /* Switch Scoped Latency and Bandwidth Entry - CDAT Table 10 */ @@ -139,12 +143,13 @@ typedef struct CDATSslbe { uint16_t port_y_id; uint16_t latency_bandwidth; uint16_t reserved; -} QEMU_PACKED CDATSslbe; +} CDATSslbe; +QEMU_BUILD_BUG_ON(sizeof(CDATSslbe) != 8); typedef struct CDATSslbis { CDATSslbisHeader sslbis_header; CDATSslbe sslbe[]; -} QEMU_PACKED CDATSslbis; +} CDATSslbis; typedef struct CDATEntry { void *base; diff --git a/include/hw/cxl/cxl_component.h b/include/hw/cxl/cxl_component.h index 5227a8e8338..5012fab6f76 100644 --- a/include/hw/cxl/cxl_component.h +++ b/include/hw/cxl/cxl_component.h @@ -10,7 +10,7 
@@ #ifndef CXL_COMPONENT_H #define CXL_COMPONENT_H -/* CXL 2.0 - 8.2.4 */ +/* CXL r3.1 Section 8.2.4: CXL.cache and CXL.mem Registers */ #define CXL2_COMPONENT_IO_REGION_SIZE 0x1000 #define CXL2_COMPONENT_CM_REGION_SIZE 0x1000 #define CXL2_COMPONENT_BLOCK_SIZE 0x10000 @@ -25,6 +25,7 @@ enum reg_type { CXL2_TYPE3_DEVICE, CXL2_LOGICAL_DEVICE, CXL2_ROOT_PORT, + CXL2_RC, CXL2_UPSTREAM_PORT, CXL2_DOWNSTREAM_PORT, CXL3_SWITCH_MAILBOX_CCI, @@ -34,10 +35,11 @@ enum reg_type { * Capability registers are defined at the top of the CXL.cache/mem region and * are packed. For our purposes we will always define the caps in the same * order. - * CXL 2.0 - 8.2.5 Table 142 for details. + * CXL r3.1 Table 8-22: CXL_CAPABILITY_ID Assignment for details. */ -/* CXL 2.0 - 8.2.5.1 */ +/* CXL r3.1 Section 8.2.4.1: CXL Capability Header Register */ +#define CXL_CAPABILITY_VERSION 1 REG32(CXL_CAPABILITY_HEADER, 0) FIELD(CXL_CAPABILITY_HEADER, ID, 0, 16) FIELD(CXL_CAPABILITY_HEADER, VERSION, 16, 4) @@ -60,8 +62,9 @@ CXLx_CAPABILITY_HEADER(SNOOP, 0x14) * implements. Some of these are specific to certain types of components, but * this implementation leaves enough space regardless. */ -/* 8.2.5.9 - CXL RAS Capability Structure */ +/* CXL r3.1 Section 8.2.4.17: CXL RAS Capability Structure */ +#define CXL_RAS_CAPABILITY_VERSION 3 /* Give ample space for caps before this */ #define CXL_RAS_REGISTERS_OFFSET 0x80 #define CXL_RAS_REGISTERS_SIZE 0x58 @@ -95,22 +98,26 @@ REG32(CXL_RAS_COR_ERR_STATUS, CXL_RAS_REGISTERS_OFFSET + 0xc) REG32(CXL_RAS_COR_ERR_MASK, CXL_RAS_REGISTERS_OFFSET + 0x10) REG32(CXL_RAS_ERR_CAP_CTRL, CXL_RAS_REGISTERS_OFFSET + 0x14) FIELD(CXL_RAS_ERR_CAP_CTRL, FIRST_ERROR_POINTER, 0, 6) + FIELD(CXL_RAS_ERR_CAP_CTRL, MULTIPLE_HEADER_RECORDING_CAP, 9, 1) + FIELD(CXL_RAS_ERR_POISON_ENABLED, POISON_ENABLED, 13, 1) REG32(CXL_RAS_ERR_HEADER0, CXL_RAS_REGISTERS_OFFSET + 0x18) #define CXL_RAS_ERR_HEADER_NUM 32 /* Offset 0x18 - 0x58 reserved for RAS logs */ -/* 8.2.5.10 - CXL Security Capability Structure */ +/* CXL r3.1 Section 8.2.4.18: CXL Security Capability Structure */ #define CXL_SEC_REGISTERS_OFFSET \ (CXL_RAS_REGISTERS_OFFSET + CXL_RAS_REGISTERS_SIZE) #define CXL_SEC_REGISTERS_SIZE 0 /* We don't implement 1.1 downstream ports */ -/* 8.2.5.11 - CXL Link Capability Structure */ +/* CXL r3.1 Section 8.2.4.19: CXL Link Capability Structure */ +#define CXL_LINK_CAPABILITY_VERSION 2 #define CXL_LINK_REGISTERS_OFFSET \ (CXL_SEC_REGISTERS_OFFSET + CXL_SEC_REGISTERS_SIZE) -#define CXL_LINK_REGISTERS_SIZE 0x38 +#define CXL_LINK_REGISTERS_SIZE 0x50 -/* 8.2.5.12 - CXL HDM Decoder Capability Structure */ -#define HDM_DECODE_MAX 10 /* 8.2.5.12.1 */ +/* CXL r3.1 Section 8.2.4.20: CXL HDM Decoder Capability Structure */ +#define HDM_DECODE_MAX 10 /* Maximum decoders for Devices */ +#define CXL_HDM_CAPABILITY_VERSION 3 #define CXL_HDM_REGISTERS_OFFSET \ (CXL_LINK_REGISTERS_OFFSET + CXL_LINK_REGISTERS_SIZE) #define CXL_HDM_REGISTERS_SIZE (0x10 + 0x20 * HDM_DECODE_MAX) @@ -133,6 +140,11 @@ REG32(CXL_RAS_ERR_HEADER0, CXL_RAS_REGISTERS_OFFSET + 0x18) FIELD(CXL_HDM_DECODER##n##_CTRL, COMMITTED, 10, 1) \ FIELD(CXL_HDM_DECODER##n##_CTRL, ERR, 11, 1) \ FIELD(CXL_HDM_DECODER##n##_CTRL, TYPE, 12, 1) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, BI, 13, 1) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, UIO, 14, 1) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, UIG, 16, 4) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, UIW, 20, 4) \ + FIELD(CXL_HDM_DECODER##n##_CTRL, ISP, 24, 4) \ REG32(CXL_HDM_DECODER##n##_TARGET_LIST_LO, \ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x24) \ 
REG32(CXL_HDM_DECODER##n##_TARGET_LIST_HI, \ @@ -148,6 +160,12 @@ REG32(CXL_HDM_DECODER_CAPABILITY, CXL_HDM_REGISTERS_OFFSET) FIELD(CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 8, 1) FIELD(CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 9, 1) FIELD(CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 10, 1) + FIELD(CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 11, 1) + FIELD(CXL_HDM_DECODER_CAPABILITY, 16_WAY, 12, 1) + FIELD(CXL_HDM_DECODER_CAPABILITY, UIO, 13, 1) + FIELD(CXL_HDM_DECODER_CAPABILITY, UIO_DECODER_COUNT, 16, 4) + FIELD(CXL_HDM_DECODER_CAPABILITY, MEMDATA_NXM_CAP, 20, 1) + FIELD(CXL_HDM_DECODER_CAPABILITY, SUPPORTED_COHERENCY_MODEL, 21, 2) REG32(CXL_HDM_DECODER_GLOBAL_CONTROL, CXL_HDM_REGISTERS_OFFSET + 4) FIELD(CXL_HDM_DECODER_GLOBAL_CONTROL, POISON_ON_ERR_EN, 0, 1) FIELD(CXL_HDM_DECODER_GLOBAL_CONTROL, HDM_DECODER_ENABLE, 1, 1) @@ -160,18 +178,24 @@ HDM_DECODER_INIT(1); HDM_DECODER_INIT(2); HDM_DECODER_INIT(3); -/* 8.2.5.13 - CXL Extended Security Capability Structure (Root complex only) */ +/* + * CXL r3.1 Section 8.2.4.21: CXL Extended Security Capability Structure + * (Root complex only) + */ #define EXTSEC_ENTRY_MAX 256 +#define CXL_EXTSEC_CAP_VERSION 2 #define CXL_EXTSEC_REGISTERS_OFFSET \ (CXL_HDM_REGISTERS_OFFSET + CXL_HDM_REGISTERS_SIZE) #define CXL_EXTSEC_REGISTERS_SIZE (8 * EXTSEC_ENTRY_MAX + 4) -/* 8.2.5.14 - CXL IDE Capability Structure */ +/* CXL r3.1 Section 8.2.4.22: CXL IDE Capability Structure */ +#define CXL_IDE_CAP_VERSION 2 #define CXL_IDE_REGISTERS_OFFSET \ (CXL_EXTSEC_REGISTERS_OFFSET + CXL_EXTSEC_REGISTERS_SIZE) -#define CXL_IDE_REGISTERS_SIZE 0x20 +#define CXL_IDE_REGISTERS_SIZE 0x24 -/* 8.2.5.15 - CXL Snoop Filter Capability Structure */ +/* CXL r3.1 Section 8.2.4.23 - CXL Snoop Filter Capability Structure */ +#define CXL_SNOOP_CAP_VERSION 1 #define CXL_SNOOP_REGISTERS_OFFSET \ (CXL_IDE_REGISTERS_OFFSET + CXL_IDE_REGISTERS_SIZE) #define CXL_SNOOP_REGISTERS_SIZE 0x8 @@ -187,7 +211,7 @@ typedef struct component_registers { MemoryRegion component_registers; /* - * 8.2.4 Table 141: + * CXL r3.1 Table 8-21: CXL Subsystem Component Register Ranges * 0x0000 - 0x0fff CXL.io registers * 0x1000 - 0x1fff CXL.cache and CXL.mem * 0x2000 - 0xdfff Implementation specific diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h index 31d2afcd3df..279b276bda2 100644 --- a/include/hw/cxl/cxl_device.h +++ b/include/hw/cxl/cxl_device.h @@ -58,18 +58,30 @@ * */ -#define CXL_DEVICE_CAP_HDR1_OFFSET 0x10 /* Figure 138 */ -#define CXL_DEVICE_CAP_REG_SIZE 0x10 /* 8.2.8.2 */ -#define CXL_DEVICE_CAPS_MAX 4 /* 8.2.8.2.1 + 8.2.8.5 */ +/* CXL r3.1 Figure 8-12: CXL Device Registers */ +#define CXL_DEVICE_CAP_HDR1_OFFSET 0x10 +/* CXL r3.1 Section 8.2.8.2: CXL Device Capability Header Register */ +#define CXL_DEVICE_CAP_REG_SIZE 0x10 + +/* + * CXL r3.1 Section 8.2.8.2.1: CXL Device Capabilities + + * CXL r3.1 Section 8.2.8.5: Memory Device Capabilities + */ +#define CXL_DEVICE_CAPS_MAX 4 #define CXL_CAPS_SIZE \ (CXL_DEVICE_CAP_REG_SIZE * (CXL_DEVICE_CAPS_MAX + 1)) /* +1 for header */ #define CXL_DEVICE_STATUS_REGISTERS_OFFSET 0x80 /* Read comment above */ -#define CXL_DEVICE_STATUS_REGISTERS_LENGTH 0x8 /* 8.2.8.3.1 */ +/* + * CXL r3.1 Section 8.2.8.3: Device Status Registers + * As it is the only Device Status Register in CXL r3.1 + */ +#define CXL_DEVICE_STATUS_REGISTERS_LENGTH 0x8 #define CXL_MAILBOX_REGISTERS_OFFSET \ (CXL_DEVICE_STATUS_REGISTERS_OFFSET + CXL_DEVICE_STATUS_REGISTERS_LENGTH) -#define CXL_MAILBOX_REGISTERS_SIZE 0x20 /* 8.2.8.4, Figure 139 */ +/* CXL r3.1 Figure 8-13: 
Mailbox Registers */ +#define CXL_MAILBOX_REGISTERS_SIZE 0x20 #define CXL_MAILBOX_PAYLOAD_SHIFT 11 #define CXL_MAILBOX_MAX_PAYLOAD_SIZE (1 << CXL_MAILBOX_PAYLOAD_SHIFT) #define CXL_MAILBOX_REGISTERS_LENGTH \ @@ -83,7 +95,7 @@ (CXL_DEVICE_CAP_REG_SIZE + CXL_DEVICE_STATUS_REGISTERS_LENGTH + \ CXL_MAILBOX_REGISTERS_LENGTH + CXL_MEMORY_DEVICE_REGISTERS_LENGTH) -/* 8.2.8.4.5.1 Command Return Codes */ +/* CXL r3.1 Table 8-34: Command Return Codes */ typedef enum { CXL_MBOX_SUCCESS = 0x0, CXL_MBOX_BG_STARTED = 0x1, @@ -108,7 +120,17 @@ typedef enum { CXL_MBOX_INCORRECT_PASSPHRASE = 0x14, CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15, CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16, - CXL_MBOX_MAX = 0x17 + CXL_MBOX_INVALID_LOG = 0x17, + CXL_MBOX_INTERRUPTED = 0x18, + CXL_MBOX_UNSUPPORTED_FEATURE_VERSION = 0x19, + CXL_MBOX_UNSUPPORTED_FEATURE_SELECTION_VALUE = 0x1a, + CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS = 0x1b, + CXL_MBOX_FEATURE_TRANSFER_OUT_OF_ORDER = 0x1c, + CXL_MBOX_RESOURCES_EXHAUSTED = 0x1d, + CXL_MBOX_INVALID_EXTENT_LIST = 0x1e, + CXL_MBOX_TRANSFER_OUT_OF_ORDER = 0x1f, + CXL_MBOX_REQUEST_ABORT_NOTSUP = 0x20, + CXL_MBOX_MAX = 0x20 } CXLRetCode; typedef struct CXLCCI CXLCCI; @@ -169,7 +191,7 @@ typedef struct CXLCCI { typedef struct cxl_device_state { MemoryRegion device_registers; - /* mmio for device capabilities array - 8.2.8.2 */ + /* CXL r3.1 Section 8.2.8.3: Device Status Registers */ struct { MemoryRegion device; union { @@ -189,7 +211,7 @@ typedef struct cxl_device_state { }; }; - /* mmio for the mailbox registers 8.2.8.4 */ + /* CXL r3.1 Section 8.2.8.4: Mailbox Registers */ struct { MemoryRegion mailbox; uint16_t payload_size; @@ -231,7 +253,7 @@ void cxl_device_register_init_t3(CXLType3Dev *ct3d); void cxl_device_register_init_swcci(CSWMBCCIDev *sw); /* - * CXL 2.0 - 8.2.8.1 including errata F4 + * CXL r3.1 Section 8.2.8.1: CXL Device Capabilities Array Register * Documented as a 128 bit register, but 64 bit accesses and the second * 64 bits are currently reserved. */ @@ -246,17 +268,18 @@ void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type, /* * Helper macro to initialize capability headers for CXL devices. * - * In the 8.2.8.2, this is listed as a 128b register, but in 8.2.8, it says: + * In CXL r3.1 Section 8.2.8.2: CXL Device Capability Header Register, this is + * listed as a 128b register, but in CXL r3.1 Section 8.2.8: CXL Device Register + * Interface, it says: * > No registers defined in Section 8.2.8 are larger than 64-bits wide so that * > is the maximum access size allowed for these registers. If this rule is not - * > followed, the behavior is undefined + * > followed, the behavior is undefined. * - * CXL 2.0 Errata F4 states further that the layouts in the specification are - * shown as greater than 128 bits, but implementations are expected to - * use any size of access up to 64 bits. + * > To illustrate how the fields fit together, the layouts ... are shown as + * > wider than a 64 bit register. Implementations are expected to use any size + * > accesses for this information up to 64 bits without lost of functionality * - * Here we've chosen to make it 4 dwords. The spec allows any pow2 multiple - * access to be used for a register up to 64 bits. + * Here we've chosen to make it 4 dwords. 
*/ #define CXL_DEVICE_CAPABILITY_HEADER_REGISTER(n, offset) \ REG32(CXL_DEV_##n##_CAP_HDR0, offset) \ @@ -306,45 +329,51 @@ void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, CAP_LENGTH, CXL_##reg##_REGISTERS_LENGTH); \ } while (0) -/* CXL 3.0 8.2.8.3.1 Event Status Register */ +/* CXL r3.2 Section 8.2.8.3.1: Event Status Register */ +#define CXL_DEVICE_STATUS_VERSION 2 REG64(CXL_DEV_EVENT_STATUS, 0) FIELD(CXL_DEV_EVENT_STATUS, EVENT_STATUS, 0, 32) -/* CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register */ +#define CXL_DEV_MAILBOX_VERSION 1 +/* CXL r3.1 Section 8.2.8.4.3: Mailbox Capabilities Register */ REG32(CXL_DEV_MAILBOX_CAP, 0) FIELD(CXL_DEV_MAILBOX_CAP, PAYLOAD_SIZE, 0, 5) FIELD(CXL_DEV_MAILBOX_CAP, INT_CAP, 5, 1) FIELD(CXL_DEV_MAILBOX_CAP, BG_INT_CAP, 6, 1) FIELD(CXL_DEV_MAILBOX_CAP, MSI_N, 7, 4) + FIELD(CXL_DEV_MAILBOX_CAP, MBOX_READY_TIME, 11, 8) + FIELD(CXL_DEV_MAILBOX_CAP, TYPE, 19, 4) -/* CXL 2.0 8.2.8.4.4 Mailbox Control Register */ +/* CXL r3.1 Section 8.2.8.4.4: Mailbox Control Register */ REG32(CXL_DEV_MAILBOX_CTRL, 4) FIELD(CXL_DEV_MAILBOX_CTRL, DOORBELL, 0, 1) FIELD(CXL_DEV_MAILBOX_CTRL, INT_EN, 1, 1) FIELD(CXL_DEV_MAILBOX_CTRL, BG_INT_EN, 2, 1) -/* CXL 2.0 8.2.8.4.5 Command Register */ +/* CXL r3.1 Section 8.2.8.4.5: Command Register */ REG64(CXL_DEV_MAILBOX_CMD, 8) FIELD(CXL_DEV_MAILBOX_CMD, COMMAND, 0, 8) FIELD(CXL_DEV_MAILBOX_CMD, COMMAND_SET, 8, 8) FIELD(CXL_DEV_MAILBOX_CMD, LENGTH, 16, 20) -/* CXL 2.0 8.2.8.4.6 Mailbox Status Register */ +/* CXL r3.1 Section 8.2.8.4.6: Mailbox Status Register */ REG64(CXL_DEV_MAILBOX_STS, 0x10) FIELD(CXL_DEV_MAILBOX_STS, BG_OP, 0, 1) FIELD(CXL_DEV_MAILBOX_STS, ERRNO, 32, 16) FIELD(CXL_DEV_MAILBOX_STS, VENDOR_ERRNO, 48, 16) -/* CXL 2.0 8.2.8.4.7 Background Command Status Register */ +/* CXL r3.1 Section 8.2.8.4.7: Background Command Status Register */ REG64(CXL_DEV_BG_CMD_STS, 0x18) FIELD(CXL_DEV_BG_CMD_STS, OP, 0, 16) FIELD(CXL_DEV_BG_CMD_STS, PERCENTAGE_COMP, 16, 7) FIELD(CXL_DEV_BG_CMD_STS, RET_CODE, 32, 16) FIELD(CXL_DEV_BG_CMD_STS, VENDOR_RET_CODE, 48, 16) -/* CXL 2.0 8.2.8.4.8 Command Payload Registers */ +/* CXL r3.1 Section 8.2.8.4.8: Command Payload Registers */ REG32(CXL_DEV_CMD_PAYLOAD, 0x20) +/* CXL r3.1 Section 8.2.8.4.1: Memory Device Status Registers */ +#define CXL_MEM_DEV_STATUS_VERSION 1 REG64(CXL_MEM_DEV_STS, 0) FIELD(CXL_MEM_DEV_STS, FATAL, 0, 1) FIELD(CXL_MEM_DEV_STS, FW_HALT, 1, 1) diff --git a/include/hw/cxl/cxl_events.h b/include/hw/cxl/cxl_events.h index d778487b7e7..5170b8dbf81 100644 --- a/include/hw/cxl/cxl_events.h +++ b/include/hw/cxl/cxl_events.h @@ -13,7 +13,7 @@ #include "qemu/uuid.h" /* - * CXL rev 3.0 section 8.2.9.2.2; Table 8-49 + * CXL r3.1 section 8.2.9.2.2: Get Event Records (Opcode 0100h); Table 8-52 * * Define these as the bit position for the event status register for ease of * setting the status. 
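The comment in the hunk above notes that the CXLEventLogType values are defined as the bit positions of the Event Status register "for ease of setting the status". A minimal illustrative sketch of that convention follows; it is not part of the patch, and the helper name is invented for the example (the real update path lives behind cxl_event_set_status()):

/*
 * Illustrative sketch only -- not from this patch.  Because each
 * CXLEventLogType value equals its bit position in the 32-bit
 * EVENT_STATUS field of CXL_DEV_EVENT_STATUS, flagging a log with
 * pending records is a single shift-and-or.
 */
static inline void sketch_mark_log_pending(uint32_t *event_status,
                                           CXLEventLogType log_type)
{
    *event_status |= (1u << log_type);   /* log type value == status bit */
}
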
@@ -29,7 +29,7 @@ typedef enum CXLEventLogType { /* * Common Event Record Format - * CXL rev 3.0 section 8.2.9.2.1; Table 8-42 + * CXL r3.1 section 8.2.9.2.1: Event Records; Table 8-43 */ #define CXL_EVENT_REC_HDR_RES_LEN 0xf typedef struct CXLEventRecordHdr { @@ -52,7 +52,7 @@ typedef struct CXLEventRecordRaw { /* * Get Event Records output payload - * CXL rev 3.0 section 8.2.9.2.2; Table 8-50 + * CXL r3.1 section 8.2.9.2.2; Table 8-53 */ #define CXL_GET_EVENT_FLAG_OVERFLOW BIT(0) #define CXL_GET_EVENT_FLAG_MORE_RECORDS BIT(1) @@ -70,7 +70,7 @@ typedef struct CXLGetEventPayload { /* * Clear Event Records input payload - * CXL rev 3.0 section 8.2.9.2.3; Table 8-51 + * CXL r3.1 section 8.2.9.2.3; Table 8-54 */ typedef struct CXLClearEventPayload { uint8_t event_log; /* CXLEventLogType */ @@ -80,10 +80,10 @@ typedef struct CXLClearEventPayload { uint16_t handle[]; } CXLClearEventPayload; -/** +/* * Event Interrupt Policy * - * CXL rev 3.0 section 8.2.9.2.4; Table 8-52 + * CXL r3.1 section 8.2.9.2.4; Table 8-55 */ typedef enum CXLEventIntMode { CXL_INT_NONE = 0x00, @@ -106,7 +106,7 @@ typedef struct CXLEventInterruptPolicy { /* * General Media Event Record - * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43 + * CXL r3.1 Section 8.2.9.2.1.1; Table 8-45 */ #define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10 #define CXL_EVENT_GEN_MED_RES_SIZE 0x2e @@ -126,7 +126,7 @@ typedef struct CXLEventGenMedia { /* * DRAM Event Record - * CXL Rev 3.0 Section 8.2.9.2.1.2: Table 8-44 + * CXL r3.1 Section 8.2.9.2.1.2: Table 8-46 * All fields little endian. */ typedef struct CXLEventDram { @@ -149,7 +149,7 @@ typedef struct CXLEventDram { /* * Memory Module Event Record - * CXL Rev 3.0 Section 8.2.9.2.1.3: Table 8-45 + * CXL r3.1 Section 8.2.9.2.1.3: Table 8-47 * All fields little endian. */ typedef struct CXLEventMemoryModule { diff --git a/include/hw/cxl/cxl_pci.h b/include/hw/cxl/cxl_pci.h index ddf01a543b8..d0855ed78b0 100644 --- a/include/hw/cxl/cxl_pci.h +++ b/include/hw/cxl/cxl_pci.h @@ -16,9 +16,8 @@ #define PCIE_DVSEC_HEADER1_OFFSET 0x4 /* Offset from start of extend cap */ #define PCIE_DVSEC_ID_OFFSET 0x8 -#define PCIE_CXL_DEVICE_DVSEC_LENGTH 0x38 -#define PCIE_CXL1_DEVICE_DVSEC_REVID 0 -#define PCIE_CXL2_DEVICE_DVSEC_REVID 1 +#define PCIE_CXL_DEVICE_DVSEC_LENGTH 0x3C +#define PCIE_CXL31_DEVICE_DVSEC_REVID 3 #define EXTENSIONS_PORT_DVSEC_LENGTH 0x28 #define EXTENSIONS_PORT_DVSEC_REVID 0 @@ -29,8 +28,8 @@ #define GPF_DEVICE_DVSEC_LENGTH 0x10 #define GPF_DEVICE_DVSEC_REVID 0 -#define PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0 0x14 -#define PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0 1 +#define PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH 0x20 +#define PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID 2 #define REG_LOC_DVSEC_LENGTH 0x24 #define REG_LOC_DVSEC_REVID 0 @@ -55,16 +54,26 @@ typedef struct DVSECHeader { QEMU_BUILD_BUG_ON(sizeof(DVSECHeader) != 10); /* - * CXL 2.0 devices must implement certain DVSEC IDs, and can [optionally] + * CXL r3.1 Table 8-2: CXL DVSEC ID Assignment + * Devices must implement certain DVSEC IDs, and can [optionally] * implement others. + * (x) - IDs in Table 8-2. 
* - * CXL 2.0 Device: 0, [2], 5, 8 - * CXL 2.0 RP: 3, 4, 7, 8 - * CXL 2.0 Upstream Port: [2], 7, 8 - * CXL 2.0 Downstream Port: 3, 4, 7, 8 + * CXL RCD (D1): 0, [2], [5], 7, [8], A - Not emulated yet + * CXL RCD USP (UP1): 7, [8] - Not emulated yet + * CXL RCH DSP (DP1): 7, [8] + * CXL SLD (D2): 0, [2], 5, 7, 8, [A] + * CXL LD (LD): 0, [2], 5, 7, 8 + * CXL RP (R): 3, 4, 7, 8 + * CXL Switch USP (USP): [2], 7, 8 + * CXL Switch DSP (DSP): 3, 4, 7, 8 + * FM-Owned LD (FMLD): 0, [2], 7, 8, 9 */ -/* CXL 2.0 - 8.1.3 (ID 0001) */ +/* + * CXL r3.1 Section 8.1.3: PCIe DVSEC for Devices + * DVSEC ID: 0, Revision: 3 + */ typedef struct CXLDVSECDevice { DVSECHeader hdr; uint16_t cap; @@ -82,10 +91,15 @@ typedef struct CXLDVSECDevice { uint32_t range2_size_lo; uint32_t range2_base_hi; uint32_t range2_base_lo; -} CXLDVSECDevice; -QEMU_BUILD_BUG_ON(sizeof(CXLDVSECDevice) != 0x38); + uint16_t cap3; + uint16_t resv; +} QEMU_PACKED CXLDVSECDevice; +QEMU_BUILD_BUG_ON(sizeof(CXLDVSECDevice) != PCIE_CXL_DEVICE_DVSEC_LENGTH); -/* CXL 2.0 - 8.1.5 (ID 0003) */ +/* + * CXL r3.1 Section 8.1.5: CXL Extensions DVSEC for Ports + * DVSEC ID: 3, Revision: 0 + */ typedef struct CXLDVSECPortExt { DVSECHeader hdr; uint16_t status; @@ -107,7 +121,10 @@ QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortExt) != 0x28); #define PORT_CONTROL_UNMASK_SBR 1 #define PORT_CONTROL_ALT_MEMID_EN 4 -/* CXL 2.0 - 8.1.6 GPF DVSEC (ID 0004) */ +/* + * CXL r3.1 Section 8.1.6: GPF DVSEC for CXL Port + * DVSEC ID: 4, Revision: 0 + */ typedef struct CXLDVSECPortGPF { DVSECHeader hdr; uint16_t rsvd; @@ -116,7 +133,10 @@ typedef struct CXLDVSECPortGPF { } CXLDVSECPortGPF; QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortGPF) != 0x10); -/* CXL 2.0 - 8.1.7 GPF DVSEC for CXL Device */ +/* + * CXL r3.1 Section 8.1.7: GPF DVSEC for CXL Device + * DVSEC ID: 5, Revision 0 + */ typedef struct CXLDVSECDeviceGPF { DVSECHeader hdr; uint16_t phase2_duration; @@ -124,17 +144,27 @@ typedef struct CXLDVSECDeviceGPF { } CXLDVSECDeviceGPF; QEMU_BUILD_BUG_ON(sizeof(CXLDVSECDeviceGPF) != 0x10); -/* CXL 2.0 - 8.1.8/8.2.1.3 Flex Bus DVSEC (ID 0007) */ +/* + * CXL r3.1 Section 8.1.8: PCIe DVSEC for Flex Bus Port + * CXL r3.1 Section 8.2.1.3: Flex Bus Port DVSEC + * DVSEC ID: 7, Revision 2 + */ typedef struct CXLDVSECPortFlexBus { DVSECHeader hdr; uint16_t cap; uint16_t ctrl; uint16_t status; uint32_t rcvd_mod_ts_data_phase1; + uint32_t cap2; + uint32_t ctrl2; + uint32_t status2; } CXLDVSECPortFlexBus; -QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortFlexBus) != 0x14); +QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortFlexBus) != 0x20); -/* CXL 2.0 - 8.1.9 Register Locator DVSEC (ID 0008) */ +/* + * CXL r3.1 Section 8.1.9: Register Locator DVSEC + * DVSEC ID: 8, Revision 0 + */ typedef struct CXLDVSECRegisterLocator { DVSECHeader hdr; uint16_t rsvd; diff --git a/include/hw/display/bcm2835_fb.h b/include/hw/display/bcm2835_fb.h index 38671afffd5..49541bf08f4 100644 --- a/include/hw/display/bcm2835_fb.h +++ b/include/hw/display/bcm2835_fb.h @@ -16,6 +16,8 @@ #include "ui/console.h" #include "qom/object.h" +#define UPPER_RAM_BASE 0x40000000 + #define TYPE_BCM2835_FB "bcm2835-fb" OBJECT_DECLARE_SIMPLE_TYPE(BCM2835FBState, BCM2835_FB) diff --git a/include/hw/dma/i8257.h b/include/hw/dma/i8257.h index f652345d65a..4342e4a91ed 100644 --- a/include/hw/dma/i8257.h +++ b/include/hw/dma/i8257.h @@ -45,6 +45,6 @@ struct I8257State { PortioList portio_pageh; }; -void i8257_dma_init(ISABus *bus, bool high_page_enable); +void i8257_dma_init(Object *parent, ISABus *bus, bool high_page_enable); #endif diff --git 
a/include/hw/firmware/smbios.h b/include/hw/firmware/smbios.h index 7f3259a6300..8d3fb2fb3b6 100644 --- a/include/hw/firmware/smbios.h +++ b/include/hw/firmware/smbios.h @@ -2,6 +2,7 @@ #define QEMU_SMBIOS_H #include "qapi/qapi-types-machine.h" +#include "qemu/bitmap.h" /* * SMBIOS Support @@ -16,8 +17,26 @@ * */ +extern uint8_t *usr_blobs; +extern GArray *usr_blobs_sizes; + +typedef struct { + const char *vendor, *version, *date; + bool have_major_minor, uefi; + uint8_t major, minor; +} smbios_type0_t; +extern smbios_type0_t smbios_type0; + +typedef struct { + const char *manufacturer, *product, *version, *serial, *sku, *family; + /* uuid is in qemu_uuid */ +} smbios_type1_t; +extern smbios_type1_t smbios_type1; #define SMBIOS_MAX_TYPE 127 +extern DECLARE_BITMAP(smbios_have_binfile_bitmap, SMBIOS_MAX_TYPE + 1); +extern DECLARE_BITMAP(smbios_have_fields_bitmap, SMBIOS_MAX_TYPE + 1); + #define offsetofend(TYPE, MEMBER) \ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) @@ -211,6 +230,23 @@ struct smbios_type_8 { uint8_t port_type; } QEMU_PACKED; +/* SMBIOS type 9 - System Slots (v2.1+) */ +struct smbios_type_9 { + struct smbios_structure_header header; + uint8_t slot_designation; + uint8_t slot_type; + uint8_t slot_data_bus_width; + uint8_t current_usage; + uint8_t slot_length; + uint16_t slot_id; + uint8_t slot_characteristics1; + uint8_t slot_characteristics2; + /* SMBIOS spec v2.6+ */ + uint16_t segment_group_number; + uint8_t bus_number; + uint8_t device_number; +} QEMU_PACKED; + /* SMBIOS type 11 - OEM strings */ struct smbios_type_11 { struct smbios_structure_header header; @@ -290,13 +326,17 @@ struct smbios_type_127 { struct smbios_structure_header header; } QEMU_PACKED; +bool smbios_validate_table(SmbiosEntryPointType ep_type, Error **errp); +void smbios_add_usr_blob_size(size_t size); void smbios_entry_add(QemuOpts *opts, Error **errp); void smbios_set_cpuid(uint32_t version, uint32_t features); void smbios_set_defaults(const char *manufacturer, const char *product, - const char *version, bool legacy_mode, - bool uuid_encoded, SmbiosEntryPointType ep_type); -uint8_t *smbios_get_table_legacy(MachineState *ms, size_t *length); + const char *version, + bool uuid_encoded); +void smbios_set_default_processor_family(uint16_t processor_family); +uint8_t *smbios_get_table_legacy(size_t *length, Error **errp); void smbios_get_tables(MachineState *ms, + SmbiosEntryPointType ep_type, const struct smbios_phys_mem_area *mem_array, const unsigned int mem_array_size, uint8_t **tables, size_t *tables_len, diff --git a/include/hw/fsi/aspeed_apb2opb.h b/include/hw/fsi/aspeed_apb2opb.h new file mode 100644 index 00000000000..f6a2387abf2 --- /dev/null +++ b/include/hw/fsi/aspeed_apb2opb.h @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 IBM Corp. 
+ * + * ASPEED APB2OPB Bridge + * IBM On-Chip Peripheral Bus + */ +#ifndef FSI_ASPEED_APB2OPB_H +#define FSI_ASPEED_APB2OPB_H + +#include "exec/memory.h" +#include "hw/fsi/fsi-master.h" +#include "hw/sysbus.h" + +#define TYPE_FSI_OPB "fsi.opb" + +#define TYPE_OP_BUS "opb" +OBJECT_DECLARE_SIMPLE_TYPE(OPBus, OP_BUS) + +typedef struct OPBus { + BusState bus; + + MemoryRegion mr; + AddressSpace as; +} OPBus; + +#define TYPE_ASPEED_APB2OPB "aspeed.apb2opb" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedAPB2OPBState, ASPEED_APB2OPB) + +#define ASPEED_APB2OPB_NR_REGS ((0xe8 >> 2) + 1) + +#define ASPEED_FSI_NUM 2 + +typedef struct AspeedAPB2OPBState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + uint32_t regs[ASPEED_APB2OPB_NR_REGS]; + qemu_irq irq; + + OPBus opb[ASPEED_FSI_NUM]; + FSIMasterState fsi[ASPEED_FSI_NUM]; +} AspeedAPB2OPBState; + +#endif /* FSI_ASPEED_APB2OPB_H */ diff --git a/include/hw/fsi/cfam.h b/include/hw/fsi/cfam.h new file mode 100644 index 00000000000..7abc3b287be --- /dev/null +++ b/include/hw/fsi/cfam.h @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 IBM Corp. + * + * IBM Common FRU Access Macro + */ +#ifndef FSI_CFAM_H +#define FSI_CFAM_H + +#include "exec/memory.h" + +#include "hw/fsi/fsi.h" +#include "hw/fsi/lbus.h" + +#define TYPE_FSI_CFAM "cfam" +#define FSI_CFAM(obj) OBJECT_CHECK(FSICFAMState, (obj), TYPE_FSI_CFAM) + +/* P9-ism */ +#define CFAM_CONFIG_NR_REGS 0x28 + +typedef struct FSICFAMState { + /* < private > */ + FSISlaveState parent; + + /* CFAM config address space */ + MemoryRegion config_iomem; + + MemoryRegion mr; + + FSILBus lbus; + FSIScratchPad scratchpad; +} FSICFAMState; + +#endif /* FSI_CFAM_H */ diff --git a/include/hw/fsi/fsi-master.h b/include/hw/fsi/fsi-master.h new file mode 100644 index 00000000000..68e5f56db2e --- /dev/null +++ b/include/hw/fsi/fsi-master.h @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 IBM Corp. + * + * IBM Flexible Service Interface Master + */ +#ifndef FSI_FSI_MASTER_H +#define FSI_FSI_MASTER_H + +#include "exec/memory.h" +#include "hw/qdev-core.h" +#include "hw/fsi/fsi.h" +#include "hw/fsi/cfam.h" + +#define TYPE_FSI_MASTER "fsi.master" +OBJECT_DECLARE_SIMPLE_TYPE(FSIMasterState, FSI_MASTER) + +#define FSI_MASTER_NR_REGS ((0x2e0 >> 2) + 1) + +typedef struct FSIMasterState { + DeviceState parent; + MemoryRegion iomem; + MemoryRegion opb2fsi; + + FSIBus bus; + + uint32_t regs[FSI_MASTER_NR_REGS]; + FSICFAMState cfam; +} FSIMasterState; + + +#endif /* FSI_FSI_H */ diff --git a/include/hw/fsi/fsi.h b/include/hw/fsi/fsi.h new file mode 100644 index 00000000000..e00f6ef078c --- /dev/null +++ b/include/hw/fsi/fsi.h @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 IBM Corp. + * + * IBM Flexible Service Interface + */ +#ifndef FSI_FSI_H +#define FSI_FSI_H + +#include "exec/memory.h" +#include "hw/qdev-core.h" +#include "hw/fsi/lbus.h" +#include "qemu/bitops.h" + +/* Bitwise operations at the word level. 
*/ +#define BE_GENMASK(hb, lb) MAKE_64BIT_MASK((lb), ((hb) - (lb) + 1)) + +#define TYPE_FSI_BUS "fsi.bus" +OBJECT_DECLARE_SIMPLE_TYPE(FSIBus, FSI_BUS) + +typedef struct FSIBus { + BusState bus; +} FSIBus; + +#define TYPE_FSI_SLAVE "fsi.slave" +OBJECT_DECLARE_SIMPLE_TYPE(FSISlaveState, FSI_SLAVE) + +#define FSI_SLAVE_CONTROL_NR_REGS ((0x40 >> 2) + 1) + +typedef struct FSISlaveState { + DeviceState parent; + + MemoryRegion iomem; + uint32_t regs[FSI_SLAVE_CONTROL_NR_REGS]; +} FSISlaveState; + +#endif /* FSI_FSI_H */ diff --git a/include/hw/fsi/lbus.h b/include/hw/fsi/lbus.h new file mode 100644 index 00000000000..558268c013f --- /dev/null +++ b/include/hw/fsi/lbus.h @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 IBM Corp. + * + * IBM Local bus and connected device structures. + */ +#ifndef FSI_LBUS_H +#define FSI_LBUS_H + +#include "hw/qdev-core.h" +#include "qemu/units.h" +#include "exec/memory.h" + +#define TYPE_FSI_LBUS_DEVICE "fsi.lbus.device" +OBJECT_DECLARE_SIMPLE_TYPE(FSILBusDevice, FSI_LBUS_DEVICE) + +typedef struct FSILBusDevice { + DeviceState parent; + + MemoryRegion iomem; +} FSILBusDevice; + +#define TYPE_FSI_LBUS "fsi.lbus" +OBJECT_DECLARE_SIMPLE_TYPE(FSILBus, FSI_LBUS) + +typedef struct FSILBus { + BusState bus; + + MemoryRegion mr; +} FSILBus; + +#define TYPE_FSI_SCRATCHPAD "fsi.scratchpad" +#define SCRATCHPAD(obj) OBJECT_CHECK(FSIScratchPad, (obj), TYPE_FSI_SCRATCHPAD) + +#define FSI_SCRATCHPAD_NR_REGS 4 + +typedef struct FSIScratchPad { + FSILBusDevice parent; + + uint32_t regs[FSI_SCRATCHPAD_NR_REGS]; +} FSIScratchPad; + +#endif /* FSI_LBUS_H */ diff --git a/include/hw/gpio/bcm2838_gpio.h b/include/hw/gpio/bcm2838_gpio.h new file mode 100644 index 00000000000..f2a57a697f2 --- /dev/null +++ b/include/hw/gpio/bcm2838_gpio.h @@ -0,0 +1,45 @@ +/* + * Raspberry Pi (BCM2838) GPIO Controller + * This implementation is based on bcm2835_gpio (hw/gpio/bcm2835_gpio.c) + * + * Copyright (c) 2022 Auriga LLC + * + * Authors: + * Lotosh, Aleksey + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef BCM2838_GPIO_H +#define BCM2838_GPIO_H + +#include "hw/sd/sd.h" +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_BCM2838_GPIO "bcm2838-gpio" +OBJECT_DECLARE_SIMPLE_TYPE(BCM2838GpioState, BCM2838_GPIO) + +#define BCM2838_GPIO_REGS_SIZE 0x1000 +#define BCM2838_GPIO_NUM 58 +#define GPIO_PUP_PDN_CNTRL_NUM 4 + +struct BCM2838GpioState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + /* SDBus selector */ + SDBus sdbus; + SDBus *sdbus_sdhci; + SDBus *sdbus_sdhost; + + uint8_t fsel[BCM2838_GPIO_NUM]; + uint32_t lev0, lev1; + uint8_t sd_fsel; + qemu_irq out[BCM2838_GPIO_NUM]; + uint32_t pup_cntrl_reg[GPIO_PUP_PDN_CNTRL_NUM]; +}; + +#endif diff --git a/include/hw/misc/pca9552.h b/include/hw/gpio/pca9552.h similarity index 89% rename from include/hw/misc/pca9552.h rename to include/hw/gpio/pca9552.h index b6f4e264fed..c36525f0c3f 100644 --- a/include/hw/misc/pca9552.h +++ b/include/hw/gpio/pca9552.h @@ -30,7 +30,8 @@ struct PCA955xState { uint8_t pointer; uint8_t regs[PCA955X_NR_REGS]; - qemu_irq gpio[PCA955X_PIN_COUNT_MAX]; + qemu_irq gpio_out[PCA955X_PIN_COUNT_MAX]; + uint8_t ext_state[PCA955X_PIN_COUNT_MAX]; char *description; /* For debugging purpose only */ }; diff --git a/include/hw/misc/pca9552_regs.h b/include/hw/gpio/pca9552_regs.h similarity index 100% rename from include/hw/misc/pca9552_regs.h rename to include/hw/gpio/pca9552_regs.h diff --git a/include/hw/gpio/pca9554.h b/include/hw/gpio/pca9554.h new file mode 100644 index 00000000000..54bfc4c4c7a --- /dev/null +++ b/include/hw/gpio/pca9554.h @@ -0,0 +1,36 @@ +/* + * PCA9554 I/O port + * + * Copyright (c) 2023, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef PCA9554_H +#define PCA9554_H + +#include "hw/i2c/i2c.h" +#include "qom/object.h" + +#define TYPE_PCA9554 "pca9554" +typedef struct PCA9554State PCA9554State; +DECLARE_INSTANCE_CHECKER(PCA9554State, PCA9554, + TYPE_PCA9554) + +#define PCA9554_NR_REGS 4 +#define PCA9554_PIN_COUNT 8 + +struct PCA9554State { + /*< private >*/ + I2CSlave i2c; + /*< public >*/ + + uint8_t len; + uint8_t pointer; + + uint8_t regs[PCA9554_NR_REGS]; + qemu_irq gpio_out[PCA9554_PIN_COUNT]; + uint8_t ext_state[PCA9554_PIN_COUNT]; + char *description; /* For debugging purpose only */ +}; + +#endif diff --git a/include/hw/gpio/pca9554_regs.h b/include/hw/gpio/pca9554_regs.h new file mode 100644 index 00000000000..602c4a90e0d --- /dev/null +++ b/include/hw/gpio/pca9554_regs.h @@ -0,0 +1,19 @@ +/* + * PCA9554 I/O port registers + * + * Copyright (c) 2023, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef PCA9554_REGS_H +#define PCA9554_REGS_H + +/* + * Bits [0:1] are used to address a specific register. + */ +#define PCA9554_INPUT 0 /* read only input register */ +#define PCA9554_OUTPUT 1 /* read/write pin output state */ +#define PCA9554_POLARITY 2 /* Set polarity of input register */ +#define PCA9554_CONFIG 3 /* Set pins as inputs our ouputs */ + +#endif diff --git a/include/hw/gpio/pcf8574.h b/include/hw/gpio/pcf8574.h new file mode 100644 index 00000000000..3291d7dbbcf --- /dev/null +++ b/include/hw/gpio/pcf8574.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * NXP PCF8574 8-port I2C GPIO expansion chip. + * + * Copyright (c) 2024 KNS Group (YADRO). 
+ * Written by Dmitrii Sharikhin + */ + +#ifndef _HW_GPIO_PCF8574 +#define _HW_GPIO_PCF8574 + +#define TYPE_PCF8574 "pcf8574" + +#endif /* _HW_GPIO_PCF8574 */ diff --git a/include/hw/gpio/stm32l4x5_gpio.h b/include/hw/gpio/stm32l4x5_gpio.h new file mode 100644 index 00000000000..878bd19fc9b --- /dev/null +++ b/include/hw/gpio/stm32l4x5_gpio.h @@ -0,0 +1,71 @@ +/* + * STM32L4x5 GPIO (General Purpose Input/Ouput) + * + * Copyright (c) 2024 Arnaud Minier + * Copyright (c) 2024 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +/* + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. + * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html + */ + +#ifndef HW_STM32L4X5_GPIO_H +#define HW_STM32L4X5_GPIO_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_STM32L4X5_GPIO "stm32l4x5-gpio" +OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5GpioState, STM32L4X5_GPIO) + +#define NUM_GPIOS 8 +#define GPIO_NUM_PINS 16 + +struct Stm32l4x5GpioState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + + /* GPIO registers */ + uint32_t moder; + uint32_t otyper; + uint32_t ospeedr; + uint32_t pupdr; + uint32_t idr; + uint32_t odr; + uint32_t lckr; + uint32_t afrl; + uint32_t afrh; + uint32_t ascr; + + /* GPIO registers reset values */ + uint32_t moder_reset; + uint32_t ospeedr_reset; + uint32_t pupdr_reset; + + /* + * External driving of pins. + * The pins can be set externally through the device + * anonymous input GPIOs lines under certain conditions. + * The pin must not be in push-pull output mode, + * and can't be set high in open-drain mode. + * Pins driven externally and configured to + * output mode will in general be "disconnected" + * (see `get_gpio_pinmask_to_disconnect()`) + */ + uint16_t disconnected_pins; + uint16_t pins_connected_high; + + char *name; + Clock *clk; + qemu_irq pin[GPIO_NUM_PINS]; +}; + +#endif diff --git a/include/hw/hyperv/dynmem-proto.h b/include/hw/hyperv/dynmem-proto.h index a657786a94b..68b8b606f26 100644 --- a/include/hw/hyperv/dynmem-proto.h +++ b/include/hw/hyperv/dynmem-proto.h @@ -328,7 +328,8 @@ struct dm_unballoon_response { /* * Hot add request message. Message sent from the host to the guest. * - * mem_range: Memory range to hot add. + * range: Memory range to hot add. + * region: Explicit hot add memory region for guest to use. Optional. * */ @@ -337,6 +338,12 @@ struct dm_hot_add { union dm_mem_page_range range; } QEMU_PACKED; +struct dm_hot_add_with_region { + struct dm_header hdr; + union dm_mem_page_range range; + union dm_mem_page_range region; +} QEMU_PACKED; + /* * Hot add response message. * This message is sent by the guest to report the status of a hot add request. 
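The dm_hot_add_with_region message added above carries the same header and hot-add page range as dm_hot_add, plus an optional explicit target region for the guest to use. A hedged sketch of how a sender might choose between the two layouts is shown below; send_dynmem_message() and the header population are assumptions for illustration only, not part of this patch:

/*
 * Illustrative sketch only -- not from this patch.  Pick the message
 * layout depending on whether an explicit hot-add region is supplied.
 * send_dynmem_message() is an assumed transport helper, and filling in
 * the dm_header (type/size/transaction id) is left to the caller.
 */
static void sketch_send_hot_add(const union dm_mem_page_range *range,
                                const union dm_mem_page_range *region)
{
    if (region) {
        struct dm_hot_add_with_region msg = { 0 };

        msg.range = *range;
        msg.region = *region;              /* optional explicit target region */
        send_dynmem_message(&msg, sizeof(msg));
    } else {
        struct dm_hot_add msg = { 0 };

        msg.range = *range;                /* basic hot-add request */
        send_dynmem_message(&msg, sizeof(msg));
    }
}
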
diff --git a/include/hw/hyperv/hyperv.h b/include/hw/hyperv/hyperv.h index 015c3524b1c..d717b4e13d4 100644 --- a/include/hw/hyperv/hyperv.h +++ b/include/hw/hyperv/hyperv.h @@ -139,4 +139,8 @@ typedef struct HvSynDbgMsg { } HvSynDbgMsg; typedef uint16_t (*HvSynDbgHandler)(void *context, HvSynDbgMsg *msg); void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context); + +bool hyperv_are_vmbus_recommended_features_enabled(void); +void hyperv_set_vmbus_recommended_features_enabled(void); + #endif diff --git a/include/hw/i2c/bcm2835_i2c.h b/include/hw/i2c/bcm2835_i2c.h new file mode 100644 index 00000000000..0a56df4720b --- /dev/null +++ b/include/hw/i2c/bcm2835_i2c.h @@ -0,0 +1,80 @@ +/* + * Broadcom Serial Controller (BSC) + * + * Copyright (c) 2024 Rayhan Faizel + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "hw/sysbus.h" +#include "hw/i2c/i2c.h" +#include "qom/object.h" + +#define TYPE_BCM2835_I2C "bcm2835-i2c" +OBJECT_DECLARE_SIMPLE_TYPE(BCM2835I2CState, BCM2835_I2C) + +#define BCM2835_I2C_C 0x0 /* Control */ +#define BCM2835_I2C_S 0x4 /* Status */ +#define BCM2835_I2C_DLEN 0x8 /* Data Length */ +#define BCM2835_I2C_A 0xc /* Slave Address */ +#define BCM2835_I2C_FIFO 0x10 /* FIFO */ +#define BCM2835_I2C_DIV 0x14 /* Clock Divider */ +#define BCM2835_I2C_DEL 0x18 /* Data Delay */ +#define BCM2835_I2C_CLKT 0x20 /* Clock Stretch Timeout */ + +#define BCM2835_I2C_C_I2CEN BIT(15) /* I2C enable */ +#define BCM2835_I2C_C_INTR BIT(10) /* Interrupt on RXR */ +#define BCM2835_I2C_C_INTT BIT(9) /* Interrupt on TXW */ +#define BCM2835_I2C_C_INTD BIT(8) /* Interrupt on DONE */ +#define BCM2835_I2C_C_ST BIT(7) /* Start transfer */ +#define BCM2835_I2C_C_CLEAR (BIT(5) | BIT(4)) /* Clear FIFO */ +#define BCM2835_I2C_C_READ BIT(0) /* I2C read mode */ + +#define BCM2835_I2C_S_CLKT BIT(9) /* Clock stretch timeout */ +#define BCM2835_I2C_S_ERR BIT(8) /* Slave error */ +#define BCM2835_I2C_S_RXF BIT(7) /* RX FIFO full */ +#define BCM2835_I2C_S_TXE BIT(6) /* TX FIFO empty */ +#define BCM2835_I2C_S_RXD BIT(5) /* RX bytes available */ +#define BCM2835_I2C_S_TXD BIT(4) /* TX space available */ +#define BCM2835_I2C_S_RXR BIT(3) /* RX FIFO needs reading */ +#define BCM2835_I2C_S_TXW BIT(2) /* TX FIFO needs writing */ +#define BCM2835_I2C_S_DONE BIT(1) /* I2C Transfer complete */ +#define BCM2835_I2C_S_TA BIT(0) /* I2C Transfer active */ + +struct BCM2835I2CState { + /* */ + SysBusDevice parent_obj; + + /* */ + MemoryRegion iomem; + I2CBus *bus; + qemu_irq irq; + + uint32_t c; + uint32_t s; + uint32_t dlen; + uint32_t a; + uint32_t div; + uint32_t del; + uint32_t clkt; + + uint32_t last_dlen; +}; diff --git a/include/hw/i2c/pnv_i2c_regs.h b/include/hw/i2c/pnv_i2c_regs.h new file mode 100644 index 00000000000..85e96ff480d --- /dev/null +++ b/include/hw/i2c/pnv_i2c_regs.h @@ -0,0 +1,143 @@ +/* + * PowerNV I2C Controller Register Definitions + * + * Copyright (c) 2024, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef PNV_I2C_REGS_H +#define PNV_I2C_REGS_H + +/* I2C FIFO register */ +#define I2C_FIFO_REG 0x4 +#define I2C_FIFO PPC_BITMASK(0, 7) + +/* I2C command register */ +#define I2C_CMD_REG 0x5 +#define I2C_CMD_WITH_START PPC_BIT(0) +#define I2C_CMD_WITH_ADDR PPC_BIT(1) +#define I2C_CMD_READ_CONT PPC_BIT(2) +#define I2C_CMD_WITH_STOP PPC_BIT(3) +#define I2C_CMD_INTR_STEERING PPC_BITMASK(6, 7) /* P9 */ +#define I2C_CMD_INTR_STEER_HOST 1 +#define I2C_CMD_INTR_STEER_OCC 2 +#define I2C_CMD_DEV_ADDR PPC_BITMASK(8, 14) +#define I2C_CMD_READ_NOT_WRITE PPC_BIT(15) +#define I2C_CMD_LEN_BYTES PPC_BITMASK(16, 31) +#define I2C_MAX_TFR_LEN 0xfff0ull + +/* I2C mode register */ +#define I2C_MODE_REG 0x6 +#define I2C_MODE_BIT_RATE_DIV PPC_BITMASK(0, 15) +#define I2C_MODE_PORT_NUM PPC_BITMASK(16, 21) +#define I2C_MODE_ENHANCED PPC_BIT(28) +#define I2C_MODE_DIAGNOSTIC PPC_BIT(29) +#define I2C_MODE_PACING_ALLOW PPC_BIT(30) +#define I2C_MODE_WRAP PPC_BIT(31) + +/* I2C watermark register */ +#define I2C_WATERMARK_REG 0x7 +#define I2C_WATERMARK_HIGH PPC_BITMASK(16, 19) +#define I2C_WATERMARK_LOW PPC_BITMASK(24, 27) + +/* + * I2C interrupt mask and condition registers + * + * NB: The function of 0x9 and 0xa changes depending on whether you're reading + * or writing to them. When read they return the interrupt condition bits + * and on writes they update the interrupt mask register. 
+ * + * The bit definitions are the same for all the interrupt registers. + */ +#define I2C_INTR_MASK_REG 0x8 + +#define I2C_INTR_RAW_COND_REG 0x9 /* read */ +#define I2C_INTR_MASK_OR_REG 0x9 /* write*/ + +#define I2C_INTR_COND_REG 0xa /* read */ +#define I2C_INTR_MASK_AND_REG 0xa /* write */ + +#define I2C_INTR_ALL PPC_BITMASK(16, 31) +#define I2C_INTR_INVALID_CMD PPC_BIT(16) +#define I2C_INTR_LBUS_PARITY_ERR PPC_BIT(17) +#define I2C_INTR_BKEND_OVERRUN_ERR PPC_BIT(18) +#define I2C_INTR_BKEND_ACCESS_ERR PPC_BIT(19) +#define I2C_INTR_ARBT_LOST_ERR PPC_BIT(20) +#define I2C_INTR_NACK_RCVD_ERR PPC_BIT(21) +#define I2C_INTR_DATA_REQ PPC_BIT(22) +#define I2C_INTR_CMD_COMP PPC_BIT(23) +#define I2C_INTR_STOP_ERR PPC_BIT(24) +#define I2C_INTR_I2C_BUSY PPC_BIT(25) +#define I2C_INTR_NOT_I2C_BUSY PPC_BIT(26) +#define I2C_INTR_SCL_EQ_1 PPC_BIT(28) +#define I2C_INTR_SCL_EQ_0 PPC_BIT(29) +#define I2C_INTR_SDA_EQ_1 PPC_BIT(30) +#define I2C_INTR_SDA_EQ_0 PPC_BIT(31) + +/* I2C status register */ +#define I2C_RESET_I2C_REG 0xb /* write */ +#define I2C_RESET_ERRORS 0xc +#define I2C_STAT_REG 0xb /* read */ +#define I2C_STAT_INVALID_CMD PPC_BIT(0) +#define I2C_STAT_LBUS_PARITY_ERR PPC_BIT(1) +#define I2C_STAT_BKEND_OVERRUN_ERR PPC_BIT(2) +#define I2C_STAT_BKEND_ACCESS_ERR PPC_BIT(3) +#define I2C_STAT_ARBT_LOST_ERR PPC_BIT(4) +#define I2C_STAT_NACK_RCVD_ERR PPC_BIT(5) +#define I2C_STAT_DATA_REQ PPC_BIT(6) +#define I2C_STAT_CMD_COMP PPC_BIT(7) +#define I2C_STAT_STOP_ERR PPC_BIT(8) +#define I2C_STAT_UPPER_THRS PPC_BITMASK(9, 15) +#define I2C_STAT_ANY_I2C_INTR PPC_BIT(16) +#define I2C_STAT_PORT_HISTORY_BUSY PPC_BIT(19) +#define I2C_STAT_SCL_INPUT_LEVEL PPC_BIT(20) +#define I2C_STAT_SDA_INPUT_LEVEL PPC_BIT(21) +#define I2C_STAT_PORT_BUSY PPC_BIT(22) +#define I2C_STAT_INTERFACE_BUSY PPC_BIT(23) +#define I2C_STAT_FIFO_ENTRY_COUNT PPC_BITMASK(24, 31) + +#define I2C_STAT_ANY_ERR (I2C_STAT_INVALID_CMD | I2C_STAT_LBUS_PARITY_ERR | \ + I2C_STAT_BKEND_OVERRUN_ERR | \ + I2C_STAT_BKEND_ACCESS_ERR | I2C_STAT_ARBT_LOST_ERR | \ + I2C_STAT_NACK_RCVD_ERR | I2C_STAT_STOP_ERR) + + +#define I2C_INTR_ACTIVE \ + ((I2C_STAT_ANY_ERR >> 16) | I2C_INTR_CMD_COMP | I2C_INTR_DATA_REQ) + +/* Pseudo-status used for timeouts */ +#define I2C_STAT_PSEUDO_TIMEOUT PPC_BIT(63) + +/* I2C extended status register */ +#define I2C_EXTD_STAT_REG 0xc +#define I2C_EXTD_STAT_FIFO_SIZE PPC_BITMASK(0, 7) +#define I2C_EXTD_STAT_MSM_CURSTATE PPC_BITMASK(11, 15) +#define I2C_EXTD_STAT_SCL_IN_SYNC PPC_BIT(16) +#define I2C_EXTD_STAT_SDA_IN_SYNC PPC_BIT(17) +#define I2C_EXTD_STAT_S_SCL PPC_BIT(18) +#define I2C_EXTD_STAT_S_SDA PPC_BIT(19) +#define I2C_EXTD_STAT_M_SCL PPC_BIT(20) +#define I2C_EXTD_STAT_M_SDA PPC_BIT(21) +#define I2C_EXTD_STAT_HIGH_WATER PPC_BIT(22) +#define I2C_EXTD_STAT_LOW_WATER PPC_BIT(23) +#define I2C_EXTD_STAT_I2C_BUSY PPC_BIT(24) +#define I2C_EXTD_STAT_SELF_BUSY PPC_BIT(25) +#define I2C_EXTD_STAT_I2C_VERSION PPC_BITMASK(27, 31) + +/* I2C residual front end/back end length */ +#define I2C_RESIDUAL_LEN_REG 0xd +#define I2C_RESIDUAL_FRONT_END PPC_BITMASK(0, 15) +#define I2C_RESIDUAL_BACK_END PPC_BITMASK(16, 31) + +/* Port busy register */ +#define I2C_PORT_BUSY_REG 0xe +#define I2C_SET_S_SCL_REG 0xd +#define I2C_RESET_S_SCL_REG 0xf +#define I2C_SET_S_SDA_REG 0x10 +#define I2C_RESET_S_SDA_REG 0x11 + +#define PNV_I2C_FIFO_SIZE 8 +#define PNV_I2C_MAX_BUSSES 64 + +#endif /* PNV_I2C_REGS_H */ diff --git a/include/hw/i386/apic.h b/include/hw/i386/apic.h index bdc15a7a731..eb606d60760 100644 --- a/include/hw/i386/apic.h +++ b/include/hw/i386/apic.h @@ 
-3,14 +3,14 @@ /* apic.c */ -void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, - uint8_t vector_num, uint8_t trigger_mode); +void apic_set_max_apic_id(uint32_t max_apic_id); int apic_accept_pic_intr(DeviceState *s); void apic_deliver_pic_intr(DeviceState *s, int level); void apic_deliver_nmi(DeviceState *d); int apic_get_interrupt(DeviceState *s); -void cpu_set_apic_base(DeviceState *s, uint64_t val); +int cpu_set_apic_base(DeviceState *s, uint64_t val); uint64_t cpu_get_apic_base(DeviceState *s); +bool cpu_is_apic_enabled(DeviceState *s); void cpu_set_apic_tpr(DeviceState *s, uint8_t val); uint8_t cpu_get_apic_tpr(DeviceState *s); void apic_init_reset(DeviceState *s); @@ -18,6 +18,9 @@ void apic_sipi(DeviceState *s); void apic_poll_irq(DeviceState *d); void apic_designate_bsp(DeviceState *d, bool bsp); int apic_get_highest_priority_irr(DeviceState *dev); +int apic_msr_read(int index, uint64_t *val); +int apic_msr_write(int index, uint64_t val); +bool is_x2apic_mode(DeviceState *d); /* pc.c */ DeviceState *cpu_get_current_apic(void); diff --git a/include/hw/i386/apic_internal.h b/include/hw/i386/apic_internal.h index 5f2ba24bfcd..d6e85833da5 100644 --- a/include/hw/i386/apic_internal.h +++ b/include/hw/i386/apic_internal.h @@ -46,8 +46,10 @@ #define APIC_DM_EXTINT 7 /* APIC destination mode */ -#define APIC_DESTMODE_FLAT 0xf -#define APIC_DESTMODE_CLUSTER 1 +#define APIC_DESTMODE_PHYSICAL 0 +#define APIC_DESTMODE_LOGICAL 1 +#define APIC_DESTMODE_LOGICAL_FLAT 0xf +#define APIC_DESTMODE_LOGICAL_CLUSTER 0 #define APIC_TRIGGER_EDGE 0 #define APIC_TRIGGER_LEVEL 1 @@ -135,7 +137,7 @@ struct APICCommonClass { DeviceRealize realize; DeviceUnrealize unrealize; - void (*set_base)(APICCommonState *s, uint64_t val); + int (*set_base)(APICCommonState *s, uint64_t val); void (*set_tpr)(APICCommonState *s, uint8_t val); uint8_t (*get_tpr)(APICCommonState *s); void (*enable_tpr_reporting)(APICCommonState *s, bool enable); @@ -187,6 +189,7 @@ struct APICCommonState { DeviceState *vapic; hwaddr vapic_paddr; /* note: persistence via kvmvapic */ bool legacy_instance_id; + uint32_t extended_log_dest; }; typedef struct VAPICState { diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index a10ceeabbfa..27a68071d77 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -12,10 +12,9 @@ #include "hw/hotplug.h" #include "qom/object.h" #include "hw/i386/sgx-epc.h" -#include "hw/firmware/smbios.h" #include "hw/cxl/cxl.h" -#define HPET_INTCAP "hpet-intcap" +#define MAX_IDE_BUS 2 /** * PCMachineState: @@ -32,12 +31,12 @@ typedef struct PCMachineState { Notifier machine_done; /* Pointers to devices and objects: */ - PCIBus *bus; - BusState *xenbus; + PCIBus *pcibus; I2CBus *smbus; PFlashCFI01 *flash[2]; ISADevice *pcspk; DeviceState *iommu; + BusState *idebus[MAX_IDE_BUS]; /* Configuration options: */ uint64_t max_ram_below_4g; @@ -51,6 +50,7 @@ typedef struct PCMachineState { bool hpet_enabled; bool i8042_enabled; bool default_bus_bypass_iommu; + bool fd_bootchk; uint64_t max_fw_size; /* ACPI Memory hotplug IO base address */ @@ -93,7 +93,6 @@ struct PCMachineClass { /* Device configuration: */ bool pci_enabled; - bool kvmclock_enabled; const char *default_south_bridge; /* Compat options: */ @@ -148,12 +147,9 @@ OBJECT_DECLARE_TYPE(PCMachineState, PCMachineClass, PC_MACHINE) GSIState *pc_gsi_create(qemu_irq **irqs, bool pci_enabled); /* pc.c */ -extern int fd_bootchk; void pc_acpi_smi_interrupt(void *opaque, int irq, int level); -void pc_guest_info_init(PCMachineState *pcms); - 
#define PCI_HOST_PROP_RAM_MEM "ram-mem" #define PCI_HOST_PROP_PCI_MEM "pci-mem" #define PCI_HOST_PROP_SYSTEM_MEM "system-mem" @@ -182,11 +178,7 @@ void pc_basic_device_init(struct PCMachineState *pcms, ISADevice *rtc_state, bool create_fdctrl, uint32_t hpet_irqs); -void pc_cmos_init(PCMachineState *pcms, - BusState *ide0, BusState *ide1, - ISADevice *s); -void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus, - BusState *xen_bus); +void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus); void pc_i8259_create(ISABus *isa_bus, qemu_irq *i8259_irqs); @@ -203,13 +195,12 @@ bool pc_system_ovmf_table_find(const char *entry, uint8_t **data, int *data_len); void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size); -/* hw/i386/acpi-common.c */ -void pc_madt_cpu_entry(int uid, const CPUArchIdList *apic_ids, - GArray *entry, bool force_enabled); - /* sgx.c */ void pc_machine_init_sgx_epc(PCMachineState *pcms); +extern GlobalProperty pc_compat_8_2[]; +extern const size_t pc_compat_8_2_len; + extern GlobalProperty pc_compat_8_1[]; extern const size_t pc_compat_8_1_len; @@ -297,27 +288,12 @@ extern const size_t pc_compat_2_1_len; extern GlobalProperty pc_compat_2_0[]; extern const size_t pc_compat_2_0_len; -extern GlobalProperty pc_compat_1_7[]; -extern const size_t pc_compat_1_7_len; - -extern GlobalProperty pc_compat_1_6[]; -extern const size_t pc_compat_1_6_len; - -extern GlobalProperty pc_compat_1_5[]; -extern const size_t pc_compat_1_5_len; - -extern GlobalProperty pc_compat_1_4[]; -extern const size_t pc_compat_1_4_len; - -int pc_machine_kvm_type(MachineState *machine, const char *vm_type); - #define DEFINE_PC_MACHINE(suffix, namestr, initfn, optsfn) \ static void pc_machine_##suffix##_class_init(ObjectClass *oc, void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ optsfn(mc); \ mc->init = initfn; \ - mc->kvm_type = pc_machine_kvm_type; \ } \ static const TypeInfo pc_machine_type_##suffix = { \ .name = namestr TYPE_MACHINE_SUFFIX, \ diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h index da19ae15463..4dc30dcb4d2 100644 --- a/include/hw/i386/x86.h +++ b/include/hw/i386/x86.h @@ -34,6 +34,8 @@ struct X86MachineClass { bool save_tsc_khz; /* use DMA capable linuxboot option rom */ bool fwcfg_dma_enabled; + /* CPU and apic information: */ + bool apic_xrupt_override; }; struct X86MachineState { @@ -57,7 +59,6 @@ struct X86MachineState { uint64_t above_4g_mem_start; /* CPU and apic information: */ - bool apic_xrupt_override; unsigned pci_irq_mask; unsigned apic_id_limit; uint16_t boot_cpus; @@ -138,7 +139,7 @@ typedef struct GSIState { qemu_irq x86_allocate_cpu_irq(void); void gsi_handler(void *opaque, int n, int level); -void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name); +void ioapic_init_gsi(GSIState *gsi_state, Object *parent); DeviceState *ioapic_init_secondary(GSIState *gsi_state); /* pc_sysfw.c */ diff --git a/include/hw/ide.h b/include/hw/ide.h deleted file mode 100644 index db963bdb770..00000000000 --- a/include/hw/ide.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef HW_IDE_H -#define HW_IDE_H - -#include "exec/memory.h" - -/* ide/core.c */ -void ide_drive_get(DriveInfo **hd, int max_bus); - -#endif /* HW_IDE_H */ diff --git a/include/hw/ide/ahci-pci.h b/include/hw/ide/ahci-pci.h new file mode 100644 index 00000000000..c2ee6169625 --- /dev/null +++ b/include/hw/ide/ahci-pci.h @@ -0,0 +1,22 @@ +/* + * QEMU AHCI Emulation (PCI devices) + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef HW_IDE_AHCI_PCI_H +#define 
HW_IDE_AHCI_PCI_H + +#include "qom/object.h" +#include "hw/ide/ahci.h" +#include "hw/pci/pci_device.h" + +#define TYPE_ICH9_AHCI "ich9-ahci" +OBJECT_DECLARE_SIMPLE_TYPE(AHCIPCIState, ICH9_AHCI) + +struct AHCIPCIState { + PCIDevice parent_obj; + + AHCIState ahci; +}; + +#endif diff --git a/include/hw/ide/ahci-sysbus.h b/include/hw/ide/ahci-sysbus.h new file mode 100644 index 00000000000..06eaac8cb60 --- /dev/null +++ b/include/hw/ide/ahci-sysbus.h @@ -0,0 +1,35 @@ +/* + * QEMU AHCI Emulation (MMIO-mapped devices) + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef HW_IDE_AHCI_SYSBUS_H +#define HW_IDE_AHCI_SYSBUS_H + +#include "qom/object.h" +#include "hw/sysbus.h" +#include "hw/ide/ahci.h" + +#define TYPE_SYSBUS_AHCI "sysbus-ahci" +OBJECT_DECLARE_SIMPLE_TYPE(SysbusAHCIState, SYSBUS_AHCI) + +struct SysbusAHCIState { + SysBusDevice parent_obj; + + AHCIState ahci; +}; + +#define TYPE_ALLWINNER_AHCI "allwinner-ahci" +OBJECT_DECLARE_SIMPLE_TYPE(AllwinnerAHCIState, ALLWINNER_AHCI) + +#define ALLWINNER_AHCI_MMIO_OFF 0x80 +#define ALLWINNER_AHCI_MMIO_SIZE 0x80 + +struct AllwinnerAHCIState { + SysbusAHCIState parent_obj; + + MemoryRegion mmio; + uint32_t regs[ALLWINNER_AHCI_MMIO_SIZE / 4]; +}; + +#endif diff --git a/include/hw/ide/ahci.h b/include/hw/ide/ahci.h index 210e5e734cf..ba31e75ff9b 100644 --- a/include/hw/ide/ahci.h +++ b/include/hw/ide/ahci.h @@ -24,8 +24,7 @@ #ifndef HW_IDE_AHCI_H #define HW_IDE_AHCI_H -#include "hw/sysbus.h" -#include "qom/object.h" +#include "exec/memory.h" typedef struct AHCIDevice AHCIDevice; @@ -46,43 +45,12 @@ typedef struct AHCIState { MemoryRegion idp; /* Index-Data Pair I/O port space */ unsigned idp_offset; /* Offset of index in I/O port space */ uint32_t idp_index; /* Current IDP index */ - int32_t ports; + uint32_t ports; qemu_irq irq; AddressSpace *as; } AHCIState; -#define TYPE_ICH9_AHCI "ich9-ahci" -OBJECT_DECLARE_SIMPLE_TYPE(AHCIPCIState, ICH9_AHCI) - -int32_t ahci_get_num_ports(PCIDevice *dev); -void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd); - -#define TYPE_SYSBUS_AHCI "sysbus-ahci" -OBJECT_DECLARE_SIMPLE_TYPE(SysbusAHCIState, SYSBUS_AHCI) - -struct SysbusAHCIState { - /*< private >*/ - SysBusDevice parent_obj; - /*< public >*/ - - AHCIState ahci; - uint32_t num_ports; -}; - -#define TYPE_ALLWINNER_AHCI "allwinner-ahci" -OBJECT_DECLARE_SIMPLE_TYPE(AllwinnerAHCIState, ALLWINNER_AHCI) - -#define ALLWINNER_AHCI_MMIO_OFF 0x80 -#define ALLWINNER_AHCI_MMIO_SIZE 0x80 - -struct AllwinnerAHCIState { - /*< private >*/ - SysbusAHCIState parent_obj; - /*< public >*/ - - MemoryRegion mmio; - uint32_t regs[ALLWINNER_AHCI_MMIO_SIZE/4]; -}; +void ahci_ide_create_devs(AHCIState *ahci, DriveInfo **hd); #endif /* HW_IDE_AHCI_H */ diff --git a/include/hw/ide/ide-bus.h b/include/hw/ide/ide-bus.h new file mode 100644 index 00000000000..4841a7dcd63 --- /dev/null +++ b/include/hw/ide/ide-bus.h @@ -0,0 +1,42 @@ +#ifndef HW_IDE_BUS_H +#define HW_IDE_BUS_H + +#include "exec/ioport.h" +#include "hw/ide/ide-dev.h" +#include "hw/ide/ide-dma.h" + +struct IDEBus { + BusState qbus; + IDEDevice *master; + IDEDevice *slave; + IDEState ifs[2]; + QEMUBH *bh; + + int bus_id; + int max_units; + IDEDMA *dma; + uint8_t unit; + uint8_t cmd; + qemu_irq irq; /* bus output */ + + int error_status; + uint8_t retry_unit; + int64_t retry_sector_num; + uint32_t retry_nsector; + PortioList portio_list; + PortioList portio2_list; + VMChangeStateEntry *vmstate; +}; + +#define TYPE_IDE_BUS "IDE" +OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) + +void ide_bus_init(IDEBus *idebus, size_t 
idebus_size, DeviceState *dev, + int bus_id, int max_units); +IDEDevice *ide_bus_create_drive(IDEBus *bus, int unit, DriveInfo *drive); + +int ide_get_geometry(BusState *bus, int unit, + int16_t *cyls, int8_t *heads, int8_t *secs); +int ide_get_bios_chs_trans(BusState *bus, int unit); + +#endif diff --git a/include/hw/ide/ide-dev.h b/include/hw/ide/ide-dev.h new file mode 100644 index 00000000000..9a0d71db4e1 --- /dev/null +++ b/include/hw/ide/ide-dev.h @@ -0,0 +1,186 @@ +/* + * ide device definitions + * + * Copyright (c) 2009 Gerd Hoffmann + * + * This code is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifndef IDE_DEV_H +#define IDE_DEV_H + +#include "sysemu/dma.h" +#include "hw/qdev-properties.h" +#include "hw/block/block.h" + +typedef struct IDEDevice IDEDevice; +typedef struct IDEState IDEState; +typedef struct IDEBus IDEBus; + +typedef void EndTransferFunc(IDEState *); + +#define MAX_IDE_DEVS 2 + +#define TYPE_IDE_DEVICE "ide-device" +OBJECT_DECLARE_TYPE(IDEDevice, IDEDeviceClass, IDE_DEVICE) + +typedef enum { IDE_HD, IDE_CD, IDE_CFATA } IDEDriveKind; + +struct unreported_events { + bool eject_request; + bool new_media; +}; + +enum ide_dma_cmd { + IDE_DMA_READ = 0, + IDE_DMA_WRITE, + IDE_DMA_TRIM, + IDE_DMA_ATAPI, + IDE_DMA__COUNT +}; + +/* NOTE: IDEState represents in fact one drive */ +struct IDEState { + IDEBus *bus; + uint8_t unit; + /* ide config */ + IDEDriveKind drive_kind; + int drive_heads, drive_sectors; + int cylinders, heads, sectors, chs_trans; + int64_t nb_sectors; + int mult_sectors; + int identify_set; + uint8_t identify_data[512]; + int drive_serial; + char drive_serial_str[21]; + char drive_model_str[41]; + bool win2k_install_hack; + uint64_t wwn; + /* ide regs */ + uint8_t feature; + uint8_t error; + uint32_t nsector; + uint8_t sector; + uint8_t lcyl; + uint8_t hcyl; + /* other part of tf for lba48 support */ + uint8_t hob_feature; + uint8_t hob_nsector; + uint8_t hob_sector; + uint8_t hob_lcyl; + uint8_t hob_hcyl; + + uint8_t select; + uint8_t status; + + bool io8; + bool reset_reverts; + + /* set for lba48 access */ + uint8_t lba48; + BlockBackend *blk; + char version[9]; + /* ATAPI specific */ + struct unreported_events events; + uint8_t sense_key; + uint8_t asc; + bool tray_open; + bool tray_locked; + uint8_t cdrom_changed; + int packet_transfer_size; + int elementary_transfer_size; + int32_t io_buffer_index; + int lba; + int cd_sector_size; + int atapi_dma; /* true if dma is requested for the packet cmd */ + BlockAcctCookie acct; + BlockAIOCB *pio_aiocb; + QEMUIOVector qiov; + QLIST_HEAD(, IDEBufferedRequest) buffered_requests; + /* ATA DMA state */ + uint64_t io_buffer_offset; + int32_t io_buffer_size; + QEMUSGList sg; + /* PIO transfer handling */ + int req_nb_sectors; /* number of sectors per interrupt */ + EndTransferFunc *end_transfer_func; + uint8_t *data_ptr; + uint8_t *data_end; + uint8_t *io_buffer; + /* PIO save/restore */ + int32_t io_buffer_total_len; + int32_t 
cur_io_buffer_offset; + int32_t cur_io_buffer_len; + uint8_t end_transfer_fn_idx; + QEMUTimer *sector_write_timer; /* only used for win2k install hack */ + uint32_t irq_count; /* counts IRQs when using win2k install hack */ + /* CF-ATA extended error */ + uint8_t ext_error; + /* CF-ATA metadata storage */ + uint32_t mdata_size; + uint8_t *mdata_storage; + int media_changed; + enum ide_dma_cmd dma_cmd; + /* SMART */ + uint8_t smart_enabled; + uint8_t smart_autosave; + int smart_errors; + uint8_t smart_selftest_count; + uint8_t *smart_selftest_data; + /* AHCI */ + int ncq_queues; +}; + +struct IDEDeviceClass { + DeviceClass parent_class; + void (*realize)(IDEDevice *dev, Error **errp); +}; + +struct IDEDevice { + DeviceState qdev; + uint32_t unit; + BlockConf conf; + int chs_trans; + char *version; + char *serial; + char *model; + uint64_t wwn; + /* + * 0x0000 - rotation rate not reported + * 0x0001 - non-rotating medium (SSD) + * 0x0002-0x0400 - reserved + * 0x0401-0xffe - rotations per minute + * 0xffff - reserved + */ + uint16_t rotation_rate; + bool win2k_install_hack; +}; + +typedef struct IDEDrive { + IDEDevice dev; +} IDEDrive; + +#define DEFINE_IDE_DEV_PROPERTIES() \ + DEFINE_BLOCK_PROPERTIES(IDEDrive, dev.conf), \ + DEFINE_BLOCK_ERROR_PROPERTIES(IDEDrive, dev.conf), \ + DEFINE_PROP_STRING("ver", IDEDrive, dev.version), \ + DEFINE_PROP_UINT64("wwn", IDEDrive, dev.wwn, 0), \ + DEFINE_PROP_STRING("serial", IDEDrive, dev.serial),\ + DEFINE_PROP_STRING("model", IDEDrive, dev.model) + +void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp); + +void ide_drive_get(DriveInfo **hd, int max_bus); + +#endif diff --git a/include/hw/ide/ide-dma.h b/include/hw/ide/ide-dma.h new file mode 100644 index 00000000000..d0b19ac9c57 --- /dev/null +++ b/include/hw/ide/ide-dma.h @@ -0,0 +1,37 @@ +#ifndef HW_IDE_DMA_H +#define HW_IDE_DMA_H + +#include "block/aio.h" +#include "qemu/iov.h" + +typedef struct IDEState IDEState; +typedef struct IDEDMAOps IDEDMAOps; +typedef struct IDEDMA IDEDMA; + +typedef void DMAStartFunc(const IDEDMA *, IDEState *, BlockCompletionFunc *); +typedef void DMAVoidFunc(const IDEDMA *); +typedef int DMAIntFunc(const IDEDMA *, bool); +typedef int32_t DMAInt32Func(const IDEDMA *, int32_t len); +typedef void DMAu32Func(const IDEDMA *, uint32_t); +typedef void DMAStopFunc(const IDEDMA *, bool); + +struct IDEDMAOps { + DMAStartFunc *start_dma; + DMAVoidFunc *pio_transfer; + DMAInt32Func *prepare_buf; + DMAu32Func *commit_buf; + DMAIntFunc *rw_buf; + DMAVoidFunc *restart; + DMAVoidFunc *restart_dma; + DMAStopFunc *set_inactive; + DMAVoidFunc *cmd_done; + DMAVoidFunc *reset; +}; + +struct IDEDMA { + const IDEDMAOps *ops; + QEMUIOVector qiov; + BlockAIOCB *aiocb; +}; + +#endif diff --git a/include/hw/ide/pci.h b/include/hw/ide/pci.h index a814a0a7c36..ef03764caaf 100644 --- a/include/hw/ide/pci.h +++ b/include/hw/ide/pci.h @@ -1,7 +1,7 @@ #ifndef HW_IDE_PCI_H #define HW_IDE_PCI_H -#include "hw/ide/internal.h" +#include "hw/ide/ide-bus.h" #include "hw/pci/pci_device.h" #include "qom/object.h" diff --git a/include/hw/input/i8042.h b/include/hw/input/i8042.h index 9fb3f8d7875..e90f008b667 100644 --- a/include/hw/input/i8042.h +++ b/include/hw/input/i8042.h @@ -89,7 +89,6 @@ struct MMIOKBDState { void i8042_isa_mouse_fake_event(ISAKBDState *isa); -void i8042_setup_a20_line(ISADevice *dev, qemu_irq a20_out); static inline bool i8042_present(void) { diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h index 6b4ae566c9a..89fe8aedaa9 100644 --- 
a/include/hw/intc/armv7m_nvic.h +++ b/include/hw/intc/armv7m_nvic.h @@ -10,7 +10,7 @@ #ifndef HW_ARM_ARMV7M_NVIC_H #define HW_ARM_ARMV7M_NVIC_H -#include "target/arm/cpu.h" +#include "target/arm/cpu-qom.h" #include "hw/sysbus.h" #include "hw/timer/armv7m_systick.h" #include "qom/object.h" diff --git a/include/hw/sparc/grlib.h b/include/hw/intc/grlib_irqmp.h similarity index 83% rename from include/hw/sparc/grlib.h rename to include/hw/intc/grlib_irqmp.h index ef1946c7f81..a76acbf9403 100644 --- a/include/hw/sparc/grlib.h +++ b/include/hw/intc/grlib_irqmp.h @@ -1,7 +1,9 @@ /* * QEMU GRLIB Components * - * Copyright (c) 2010-2019 AdaCore + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2010-2024 AdaCore * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -22,8 +24,8 @@ * THE SOFTWARE. */ -#ifndef GRLIB_H -#define GRLIB_H +#ifndef GRLIB_IRQMP_H +#define GRLIB_IRQMP_H #include "hw/sysbus.h" @@ -34,12 +36,6 @@ /* IRQMP */ #define TYPE_GRLIB_IRQMP "grlib-irqmp" -void grlib_irqmp_ack(DeviceState *dev, int intno); - -/* GPTimer */ -#define TYPE_GRLIB_GPTIMER "grlib-gptimer" - -/* APB UART */ -#define TYPE_GRLIB_APB_UART "grlib-apbuart" +void grlib_irqmp_ack(DeviceState *dev, unsigned int cpu, int intno); -#endif /* GRLIB_H */ +#endif /* GRLIB_IRQMP_H */ diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h index fbdef9a7b3b..a0a46b888cd 100644 --- a/include/hw/intc/loongarch_extioi.h +++ b/include/hw/intc/loongarch_extioi.h @@ -40,25 +40,29 @@ #define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET) #define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET) +typedef struct ExtIOICore { + uint32_t coreisr[EXTIOI_IRQS_GROUP_COUNT]; + DECLARE_BITMAP(sw_isr[LS3A_INTC_IP], EXTIOI_IRQS); + qemu_irq parent_irq[LS3A_INTC_IP]; +} ExtIOICore; + #define TYPE_LOONGARCH_EXTIOI "loongarch.extioi" OBJECT_DECLARE_SIMPLE_TYPE(LoongArchExtIOI, LOONGARCH_EXTIOI) struct LoongArchExtIOI { SysBusDevice parent_obj; + uint32_t num_cpu; /* hardware state */ uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2]; uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT]; uint32_t isr[EXTIOI_IRQS / 32]; - uint32_t coreisr[EXTIOI_CPUS][EXTIOI_IRQS_GROUP_COUNT]; uint32_t enable[EXTIOI_IRQS / 32]; uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4]; uint32_t coremap[EXTIOI_IRQS / 4]; uint32_t sw_pending[EXTIOI_IRQS / 32]; - DECLARE_BITMAP(sw_isr[EXTIOI_CPUS][LS3A_INTC_IP], EXTIOI_IRQS); uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE]; uint8_t sw_coremap[EXTIOI_IRQS]; - qemu_irq parent_irq[EXTIOI_CPUS][LS3A_INTC_IP]; qemu_irq irq[EXTIOI_IRQS]; - MemoryRegion extioi_iocsr_mem[EXTIOI_CPUS]; + ExtIOICore *cpu; MemoryRegion extioi_system_mem; }; #endif /* LOONGARCH_EXTIOI_H */ diff --git a/include/hw/intc/loongarch_ipi.h b/include/hw/intc/loongarch_ipi.h index 6c6194786e8..1c1e834849e 100644 --- a/include/hw/intc/loongarch_ipi.h +++ b/include/hw/intc/loongarch_ipi.h @@ -47,7 +47,8 @@ struct LoongArchIPI { SysBusDevice parent_obj; MemoryRegion ipi_iocsr_mem; MemoryRegion ipi64_iocsr_mem; - IPICore ipi_core; + uint32_t num_cpu; + IPICore *cpu; }; #endif diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h index 674f4655e0e..252f7df7f4a 100644 --- a/include/hw/loongarch/virt.h +++ b/include/hw/loongarch/virt.h @@ -18,14 +18,16 @@ #define VIRT_FWCFG_BASE 0x1e020000UL #define VIRT_BIOS_BASE 0x1c000000UL -#define VIRT_BIOS_SIZE (4 * MiB) +#define VIRT_BIOS_SIZE (16 * MiB) #define VIRT_FLASH_SECTOR_SIZE (128 * KiB) -#define 
VIRT_FLASH_BASE 0x1d000000UL -#define VIRT_FLASH_SIZE (16 * MiB) +#define VIRT_FLASH0_BASE VIRT_BIOS_BASE +#define VIRT_FLASH0_SIZE VIRT_BIOS_SIZE +#define VIRT_FLASH1_BASE 0x1d000000UL +#define VIRT_FLASH1_SIZE (16 * MiB) #define VIRT_LOWMEM_BASE 0 #define VIRT_LOWMEM_SIZE 0x10000000 -#define VIRT_HIGHMEM_BASE 0x90000000 +#define VIRT_HIGHMEM_BASE 0x80000000 #define VIRT_GED_EVT_ADDR 0x100e0000 #define VIRT_GED_MEM_ADDR (VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN) #define VIRT_GED_REG_ADDR (VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN) @@ -49,7 +51,10 @@ struct LoongArchMachineState { int fdt_size; DeviceState *platform_bus_dev; PCIBus *pci_bus; - PFlashCFI01 *flash; + PFlashCFI01 *flash[2]; + MemoryRegion system_iocsr; + MemoryRegion iocsr_mem; + AddressSpace as_iocsr; }; #define TYPE_LOONGARCH_MACHINE MACHINE_TYPE_NAME("virt") diff --git a/include/hw/m68k/q800-glue.h b/include/hw/m68k/q800-glue.h index ceb916d16c1..04fac25f6c2 100644 --- a/include/hw/m68k/q800-glue.h +++ b/include/hw/m68k/q800-glue.h @@ -23,7 +23,6 @@ #ifndef HW_Q800_GLUE_H #define HW_Q800_GLUE_H -#include "qemu/osdep.h" #include "hw/sysbus.h" #define TYPE_GLUE "q800-glue" diff --git a/include/hw/m68k/q800.h b/include/hw/m68k/q800.h index a9661f65f69..34365c98608 100644 --- a/include/hw/m68k/q800.h +++ b/include/hw/m68k/q800.h @@ -55,6 +55,7 @@ struct Q800MachineState { MOS6522Q800VIA1State via1; MOS6522Q800VIA2State via2; dp8393xState dp8393x; + MemoryRegion dp8393x_prom; ESCCState escc; OrIRQState escc_orgate; SysBusESPState esp; diff --git a/include/hw/mem/memory-device.h b/include/hw/mem/memory-device.h index a1d62cc551a..e0571c8a319 100644 --- a/include/hw/mem/memory-device.h +++ b/include/hw/mem/memory-device.h @@ -14,7 +14,6 @@ #define MEMORY_DEVICE_H #include "hw/qdev-core.h" -#include "qemu/typedefs.h" #include "qapi/qapi-types-machine.h" #include "qom/object.h" diff --git a/include/hw/mips/bios.h b/include/hw/mips/bios.h deleted file mode 100644 index 44acb6815be..00000000000 --- a/include/hw/mips/bios.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef HW_MIPS_BIOS_H -#define HW_MIPS_BIOS_H - -#include "qemu/units.h" -#include "cpu.h" - -#define BIOS_SIZE (4 * MiB) -#if TARGET_BIG_ENDIAN -#define BIOS_FILENAME "mips_bios.bin" -#else -#define BIOS_FILENAME "mipsel_bios.bin" -#endif - -#endif diff --git a/include/hw/misc/imx7_snvs.h b/include/hw/misc/imx7_snvs.h index 14a1d6fe6b0..1272076086a 100644 --- a/include/hw/misc/imx7_snvs.h +++ b/include/hw/misc/imx7_snvs.h @@ -20,7 +20,9 @@ enum IMX7SNVSRegisters { SNVS_LPCR = 0x38, SNVS_LPCR_TOP = BIT(6), - SNVS_LPCR_DP_EN = BIT(5) + SNVS_LPCR_DP_EN = BIT(5), + SNVS_LPSRTCMR = 0x050, /* Secure Real Time Counter MSB Register */ + SNVS_LPSRTCLR = 0x054, /* Secure Real Time Counter LSB Register */ }; #define TYPE_IMX7_SNVS "imx7.snvs" @@ -31,6 +33,9 @@ struct IMX7SNVSState { SysBusDevice parent_obj; MemoryRegion mmio; + + uint64_t tick_offset; + uint64_t lpcr; }; #endif /* IMX7_SNVS_H */ diff --git a/include/hw/misc/lasi.h b/include/hw/misc/lasi.h index 0a8c7352be2..f01c0f680a4 100644 --- a/include/hw/misc/lasi.h +++ b/include/hw/misc/lasi.h @@ -26,9 +26,11 @@ OBJECT_DECLARE_SIMPLE_TYPE(LasiState, LASI_CHIP) #define LASI_IAR 0x10 #define LASI_LPT 0x02000 +#define LASI_AUDIO 0x04000 #define LASI_UART 0x05000 #define LASI_LAN 0x07000 #define LASI_RTC 0x09000 +#define LASI_FDC 0x0A000 #define LASI_PCR 0x0C000 /* LASI Power Control register */ #define LASI_ERRLOG 0x0C004 /* LASI Error Logging register */ diff --git a/include/hw/misc/macio/macio.h b/include/hw/misc/macio/macio.h index 
86df2c2b60a..2b54da6b311 100644 --- a/include/hw/misc/macio/macio.h +++ b/include/hw/misc/macio/macio.h @@ -28,7 +28,7 @@ #include "hw/char/escc.h" #include "hw/pci/pci_device.h" -#include "hw/ide/internal.h" +#include "hw/ide/ide-bus.h" #include "hw/intc/heathrow_pic.h" #include "hw/misc/macio/cuda.h" #include "hw/misc/macio/gpio.h" diff --git a/include/hw/misc/mips_itu.h b/include/hw/misc/mips_itu.h index 5caed6cc360..27c9a1090d5 100644 --- a/include/hw/misc/mips_itu.h +++ b/include/hw/misc/mips_itu.h @@ -70,15 +70,9 @@ struct MIPSITUState { /* ITU Control Register */ uint64_t icr0; - - /* SAAR */ - uint64_t *saar; - ArchCPU *cpu0; }; /* Get ITC Configuration Tag memory region. */ MemoryRegion *mips_itu_get_tag_region(MIPSITUState *itu); -void itc_reconfigure(struct MIPSITUState *tag); - #endif /* MIPS_ITU_H */ diff --git a/include/hw/misc/mps2-scc.h b/include/hw/misc/mps2-scc.h index 3b2d13ac9c3..8ff188c06b1 100644 --- a/include/hw/misc/mps2-scc.h +++ b/include/hw/misc/mps2-scc.h @@ -51,6 +51,7 @@ struct MPS2SCC { uint32_t cfg4; uint32_t cfg5; uint32_t cfg6; + uint32_t cfg7; uint32_t cfgdata_rtn; uint32_t cfgdata_out; uint32_t cfgctrl; diff --git a/include/hw/misc/stm32l4x5_exti.h b/include/hw/misc/stm32l4x5_exti.h new file mode 100644 index 00000000000..be961d2f01f --- /dev/null +++ b/include/hw/misc/stm32l4x5_exti.h @@ -0,0 +1,51 @@ +/* + * STM32L4x5 EXTI (Extended interrupts and events controller) + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This work is based on the stm32f4xx_exti by Alistair Francis. + * Original code is licensed under the MIT License: + * + * Copyright (c) 2014 Alistair Francis + */ + +/* + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. + * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html + */ + +#ifndef HW_STM32L4X5_EXTI_H +#define HW_STM32L4X5_EXTI_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_STM32L4X5_EXTI "stm32l4x5-exti" +OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5ExtiState, STM32L4X5_EXTI) + +#define EXTI_NUM_INTERRUPT_OUT_LINES 40 +#define EXTI_NUM_REGISTER 2 + +struct Stm32l4x5ExtiState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + + uint32_t imr[EXTI_NUM_REGISTER]; + uint32_t emr[EXTI_NUM_REGISTER]; + uint32_t rtsr[EXTI_NUM_REGISTER]; + uint32_t ftsr[EXTI_NUM_REGISTER]; + uint32_t swier[EXTI_NUM_REGISTER]; + uint32_t pr[EXTI_NUM_REGISTER]; + + qemu_irq irq[EXTI_NUM_INTERRUPT_OUT_LINES]; +}; + +#endif diff --git a/include/hw/misc/stm32l4x5_rcc.h b/include/hw/misc/stm32l4x5_rcc.h new file mode 100644 index 00000000000..0fbfba5c40b --- /dev/null +++ b/include/hw/misc/stm32l4x5_rcc.h @@ -0,0 +1,239 @@ +/* + * STM32L4X5 RCC (Reset and clock control) + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. + * + * Inspired by the BCM2835 CPRMAN clock manager by Luc Michel. 
+ */ + +#ifndef HW_STM32L4X5_RCC_H +#define HW_STM32L4X5_RCC_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_STM32L4X5_RCC "stm32l4x5-rcc" +OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5RccState, STM32L4X5_RCC) + +/* In the Stm32l4x5 clock tree, mux have at most 7 sources */ +#define RCC_NUM_CLOCK_MUX_SRC 7 + +typedef enum PllCommonChannels { + RCC_PLL_COMMON_CHANNEL_P = 0, + RCC_PLL_COMMON_CHANNEL_Q = 1, + RCC_PLL_COMMON_CHANNEL_R = 2, + + RCC_NUM_CHANNEL_PLL_OUT = 3 +} PllCommonChannels; + +/* NB: Prescaler are assimilated to mux with one source and one output */ +typedef enum RccClockMux { + /* Internal muxes that arent't exposed publicly to other peripherals */ + RCC_CLOCK_MUX_SYSCLK, + RCC_CLOCK_MUX_PLL_INPUT, + RCC_CLOCK_MUX_HCLK, + RCC_CLOCK_MUX_PCLK1, + RCC_CLOCK_MUX_PCLK2, + RCC_CLOCK_MUX_HSE_OVER_32, + RCC_CLOCK_MUX_LCD_AND_RTC_COMMON, + + /* Muxes with a publicly available output */ + RCC_CLOCK_MUX_CORTEX_REFCLK, + RCC_CLOCK_MUX_USART1, + RCC_CLOCK_MUX_USART2, + RCC_CLOCK_MUX_USART3, + RCC_CLOCK_MUX_UART4, + RCC_CLOCK_MUX_UART5, + RCC_CLOCK_MUX_LPUART1, + RCC_CLOCK_MUX_I2C1, + RCC_CLOCK_MUX_I2C2, + RCC_CLOCK_MUX_I2C3, + RCC_CLOCK_MUX_LPTIM1, + RCC_CLOCK_MUX_LPTIM2, + RCC_CLOCK_MUX_SWPMI1, + RCC_CLOCK_MUX_MCO, + RCC_CLOCK_MUX_LSCO, + RCC_CLOCK_MUX_DFSDM1, + RCC_CLOCK_MUX_ADC, + RCC_CLOCK_MUX_CLK48, + RCC_CLOCK_MUX_SAI1, + RCC_CLOCK_MUX_SAI2, + + /* + * Mux that have only one input and one output assigned to as peripheral. + * They could be direct lines but it is simpler + * to use the same logic for all outputs. + */ + /* - AHB1 */ + RCC_CLOCK_MUX_TSC, + RCC_CLOCK_MUX_CRC, + RCC_CLOCK_MUX_FLASH, + RCC_CLOCK_MUX_DMA2, + RCC_CLOCK_MUX_DMA1, + + /* - AHB2 */ + RCC_CLOCK_MUX_RNG, + RCC_CLOCK_MUX_AES, + RCC_CLOCK_MUX_OTGFS, + RCC_CLOCK_MUX_GPIOA, + RCC_CLOCK_MUX_GPIOB, + RCC_CLOCK_MUX_GPIOC, + RCC_CLOCK_MUX_GPIOD, + RCC_CLOCK_MUX_GPIOE, + RCC_CLOCK_MUX_GPIOF, + RCC_CLOCK_MUX_GPIOG, + RCC_CLOCK_MUX_GPIOH, + + /* - AHB3 */ + RCC_CLOCK_MUX_QSPI, + RCC_CLOCK_MUX_FMC, + + /* - APB1 */ + RCC_CLOCK_MUX_OPAMP, + RCC_CLOCK_MUX_DAC1, + RCC_CLOCK_MUX_PWR, + RCC_CLOCK_MUX_CAN1, + RCC_CLOCK_MUX_SPI3, + RCC_CLOCK_MUX_SPI2, + RCC_CLOCK_MUX_WWDG, + RCC_CLOCK_MUX_LCD, + RCC_CLOCK_MUX_TIM7, + RCC_CLOCK_MUX_TIM6, + RCC_CLOCK_MUX_TIM5, + RCC_CLOCK_MUX_TIM4, + RCC_CLOCK_MUX_TIM3, + RCC_CLOCK_MUX_TIM2, + + /* - APB2 */ + RCC_CLOCK_MUX_TIM17, + RCC_CLOCK_MUX_TIM16, + RCC_CLOCK_MUX_TIM15, + RCC_CLOCK_MUX_TIM8, + RCC_CLOCK_MUX_SPI1, + RCC_CLOCK_MUX_TIM1, + RCC_CLOCK_MUX_SDMMC1, + RCC_CLOCK_MUX_FW, + RCC_CLOCK_MUX_SYSCFG, + + /* - BDCR */ + RCC_CLOCK_MUX_RTC, + + /* - OTHER */ + RCC_CLOCK_MUX_CORTEX_FCLK, + + RCC_NUM_CLOCK_MUX +} RccClockMux; + +typedef enum RccPll { + RCC_PLL_PLL, + RCC_PLL_PLLSAI1, + RCC_PLL_PLLSAI2, + + RCC_NUM_PLL +} RccPll; + +typedef struct RccClockMuxState { + DeviceState parent_obj; + + RccClockMux id; + Clock *srcs[RCC_NUM_CLOCK_MUX_SRC]; + Clock *out; + bool enabled; + uint32_t src; + uint32_t multiplier; + uint32_t divider; + + /* + * Used by clock srcs update callback to retrieve both the clock and the + * source number. 
+ */ + struct RccClockMuxState *backref[RCC_NUM_CLOCK_MUX_SRC]; +} RccClockMuxState; + +typedef struct RccPllState { + DeviceState parent_obj; + + RccPll id; + Clock *in; + uint32_t vco_multiplier; + Clock *channels[RCC_NUM_CHANNEL_PLL_OUT]; + /* Global pll enabled flag */ + bool enabled; + /* 'enabled' refers to the runtime configuration */ + bool channel_enabled[RCC_NUM_CHANNEL_PLL_OUT]; + /* + * 'exists' refers to the physical configuration + * It should only be set at pll initialization. + * e.g. pllsai2 doesn't have a Q output. + */ + bool channel_exists[RCC_NUM_CHANNEL_PLL_OUT]; + uint32_t channel_divider[RCC_NUM_CHANNEL_PLL_OUT]; +} RccPllState; + +struct Stm32l4x5RccState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + + uint32_t cr; + uint32_t icscr; + uint32_t cfgr; + uint32_t pllcfgr; + uint32_t pllsai1cfgr; + uint32_t pllsai2cfgr; + uint32_t cier; + uint32_t cifr; + uint32_t ahb1rstr; + uint32_t ahb2rstr; + uint32_t ahb3rstr; + uint32_t apb1rstr1; + uint32_t apb1rstr2; + uint32_t apb2rstr; + uint32_t ahb1enr; + uint32_t ahb2enr; + uint32_t ahb3enr; + uint32_t apb1enr1; + uint32_t apb1enr2; + uint32_t apb2enr; + uint32_t ahb1smenr; + uint32_t ahb2smenr; + uint32_t ahb3smenr; + uint32_t apb1smenr1; + uint32_t apb1smenr2; + uint32_t apb2smenr; + uint32_t ccipr; + uint32_t bdcr; + uint32_t csr; + + /* Clock sources */ + Clock *gnd; + Clock *hsi16_rc; + Clock *msi_rc; + Clock *hse; + Clock *lsi_rc; + Clock *lse_crystal; + Clock *sai1_extclk; + Clock *sai2_extclk; + + /* PLLs */ + RccPllState plls[RCC_NUM_PLL]; + + /* Muxes ~= outputs */ + RccClockMuxState clock_muxes[RCC_NUM_CLOCK_MUX]; + + qemu_irq irq; + uint64_t hse_frequency; + uint64_t sai1_extclk_frequency; + uint64_t sai2_extclk_frequency; +}; + +#endif /* HW_STM32L4X5_RCC_H */ diff --git a/include/hw/misc/stm32l4x5_rcc_internals.h b/include/hw/misc/stm32l4x5_rcc_internals.h new file mode 100644 index 00000000000..ff1c834f694 --- /dev/null +++ b/include/hw/misc/stm32l4x5_rcc_internals.h @@ -0,0 +1,1042 @@ +/* + * STM32L4X5 RCC (Reset and clock control) + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. + * + * Inspired by the BCM2835 CPRMAN clock manager implementation by Luc Michel. 
+ */ + +#ifndef HW_STM32L4X5_RCC_INTERNALS_H +#define HW_STM32L4X5_RCC_INTERNALS_H + +#include "hw/registerfields.h" +#include "hw/misc/stm32l4x5_rcc.h" + +#define TYPE_RCC_CLOCK_MUX "stm32l4x5-rcc-clock-mux" +#define TYPE_RCC_PLL "stm32l4x5-rcc-pll" + +OBJECT_DECLARE_SIMPLE_TYPE(RccClockMuxState, RCC_CLOCK_MUX) +OBJECT_DECLARE_SIMPLE_TYPE(RccPllState, RCC_PLL) + +/* Register map */ +REG32(CR, 0x00) + FIELD(CR, PLLSAI2RDY, 29, 1) + FIELD(CR, PLLSAI2ON, 28, 1) + FIELD(CR, PLLSAI1RDY, 27, 1) + FIELD(CR, PLLSAI1ON, 26, 1) + FIELD(CR, PLLRDY, 25, 1) + FIELD(CR, PLLON, 24, 1) + FIELD(CR, CSSON, 19, 1) + FIELD(CR, HSEBYP, 18, 1) + FIELD(CR, HSERDY, 17, 1) + FIELD(CR, HSEON, 16, 1) + FIELD(CR, HSIASFS, 11, 1) + FIELD(CR, HSIRDY, 10, 1) + FIELD(CR, HSIKERON, 9, 1) + FIELD(CR, HSION, 8, 1) + FIELD(CR, MSIRANGE, 4, 4) + FIELD(CR, MSIRGSEL, 3, 1) + FIELD(CR, MSIPLLEN, 2, 1) + FIELD(CR, MSIRDY, 1, 1) + FIELD(CR, MSION, 0, 1) +REG32(ICSCR, 0x04) + FIELD(ICSCR, HSITRIM, 24, 7) + FIELD(ICSCR, HSICAL, 16, 8) + FIELD(ICSCR, MSITRIM, 8, 8) + FIELD(ICSCR, MSICAL, 0, 8) +REG32(CFGR, 0x08) + FIELD(CFGR, MCOPRE, 28, 3) + /* MCOSEL[2:0] only for STM32L475xx/476xx/486xx devices */ + FIELD(CFGR, MCOSEL, 24, 3) + FIELD(CFGR, STOPWUCK, 15, 1) + FIELD(CFGR, PPRE2, 11, 3) + FIELD(CFGR, PPRE1, 8, 3) + FIELD(CFGR, HPRE, 4, 4) + FIELD(CFGR, SWS, 2, 2) + FIELD(CFGR, SW, 0, 2) +REG32(PLLCFGR, 0x0C) + FIELD(PLLCFGR, PLLPDIV, 27, 5) + FIELD(PLLCFGR, PLLR, 25, 2) + FIELD(PLLCFGR, PLLREN, 24, 1) + FIELD(PLLCFGR, PLLQ, 21, 2) + FIELD(PLLCFGR, PLLQEN, 20, 1) + FIELD(PLLCFGR, PLLP, 17, 1) + FIELD(PLLCFGR, PLLPEN, 16, 1) + FIELD(PLLCFGR, PLLN, 8, 7) + FIELD(PLLCFGR, PLLM, 4, 3) + FIELD(PLLCFGR, PLLSRC, 0, 2) +REG32(PLLSAI1CFGR, 0x10) + FIELD(PLLSAI1CFGR, PLLSAI1PDIV, 27, 5) + FIELD(PLLSAI1CFGR, PLLSAI1R, 25, 2) + FIELD(PLLSAI1CFGR, PLLSAI1REN, 24, 1) + FIELD(PLLSAI1CFGR, PLLSAI1Q, 21, 2) + FIELD(PLLSAI1CFGR, PLLSAI1QEN, 20, 1) + FIELD(PLLSAI1CFGR, PLLSAI1P, 17, 1) + FIELD(PLLSAI1CFGR, PLLSAI1PEN, 16, 1) + FIELD(PLLSAI1CFGR, PLLSAI1N, 8, 7) +REG32(PLLSAI2CFGR, 0x14) + FIELD(PLLSAI2CFGR, PLLSAI2PDIV, 27, 5) + FIELD(PLLSAI2CFGR, PLLSAI2R, 25, 2) + FIELD(PLLSAI2CFGR, PLLSAI2REN, 24, 1) + FIELD(PLLSAI2CFGR, PLLSAI2Q, 21, 2) + FIELD(PLLSAI2CFGR, PLLSAI2QEN, 20, 1) + FIELD(PLLSAI2CFGR, PLLSAI2P, 17, 1) + FIELD(PLLSAI2CFGR, PLLSAI2PEN, 16, 1) + FIELD(PLLSAI2CFGR, PLLSAI2N, 8, 7) +REG32(CIER, 0x18) + /* HSI48RDYIE: only on STM32L496xx/4A6xx devices */ + FIELD(CIER, LSECSSIE, 9, 1) + FIELD(CIER, PLLSAI2RDYIE, 7, 1) + FIELD(CIER, PLLSAI1RDYIE, 6, 1) + FIELD(CIER, PLLRDYIE, 5, 1) + FIELD(CIER, HSERDYIE, 4, 1) + FIELD(CIER, HSIRDYIE, 3, 1) + FIELD(CIER, MSIRDYIE, 2, 1) + FIELD(CIER, LSERDYIE, 1, 1) + FIELD(CIER, LSIRDYIE, 0, 1) +REG32(CIFR, 0x1C) + /* HSI48RDYF: only on STM32L496xx/4A6xx devices */ + FIELD(CIFR, LSECSSF, 9, 1) + FIELD(CIFR, CSSF, 8, 1) + FIELD(CIFR, PLLSAI2RDYF, 7, 1) + FIELD(CIFR, PLLSAI1RDYF, 6, 1) + FIELD(CIFR, PLLRDYF, 5, 1) + FIELD(CIFR, HSERDYF, 4, 1) + FIELD(CIFR, HSIRDYF, 3, 1) + FIELD(CIFR, MSIRDYF, 2, 1) + FIELD(CIFR, LSERDYF, 1, 1) + FIELD(CIFR, LSIRDYF, 0, 1) +REG32(CICR, 0x20) + /* HSI48RDYC: only on STM32L496xx/4A6xx devices */ + FIELD(CICR, LSECSSC, 9, 1) + FIELD(CICR, CSSC, 8, 1) + FIELD(CICR, PLLSAI2RDYC, 7, 1) + FIELD(CICR, PLLSAI1RDYC, 6, 1) + FIELD(CICR, PLLRDYC, 5, 1) + FIELD(CICR, HSERDYC, 4, 1) + FIELD(CICR, HSIRDYC, 3, 1) + FIELD(CICR, MSIRDYC, 2, 1) + FIELD(CICR, LSERDYC, 1, 1) + FIELD(CICR, LSIRDYC, 0, 1) +REG32(AHB1RSTR, 0x28) +REG32(AHB2RSTR, 0x2C) +REG32(AHB3RSTR, 0x30) +REG32(APB1RSTR1, 0x38) 
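[Illustration, not part of the patch: the REG32()/FIELD() declarations above expand, via "hw/registerfields.h", into A_<reg>/R_<reg> register offsets and R_<reg>_<field>_MASK/_SHIFT constants, plus the FIELD_EX32()/FIELD_DP32() accessors. A minimal sketch of how a register-write path can consume them, assuming the usual "instant lock" modelling of the oscillators; the function name below is hypothetical and not taken from hw/misc/stm32l4x5_rcc.c:]

#include "qemu/osdep.h"
#include "hw/registerfields.h"

/* Make each clock-ready bit in CR track its corresponding enable bit. */
static void rcc_cr_sync_ready_bits(uint32_t *cr)
{
    *cr = FIELD_DP32(*cr, CR, MSIRDY, FIELD_EX32(*cr, CR, MSION));
    *cr = FIELD_DP32(*cr, CR, HSIRDY, FIELD_EX32(*cr, CR, HSION));
    *cr = FIELD_DP32(*cr, CR, HSERDY, FIELD_EX32(*cr, CR, HSEON));
    *cr = FIELD_DP32(*cr, CR, PLLRDY, FIELD_EX32(*cr, CR, PLLON));
}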
+REG32(APB1RSTR2, 0x3C) +REG32(APB2RSTR, 0x40) +REG32(AHB1ENR, 0x48) + /* DMA2DEN: reserved for STM32L475xx */ + FIELD(AHB1ENR, TSCEN, 16, 1) + FIELD(AHB1ENR, CRCEN, 12, 1) + FIELD(AHB1ENR, FLASHEN, 8, 1) + FIELD(AHB1ENR, DMA2EN, 1, 1) + FIELD(AHB1ENR, DMA1EN, 0, 1) +REG32(AHB2ENR, 0x4C) + FIELD(AHB2ENR, RNGEN, 18, 1) + /* HASHEN: reserved for STM32L475xx */ + FIELD(AHB2ENR, AESEN, 16, 1) + /* DCMIEN: reserved for STM32L475xx */ + FIELD(AHB2ENR, ADCEN, 13, 1) + FIELD(AHB2ENR, OTGFSEN, 12, 1) + /* GPIOIEN: reserved for STM32L475xx */ + FIELD(AHB2ENR, GPIOHEN, 7, 1) + FIELD(AHB2ENR, GPIOGEN, 6, 1) + FIELD(AHB2ENR, GPIOFEN, 5, 1) + FIELD(AHB2ENR, GPIOEEN, 4, 1) + FIELD(AHB2ENR, GPIODEN, 3, 1) + FIELD(AHB2ENR, GPIOCEN, 2, 1) + FIELD(AHB2ENR, GPIOBEN, 1, 1) + FIELD(AHB2ENR, GPIOAEN, 0, 1) +REG32(AHB3ENR, 0x50) + FIELD(AHB3ENR, QSPIEN, 8, 1) + FIELD(AHB3ENR, FMCEN, 0, 1) +REG32(APB1ENR1, 0x58) + FIELD(APB1ENR1, LPTIM1EN, 31, 1) + FIELD(APB1ENR1, OPAMPEN, 30, 1) + FIELD(APB1ENR1, DAC1EN, 29, 1) + FIELD(APB1ENR1, PWREN, 28, 1) + FIELD(APB1ENR1, CAN2EN, 26, 1) + FIELD(APB1ENR1, CAN1EN, 25, 1) + /* CRSEN: reserved for STM32L475xx */ + FIELD(APB1ENR1, I2C3EN, 23, 1) + FIELD(APB1ENR1, I2C2EN, 22, 1) + FIELD(APB1ENR1, I2C1EN, 21, 1) + FIELD(APB1ENR1, UART5EN, 20, 1) + FIELD(APB1ENR1, UART4EN, 19, 1) + FIELD(APB1ENR1, USART3EN, 18, 1) + FIELD(APB1ENR1, USART2EN, 17, 1) + FIELD(APB1ENR1, SPI3EN, 15, 1) + FIELD(APB1ENR1, SPI2EN, 14, 1) + FIELD(APB1ENR1, WWDGEN, 11, 1) + /* RTCAPBEN: reserved for STM32L475xx */ + FIELD(APB1ENR1, LCDEN, 9, 1) + FIELD(APB1ENR1, TIM7EN, 5, 1) + FIELD(APB1ENR1, TIM6EN, 4, 1) + FIELD(APB1ENR1, TIM5EN, 3, 1) + FIELD(APB1ENR1, TIM4EN, 2, 1) + FIELD(APB1ENR1, TIM3EN, 1, 1) + FIELD(APB1ENR1, TIM2EN, 0, 1) +REG32(APB1ENR2, 0x5C) + FIELD(APB1ENR2, LPTIM2EN, 5, 1) + FIELD(APB1ENR2, SWPMI1EN, 2, 1) + /* I2C4EN: reserved for STM32L475xx */ + FIELD(APB1ENR2, LPUART1EN, 0, 1) +REG32(APB2ENR, 0x60) + FIELD(APB2ENR, DFSDM1EN, 24, 1) + FIELD(APB2ENR, SAI2EN, 22, 1) + FIELD(APB2ENR, SAI1EN, 21, 1) + FIELD(APB2ENR, TIM17EN, 18, 1) + FIELD(APB2ENR, TIM16EN, 17, 1) + FIELD(APB2ENR, TIM15EN, 16, 1) + FIELD(APB2ENR, USART1EN, 14, 1) + FIELD(APB2ENR, TIM8EN, 13, 1) + FIELD(APB2ENR, SPI1EN, 12, 1) + FIELD(APB2ENR, TIM1EN, 11, 1) + FIELD(APB2ENR, SDMMC1EN, 10, 1) + FIELD(APB2ENR, FWEN, 7, 1) + FIELD(APB2ENR, SYSCFGEN, 0, 1) +REG32(AHB1SMENR, 0x68) +REG32(AHB2SMENR, 0x6C) +REG32(AHB3SMENR, 0x70) +REG32(APB1SMENR1, 0x78) +REG32(APB1SMENR2, 0x7C) +REG32(APB2SMENR, 0x80) +REG32(CCIPR, 0x88) + FIELD(CCIPR, DFSDM1SEL, 31, 1) + FIELD(CCIPR, SWPMI1SEL, 30, 1) + FIELD(CCIPR, ADCSEL, 28, 2) + FIELD(CCIPR, CLK48SEL, 26, 2) + FIELD(CCIPR, SAI2SEL, 24, 2) + FIELD(CCIPR, SAI1SEL, 22, 2) + FIELD(CCIPR, LPTIM2SEL, 20, 2) + FIELD(CCIPR, LPTIM1SEL, 18, 2) + FIELD(CCIPR, I2C3SEL, 16, 2) + FIELD(CCIPR, I2C2SEL, 14, 2) + FIELD(CCIPR, I2C1SEL, 12, 2) + FIELD(CCIPR, LPUART1SEL, 10, 2) + FIELD(CCIPR, UART5SEL, 8, 2) + FIELD(CCIPR, UART4SEL, 6, 2) + FIELD(CCIPR, USART3SEL, 4, 2) + FIELD(CCIPR, USART2SEL, 2, 2) + FIELD(CCIPR, USART1SEL, 0, 2) +REG32(BDCR, 0x90) + FIELD(BDCR, LSCOSEL, 25, 1) + FIELD(BDCR, LSCOEN, 24, 1) + FIELD(BDCR, BDRST, 16, 1) + FIELD(BDCR, RTCEN, 15, 1) + FIELD(BDCR, RTCSEL, 8, 2) + FIELD(BDCR, LSECSSD, 6, 1) + FIELD(BDCR, LSECSSON, 5, 1) + FIELD(BDCR, LSEDRV, 3, 2) + FIELD(BDCR, LSEBYP, 2, 1) + FIELD(BDCR, LSERDY, 1, 1) + FIELD(BDCR, LSEON, 0, 1) +REG32(CSR, 0x94) + FIELD(CSR, LPWRRSTF, 31, 1) + FIELD(CSR, WWDGRSTF, 30, 1) + FIELD(CSR, IWWGRSTF, 29, 1) + FIELD(CSR, SFTRSTF, 28, 1) + FIELD(CSR, BORRSTF, 
27, 1) + FIELD(CSR, PINRSTF, 26, 1) + FIELD(CSR, OBLRSTF, 25, 1) + FIELD(CSR, FWRSTF, 24, 1) + FIELD(CSR, RMVF, 23, 1) + FIELD(CSR, MSISRANGE, 8, 4) + FIELD(CSR, LSIRDY, 1, 1) + FIELD(CSR, LSION, 0, 1) +/* CRRCR and CCIPR2 registers are present on L496/L4A6 devices only. */ + +/* Read Only masks to prevent writes in unauthorized bits */ +#define CR_READ_ONLY_MASK (R_CR_PLLSAI2RDY_MASK | \ + R_CR_PLLSAI1RDY_MASK | \ + R_CR_PLLRDY_MASK | \ + R_CR_HSERDY_MASK | \ + R_CR_HSIRDY_MASK | \ + R_CR_MSIRDY_MASK) +#define CR_READ_SET_MASK (R_CR_CSSON_MASK | R_CR_MSIRGSEL_MASK) +#define ICSCR_READ_ONLY_MASK (R_ICSCR_HSICAL_MASK | R_ICSCR_MSICAL_MASK) +#define CFGR_READ_ONLY_MASK (R_CFGR_SWS_MASK) +#define CIFR_READ_ONLY_MASK (R_CIFR_LSECSSF_MASK | \ + R_CIFR_CSSF_MASK | \ + R_CIFR_PLLSAI2RDYF_MASK | \ + R_CIFR_PLLSAI1RDYF_MASK | \ + R_CIFR_PLLRDYF_MASK | \ + R_CIFR_HSERDYF_MASK | \ + R_CIFR_HSIRDYF_MASK | \ + R_CIFR_MSIRDYF_MASK | \ + R_CIFR_LSERDYF_MASK | \ + R_CIFR_LSIRDYF_MASK) +#define CIFR_IRQ_MASK CIFR_READ_ONLY_MASK +#define APB2ENR_READ_SET_MASK (R_APB2ENR_FWEN_MASK) +#define BDCR_READ_ONLY_MASK (R_BDCR_LSECSSD_MASK | R_BDCR_LSERDY_MASK) +#define CSR_READ_ONLY_MASK (R_CSR_LPWRRSTF_MASK | \ + R_CSR_WWDGRSTF_MASK | \ + R_CSR_IWWGRSTF_MASK | \ + R_CSR_SFTRSTF_MASK | \ + R_CSR_BORRSTF_MASK | \ + R_CSR_PINRSTF_MASK | \ + R_CSR_OBLRSTF_MASK | \ + R_CSR_FWRSTF_MASK | \ + R_CSR_LSIRDY_MASK) + +/* Pll Channels */ +enum PllChannels { + RCC_PLL_CHANNEL_PLLSAI3CLK = 0, + RCC_PLL_CHANNEL_PLL48M1CLK = 1, + RCC_PLL_CHANNEL_PLLCLK = 2, +}; + +enum PllSai1Channels { + RCC_PLLSAI1_CHANNEL_PLLSAI1CLK = 0, + RCC_PLLSAI1_CHANNEL_PLL48M2CLK = 1, + RCC_PLLSAI1_CHANNEL_PLLADC1CLK = 2, +}; + +enum PllSai2Channels { + RCC_PLLSAI2_CHANNEL_PLLSAI2CLK = 0, + /* No Q channel */ + RCC_PLLSAI2_CHANNEL_PLLADC2CLK = 2, +}; + +typedef enum RccClockMuxSource { + RCC_CLOCK_MUX_SRC_GND = 0, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_HSE, + RCC_CLOCK_MUX_SRC_MSI, + RCC_CLOCK_MUX_SRC_LSI, + RCC_CLOCK_MUX_SRC_LSE, + RCC_CLOCK_MUX_SRC_SAI1_EXTCLK, + RCC_CLOCK_MUX_SRC_SAI2_EXTCLK, + RCC_CLOCK_MUX_SRC_PLL, + RCC_CLOCK_MUX_SRC_PLLSAI1, + RCC_CLOCK_MUX_SRC_PLLSAI2, + RCC_CLOCK_MUX_SRC_PLLSAI3, + RCC_CLOCK_MUX_SRC_PLL48M1, + RCC_CLOCK_MUX_SRC_PLL48M2, + RCC_CLOCK_MUX_SRC_PLLADC1, + RCC_CLOCK_MUX_SRC_PLLADC2, + RCC_CLOCK_MUX_SRC_SYSCLK, + RCC_CLOCK_MUX_SRC_HCLK, + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_PCLK2, + RCC_CLOCK_MUX_SRC_HSE_OVER_32, + RCC_CLOCK_MUX_SRC_LCD_AND_RTC_COMMON, + + RCC_CLOCK_MUX_SRC_NUMBER, +} RccClockMuxSource; + +/* PLL init info */ +typedef struct PllInitInfo { + const char *name; + + const char *channel_name[RCC_NUM_CHANNEL_PLL_OUT]; + bool channel_exists[RCC_NUM_CHANNEL_PLL_OUT]; + uint32_t default_channel_divider[RCC_NUM_CHANNEL_PLL_OUT]; + + RccClockMuxSource src_mapping[RCC_NUM_CLOCK_MUX_SRC]; +} PllInitInfo; + +static const PllInitInfo PLL_INIT_INFO[] = { + [RCC_PLL_PLL] = { + .name = "pll", + .channel_name = { + "pllsai3clk", + "pll48m1clk", + "pllclk" + }, + .channel_exists = { + true, true, true + }, + /* From PLLCFGR register documentation */ + .default_channel_divider = { + 7, 2, 2 + } + }, + [RCC_PLL_PLLSAI1] = { + .name = "pllsai1", + .channel_name = { + "pllsai1clk", + "pll48m2clk", + "plladc1clk" + }, + .channel_exists = { + true, true, true + }, + /* From PLLSAI1CFGR register documentation */ + .default_channel_divider = { + 7, 2, 2 + } + }, + [RCC_PLL_PLLSAI2] = { + .name = "pllsai2", + .channel_name = { + "pllsai2clk", + NULL, + "plladc2clk" + }, + .channel_exists = { + true, false, true + }, 
+ /* From PLLSAI2CFGR register documentation */ + .default_channel_divider = { + 7, 0, 2 + } + } +}; + +static inline void set_pll_init_info(RccPllState *pll, + RccPll id) +{ + int i; + + pll->id = id; + pll->vco_multiplier = 1; + for (i = 0; i < RCC_NUM_CHANNEL_PLL_OUT; i++) { + pll->channel_enabled[i] = false; + pll->channel_exists[i] = PLL_INIT_INFO[id].channel_exists[i]; + pll->channel_divider[i] = PLL_INIT_INFO[id].default_channel_divider[i]; + } +} + +/* Clock mux init info */ +typedef struct ClockMuxInitInfo { + const char *name; + + uint32_t multiplier; + uint32_t divider; + bool enabled; + /* If this is true, the clock will not be exposed outside of the device */ + bool hidden; + + RccClockMuxSource src_mapping[RCC_NUM_CLOCK_MUX_SRC]; +} ClockMuxInitInfo; + +#define FILL_DEFAULT_FACTOR \ + .multiplier = 1, \ + .divider = 1 + +#define FILL_DEFAULT_INIT_ENABLED \ + FILL_DEFAULT_FACTOR, \ + .enabled = true + +#define FILL_DEFAULT_INIT_DISABLED \ + FILL_DEFAULT_FACTOR, \ + .enabled = false + + +static const ClockMuxInitInfo CLOCK_MUX_INIT_INFO[] = { + [RCC_CLOCK_MUX_SYSCLK] = { + .name = "sysclk", + /* Same mapping as: CFGR_SW */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_MSI, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_HSE, + RCC_CLOCK_MUX_SRC_PLL, + }, + .hidden = true, + FILL_DEFAULT_INIT_ENABLED, + }, + [RCC_CLOCK_MUX_PLL_INPUT] = { + .name = "pll-input", + /* Same mapping as: PLLCFGR_PLLSRC */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_MSI, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_HSE, + }, + .hidden = true, + FILL_DEFAULT_INIT_ENABLED, + }, + [RCC_CLOCK_MUX_HCLK] = { + .name = "hclk", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + .hidden = true, + FILL_DEFAULT_INIT_ENABLED, + }, + [RCC_CLOCK_MUX_PCLK1] = { + .name = "pclk1", + .src_mapping = { + RCC_CLOCK_MUX_SRC_HCLK, + }, + .hidden = true, + FILL_DEFAULT_INIT_ENABLED, + }, + [RCC_CLOCK_MUX_PCLK2] = { + .name = "pclk2", + .src_mapping = { + RCC_CLOCK_MUX_SRC_HCLK, + }, + .hidden = true, + FILL_DEFAULT_INIT_ENABLED, + }, + [RCC_CLOCK_MUX_HSE_OVER_32] = { + .name = "hse-divided-by-32", + .multiplier = 1, + .divider = 32, + .enabled = true, + .src_mapping = { + RCC_CLOCK_MUX_SRC_HSE, + }, + .hidden = true, + }, + [RCC_CLOCK_MUX_LCD_AND_RTC_COMMON] = { + .name = "lcd-and-rtc-common-mux", + /* Same mapping as: BDCR_RTCSEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_GND, + RCC_CLOCK_MUX_SRC_LSE, + RCC_CLOCK_MUX_SRC_LSI, + RCC_CLOCK_MUX_SRC_HSE_OVER_32, + }, + .hidden = true, + FILL_DEFAULT_INIT_ENABLED, + }, + /* From now on, muxes with a publicly available output */ + [RCC_CLOCK_MUX_CORTEX_REFCLK] = { + .name = "cortex-refclk", + .multiplier = 1, + /* REFCLK is always HCLK/8 */ + .divider = 8, + .enabled = true, + .src_mapping = { + RCC_CLOCK_MUX_SRC_HCLK, + } + }, + [RCC_CLOCK_MUX_USART1] = { + .name = "usart1", + /* Same mapping as: CCIPR_USART1SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK2, + RCC_CLOCK_MUX_SRC_SYSCLK, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_LSE, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_USART2] = { + .name = "usart2", + /* Same mapping as: CCIPR_USART2SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_SYSCLK, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_LSE, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_USART3] = { + .name = "usart3", + /* Same mapping as: CCIPR_USART3SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_SYSCLK, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_LSE, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + 
[RCC_CLOCK_MUX_UART4] = { + .name = "uart4", + /* Same mapping as: CCIPR_UART4SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_SYSCLK, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_LSE, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_UART5] = { + .name = "uart5", + /* Same mapping as: CCIPR_UART5SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_SYSCLK, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_LSE, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_LPUART1] = { + .name = "lpuart1", + /* Same mapping as: CCIPR_LPUART1SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_SYSCLK, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_LSE, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_I2C1] = { + .name = "i2c1", + /* Same mapping as: CCIPR_I2C1SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_SYSCLK, + RCC_CLOCK_MUX_SRC_HSI, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_I2C2] = { + .name = "i2c2", + /* Same mapping as: CCIPR_I2C2SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_SYSCLK, + RCC_CLOCK_MUX_SRC_HSI, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_I2C3] = { + .name = "i2c3", + /* Same mapping as: CCIPR_I2C3SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_SYSCLK, + RCC_CLOCK_MUX_SRC_HSI, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_LPTIM1] = { + .name = "lptim1", + /* Same mapping as: CCIPR_LPTIM1SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_LSI, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_LSE, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_LPTIM2] = { + .name = "lptim2", + /* Same mapping as: CCIPR_LPTIM2SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_LSI, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_LSE, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_SWPMI1] = { + .name = "swpmi1", + /* Same mapping as: CCIPR_SWPMI1SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + RCC_CLOCK_MUX_SRC_HSI, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_MCO] = { + .name = "mco", + /* Same mapping as: CFGR_MCOSEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + RCC_CLOCK_MUX_SRC_MSI, + RCC_CLOCK_MUX_SRC_HSI, + RCC_CLOCK_MUX_SRC_HSE, + RCC_CLOCK_MUX_SRC_PLL, + RCC_CLOCK_MUX_SRC_LSI, + RCC_CLOCK_MUX_SRC_LSE, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_LSCO] = { + .name = "lsco", + /* Same mapping as: BDCR_LSCOSEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_LSI, + RCC_CLOCK_MUX_SRC_LSE, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_DFSDM1] = { + .name = "dfsdm1", + /* Same mapping as: CCIPR_DFSDM1SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK2, + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_ADC] = { + .name = "adc", + /* Same mapping as: CCIPR_ADCSEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_GND, + RCC_CLOCK_MUX_SRC_PLLADC1, + RCC_CLOCK_MUX_SRC_PLLADC2, + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_CLK48] = { + .name = "clk48", + /* Same mapping as: CCIPR_CLK48SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_GND, + RCC_CLOCK_MUX_SRC_PLL48M2, + RCC_CLOCK_MUX_SRC_PLL48M1, + RCC_CLOCK_MUX_SRC_MSI, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_SAI2] = { + .name = "sai2", + /* Same mapping as: CCIPR_SAI2SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PLLSAI1, + RCC_CLOCK_MUX_SRC_PLLSAI2, + RCC_CLOCK_MUX_SRC_PLLSAI3, + 
RCC_CLOCK_MUX_SRC_SAI2_EXTCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_SAI1] = { + .name = "sai1", + /* Same mapping as: CCIPR_SAI1SEL */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_PLLSAI1, + RCC_CLOCK_MUX_SRC_PLLSAI2, + RCC_CLOCK_MUX_SRC_PLLSAI3, + RCC_CLOCK_MUX_SRC_SAI1_EXTCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + /* From now on, these muxes only have one valid source */ + [RCC_CLOCK_MUX_TSC] = { + .name = "tsc", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_CRC] = { + .name = "crc", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_FLASH] = { + .name = "flash", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_DMA2] = { + .name = "dma2", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_DMA1] = { + .name = "dma1", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_RNG] = { + .name = "rng", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_AES] = { + .name = "aes", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_OTGFS] = { + .name = "otgfs", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_GPIOA] = { + .name = "gpioa", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_GPIOB] = { + .name = "gpiob", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_GPIOC] = { + .name = "gpioc", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_GPIOD] = { + .name = "gpiod", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_GPIOE] = { + .name = "gpioe", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_GPIOF] = { + .name = "gpiof", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_GPIOG] = { + .name = "gpiog", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_GPIOH] = { + .name = "gpioh", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_QSPI] = { + .name = "qspi", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_FMC] = { + .name = "fmc", + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_OPAMP] = { + .name = "opamp", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_DAC1] = { + .name = "dac1", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_PWR] = { + .name = "pwr", + /* + * PWREN is in the APB1ENR1 register, + * but PWR uses SYSCLK according to the clock tree. 
+ */ + .src_mapping = { + RCC_CLOCK_MUX_SRC_SYSCLK, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_CAN1] = { + .name = "can1", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_SPI3] = { + .name = "spi3", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_SPI2] = { + .name = "spi2", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_WWDG] = { + .name = "wwdg", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_LCD] = { + .name = "lcd", + .src_mapping = { + RCC_CLOCK_MUX_SRC_LCD_AND_RTC_COMMON, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_TIM7] = { + .name = "tim7", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_TIM6] = { + .name = "tim6", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_TIM5] = { + .name = "tim5", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_TIM4] = { + .name = "tim4", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_TIM3] = { + .name = "tim3", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_TIM2] = { + .name = "tim2", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK1, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_TIM17] = { + .name = "tim17", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK2, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_TIM16] = { + .name = "tim16", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK2, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_TIM15] = { + .name = "tim15", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK2, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_TIM8] = { + .name = "tim8", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK2, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_SPI1] = { + .name = "spi1", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK2, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_TIM1] = { + .name = "tim1", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK2, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_SDMMC1] = { + .name = "sdmmc1", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK2, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_FW] = { + .name = "fw", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK2, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_SYSCFG] = { + .name = "syscfg", + .src_mapping = { + RCC_CLOCK_MUX_SRC_PCLK2, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_RTC] = { + .name = "rtc", + .src_mapping = { + RCC_CLOCK_MUX_SRC_LCD_AND_RTC_COMMON, + }, + FILL_DEFAULT_INIT_DISABLED, + }, + [RCC_CLOCK_MUX_CORTEX_FCLK] = { + .name = "cortex-fclk", + .src_mapping = { + RCC_CLOCK_MUX_SRC_HCLK, + }, + FILL_DEFAULT_INIT_ENABLED, + }, +}; + +static inline void set_clock_mux_init_info(RccClockMuxState *mux, + RccClockMux id) +{ + mux->id = id; + mux->multiplier = CLOCK_MUX_INIT_INFO[id].multiplier; + mux->divider = CLOCK_MUX_INIT_INFO[id].divider; + mux->enabled = CLOCK_MUX_INIT_INFO[id].enabled; + /* + * Every peripheral has the first source of their source list as + * as their default source. 
+ */ + mux->src = 0; +} + +#endif /* HW_STM32L4X5_RCC_INTERNALS_H */ diff --git a/include/hw/misc/stm32l4x5_syscfg.h b/include/hw/misc/stm32l4x5_syscfg.h new file mode 100644 index 00000000000..23bb5641507 --- /dev/null +++ b/include/hw/misc/stm32l4x5_syscfg.h @@ -0,0 +1,53 @@ +/* + * STM32L4x5 SYSCFG (System Configuration Controller) + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This work is based on the stm32f4xx_syscfg by Alistair Francis. + * Original code is licensed under the MIT License: + * + * Copyright (c) 2014 Alistair Francis + */ + +/* + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. + * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html + */ + +#ifndef HW_STM32L4X5_SYSCFG_H +#define HW_STM32L4X5_SYSCFG_H + +#include "hw/sysbus.h" +#include "qom/object.h" +#include "hw/gpio/stm32l4x5_gpio.h" + +#define TYPE_STM32L4X5_SYSCFG "stm32l4x5-syscfg" +OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5SyscfgState, STM32L4X5_SYSCFG) + +#define SYSCFG_NUM_EXTICR 4 + +struct Stm32l4x5SyscfgState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + + uint32_t memrmp; + uint32_t cfgr1; + uint32_t exticr[SYSCFG_NUM_EXTICR]; + uint32_t scsr; + uint32_t cfgr2; + uint32_t swpr; + uint32_t skr; + uint32_t swpr2; + + qemu_irq gpio_out[GPIO_NUM_PINS]; +}; + +#endif diff --git a/include/hw/misc/xlnx-versal-cframe-reg.h b/include/hw/misc/xlnx-versal-cframe-reg.h index 0091505246f..83f6a077446 100644 --- a/include/hw/misc/xlnx-versal-cframe-reg.h +++ b/include/hw/misc/xlnx-versal-cframe-reg.h @@ -23,7 +23,7 @@ #include "hw/misc/xlnx-versal-cfu.h" #include "qemu/fifo32.h" -#define TYPE_XLNX_VERSAL_CFRAME_REG "xlnx,cframe-reg" +#define TYPE_XLNX_VERSAL_CFRAME_REG "xlnx-cframe-reg" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFrameReg, XLNX_VERSAL_CFRAME_REG) #define TYPE_XLNX_VERSAL_CFRAME_BCAST_REG "xlnx.cframe-bcast-reg" diff --git a/include/hw/misc/xlnx-versal-cfu.h b/include/hw/misc/xlnx-versal-cfu.h index be62bab8c8c..3de3ee49231 100644 --- a/include/hw/misc/xlnx-versal-cfu.h +++ b/include/hw/misc/xlnx-versal-cfu.h @@ -22,13 +22,13 @@ #include "hw/misc/xlnx-cfi-if.h" #include "qemu/fifo32.h" -#define TYPE_XLNX_VERSAL_CFU_APB "xlnx,versal-cfu-apb" +#define TYPE_XLNX_VERSAL_CFU_APB "xlnx-versal-cfu-apb" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUAPB, XLNX_VERSAL_CFU_APB) -#define TYPE_XLNX_VERSAL_CFU_FDRO "xlnx,versal-cfu-fdro" +#define TYPE_XLNX_VERSAL_CFU_FDRO "xlnx-versal-cfu-fdro" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUFDRO, XLNX_VERSAL_CFU_FDRO) -#define TYPE_XLNX_VERSAL_CFU_SFR "xlnx,versal-cfu-sfr" +#define TYPE_XLNX_VERSAL_CFU_SFR "xlnx-versal-cfu-sfr" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUSFR, XLNX_VERSAL_CFU_SFR) REG32(CFU_ISR, 0x0) diff --git a/include/hw/misc/xlnx-versal-crl.h b/include/hw/misc/xlnx-versal-crl.h index 2857f4169a5..dba6d3585d1 100644 --- a/include/hw/misc/xlnx-versal-crl.h +++ b/include/hw/misc/xlnx-versal-crl.h @@ -11,9 +11,9 @@ #include "hw/sysbus.h" #include "hw/register.h" -#include "target/arm/cpu.h" +#include "target/arm/cpu-qom.h" -#define TYPE_XLNX_VERSAL_CRL "xlnx,versal-crl" +#define TYPE_XLNX_VERSAL_CRL "xlnx-versal-crl" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCRL, XLNX_VERSAL_CRL) REG32(ERR_CTRL, 0x0) diff --git a/include/hw/net/lan9118.h 
b/include/hw/net/lan9118.h index 3d0c67f3393..4bf9da7a63e 100644 --- a/include/hw/net/lan9118.h +++ b/include/hw/net/lan9118.h @@ -15,6 +15,6 @@ #define TYPE_LAN9118 "lan9118" -void lan9118_init(NICInfo *, uint32_t, qemu_irq); +void lan9118_init(uint32_t, qemu_irq); #endif diff --git a/include/hw/net/lasi_82596.h b/include/hw/net/lasi_82596.h index 3ef2f47ba20..439356ec19b 100644 --- a/include/hw/net/lasi_82596.h +++ b/include/hw/net/lasi_82596.h @@ -25,7 +25,7 @@ struct SysBusI82596State { int val_index:1; }; -SysBusI82596State *lasi_82596_init(MemoryRegion *addr_space, - hwaddr hpa, qemu_irq irq); +SysBusI82596State *lasi_82596_init(MemoryRegion *addr_space, hwaddr hpa, + qemu_irq irq, gboolean match_default); #endif diff --git a/include/hw/net/ne2000-isa.h b/include/hw/net/ne2000-isa.h index af59ee0b028..73bae10ad1a 100644 --- a/include/hw/net/ne2000-isa.h +++ b/include/hw/net/ne2000-isa.h @@ -22,8 +22,6 @@ static inline ISADevice *isa_ne2000_init(ISABus *bus, int base, int irq, { ISADevice *d; - qemu_check_nic_model(nd, "ne2k_isa"); - d = isa_try_new(TYPE_ISA_NE2000); if (d) { DeviceState *dev = DEVICE(d); diff --git a/include/hw/net/npcm_gmac.h b/include/hw/net/npcm_gmac.h new file mode 100644 index 00000000000..6340ffe92ca --- /dev/null +++ b/include/hw/net/npcm_gmac.h @@ -0,0 +1,343 @@ +/* + * Nuvoton NPCM7xx/8xx GMAC Module + * + * Copyright 2024 Google LLC + * Authors: + * Hao Wu + * Nabih Estefan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#ifndef NPCM_GMAC_H +#define NPCM_GMAC_H + +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "net/net.h" + +#define NPCM_GMAC_NR_REGS (0x1060 / sizeof(uint32_t)) + +#define NPCM_GMAC_MAX_PHYS 32 +#define NPCM_GMAC_MAX_PHY_REGS 32 + +struct NPCMGMACRxDesc { + uint32_t rdes0; + uint32_t rdes1; + uint32_t rdes2; + uint32_t rdes3; +}; + +/* NPCMGMACRxDesc.flags values */ +/* RDES2 and RDES3 are buffer addresses */ +/* Owner: 0 = software, 1 = dma */ +#define RX_DESC_RDES0_OWN BIT(31) +/* Destination Address Filter Fail */ +#define RX_DESC_RDES0_DEST_ADDR_FILT_FAIL BIT(30) +/* Frame length */ +#define RX_DESC_RDES0_FRAME_LEN_MASK(word) extract32(word, 16, 14) +/* Frame length Shift*/ +#define RX_DESC_RDES0_FRAME_LEN_SHIFT 16 +/* Error Summary */ +#define RX_DESC_RDES0_ERR_SUMM_MASK BIT(15) +/* Descriptor Error */ +#define RX_DESC_RDES0_DESC_ERR_MASK BIT(14) +/* Source Address Filter Fail */ +#define RX_DESC_RDES0_SRC_ADDR_FILT_FAIL_MASK BIT(13) +/* Length Error */ +#define RX_DESC_RDES0_LEN_ERR_MASK BIT(12) +/* Overflow Error */ +#define RX_DESC_RDES0_OVRFLW_ERR_MASK BIT(11) +/* VLAN Tag */ +#define RX_DESC_RDES0_VLAN_TAG_MASK BIT(10) +/* First Descriptor */ +#define RX_DESC_RDES0_FIRST_DESC_MASK BIT(9) +/* Last Descriptor */ +#define RX_DESC_RDES0_LAST_DESC_MASK BIT(8) +/* IPC Checksum Error/Giant Frame */ +#define RX_DESC_RDES0_IPC_CHKSM_ERR_GNT_FRM_MASK BIT(7) +/* Late Collision */ +#define RX_DESC_RDES0_LT_COLL_MASK BIT(6) +/* Frame Type */ +#define RX_DESC_RDES0_FRM_TYPE_MASK BIT(5) +/* Receive Watchdog Timeout */ +#define RX_DESC_RDES0_REC_WTCHDG_TMT_MASK BIT(4) +/* Receive Error */ +#define RX_DESC_RDES0_RCV_ERR_MASK BIT(3) +/* Dribble Bit Error */ +#define RX_DESC_RDES0_DRBL_BIT_ERR_MASK BIT(2) +/* Cyclcic Redundancy Check Error */ +#define RX_DESC_RDES0_CRC_ERR_MASK BIT(1) +/* Rx MAC Address/Payload Checksum Error */ +#define RC_DESC_RDES0_RCE_MASK BIT(0) + +/* Disable Interrupt on Completion */ +#define RX_DESC_RDES1_DIS_INTR_COMP_MASK BIT(31) +/* Receive end of ring */ +#define RX_DESC_RDES1_RC_END_RING_MASK BIT(25) +/* Second Address Chained */ +#define RX_DESC_RDES1_SEC_ADDR_CHND_MASK BIT(24) +/* Receive Buffer 2 Size */ +#define RX_DESC_RDES1_BFFR2_SZ_SHIFT 11 +#define RX_DESC_RDES1_BFFR2_SZ_MASK(word) extract32(word, \ + RX_DESC_RDES1_BFFR2_SZ_SHIFT, 11) +/* Receive Buffer 1 Size */ +#define RX_DESC_RDES1_BFFR1_SZ_MASK(word) extract32(word, 0, 11) + + +struct NPCMGMACTxDesc { + uint32_t tdes0; + uint32_t tdes1; + uint32_t tdes2; + uint32_t tdes3; +}; + +/* NPCMGMACTxDesc.flags values */ +/* TDES2 and TDES3 are buffer addresses */ +/* Owner: 0 = software, 1 = gmac */ +#define TX_DESC_TDES0_OWN BIT(31) +/* Tx Time Stamp Status */ +#define TX_DESC_TDES0_TTSS_MASK BIT(17) +/* IP Header Error */ +#define TX_DESC_TDES0_IP_HEAD_ERR_MASK BIT(16) +/* Error Summary */ +#define TX_DESC_TDES0_ERR_SUMM_MASK BIT(15) +/* Jabber Timeout */ +#define TX_DESC_TDES0_JBBR_TMT_MASK BIT(14) +/* Frame Flushed */ +#define TX_DESC_TDES0_FRM_FLSHD_MASK BIT(13) +/* Payload Checksum Error */ +#define TX_DESC_TDES0_PYLD_CHKSM_ERR_MASK BIT(12) +/* Loss of Carrier */ +#define TX_DESC_TDES0_LSS_CARR_MASK BIT(11) +/* No Carrier */ +#define TX_DESC_TDES0_NO_CARR_MASK BIT(10) +/* Late Collision */ +#define TX_DESC_TDES0_LATE_COLL_MASK BIT(9) +/* Excessive Collision */ +#define TX_DESC_TDES0_EXCS_COLL_MASK BIT(8) +/* VLAN Frame */ +#define TX_DESC_TDES0_VLAN_FRM_MASK BIT(7) +/* Collision Count */ +#define TX_DESC_TDES0_COLL_CNT_MASK(word) extract32(word, 3, 4) +/* Excessive Deferral */ +#define 
TX_DESC_TDES0_EXCS_DEF_MASK BIT(2) +/* Underflow Error */ +#define TX_DESC_TDES0_UNDRFLW_ERR_MASK BIT(1) +/* Deferred Bit */ +#define TX_DESC_TDES0_DFRD_BIT_MASK BIT(0) + +/* Interrupt of Completion */ +#define TX_DESC_TDES1_INTERR_COMP_MASK BIT(31) +/* Last Segment */ +#define TX_DESC_TDES1_LAST_SEG_MASK BIT(30) +/* First Segment */ +#define TX_DESC_TDES1_FIRST_SEG_MASK BIT(29) +/* Checksum Insertion Control */ +#define TX_DESC_TDES1_CHKSM_INS_CTRL_MASK(word) extract32(word, 27, 2) +/* Disable Cyclic Redundancy Check */ +#define TX_DESC_TDES1_DIS_CDC_MASK BIT(26) +/* Transmit End of Ring */ +#define TX_DESC_TDES1_TX_END_RING_MASK BIT(25) +/* Secondary Address Chained */ +#define TX_DESC_TDES1_SEC_ADDR_CHND_MASK BIT(24) +/* Transmit Buffer 2 Size */ +#define TX_DESC_TDES1_BFFR2_SZ_MASK(word) extract32(word, 11, 11) +/* Transmit Buffer 1 Size */ +#define TX_DESC_TDES1_BFFR1_SZ_MASK(word) extract32(word, 0, 11) + +typedef struct NPCMGMACState { + SysBusDevice parent; + + MemoryRegion iomem; + qemu_irq irq; + + NICState *nic; + NICConf conf; + + uint32_t regs[NPCM_GMAC_NR_REGS]; + uint16_t phy_regs[NPCM_GMAC_MAX_PHYS][NPCM_GMAC_MAX_PHY_REGS]; +} NPCMGMACState; + +#define TYPE_NPCM_GMAC "npcm-gmac" +OBJECT_DECLARE_SIMPLE_TYPE(NPCMGMACState, NPCM_GMAC) + +/* Mask for RO bits in Status */ +#define NPCM_DMA_STATUS_RO_MASK(word) (word & 0xfffe0000) +/* Mask for RO bits in Status */ +#define NPCM_DMA_STATUS_W1C_MASK(word) (word & 0x1e7ff) + +/* Transmit Process State */ +#define NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT 20 +/* Transmit States */ +#define NPCM_DMA_STATUS_TX_STOPPED_STATE \ + (0b000) +#define NPCM_DMA_STATUS_TX_RUNNING_FETCHING_STATE \ + (0b001) +#define NPCM_DMA_STATUS_TX_RUNNING_WAITING_STATE \ + (0b010) +#define NPCM_DMA_STATUS_TX_RUNNING_READ_STATE \ + (0b011) +#define NPCM_DMA_STATUS_TX_SUSPENDED_STATE \ + (0b110) +#define NPCM_DMA_STATUS_TX_RUNNING_CLOSING_STATE \ + (0b111) +/* Transmit Process State */ +#define NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT 17 +/* Receive States */ +#define NPCM_DMA_STATUS_RX_STOPPED_STATE \ + (0b000) +#define NPCM_DMA_STATUS_RX_RUNNING_FETCHING_STATE \ + (0b001) +#define NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE \ + (0b011) +#define NPCM_DMA_STATUS_RX_SUSPENDED_STATE \ + (0b100) +#define NPCM_DMA_STATUS_RX_RUNNING_CLOSING_STATE \ + (0b101) +#define NPCM_DMA_STATUS_RX_RUNNING_TRANSFERRING_STATE \ + (0b111) + + +/* Early Receive Interrupt */ +#define NPCM_DMA_STATUS_ERI BIT(14) +/* Fatal Bus Error Interrupt */ +#define NPCM_DMA_STATUS_FBI BIT(13) +/* Early transmit Interrupt */ +#define NPCM_DMA_STATUS_ETI BIT(10) +/* Receive Watchdog Timeout */ +#define NPCM_DMA_STATUS_RWT BIT(9) +/* Receive Process Stopped */ +#define NPCM_DMA_STATUS_RPS BIT(8) +/* Receive Buffer Unavailable */ +#define NPCM_DMA_STATUS_RU BIT(7) +/* Receive Interrupt */ +#define NPCM_DMA_STATUS_RI BIT(6) +/* Transmit Underflow */ +#define NPCM_DMA_STATUS_UNF BIT(5) +/* Receive Overflow */ +#define NPCM_DMA_STATUS_OVF BIT(4) +/* Transmit Jabber Timeout */ +#define NPCM_DMA_STATUS_TJT BIT(3) +/* Transmit Buffer Unavailable */ +#define NPCM_DMA_STATUS_TU BIT(2) +/* Transmit Process Stopped */ +#define NPCM_DMA_STATUS_TPS BIT(1) +/* Transmit Interrupt */ +#define NPCM_DMA_STATUS_TI BIT(0) + +/* Normal Interrupt Summary */ +#define NPCM_DMA_STATUS_NIS BIT(16) +/* Interrupts enabled by NIE */ +#define NPCM_DMA_STATUS_NIS_BITS (NPCM_DMA_STATUS_TI | \ + NPCM_DMA_STATUS_TU | \ + NPCM_DMA_STATUS_RI | \ + NPCM_DMA_STATUS_ERI) +/* Abnormal Interrupt Summary */ +#define NPCM_DMA_STATUS_AIS BIT(15) 
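[Illustration, not part of the patch: NPCM_DMA_STATUS_NIS and NPCM_DMA_STATUS_AIS are summary bits that mirror whichever of their constituent events are both pending and enabled, grouped by NPCM_DMA_STATUS_NIS_BITS above and NPCM_DMA_STATUS_AIS_BITS defined immediately below. A sketch of one plausible way a device model could recompute them; the helper name is hypothetical and this is not claimed to match what hw/net/npcm_gmac.c does:]

#include "qemu/osdep.h"
#include "hw/net/npcm_gmac.h"

/* Fold the per-event bits into the NIS/AIS summary bits of the DMA status. */
static uint32_t npcm_dma_status_with_summaries(uint32_t status, uint32_t intr_en)
{
    uint32_t pending = status & intr_en;

    if (pending & NPCM_DMA_STATUS_NIS_BITS) {
        status |= NPCM_DMA_STATUS_NIS;
    }
    if (pending & NPCM_DMA_STATUS_AIS_BITS) {
        status |= NPCM_DMA_STATUS_AIS;
    }
    return status;
}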
+/* Interrupts enabled by AIE */ +#define NPCM_DMA_STATUS_AIS_BITS (NPCM_DMA_STATUS_TPS | \ + NPCM_DMA_STATUS_TJT | \ + NPCM_DMA_STATUS_OVF | \ + NPCM_DMA_STATUS_UNF | \ + NPCM_DMA_STATUS_RU | \ + NPCM_DMA_STATUS_RPS | \ + NPCM_DMA_STATUS_RWT | \ + NPCM_DMA_STATUS_ETI | \ + NPCM_DMA_STATUS_FBI) + +/* Early Receive Interrupt Enable */ +#define NPCM_DMA_INTR_ENAB_ERE BIT(14) +/* Fatal Bus Error Interrupt Enable */ +#define NPCM_DMA_INTR_ENAB_FBE BIT(13) +/* Early transmit Interrupt Enable */ +#define NPCM_DMA_INTR_ENAB_ETE BIT(10) +/* Receive Watchdog Timout Enable */ +#define NPCM_DMA_INTR_ENAB_RWE BIT(9) +/* Receive Process Stopped Enable */ +#define NPCM_DMA_INTR_ENAB_RSE BIT(8) +/* Receive Buffer Unavailable Enable */ +#define NPCM_DMA_INTR_ENAB_RUE BIT(7) +/* Receive Interrupt Enable */ +#define NPCM_DMA_INTR_ENAB_RIE BIT(6) +/* Transmit Underflow Enable */ +#define NPCM_DMA_INTR_ENAB_UNE BIT(5) +/* Receive Overflow Enable */ +#define NPCM_DMA_INTR_ENAB_OVE BIT(4) +/* Transmit Jabber Timeout Enable */ +#define NPCM_DMA_INTR_ENAB_TJE BIT(3) +/* Transmit Buffer Unavailable Enable */ +#define NPCM_DMA_INTR_ENAB_TUE BIT(2) +/* Transmit Process Stopped Enable */ +#define NPCM_DMA_INTR_ENAB_TSE BIT(1) +/* Transmit Interrupt Enable */ +#define NPCM_DMA_INTR_ENAB_TIE BIT(0) + +/* Normal Interrupt Summary Enable */ +#define NPCM_DMA_INTR_ENAB_NIE BIT(16) +/* Interrupts enabled by NIE Enable */ +#define NPCM_DMA_INTR_ENAB_NIE_BITS (NPCM_DMA_INTR_ENAB_TIE | \ + NPCM_DMA_INTR_ENAB_TUE | \ + NPCM_DMA_INTR_ENAB_RIE | \ + NPCM_DMA_INTR_ENAB_ERE) +/* Abnormal Interrupt Summary Enable */ +#define NPCM_DMA_INTR_ENAB_AIE BIT(15) +/* Interrupts enabled by AIE Enable */ +#define NPCM_DMA_INTR_ENAB_AIE_BITS (NPCM_DMA_INTR_ENAB_TSE | \ + NPCM_DMA_INTR_ENAB_TJE | \ + NPCM_DMA_INTR_ENAB_OVE | \ + NPCM_DMA_INTR_ENAB_UNE | \ + NPCM_DMA_INTR_ENAB_RUE | \ + NPCM_DMA_INTR_ENAB_RSE | \ + NPCM_DMA_INTR_ENAB_RWE | \ + NPCM_DMA_INTR_ENAB_ETE | \ + NPCM_DMA_INTR_ENAB_FBE) + +/* Flushing Disabled */ +#define NPCM_DMA_CONTROL_FLUSH_MASK BIT(24) +/* Start/stop Transmit */ +#define NPCM_DMA_CONTROL_START_STOP_TX BIT(13) +/* Start/stop Receive */ +#define NPCM_DMA_CONTROL_START_STOP_RX BIT(1) +/* Next receive descriptor start address */ +#define NPCM_DMA_HOST_RX_DESC_MASK(word) ((uint32_t) (word) & ~3u) +/* Next transmit descriptor start address */ +#define NPCM_DMA_HOST_TX_DESC_MASK(word) ((uint32_t) (word) & ~3u) + +/* Receive enable */ +#define NPCM_GMAC_MAC_CONFIG_RX_EN BIT(2) +/* Transmit enable */ +#define NPCM_GMAC_MAC_CONFIG_TX_EN BIT(3) + +/* Frame Receive All */ +#define NPCM_GMAC_FRAME_FILTER_REC_ALL_MASK BIT(31) +/* Frame HPF Filter*/ +#define NPCM_GMAC_FRAME_FILTER_HPF_MASK BIT(10) +/* Frame SAF Filter*/ +#define NPCM_GMAC_FRAME_FILTER_SAF_MASK BIT(9) +/* Frame SAIF Filter*/ +#define NPCM_GMAC_FRAME_FILTER_SAIF_MASK BIT(8) +/* Frame PCF Filter*/ +#define NPCM_GMAC_FRAME_FILTER_PCF_MASK BIT(word) extract32((word), 6, 2) +/* Frame DBF Filter*/ +#define NPCM_GMAC_FRAME_FILTER_DBF_MASK BIT(5) +/* Frame PM Filter*/ +#define NPCM_GMAC_FRAME_FILTER_PM_MASK BIT(4) +/* Frame DAIF Filter*/ +#define NPCM_GMAC_FRAME_FILTER_DAIF_MASK BIT(3) +/* Frame HMC Filter*/ +#define NPCM_GMAC_FRAME_FILTER_HMC_MASK BIT(2) +/* Frame HUC Filter*/ +#define NPCM_GMAC_FRAME_FILTER_HUC_MASK BIT(1) +/* Frame PR Filter*/ +#define NPCM_GMAC_FRAME_FILTER_PR_MASK BIT(0) + +#endif /* NPCM_GMAC_H */ diff --git a/include/hw/net/smc91c111.h b/include/hw/net/smc91c111.h index df5b11dcef2..dba32a233fb 100644 --- a/include/hw/net/smc91c111.h +++ 
b/include/hw/net/smc91c111.h @@ -13,6 +13,6 @@ #include "net/net.h" -void smc91c111_init(NICInfo *, uint32_t, qemu_irq); +void smc91c111_init(uint32_t, qemu_irq); #endif diff --git a/include/hw/nubus/nubus-virtio-mmio.h b/include/hw/nubus/nubus-virtio-mmio.h new file mode 100644 index 00000000000..de497b7f763 --- /dev/null +++ b/include/hw/nubus/nubus-virtio-mmio.h @@ -0,0 +1,36 @@ +/* + * QEMU Macintosh Nubus Virtio MMIO card + * + * Copyright (c) 2023 Mark Cave-Ayland + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_NUBUS_VIRTIO_MMIO_H +#define HW_NUBUS_VIRTIO_MMIO_H + +#include "hw/nubus/nubus.h" +#include "qom/object.h" +#include "hw/intc/goldfish_pic.h" +#include "hw/virtio/virtio-mmio.h" + +#define TYPE_NUBUS_VIRTIO_MMIO "nubus-virtio-mmio" +OBJECT_DECLARE_TYPE(NubusVirtioMMIO, NubusVirtioMMIODeviceClass, + NUBUS_VIRTIO_MMIO) + +struct NubusVirtioMMIODeviceClass { + DeviceClass parent_class; + + DeviceRealize parent_realize; +}; + +#define NUBUS_VIRTIO_MMIO_NUM_DEVICES 32 + +struct NubusVirtioMMIO { + NubusDevice parent_obj; + + GoldfishPICState pic; + VirtIOMMIOProxy virtio_mmio[NUBUS_VIRTIO_MMIO_NUM_DEVICES]; +}; + +#endif diff --git a/include/hw/nubus/nubus.h b/include/hw/nubus/nubus.h index b3b4d2eadb4..fee79b71d16 100644 --- a/include/hw/nubus/nubus.h +++ b/include/hw/nubus/nubus.h @@ -51,7 +51,7 @@ struct NubusBus { qemu_irq irqs[NUBUS_IRQS]; }; -#define NUBUS_DECL_ROM_MAX_SIZE (128 * KiB) +#define NUBUS_DECL_ROM_MAX_SIZE (1 * MiB) struct NubusDevice { DeviceState qdev; diff --git a/include/hw/nvram/fw_cfg_acpi.h b/include/hw/nvram/fw_cfg_acpi.h new file mode 100644 index 00000000000..b39eb0490ff --- /dev/null +++ b/include/hw/nvram/fw_cfg_acpi.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ACPI support for fw_cfg + * + */ + +#ifndef FW_CFG_ACPI_H +#define FW_CFG_ACPI_H + +#include "exec/hwaddr.h" + +void fw_cfg_acpi_dsdt_add(Aml *scope, const MemMapEntry *fw_cfg_memmap); + +#endif diff --git a/include/hw/nvram/xlnx-efuse.h b/include/hw/nvram/xlnx-efuse.h index 58414e468b5..cff7924106a 100644 --- a/include/hw/nvram/xlnx-efuse.h +++ b/include/hw/nvram/xlnx-efuse.h @@ -30,7 +30,7 @@ #include "sysemu/block-backend.h" #include "hw/qdev-core.h" -#define TYPE_XLNX_EFUSE "xlnx,efuse" +#define TYPE_XLNX_EFUSE "xlnx-efuse" OBJECT_DECLARE_SIMPLE_TYPE(XlnxEFuse, XLNX_EFUSE); struct XlnxEFuse { diff --git a/include/hw/nvram/xlnx-versal-efuse.h b/include/hw/nvram/xlnx-versal-efuse.h index a873dc5cb01..86e2261b9a3 100644 --- a/include/hw/nvram/xlnx-versal-efuse.h +++ b/include/hw/nvram/xlnx-versal-efuse.h @@ -29,8 +29,8 @@ #define XLNX_VERSAL_EFUSE_CTRL_R_MAX ((0x100 / 4) + 1) -#define TYPE_XLNX_VERSAL_EFUSE_CTRL "xlnx,versal-efuse" -#define TYPE_XLNX_VERSAL_EFUSE_CACHE "xlnx,pmc-efuse-cache" +#define TYPE_XLNX_VERSAL_EFUSE_CTRL "xlnx-versal-efuse" +#define TYPE_XLNX_VERSAL_EFUSE_CACHE "xlnx-pmc-efuse-cache" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCtrl, XLNX_VERSAL_EFUSE_CTRL); OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCache, XLNX_VERSAL_EFUSE_CACHE); diff --git a/include/hw/nvram/xlnx-zynqmp-efuse.h b/include/hw/nvram/xlnx-zynqmp-efuse.h index 6b051ec4f15..f5beacc2e6a 100644 --- a/include/hw/nvram/xlnx-zynqmp-efuse.h +++ b/include/hw/nvram/xlnx-zynqmp-efuse.h @@ -29,7 +29,7 @@ #define XLNX_ZYNQMP_EFUSE_R_MAX ((0x10fc / 4) + 1) -#define TYPE_XLNX_ZYNQMP_EFUSE "xlnx,zynqmp-efuse" +#define TYPE_XLNX_ZYNQMP_EFUSE "xlnx-zynqmp-efuse" OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPEFuse, XLNX_ZYNQMP_EFUSE); struct XlnxZynqMPEFuse { diff --git 
a/include/hw/pci-host/astro.h b/include/hw/pci-host/astro.h index f63fd220f30..e2966917cd9 100644 --- a/include/hw/pci-host/astro.h +++ b/include/hw/pci-host/astro.h @@ -27,6 +27,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(ElroyState, ELROY_PCI_HOST_BRIDGE) #define IOS_DIST_BASE_ADDR 0xfffee00000ULL #define IOS_DIST_BASE_SIZE 0x10000ULL +#define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */ + struct AstroState; struct ElroyState { diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h index b0240bd7681..dce883573ba 100644 --- a/include/hw/pci-host/gpex.h +++ b/include/hw/pci-host/gpex.h @@ -40,6 +40,15 @@ struct GPEXRootState { /*< public >*/ }; +struct GPEXConfig { + MemMapEntry ecam; + MemMapEntry mmio32; + MemMapEntry mmio64; + MemMapEntry pio; + int irq; + PCIBus *bus; +}; + struct GPEXHost { /*< private >*/ PCIExpressHost parent_obj; @@ -55,19 +64,22 @@ struct GPEXHost { int irq_num[GPEX_NUM_IRQS]; bool allow_unmapped_accesses; -}; -struct GPEXConfig { - MemMapEntry ecam; - MemMapEntry mmio32; - MemMapEntry mmio64; - MemMapEntry pio; - int irq; - PCIBus *bus; + struct GPEXConfig gpex_cfg; }; int gpex_set_irq_num(GPEXHost *s, int index, int gsi); void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg); +void acpi_dsdt_add_gpex_host(Aml *scope, uint32_t irq); + +#define PCI_HOST_PIO_BASE "x-pio-base" +#define PCI_HOST_PIO_SIZE "x-pio-size" +#define PCI_HOST_ECAM_BASE "x-ecam-base" +#define PCI_HOST_ECAM_SIZE "x-ecam-size" +#define PCI_HOST_BELOW_4G_MMIO_BASE "x-below-4g-mmio-base" +#define PCI_HOST_BELOW_4G_MMIO_SIZE "x-below-4g-mmio-size" +#define PCI_HOST_ABOVE_4G_MMIO_BASE "x-above-4g-mmio-base" +#define PCI_HOST_ABOVE_4G_MMIO_SIZE "x-above-4g-mmio-size" #endif /* HW_GPEX_H */ diff --git a/include/hw/pci-host/ppc4xx.h b/include/hw/pci-host/ppc4xx.h new file mode 100644 index 00000000000..32396417fc7 --- /dev/null +++ b/include/hw/pci-host/ppc4xx.h @@ -0,0 +1,17 @@ +/* + * QEMU PowerPC 4xx PCI-host definitions + * + * Copyright (c) 2018-2023 BALATON Zoltan + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_PCIHOST_PPC4XX_H +#define HW_PCIHOST_PPC4XX_H + +#define TYPE_PPC4xx_HOST_BRIDGE "ppc4xx-host-bridge" +#define TYPE_PPC4xx_PCI_HOST "ppc4xx-pci-host" +#define TYPE_PPC440_PCIX_HOST "ppc440-pcix-host" +#define TYPE_PPC460EX_PCIE_HOST "ppc460ex-pcie-host" + +#endif diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index fa6313aabc4..eaa3fc99d88 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -314,10 +314,9 @@ void pci_device_set_intx_routing_notifier(PCIDevice *dev, PCIINTxRoutingNotifier notifier); void pci_device_reset(PCIDevice *dev); -PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus, - const char *default_model, - const char *default_devaddr); - +void pci_init_nic_devices(PCIBus *bus, const char *default_model); +bool pci_init_nic_in_slot(PCIBus *rootbus, const char *default_model, + const char *alias, const char *devaddr); PCIDevice *pci_vga_init(PCIBus *bus); static inline PCIBus *pci_get_bus(const PCIDevice *dev) diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h index 4972106c429..9d3b6868dce 100644 --- a/include/hw/pci/pcie_regs.h +++ b/include/hw/pci/pcie_regs.h @@ -39,6 +39,8 @@ typedef enum PCIExpLinkSpeed { QEMU_PCI_EXP_LNK_5GT, QEMU_PCI_EXP_LNK_8GT, QEMU_PCI_EXP_LNK_16GT, + QEMU_PCI_EXP_LNK_32GT, + QEMU_PCI_EXP_LNK_64GT, } PCIExpLinkSpeed; #define QEMU_PCI_EXP_LNKCAP_MLS(speed) (speed) diff --git a/include/hw/pci/pcie_sriov.h b/include/hw/pci/pcie_sriov.h index 095fb0c9edf..b77eb7bf58a 
100644 --- a/include/hw/pci/pcie_sriov.h +++ b/include/hw/pci/pcie_sriov.h @@ -58,8 +58,8 @@ void pcie_sriov_pf_add_sup_pgsize(PCIDevice *dev, uint16_t opt_sup_pgsize); void pcie_sriov_config_write(PCIDevice *dev, uint32_t address, uint32_t val, int len); -/* Reset SR/IOV VF Enable bit to unregister all VFs */ -void pcie_sriov_pf_disable_vfs(PCIDevice *dev); +/* Reset SR/IOV */ +void pcie_sriov_pf_reset(PCIDevice *dev); /* Get logical VF number of a VF - only valid for VFs */ uint16_t pcie_sriov_vf_number(PCIDevice *dev); diff --git a/include/hw/pci/shpc.h b/include/hw/pci/shpc.h index 89c7a3b7fa9..a0789df1538 100644 --- a/include/hw/pci/shpc.h +++ b/include/hw/pci/shpc.h @@ -52,7 +52,7 @@ void shpc_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, void shpc_device_unplug_request_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); -extern VMStateInfo shpc_vmstate_info; +extern const VMStateInfo shpc_vmstate_info; #define SHPC_VMSTATE(_field, _type, _test) \ VMSTATE_BUFFER_UNSAFE_INFO_TEST(_field, _type, _test, 0, \ shpc_vmstate_info, 0) diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h index 7e5fef7c433..476b1361464 100644 --- a/include/hw/ppc/pnv.h +++ b/include/hw/ppc/pnv.h @@ -28,6 +28,7 @@ #define TYPE_PNV_CHIP "pnv-chip" +typedef struct PnvCore PnvCore; typedef struct PnvChip PnvChip; typedef struct Pnv8Chip Pnv8Chip; typedef struct Pnv9Chip Pnv9Chip; @@ -56,6 +57,7 @@ DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER9, DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER10, TYPE_PNV_CHIP_POWER10) +PnvCore *pnv_chip_find_core(PnvChip *chip, uint32_t core_id); PowerPCCPU *pnv_chip_find_cpu(PnvChip *chip, uint32_t pir); typedef struct PnvPHB PnvPHB; @@ -76,6 +78,7 @@ struct PnvMachineClass { int compat_size; void (*dt_power_mgt)(PnvMachineState *pnv, void *fdt); + void (*i2c_init)(PnvMachineState *pnv); }; struct PnvMachineState { diff --git a/include/hw/ppc/pnv_chip.h b/include/hw/ppc/pnv_chip.h index 0ab5c423088..8589f3291ed 100644 --- a/include/hw/ppc/pnv_chip.h +++ b/include/hw/ppc/pnv_chip.h @@ -2,8 +2,10 @@ #define PPC_PNV_CHIP_H #include "hw/pci-host/pnv_phb4.h" +#include "hw/ppc/pnv_chiptod.h" #include "hw/ppc/pnv_core.h" #include "hw/ppc/pnv_homer.h" +#include "hw/ppc/pnv_n1_chiplet.h" #include "hw/ppc/pnv_lpc.h" #include "hw/ppc/pnv_occ.h" #include "hw/ppc/pnv_psi.h" @@ -78,6 +80,7 @@ struct Pnv9Chip { PnvXive xive; Pnv9Psi psi; PnvLpcController lpc; + PnvChipTOD chiptod; PnvOCC occ; PnvSBE sbe; PnvHomer homer; @@ -110,9 +113,11 @@ struct Pnv10Chip { PnvXive2 xive; Pnv9Psi psi; PnvLpcController lpc; + PnvChipTOD chiptod; PnvOCC occ; PnvSBE sbe; PnvHomer homer; + PnvN1Chiplet n1_chiplet; uint32_t nr_quads; PnvQuad *quads; @@ -142,7 +147,7 @@ struct PnvChipClass { DeviceRealize parent_realize; - uint32_t (*core_pir)(PnvChip *chip, uint32_t core_id); + uint32_t (*chip_pir)(PnvChip *chip, uint32_t core_id, uint32_t thread_id); void (*intc_create)(PnvChip *chip, PowerPCCPU *cpu, Error **errp); void (*intc_reset)(PnvChip *chip, PowerPCCPU *cpu); void (*intc_destroy)(PnvChip *chip, PowerPCCPU *cpu); diff --git a/include/hw/ppc/pnv_chiptod.h b/include/hw/ppc/pnv_chiptod.h new file mode 100644 index 00000000000..fde569bcbfa --- /dev/null +++ b/include/hw/ppc/pnv_chiptod.h @@ -0,0 +1,53 @@ +/* + * QEMU PowerPC PowerNV Emulation of some CHIPTOD behaviour + * + * Copyright (c) 2022-2023, IBM Corporation. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef PPC_PNV_CHIPTOD_H +#define PPC_PNV_CHIPTOD_H + +#include "qom/object.h" + +#define TYPE_PNV_CHIPTOD "pnv-chiptod" +OBJECT_DECLARE_TYPE(PnvChipTOD, PnvChipTODClass, PNV_CHIPTOD) +#define TYPE_PNV9_CHIPTOD TYPE_PNV_CHIPTOD "-POWER9" +DECLARE_INSTANCE_CHECKER(PnvChipTOD, PNV9_CHIPTOD, TYPE_PNV9_CHIPTOD) +#define TYPE_PNV10_CHIPTOD TYPE_PNV_CHIPTOD "-POWER10" +DECLARE_INSTANCE_CHECKER(PnvChipTOD, PNV10_CHIPTOD, TYPE_PNV10_CHIPTOD) + +enum tod_state { + tod_error = 0, + tod_not_set = 7, + tod_running = 2, + tod_stopped = 1, +}; + +typedef struct PnvCore PnvCore; + +struct PnvChipTOD { + DeviceState xd; + + PnvChip *chip; + MemoryRegion xscom_regs; + + bool primary; + bool secondary; + enum tod_state tod_state; + uint64_t tod_error; + uint64_t pss_mss_ctrl_reg; + PnvCore *slave_pc_target; +}; + +struct PnvChipTODClass { + DeviceClass parent_class; + + void (*broadcast_ttype)(PnvChipTOD *sender, uint32_t trigger); + PnvCore *(*tx_ttype_target)(PnvChipTOD *chiptod, uint64_t val); + + int xscom_size; +}; + +#endif /* PPC_PNV_CHIPTOD_H */ diff --git a/include/hw/ppc/pnv_core.h b/include/hw/ppc/pnv_core.h index 4db21229a68..c6d62fd1459 100644 --- a/include/hw/ppc/pnv_core.h +++ b/include/hw/ppc/pnv_core.h @@ -36,6 +36,7 @@ struct PnvCore { /*< public >*/ PowerPCCPU **threads; uint32_t pir; + uint32_t hwid; uint64_t hrmor; PnvChip *chip; diff --git a/include/hw/ppc/pnv_n1_chiplet.h b/include/hw/ppc/pnv_n1_chiplet.h new file mode 100644 index 00000000000..a7ad0396681 --- /dev/null +++ b/include/hw/ppc/pnv_n1_chiplet.h @@ -0,0 +1,32 @@ +/* + * QEMU PowerPC N1 chiplet model + * + * Copyright (c) 2023, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef PPC_PNV_N1_CHIPLET_H +#define PPC_PNV_N1_CHIPLET_H + +#include "hw/ppc/pnv_nest_pervasive.h" + +#define TYPE_PNV_N1_CHIPLET "pnv-N1-chiplet" +#define PNV_N1_CHIPLET(obj) OBJECT_CHECK(PnvN1Chiplet, (obj), TYPE_PNV_N1_CHIPLET) + +typedef struct PnvPbScom { + uint64_t mode; + uint64_t hp_mode2_curr; +} PnvPbScom; + +typedef struct PnvN1Chiplet { + DeviceState parent; + MemoryRegion xscom_pb_eq_mr; + MemoryRegion xscom_pb_es_mr; + PnvNestChipletPervasive nest_pervasive; /* common pervasive chiplet unit */ +#define PNV_PB_SCOM_EQ_SIZE 8 + PnvPbScom eq[PNV_PB_SCOM_EQ_SIZE]; +#define PNV_PB_SCOM_ES_SIZE 4 + PnvPbScom es[PNV_PB_SCOM_ES_SIZE]; +} PnvN1Chiplet; +#endif /*PPC_PNV_N1_CHIPLET_H */ diff --git a/include/hw/ppc/pnv_nest_pervasive.h b/include/hw/ppc/pnv_nest_pervasive.h new file mode 100644 index 00000000000..73cacf38238 --- /dev/null +++ b/include/hw/ppc/pnv_nest_pervasive.h @@ -0,0 +1,32 @@ +/* + * QEMU PowerPC nest pervasive common chiplet model + * + * Copyright (c) 2023, IBM Corporation. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef PPC_PNV_NEST_CHIPLET_PERVASIVE_H +#define PPC_PNV_NEST_CHIPLET_PERVASIVE_H + +#define TYPE_PNV_NEST_CHIPLET_PERVASIVE "pnv-nest-chiplet-pervasive" +#define PNV_NEST_CHIPLET_PERVASIVE(obj) OBJECT_CHECK(PnvNestChipletPervasive, (obj), TYPE_PNV_NEST_CHIPLET_PERVASIVE) + +typedef struct PnvPervasiveCtrlRegs { +#define PNV_CPLT_CTRL_SIZE 6 + uint64_t cplt_ctrl[PNV_CPLT_CTRL_SIZE]; + uint64_t cplt_cfg0; + uint64_t cplt_cfg1; + uint64_t cplt_stat0; + uint64_t cplt_mask0; + uint64_t ctrl_protect_mode; + uint64_t ctrl_atomic_lock; +} PnvPervasiveCtrlRegs; + +typedef struct PnvNestChipletPervasive { + DeviceState parent; + MemoryRegion xscom_ctrl_regs_mr; + PnvPervasiveCtrlRegs control_regs; +} PnvNestChipletPervasive; + +#endif /*PPC_PNV_NEST_CHIPLET_PERVASIVE_H */ diff --git a/include/hw/ppc/pnv_xscom.h b/include/hw/ppc/pnv_xscom.h index f5becbab416..6209e184924 100644 --- a/include/hw/ppc/pnv_xscom.h +++ b/include/hw/ppc/pnv_xscom.h @@ -64,6 +64,9 @@ struct PnvXScomInterfaceClass { #define PNV_XSCOM_PSIHB_BASE 0x2010900 #define PNV_XSCOM_PSIHB_SIZE 0x20 +#define PNV_XSCOM_CHIPTOD_BASE 0x0040000 +#define PNV_XSCOM_CHIPTOD_SIZE 0x31 + #define PNV_XSCOM_OCC_BASE 0x0066000 #define PNV_XSCOM_OCC_SIZE 0x6000 @@ -93,6 +96,9 @@ struct PnvXScomInterfaceClass { #define PNV9_XSCOM_I2CM_BASE 0xa0000 #define PNV9_XSCOM_I2CM_SIZE 0x1000 +#define PNV9_XSCOM_CHIPTOD_BASE PNV_XSCOM_CHIPTOD_BASE +#define PNV9_XSCOM_CHIPTOD_SIZE PNV_XSCOM_CHIPTOD_SIZE + #define PNV9_XSCOM_OCC_BASE PNV_XSCOM_OCC_BASE #define PNV9_XSCOM_OCC_SIZE 0x8000 @@ -155,6 +161,9 @@ struct PnvXScomInterfaceClass { #define PNV10_XSCOM_I2CM_BASE PNV9_XSCOM_I2CM_BASE #define PNV10_XSCOM_I2CM_SIZE PNV9_XSCOM_I2CM_SIZE +#define PNV10_XSCOM_CHIPTOD_BASE PNV9_XSCOM_CHIPTOD_BASE +#define PNV10_XSCOM_CHIPTOD_SIZE PNV9_XSCOM_CHIPTOD_SIZE + #define PNV10_XSCOM_OCC_BASE PNV9_XSCOM_OCC_BASE #define PNV10_XSCOM_OCC_SIZE PNV9_XSCOM_OCC_SIZE @@ -170,6 +179,15 @@ struct PnvXScomInterfaceClass { #define PNV10_XSCOM_XIVE2_BASE 0x2010800 #define PNV10_XSCOM_XIVE2_SIZE 0x400 +#define PNV10_XSCOM_N1_CHIPLET_CTRL_REGS_BASE 0x3000000 +#define PNV10_XSCOM_CHIPLET_CTRL_REGS_SIZE 0x400 + +#define PNV10_XSCOM_N1_PB_SCOM_EQ_BASE 0x3011000 +#define PNV10_XSCOM_N1_PB_SCOM_EQ_SIZE 0x200 + +#define PNV10_XSCOM_N1_PB_SCOM_ES_BASE 0x3011300 +#define PNV10_XSCOM_N1_PB_SCOM_ES_SIZE 0x100 + #define PNV10_XSCOM_PEC_NEST_BASE 0x3011800 /* index goes downwards ... 
*/ #define PNV10_XSCOM_PEC_NEST_SIZE 0x100 diff --git a/include/hw/ppc/ppc4xx.h b/include/hw/ppc/ppc4xx.h index ea7740239b3..1bd9b8821b0 100644 --- a/include/hw/ppc/ppc4xx.h +++ b/include/hw/ppc/ppc4xx.h @@ -29,11 +29,6 @@ #include "exec/memory.h" #include "hw/sysbus.h" -#define TYPE_PPC4xx_HOST_BRIDGE "ppc4xx-host-bridge" -#define TYPE_PPC4xx_PCI_HOST "ppc4xx-pci-host" -#define TYPE_PPC440_PCIX_HOST "ppc440-pcix-host" -#define TYPE_PPC460EX_PCIE_HOST "ppc460ex-pcie-host" - /* * Generic DCR device */ diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index e91791a1a9d..4aaf23d28f8 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -12,6 +12,7 @@ #include "hw/ppc/spapr_xive.h" /* For SpaprXive */ #include "hw/ppc/xics.h" /* For ICSState */ #include "hw/ppc/spapr_tpm_proxy.h" +#include "hw/ppc/spapr_nested.h" /* For SpaprMachineStateNested */ struct SpaprVioBus; struct SpaprPhbState; @@ -80,8 +81,10 @@ typedef enum { #define SPAPR_CAP_RPT_INVALIDATE 0x0B /* Support for AIL modes */ #define SPAPR_CAP_AIL_MODE_3 0x0C +/* Nested PAPR */ +#define SPAPR_CAP_NESTED_PAPR 0x0D /* Num Caps */ -#define SPAPR_CAP_NUM (SPAPR_CAP_AIL_MODE_3 + 1) +#define SPAPR_CAP_NUM (SPAPR_CAP_NESTED_PAPR + 1) /* * Capability Values @@ -213,7 +216,7 @@ struct SpaprMachineState { uint32_t vsmt; /* Virtual SMT mode (KVM's "core stride") */ /* Nested HV support (TCG only) */ - uint64_t nested_ptcr; + SpaprMachineStateNested nested; Notifier epow_notifier; QTAILQ_HEAD(, SpaprEventLogEntry) pending_events; @@ -363,6 +366,9 @@ struct SpaprMachineState { #define H_NOOP -63 #define H_UNSUPPORTED -67 #define H_OVERLAP -68 +#define H_STATE -75 +#define H_IN_USE -77 +#define H_INVALID_ELEMENT_VALUE -81 #define H_UNSUPPORTED_FLAG -256 #define H_MULTI_THREADS_ACTIVE -9005 @@ -582,8 +588,16 @@ struct SpaprMachineState { #define H_RPT_INVALIDATE 0x448 #define H_SCM_FLUSH 0x44C #define H_WATCHDOG 0x45C +#define H_GUEST_GET_CAPABILITIES 0x460 +#define H_GUEST_SET_CAPABILITIES 0x464 +#define H_GUEST_CREATE 0x470 +#define H_GUEST_CREATE_VCPU 0x474 +#define H_GUEST_GET_STATE 0x478 +#define H_GUEST_SET_STATE 0x47C +#define H_GUEST_RUN_VCPU 0x480 +#define H_GUEST_DELETE 0x488 -#define MAX_HCALL_OPCODE H_WATCHDOG +#define MAX_HCALL_OPCODE H_GUEST_DELETE /* The hcalls above are standardized in PAPR and implemented by pHyp * as well. 
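The new H_GUEST_* opcodes pair with the spapr_register_nested_papr()/spapr_unregister_nested_papr() hooks declared a little further down. A minimal sketch of what such registration could look like, reusing the existing spapr_register_hypercall()/spapr_unregister_hypercall() helpers; h_guest_get_capabilities() here is only a placeholder handler, and the real handlers elsewhere in this series do considerably more:

static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    /* A real handler reports/validates the H_GUEST_CAPABILITIES_* bits. */
    return H_SUCCESS;
}

void spapr_register_nested_papr(void)
{
    spapr_register_hypercall(H_GUEST_GET_CAPABILITIES,
                             h_guest_get_capabilities);
    /* ... and likewise for H_GUEST_SET_CAPABILITIES, H_GUEST_CREATE,
     * H_GUEST_CREATE_VCPU, H_GUEST_GET_STATE, H_GUEST_SET_STATE,
     * H_GUEST_RUN_VCPU and H_GUEST_DELETE. */
}

void spapr_unregister_nested_papr(void)
{
    spapr_unregister_hypercall(H_GUEST_GET_CAPABILITIES);
    /* ... */
}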
@@ -631,13 +645,17 @@ typedef target_ulong (*spapr_hcall_fn)(PowerPCCPU *cpu, SpaprMachineState *sm, target_ulong *args); void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn); +void spapr_unregister_hypercall(target_ulong opcode); target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode, target_ulong *args); -target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu, SpaprMachineState *spapr, +target_ulong vhyp_mmu_resize_hpt_prepare(PowerPCCPU *cpu, + SpaprMachineState *spapr, target_ulong shift); -target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu, SpaprMachineState *spapr, - target_ulong flags, target_ulong shift); +target_ulong vhyp_mmu_resize_hpt_commit(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong flags, + target_ulong shift); bool is_ram_address(SpaprMachineState *spapr, hwaddr addr); void push_sregs_to_kvm_pr(SpaprMachineState *spapr); @@ -981,6 +999,7 @@ extern const VMStateDescription vmstate_spapr_cap_sbbc; extern const VMStateDescription vmstate_spapr_cap_ibs; extern const VMStateDescription vmstate_spapr_cap_hpt_maxpagesize; extern const VMStateDescription vmstate_spapr_cap_nested_kvm_hv; +extern const VMStateDescription vmstate_spapr_cap_nested_papr; extern const VMStateDescription vmstate_spapr_cap_large_decr; extern const VMStateDescription vmstate_spapr_cap_ccf_assist; extern const VMStateDescription vmstate_spapr_cap_fwnmi; @@ -1025,5 +1044,10 @@ void spapr_vof_client_dt_finalize(SpaprMachineState *spapr, void *fdt); /* H_WATCHDOG */ void spapr_watchdog_init(SpaprMachineState *spapr); +void spapr_register_nested_hv(void); +void spapr_unregister_nested_hv(void); +void spapr_nested_reset(SpaprMachineState *spapr); +void spapr_register_nested_papr(void); +void spapr_unregister_nested_papr(void); #endif /* HW_SPAPR_H */ diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h index c22a72c9e27..4fd2d5853d8 100644 --- a/include/hw/ppc/spapr_irq.h +++ b/include/hw/ppc/spapr_irq.h @@ -14,9 +14,21 @@ #include "qom/object.h" /* - * IRQ range offsets per device type + * The XIVE IRQ backend uses the same layout as the XICS backend but + * covers the full range of the IRQ number space. The IRQ numbers for + * the CPU IPIs are allocated at the bottom of this space, below 4K, + * to preserve compatibility with XICS which does not use that range. 
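+ * For example, with SPAPR_IRQ_NR_IPIS defined below as 0x1000, IRQ
+ * numbers 0x0..0xfff are reserved for the CPU IPIs, while the per-device
+ * offsets keep starting at SPAPR_XIRQ_BASE (0x1000), exactly as they do
+ * under XICS.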
+ */ + +/* + * CPU IPI range (XIVE only) */ #define SPAPR_IRQ_IPI 0x0 +#define SPAPR_IRQ_NR_IPIS 0x1000 + +/* + * IRQ range offsets per device type + */ #define SPAPR_XIRQ_BASE XICS_IRQ_BASE /* 0x1000 */ #define SPAPR_IRQ_EPOW (SPAPR_XIRQ_BASE + 0x0000) diff --git a/include/hw/ppc/spapr_nested.h b/include/hw/ppc/spapr_nested.h index d3834864764..93ef14adcc5 100644 --- a/include/hw/ppc/spapr_nested.h +++ b/include/hw/ppc/spapr_nested.h @@ -1,9 +1,350 @@ #ifndef HW_SPAPR_NESTED_H #define HW_SPAPR_NESTED_H -#include "qemu/osdep.h" #include "target/ppc/cpu.h" +/* Guest State Buffer Element IDs */ +#define GSB_HV_VCPU_IGNORED_ID 0x0000 /* An element whose value is ignored */ +#define GSB_HV_VCPU_STATE_SIZE 0x0001 /* HV internal format VCPU state size */ +#define GSB_VCPU_OUT_BUF_MIN_SZ 0x0002 /* Min size of the Run VCPU o/p buffer */ +#define GSB_VCPU_LPVR 0x0003 /* Logical PVR */ +#define GSB_TB_OFFSET 0x0004 /* Timebase Offset */ +#define GSB_PART_SCOPED_PAGETBL 0x0005 /* Partition Scoped Page Table */ +#define GSB_PROCESS_TBL 0x0006 /* Process Table */ + /* RESERVED 0x0007 - 0x0BFF */ +#define GSB_VCPU_IN_BUFFER 0x0C00 /* Run VCPU Input Buffer */ +#define GSB_VCPU_OUT_BUFFER 0x0C01 /* Run VCPU Out Buffer */ +#define GSB_VCPU_VPA 0x0C02 /* HRA to Guest VCPU VPA */ + /* RESERVED 0x0C03 - 0x0FFF */ +#define GSB_VCPU_GPR0 0x1000 +#define GSB_VCPU_GPR1 0x1001 +#define GSB_VCPU_GPR2 0x1002 +#define GSB_VCPU_GPR3 0x1003 +#define GSB_VCPU_GPR4 0x1004 +#define GSB_VCPU_GPR5 0x1005 +#define GSB_VCPU_GPR6 0x1006 +#define GSB_VCPU_GPR7 0x1007 +#define GSB_VCPU_GPR8 0x1008 +#define GSB_VCPU_GPR9 0x1009 +#define GSB_VCPU_GPR10 0x100A +#define GSB_VCPU_GPR11 0x100B +#define GSB_VCPU_GPR12 0x100C +#define GSB_VCPU_GPR13 0x100D +#define GSB_VCPU_GPR14 0x100E +#define GSB_VCPU_GPR15 0x100F +#define GSB_VCPU_GPR16 0x1010 +#define GSB_VCPU_GPR17 0x1011 +#define GSB_VCPU_GPR18 0x1012 +#define GSB_VCPU_GPR19 0x1013 +#define GSB_VCPU_GPR20 0x1014 +#define GSB_VCPU_GPR21 0x1015 +#define GSB_VCPU_GPR22 0x1016 +#define GSB_VCPU_GPR23 0x1017 +#define GSB_VCPU_GPR24 0x1018 +#define GSB_VCPU_GPR25 0x1019 +#define GSB_VCPU_GPR26 0x101A +#define GSB_VCPU_GPR27 0x101B +#define GSB_VCPU_GPR28 0x101C +#define GSB_VCPU_GPR29 0x101D +#define GSB_VCPU_GPR30 0x101E +#define GSB_VCPU_GPR31 0x101F +#define GSB_VCPU_HDEC_EXPIRY_TB 0x1020 +#define GSB_VCPU_SPR_NIA 0x1021 +#define GSB_VCPU_SPR_MSR 0x1022 +#define GSB_VCPU_SPR_LR 0x1023 +#define GSB_VCPU_SPR_XER 0x1024 +#define GSB_VCPU_SPR_CTR 0x1025 +#define GSB_VCPU_SPR_CFAR 0x1026 +#define GSB_VCPU_SPR_SRR0 0x1027 +#define GSB_VCPU_SPR_SRR1 0x1028 +#define GSB_VCPU_SPR_DAR 0x1029 +#define GSB_VCPU_DEC_EXPIRE_TB 0x102A +#define GSB_VCPU_SPR_VTB 0x102B +#define GSB_VCPU_SPR_LPCR 0x102C +#define GSB_VCPU_SPR_HFSCR 0x102D +#define GSB_VCPU_SPR_FSCR 0x102E +#define GSB_VCPU_SPR_FPSCR 0x102F +#define GSB_VCPU_SPR_DAWR0 0x1030 +#define GSB_VCPU_SPR_DAWR1 0x1031 +#define GSB_VCPU_SPR_CIABR 0x1032 +#define GSB_VCPU_SPR_PURR 0x1033 +#define GSB_VCPU_SPR_SPURR 0x1034 +#define GSB_VCPU_SPR_IC 0x1035 +#define GSB_VCPU_SPR_SPRG0 0x1036 +#define GSB_VCPU_SPR_SPRG1 0x1037 +#define GSB_VCPU_SPR_SPRG2 0x1038 +#define GSB_VCPU_SPR_SPRG3 0x1039 +#define GSB_VCPU_SPR_PPR 0x103A +#define GSB_VCPU_SPR_MMCR0 0x103B +#define GSB_VCPU_SPR_MMCR1 0x103C +#define GSB_VCPU_SPR_MMCR2 0x103D +#define GSB_VCPU_SPR_MMCR3 0x103E +#define GSB_VCPU_SPR_MMCRA 0x103F +#define GSB_VCPU_SPR_SIER 0x1040 +#define GSB_VCPU_SPR_SIER2 0x1041 +#define GSB_VCPU_SPR_SIER3 0x1042 +#define GSB_VCPU_SPR_BESCR 0x1043 +#define 
GSB_VCPU_SPR_EBBHR 0x1044 +#define GSB_VCPU_SPR_EBBRR 0x1045 +#define GSB_VCPU_SPR_AMR 0x1046 +#define GSB_VCPU_SPR_IAMR 0x1047 +#define GSB_VCPU_SPR_AMOR 0x1048 +#define GSB_VCPU_SPR_UAMOR 0x1049 +#define GSB_VCPU_SPR_SDAR 0x104A +#define GSB_VCPU_SPR_SIAR 0x104B +#define GSB_VCPU_SPR_DSCR 0x104C +#define GSB_VCPU_SPR_TAR 0x104D +#define GSB_VCPU_SPR_DEXCR 0x104E +#define GSB_VCPU_SPR_HDEXCR 0x104F +#define GSB_VCPU_SPR_HASHKEYR 0x1050 +#define GSB_VCPU_SPR_HASHPKEYR 0x1051 +#define GSB_VCPU_SPR_CTRL 0x1052 + /* RESERVED 0x1053 - 0x1FFF */ +#define GSB_VCPU_SPR_CR 0x2000 +#define GSB_VCPU_SPR_PIDR 0x2001 +#define GSB_VCPU_SPR_DSISR 0x2002 +#define GSB_VCPU_SPR_VSCR 0x2003 +#define GSB_VCPU_SPR_VRSAVE 0x2004 +#define GSB_VCPU_SPR_DAWRX0 0x2005 +#define GSB_VCPU_SPR_DAWRX1 0x2006 +#define GSB_VCPU_SPR_PMC1 0x2007 +#define GSB_VCPU_SPR_PMC2 0x2008 +#define GSB_VCPU_SPR_PMC3 0x2009 +#define GSB_VCPU_SPR_PMC4 0x200A +#define GSB_VCPU_SPR_PMC5 0x200B +#define GSB_VCPU_SPR_PMC6 0x200C +#define GSB_VCPU_SPR_WORT 0x200D +#define GSB_VCPU_SPR_PSPB 0x200E + /* RESERVED 0x200F - 0x2FFF */ +#define GSB_VCPU_SPR_VSR0 0x3000 +#define GSB_VCPU_SPR_VSR1 0x3001 +#define GSB_VCPU_SPR_VSR2 0x3002 +#define GSB_VCPU_SPR_VSR3 0x3003 +#define GSB_VCPU_SPR_VSR4 0x3004 +#define GSB_VCPU_SPR_VSR5 0x3005 +#define GSB_VCPU_SPR_VSR6 0x3006 +#define GSB_VCPU_SPR_VSR7 0x3007 +#define GSB_VCPU_SPR_VSR8 0x3008 +#define GSB_VCPU_SPR_VSR9 0x3009 +#define GSB_VCPU_SPR_VSR10 0x300A +#define GSB_VCPU_SPR_VSR11 0x300B +#define GSB_VCPU_SPR_VSR12 0x300C +#define GSB_VCPU_SPR_VSR13 0x300D +#define GSB_VCPU_SPR_VSR14 0x300E +#define GSB_VCPU_SPR_VSR15 0x300F +#define GSB_VCPU_SPR_VSR16 0x3010 +#define GSB_VCPU_SPR_VSR17 0x3011 +#define GSB_VCPU_SPR_VSR18 0x3012 +#define GSB_VCPU_SPR_VSR19 0x3013 +#define GSB_VCPU_SPR_VSR20 0x3014 +#define GSB_VCPU_SPR_VSR21 0x3015 +#define GSB_VCPU_SPR_VSR22 0x3016 +#define GSB_VCPU_SPR_VSR23 0x3017 +#define GSB_VCPU_SPR_VSR24 0x3018 +#define GSB_VCPU_SPR_VSR25 0x3019 +#define GSB_VCPU_SPR_VSR26 0x301A +#define GSB_VCPU_SPR_VSR27 0x301B +#define GSB_VCPU_SPR_VSR28 0x301C +#define GSB_VCPU_SPR_VSR29 0x301D +#define GSB_VCPU_SPR_VSR30 0x301E +#define GSB_VCPU_SPR_VSR31 0x301F +#define GSB_VCPU_SPR_VSR32 0x3020 +#define GSB_VCPU_SPR_VSR33 0x3021 +#define GSB_VCPU_SPR_VSR34 0x3022 +#define GSB_VCPU_SPR_VSR35 0x3023 +#define GSB_VCPU_SPR_VSR36 0x3024 +#define GSB_VCPU_SPR_VSR37 0x3025 +#define GSB_VCPU_SPR_VSR38 0x3026 +#define GSB_VCPU_SPR_VSR39 0x3027 +#define GSB_VCPU_SPR_VSR40 0x3028 +#define GSB_VCPU_SPR_VSR41 0x3029 +#define GSB_VCPU_SPR_VSR42 0x302A +#define GSB_VCPU_SPR_VSR43 0x302B +#define GSB_VCPU_SPR_VSR44 0x302C +#define GSB_VCPU_SPR_VSR45 0x302D +#define GSB_VCPU_SPR_VSR46 0x302E +#define GSB_VCPU_SPR_VSR47 0x302F +#define GSB_VCPU_SPR_VSR48 0x3030 +#define GSB_VCPU_SPR_VSR49 0x3031 +#define GSB_VCPU_SPR_VSR50 0x3032 +#define GSB_VCPU_SPR_VSR51 0x3033 +#define GSB_VCPU_SPR_VSR52 0x3034 +#define GSB_VCPU_SPR_VSR53 0x3035 +#define GSB_VCPU_SPR_VSR54 0x3036 +#define GSB_VCPU_SPR_VSR55 0x3037 +#define GSB_VCPU_SPR_VSR56 0x3038 +#define GSB_VCPU_SPR_VSR57 0x3039 +#define GSB_VCPU_SPR_VSR58 0x303A +#define GSB_VCPU_SPR_VSR59 0x303B +#define GSB_VCPU_SPR_VSR60 0x303C +#define GSB_VCPU_SPR_VSR61 0x303D +#define GSB_VCPU_SPR_VSR62 0x303E +#define GSB_VCPU_SPR_VSR63 0x303F + /* RESERVED 0x3040 - 0xEFFF */ +#define GSB_VCPU_SPR_HDAR 0xF000 +#define GSB_VCPU_SPR_HDSISR 0xF001 +#define GSB_VCPU_SPR_HEIR 0xF002 +#define GSB_VCPU_SPR_ASDR 0xF003 +/* End of list of Guest State Buffer Element IDs */ 
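These IDs index elements inside a guest state buffer, whose wire format is given by the guest_state_element and guest_state_buffer structures defined a little further down: a 16-bit ID, a 16-bit size, then the value stored inline. A rough sketch of walking such a buffer, assuming the fields are guest big-endian and omitting the bounds checks real code needs:

static void gsb_walk_sketch(struct guest_state_buffer *gsb)
{
    uint32_t i, nelems = be32_to_cpu(gsb->num_elements);
    uint8_t *ptr = (uint8_t *)gsb->elements;

    for (i = 0; i < nelems; i++) {
        struct guest_state_element *e = (struct guest_state_element *)ptr;
        uint16_t size = be16_to_cpu(e->size);

        /* be16_to_cpu(e->id) selects the element (GSB_VCPU_GPR0,
         * GSB_VCPU_SPR_LPCR, ...); a real implementation dispatches on it. */
        ptr += sizeof(*e) + size;   /* elements are variable-sized */
    }
}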
+#define GSB_LAST GSB_VCPU_SPR_ASDR + +typedef struct SpaprMachineStateNested { + uint64_t ptcr; + uint8_t api; +#define NESTED_API_KVM_HV 1 +#define NESTED_API_PAPR 2 + bool capabilities_set; + uint32_t pvr_base; + GHashTable *guests; +} SpaprMachineStateNested; + +typedef struct SpaprMachineStateNestedGuest { + uint32_t pvr_logical; + unsigned long nr_vcpus; + uint64_t parttbl[2]; + uint64_t tb_offset; + struct SpaprMachineStateNestedGuestVcpu *vcpus; +} SpaprMachineStateNestedGuest; + +/* Nested PAPR API related macros */ +#define H_GUEST_CAPABILITIES_COPY_MEM 0x8000000000000000 +#define H_GUEST_CAPABILITIES_P9_MODE 0x4000000000000000 +#define H_GUEST_CAPABILITIES_P10_MODE 0x2000000000000000 +#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P10_MODE | \ + H_GUEST_CAPABILITIES_P9_MODE) +#define H_GUEST_CAP_COPY_MEM_BMAP 0 +#define H_GUEST_CAP_P9_MODE_BMAP 1 +#define H_GUEST_CAP_P10_MODE_BMAP 2 +#define PAPR_NESTED_GUEST_MAX 4096 +#define H_GUEST_DELETE_ALL_FLAG 0x8000000000000000ULL +#define PAPR_NESTED_GUEST_VCPU_MAX 2048 +#define VCPU_OUT_BUF_MIN_SZ 0x80ULL +#define HVMASK_DEFAULT 0xffffffffffffffff +#define HVMASK_LPCR 0x0070000003820800 +#define HVMASK_MSR 0xEBFFFFFFFFBFEFFF +#define HVMASK_HDEXCR 0x00000000FFFFFFFF +#define HVMASK_TB_OFFSET 0x000000FFFFFFFFFF +#define GSB_MAX_BUF_SIZE (1024 * 1024) +#define H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE 0x8000000000000000 +#define GUEST_STATE_REQUEST_GUEST_WIDE 0x1 +#define GUEST_STATE_REQUEST_SET 0x2 + +/* + * As per ISA v3.1B, following bits are reserved: + * 0:2 + * 4:57 (ISA mentions bit 58 as well but it should be used for P10) + * 61:63 (hence, haven't included PCR bits for v2.06 and v2.05 + * in LOW BITS) + */ +#define PCR_LOW_BITS (PCR_COMPAT_3_10 | PCR_COMPAT_3_00) +#define HVMASK_PCR (~PCR_LOW_BITS) + +#define GUEST_STATE_ELEMENT(i, sz, s, f, ptr, c) { \ + .id = (i), \ + .size = (sz), \ + .location = ptr, \ + .offset = offsetof(struct s, f), \ + .copy = (c) \ +} + +#define GSBE_NESTED(i, sz, f, c) { \ + .id = (i), \ + .size = (sz), \ + .location = get_guest_ptr, \ + .offset = offsetof(struct SpaprMachineStateNestedGuest, f),\ + .copy = (c), \ + .mask = HVMASK_DEFAULT \ +} + +#define GSBE_NESTED_MSK(i, sz, f, c, m) { \ + .id = (i), \ + .size = (sz), \ + .location = get_guest_ptr, \ + .offset = offsetof(struct SpaprMachineStateNestedGuest, f),\ + .copy = (c), \ + .mask = (m) \ +} + +#define GSBE_NESTED_VCPU(i, sz, f, c) { \ + .id = (i), \ + .size = (sz), \ + .location = get_vcpu_ptr, \ + .offset = offsetof(struct SpaprMachineStateNestedGuestVcpu, f),\ + .copy = (c), \ + .mask = HVMASK_DEFAULT \ +} + +#define GUEST_STATE_ELEMENT_NOP(i, sz) { \ + .id = (i), \ + .size = (sz), \ + .location = NULL, \ + .offset = 0, \ + .copy = NULL, \ + .mask = HVMASK_DEFAULT \ +} + +#define GUEST_STATE_ELEMENT_NOP_DW(i) \ + GUEST_STATE_ELEMENT_NOP(i, 8) +#define GUEST_STATE_ELEMENT_NOP_W(i) \ + GUEST_STATE_ELEMENT_NOP(i, 4) + +#define GUEST_STATE_ELEMENT_BASE(i, s, c) { \ + .id = (i), \ + .size = (s), \ + .location = get_vcpu_state_ptr, \ + .offset = 0, \ + .copy = (c), \ + .mask = HVMASK_DEFAULT \ + } + +#define GUEST_STATE_ELEMENT_OFF(i, s, f, c) { \ + .id = (i), \ + .size = (s), \ + .location = get_vcpu_state_ptr, \ + .offset = offsetof(struct nested_ppc_state, f), \ + .copy = (c), \ + .mask = HVMASK_DEFAULT \ + } + +#define GUEST_STATE_ELEMENT_MSK(i, s, f, c, m) { \ + .id = (i), \ + .size = (s), \ + .location = get_vcpu_state_ptr, \ + .offset = offsetof(struct nested_ppc_state, f), \ + .copy = (c), \ + .mask = (m) \ + } + +#define 
GUEST_STATE_ELEMENT_ENV_QW(i, f) \ + GUEST_STATE_ELEMENT_OFF(i, 16, f, copy_state_16to16) +#define GUEST_STATE_ELEMENT_ENV_DW(i, f) \ + GUEST_STATE_ELEMENT_OFF(i, 8, f, copy_state_8to8) +#define GUEST_STATE_ELEMENT_ENV_W(i, f) \ + GUEST_STATE_ELEMENT_OFF(i, 4, f, copy_state_4to8) +#define GUEST_STATE_ELEMENT_ENV_WW(i, f) \ + GUEST_STATE_ELEMENT_OFF(i, 4, f, copy_state_4to4) +#define GSE_ENV_DWM(i, f, m) \ + GUEST_STATE_ELEMENT_MSK(i, 8, f, copy_state_8to8, m) + +struct guest_state_element { + uint16_t id; + uint16_t size; + uint8_t value[]; +} QEMU_PACKED; + +struct guest_state_buffer { + uint32_t num_elements; + struct guest_state_element elements[]; +} QEMU_PACKED; + +/* Actual buffer plus some metadata about the request */ +struct guest_state_request { + struct guest_state_buffer *gsb; + int64_t buf; + int64_t len; + uint16_t flags; +}; + /* * Register state for entering a nested guest with H_ENTER_NESTED. * New member must be added at the end. @@ -94,9 +435,90 @@ struct nested_ppc_state { uint64_t ppr; int64_t tb_offset; + /* Nested PAPR API */ + uint64_t amor; + uint64_t dawr0; + uint64_t dawrx0; + uint64_t ciabr; + uint64_t purr; + uint64_t spurr; + uint64_t ic; + uint64_t vtb; + uint64_t hdar; + uint64_t hdsisr; + uint64_t heir; + uint64_t asdr; + uint64_t dawr1; + uint64_t dawrx1; + uint64_t dexcr; + uint64_t hdexcr; + uint64_t hashkeyr; + uint64_t hashpkeyr; + ppc_vsr_t vsr[64] QEMU_ALIGNED(16); + uint64_t ebbhr; + uint64_t tar; + uint64_t ebbrr; + uint64_t bescr; + uint64_t iamr; + uint64_t amr; + uint64_t uamor; + uint64_t dscr; + uint64_t fscr; + uint64_t pspb; + uint64_t ctrl; + uint64_t vrsave; + uint64_t dar; + uint64_t dsisr; + uint64_t pmc1; + uint64_t pmc2; + uint64_t pmc3; + uint64_t pmc4; + uint64_t pmc5; + uint64_t pmc6; + uint64_t mmcr0; + uint64_t mmcr1; + uint64_t mmcr2; + uint64_t mmcra; + uint64_t sdar; + uint64_t siar; + uint64_t sier; + uint32_t vscr; + uint64_t fpscr; + int64_t dec_expiry_tb; }; -void spapr_register_nested(void); -void spapr_exit_nested(PowerPCCPU *cpu, int excp); +struct SpaprMachineStateNestedGuestVcpuRunBuf { + uint64_t addr; + uint64_t size; +}; +typedef struct SpaprMachineStateNestedGuestVcpu { + bool enabled; + struct nested_ppc_state state; + struct SpaprMachineStateNestedGuestVcpuRunBuf runbufin; + struct SpaprMachineStateNestedGuestVcpuRunBuf runbufout; + int64_t tb_offset; + uint64_t hdecr_expiry_tb; +} SpaprMachineStateNestedGuestVcpu; + +struct guest_state_element_type { + uint16_t id; + int size; +#define GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE 0x1 +#define GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY 0x2 + uint16_t flags; + void *(*location)(SpaprMachineStateNestedGuest *, target_ulong); + size_t offset; + void (*copy)(void *, void *, bool); + uint64_t mask; +}; + +void spapr_exit_nested(PowerPCCPU *cpu, int excp); +typedef struct SpaprMachineState SpaprMachineState; +bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry); +uint8_t spapr_nested_api(SpaprMachineState *spapr); +void spapr_nested_gsb_init(void); +bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, + target_ulong lpid, ppc_v3_pate_t *entry); #endif /* HW_SPAPR_NESTED_H */ diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h index b7adbdb7b98..816f5d0e840 100644 --- a/include/hw/ppc/xive2_regs.h +++ b/include/hw/ppc/xive2_regs.h @@ -10,7 +10,7 @@ #ifndef PPC_XIVE2_REGS_H #define PPC_XIVE2_REGS_H -#include "cpu.h" +#include "qemu/bswap.h" /* * Thread Interrupt Management 
Area (TIMA) diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index 151d9682380..9228e96c87e 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -329,8 +329,6 @@ struct BusClass { */ char *(*get_fw_dev_path)(DeviceState *dev); - void (*reset)(BusState *bus); - /* * Return whether the device can be added to @bus, * based on the address that was set (via device properties) @@ -993,6 +991,20 @@ const char *qdev_fw_name(DeviceState *dev); void qdev_assert_realized_properly(void); Object *qdev_get_machine(void); +/** + * qdev_get_human_name() - Return a human-readable name for a device + * @dev: The device. Must be a valid and non-NULL pointer. + * + * .. note:: + * This function is intended for user friendly error messages. + * + * Returns: A newly allocated string containing the device id if not null, + * else the object canonical path. + * + * Use g_free() to free it. + */ +char *qdev_get_human_name(DeviceState *dev); + /* FIXME: make this a link<> */ bool qdev_set_parent_bus(DeviceState *dev, BusState *bus, Error **errp); @@ -1071,6 +1083,11 @@ typedef enum MachineInitPhase { */ PHASE_ACCEL_CREATED, + /* + * Late backend objects have been created and initialized. + */ + PHASE_LATE_BACKENDS_CREATED, + /* * machine_class->init has been called, thus creating any embedded * devices and validating machine properties. Devices created at diff --git a/include/hw/qdev-properties-system.h b/include/hw/qdev-properties-system.h index 91f7a2452d9..438f65389f7 100644 --- a/include/hw/qdev-properties-system.h +++ b/include/hw/qdev-properties-system.h @@ -8,6 +8,8 @@ extern const PropertyInfo qdev_prop_macaddr; extern const PropertyInfo qdev_prop_reserved_region; extern const PropertyInfo qdev_prop_multifd_compression; extern const PropertyInfo qdev_prop_mig_mode; +extern const PropertyInfo qdev_prop_granule_mode; +extern const PropertyInfo qdev_prop_zero_page_detection; extern const PropertyInfo qdev_prop_losttickpolicy; extern const PropertyInfo qdev_prop_blockdev_on_error; extern const PropertyInfo qdev_prop_bios_chs_trans; @@ -24,6 +26,7 @@ extern const PropertyInfo qdev_prop_off_auto_pcibar; extern const PropertyInfo qdev_prop_pcie_link_speed; extern const PropertyInfo qdev_prop_pcie_link_width; extern const PropertyInfo qdev_prop_cpus390entitlement; +extern const PropertyInfo qdev_prop_iothread_vq_mapping_list; #define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t) @@ -46,6 +49,11 @@ extern const PropertyInfo qdev_prop_cpus390entitlement; #define DEFINE_PROP_MIG_MODE(_n, _s, _f, _d) \ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_mig_mode, \ MigMode) +#define DEFINE_PROP_GRANULE_MODE(_n, _s, _f, _d) \ + DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_granule_mode, GranuleMode) +#define DEFINE_PROP_ZERO_PAGE_DETECTION(_n, _s, _f, _d) \ + DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_zero_page_detection, \ + ZeroPageDetection) #define DEFINE_PROP_LOSTTICKPOLICY(_n, _s, _f, _d) \ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_losttickpolicy, \ LostTickPolicy) @@ -82,4 +90,8 @@ extern const PropertyInfo qdev_prop_cpus390entitlement; DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_cpus390entitlement, \ CpuS390Entitlement) +#define DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST(_name, _state, _field) \ + DEFINE_PROP(_name, _state, _field, qdev_prop_iothread_vq_mapping_list, \ + IOThreadVirtQueueMappingList *) + #endif diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h index 25743a29a00..09aa04ca1e2 100644 --- 
a/include/hw/qdev-properties.h +++ b/include/hw/qdev-properties.h @@ -230,8 +230,8 @@ void qdev_property_add_static(DeviceState *dev, Property *prop); * @target: Device which has properties to be aliased * @source: Object to add alias properties to * - * Add alias properties to the @source object for all qdev properties on - * the @target DeviceState. + * Add alias properties to the @source object for all properties on the @target + * DeviceState. * * This is useful when @target is an internal implementation object * owned by @source, and you want to expose all the properties of that diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h index e5c474b26eb..3db839160f9 100644 --- a/include/hw/riscv/virt.h +++ b/include/hw/riscv/virt.h @@ -23,6 +23,7 @@ #include "hw/riscv/riscv_hart.h" #include "hw/sysbus.h" #include "hw/block/flash.h" +#include "hw/intc/riscv_imsic.h" #define VIRT_CPUS_MAX_BITS 9 #define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS) @@ -60,6 +61,7 @@ struct RISCVVirtState { char *oem_table_id; OnOffAuto acpi; const MemMapEntry *memmap; + struct GPEXHost *gpex_host; }; enum { @@ -127,4 +129,28 @@ enum { bool virt_is_acpi_enabled(RISCVVirtState *s); void virt_acpi_setup(RISCVVirtState *vms); +uint32_t imsic_num_bits(uint32_t count); + +/* + * The virt machine physical address space used by some of the devices + * namely ACLINT, PLIC, APLIC, and IMSIC depend on number of Sockets, + * number of CPUs, and number of IMSIC guest files. + * + * Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS, + * and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization + * of virt machine physical address space. + */ + +#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT) +#if VIRT_IMSIC_GROUP_MAX_SIZE < \ + IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS) +#error "Can't accommodate single IMSIC group in address space" +#endif + +#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \ + VIRT_IMSIC_GROUP_MAX_SIZE) +#if 0x4000000 < VIRT_IMSIC_MAX_SIZE +#error "Can't accommodate all IMSIC groups in address space" +#endif + #endif diff --git a/include/hw/rtc/sun4v-rtc.h b/include/hw/rtc/sun4v-rtc.h index fc54dfcba47..26a9eb61967 100644 --- a/include/hw/rtc/sun4v-rtc.h +++ b/include/hw/rtc/sun4v-rtc.h @@ -5,7 +5,7 @@ * * Copyright (c) 2016 Artyom Tarasenko * - * This code is licensed under the GNU GPL v3 or (at your option) any later + * This code is licensed under the GNU GPL v2 or (at your option) any later * version. 
*/ diff --git a/include/hw/rx/rx62n.h b/include/hw/rx/rx62n.h index 73ceeb58e55..766fe0e4355 100644 --- a/include/hw/rx/rx62n.h +++ b/include/hw/rx/rx62n.h @@ -29,7 +29,6 @@ #include "hw/timer/renesas_tmr.h" #include "hw/timer/renesas_cmt.h" #include "hw/char/renesas_sci.h" -#include "qemu/units.h" #include "qom/object.h" #define TYPE_RX62N_MCU "rx62n-mcu" @@ -68,7 +67,6 @@ struct RX62NState { MemoryRegion iomem2; MemoryRegion iomem3; MemoryRegion c_flash; - qemu_irq irq[NR_IRQS]; /* Input Clock (XTAL) frequency */ uint32_t xtal_freq_hz; diff --git a/include/hw/scsi/esp.h b/include/hw/scsi/esp.h index 13b17496f8c..533d856aa34 100644 --- a/include/hw/scsi/esp.h +++ b/include/hw/scsi/esp.h @@ -25,7 +25,8 @@ struct ESPState { uint8_t rregs[ESP_REGS]; uint8_t wregs[ESP_REGS]; qemu_irq irq; - qemu_irq irq_data; + qemu_irq drq_irq; + bool drq_state; uint8_t chip_id; bool tchi_written; int32_t ti_size; @@ -40,8 +41,7 @@ struct ESPState { uint8_t lun; uint32_t do_cmd; - bool data_in_ready; - uint8_t ti_cmd; + bool data_ready; int dma_enabled; uint32_t async_len; @@ -51,7 +51,6 @@ struct ESPState { ESPDMAMemoryReadWriteFunc dma_memory_write; void *dma_opaque; void (*dma_cb)(ESPState *s); - uint8_t pdma_cb; uint8_t mig_version_id; @@ -63,6 +62,8 @@ struct ESPState { uint8_t mig_ti_buf[ESP_FIFO_SZ]; uint8_t mig_cmdbuf[ESP_CMDFIFO_SZ]; uint32_t mig_cmdlen; + + uint8_t mig_ti_cmd; }; #define TYPE_SYSBUS_ESP "sysbus-esp" @@ -150,15 +151,6 @@ struct SysBusESPState { #define TCHI_FAS100A 0x4 #define TCHI_AM53C974 0x12 -/* PDMA callbacks */ -enum pdma_cb { - SATN_PDMA_CB = 0, - S_WITHOUT_SATN_PDMA_CB = 1, - SATN_STOP_PDMA_CB = 2, - WRITE_RESPONSE_PDMA_CB = 3, - DO_DMA_PDMA_CB = 4 -}; - void esp_dma_enable(ESPState *s, int irq, int level); void esp_request_cancelled(SCSIRequest *req); void esp_command_complete(SCSIRequest *req, size_t resid); diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h index 3692ca82f31..c3d5e17e385 100644 --- a/include/hw/scsi/scsi.h +++ b/include/hw/scsi/scsi.h @@ -69,14 +69,19 @@ struct SCSIDevice { DeviceState qdev; VMChangeStateEntry *vmsentry; - QEMUBH *bh; uint32_t id; BlockConf conf; SCSISense unit_attention; bool sense_is_ua; uint8_t sense[SCSI_SENSE_BUF_SIZE]; uint32_t sense_len; + + /* + * The requests list is only accessed from the AioContext that executes + * requests or from the main loop when IOThread processing is stopped. 
+ */ QTAILQ_HEAD(, SCSIRequest) requests; + uint32_t channel; uint32_t lun; int blocksize; @@ -194,10 +199,7 @@ static inline SCSIBus *scsi_bus_from_device(SCSIDevice *d) } SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk, - int unit, bool removable, int bootindex, - bool share_rw, - BlockdevOnError rerror, - BlockdevOnError werror, + int unit, bool removable, BlockConf *conf, const char *serial, Error **errp); void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense); void scsi_bus_legacy_handle_cmdline(SCSIBus *bus); diff --git a/include/hw/ssi/bcm2835_spi.h b/include/hw/ssi/bcm2835_spi.h new file mode 100644 index 00000000000..d3f8cec1119 --- /dev/null +++ b/include/hw/ssi/bcm2835_spi.h @@ -0,0 +1,81 @@ +/* + * BCM2835 SPI Master Controller + * + * Copyright (c) 2024 Rayhan Faizel + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "hw/sysbus.h" +#include "hw/ssi/ssi.h" +#include "qom/object.h" +#include "qemu/fifo8.h" + +#define TYPE_BCM2835_SPI "bcm2835-spi" +OBJECT_DECLARE_SIMPLE_TYPE(BCM2835SPIState, BCM2835_SPI) + +/* + * Though BCM2835 documentation says FIFOs have a capacity of 16, + * FIFOs are actually 16 words in size or effectively 64 bytes when operating + * in non DMA mode. 
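+ * FIFO_SIZE below is therefore expressed in bytes (64), and FIFO_SIZE_3_4
+ * (48) is the corresponding three-quarters-full mark associated with the
+ * RXR ("RX FIFO needs reading") status bit.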
+ */ +#define FIFO_SIZE 64 +#define FIFO_SIZE_3_4 48 + +#define RO_MASK 0x1f0000 + +#define BCM2835_SPI_CS 0x00 +#define BCM2835_SPI_FIFO 0x04 +#define BCM2835_SPI_CLK 0x08 +#define BCM2835_SPI_DLEN 0x0c +#define BCM2835_SPI_LTOH 0x10 +#define BCM2835_SPI_DC 0x14 + +#define BCM2835_SPI_CS_RXF BIT(20) +#define BCM2835_SPI_CS_RXR BIT(19) +#define BCM2835_SPI_CS_TXD BIT(18) +#define BCM2835_SPI_CS_RXD BIT(17) +#define BCM2835_SPI_CS_DONE BIT(16) +#define BCM2835_SPI_CS_LEN BIT(13) +#define BCM2835_SPI_CS_REN BIT(12) +#define BCM2835_SPI_CS_INTR BIT(10) +#define BCM2835_SPI_CS_INTD BIT(9) +#define BCM2835_SPI_CS_DMAEN BIT(8) +#define BCM2835_SPI_CS_TA BIT(7) +#define BCM2835_SPI_CLEAR_RX BIT(5) +#define BCM2835_SPI_CLEAR_TX BIT(4) + +struct BCM2835SPIState { + /* */ + SysBusDevice parent_obj; + + /* */ + SSIBus *bus; + MemoryRegion iomem; + qemu_irq irq; + + uint32_t cs; + uint32_t clk; + uint32_t dlen; + uint32_t ltoh; + uint32_t dc; + + Fifo8 tx_fifo; + Fifo8 rx_fifo; +}; diff --git a/include/hw/sysbus.h b/include/hw/sysbus.h index 3564b7b6a22..3cb29a480eb 100644 --- a/include/hw/sysbus.h +++ b/include/hw/sysbus.h @@ -83,9 +83,6 @@ void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr); void sysbus_mmio_map_overlap(SysBusDevice *dev, int n, hwaddr addr, int priority); void sysbus_mmio_unmap(SysBusDevice *dev, int n); -void sysbus_add_io(SysBusDevice *dev, hwaddr addr, - MemoryRegion *mem); -MemoryRegion *sysbus_address_space(SysBusDevice *dev); bool sysbus_realize(SysBusDevice *dev, Error **errp); bool sysbus_realize_and_unref(SysBusDevice *dev, Error **errp); diff --git a/include/hw/timer/grlib_gptimer.h b/include/hw/timer/grlib_gptimer.h new file mode 100644 index 00000000000..e56f1b8bf34 --- /dev/null +++ b/include/hw/timer/grlib_gptimer.h @@ -0,0 +1,32 @@ +/* + * QEMU GRLIB GPTimer + * + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2024 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef GRLIB_GPTIMER_H +#define GRLIB_GPTIMER_H + +#define TYPE_GRLIB_GPTIMER "grlib-gptimer" + +#endif diff --git a/include/hw/timer/hpet.h b/include/hw/timer/hpet.h index f04c4d32387..d17a8d43199 100644 --- a/include/hw/timer/hpet.h +++ b/include/hw/timer/hpet.h @@ -78,6 +78,8 @@ extern struct hpet_fw_config hpet_cfg; #define TYPE_HPET "hpet" +#define HPET_INTCAP "hpet-intcap" + static inline bool hpet_find(void) { return object_resolve_path_type("", TYPE_HPET, NULL); diff --git a/include/hw/tricore/tricore_testdevice.h b/include/hw/tricore/tricore_testdevice.h index 8b4fe15f24a..2c57b62f222 100644 --- a/include/hw/tricore/tricore_testdevice.h +++ b/include/hw/tricore/tricore_testdevice.h @@ -25,12 +25,9 @@ OBJECT_CHECK(TriCoreTestDeviceState, (obj), TYPE_TRICORE_TESTDEVICE) typedef struct { - /* */ SysBusDevice parent_obj; - /* */ MemoryRegion iomem; - } TriCoreTestDeviceState; #endif diff --git a/include/hw/usb.h b/include/hw/usb.h index 32c23a5ca2a..d46d96779ad 100644 --- a/include/hw/usb.h +++ b/include/hw/usb.h @@ -30,6 +30,7 @@ #include "qemu/iov.h" #include "qemu/queue.h" #include "qom/object.h" +#include "qapi/error.h" /* Constants related to the USB / PCI interaction */ #define USB_SBRN 0x60 /* Serial Bus Release Number Register */ @@ -497,12 +498,8 @@ struct USBBusOps { void usb_bus_new(USBBus *bus, size_t bus_size, USBBusOps *ops, DeviceState *host); void usb_bus_release(USBBus *bus); -USBBus *usb_bus_find(int busnr); void usb_legacy_register(const char *typename, const char *usbdevice_name, USBDevice *(*usbdevice_init)(void)); -USBDevice *usb_new(const char *name); -bool usb_realize_and_unref(USBDevice *dev, USBBus *bus, Error **errp); -USBDevice *usb_create_simple(USBBus *bus, const char *name); USBDevice *usbdevice_create(const char *cmdline); void usb_register_port(USBBus *bus, USBPort *port, void *opaque, int index, USBPortOps *ops, int speedmask); @@ -582,4 +579,27 @@ void usb_pcap_init(FILE *fp); void usb_pcap_ctrl(USBPacket *p, bool setup); void usb_pcap_data(USBPacket *p, bool setup); +static inline USBDevice *usb_new(const char *name) +{ + return USB_DEVICE(qdev_new(name)); +} + +static inline USBDevice *usb_try_new(const char *name) +{ + return USB_DEVICE(qdev_try_new(name)); +} + +static inline bool usb_realize_and_unref(USBDevice *dev, USBBus *bus, Error **errp) +{ + return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); +} + +static inline USBDevice *usb_create_simple(USBBus *bus, const char *name) +{ + USBDevice *dev = usb_new(name); + + usb_realize_and_unref(dev, bus, &error_abort); + return dev; +} + #endif diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h index a4a22accb94..b9da6c08ef4 100644 --- a/include/hw/vfio/vfio-common.h +++ b/include/hw/vfio/vfio-common.h @@ -30,6 +30,7 @@ #include #endif #include "sysemu/sysemu.h" +#include "hw/vfio/vfio-container-base.h" #define VFIO_MSG_PREFIX "vfio %s: " @@ -61,7 +62,7 @@ typedef struct VFIORegion { typedef struct VFIOMigration { struct VFIODevice *vbasedev; VMChangeStateEntry *vm_state; - Notifier migration_state; + NotifierWithReturn migration_state; uint32_t device_state; int data_fd; void *data_buffer; @@ -72,54 +73,15 @@ typedef struct VFIOMigration { bool initial_data_sent; } VFIOMigration; -typedef struct VFIOAddressSpace { - AddressSpace *as; - QLIST_HEAD(, VFIOContainer) containers; - QLIST_ENTRY(VFIOAddressSpace) list; -} VFIOAddressSpace; - struct VFIOGroup; typedef struct VFIOContainer { - VFIOAddressSpace *space; + VFIOContainerBase bcontainer; int fd; /* 
/dev/vfio/vfio, empowered by the attached groups */ - MemoryListener listener; - MemoryListener prereg_listener; unsigned iommu_type; - Error *error; - bool initialized; - bool dirty_pages_supported; - uint64_t dirty_pgsizes; - uint64_t max_dirty_bitmap_size; - unsigned long pgsizes; - unsigned int dma_max_mappings; - QLIST_HEAD(, VFIOGuestIOMMU) giommu_list; - QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list; QLIST_HEAD(, VFIOGroup) group_list; - QLIST_HEAD(, VFIORamDiscardListener) vrdl_list; - QLIST_ENTRY(VFIOContainer) next; - QLIST_HEAD(, VFIODevice) device_list; - GList *iova_ranges; } VFIOContainer; -typedef struct VFIOGuestIOMMU { - VFIOContainer *container; - IOMMUMemoryRegion *iommu_mr; - hwaddr iommu_offset; - IOMMUNotifier n; - QLIST_ENTRY(VFIOGuestIOMMU) giommu_next; -} VFIOGuestIOMMU; - -typedef struct VFIORamDiscardListener { - VFIOContainer *container; - MemoryRegion *mr; - hwaddr offset_within_address_space; - hwaddr size; - uint64_t granularity; - RamDiscardListener listener; - QLIST_ENTRY(VFIORamDiscardListener) next; -} VFIORamDiscardListener; - typedef struct VFIOHostDMAWindow { hwaddr min_iova; hwaddr max_iova; @@ -127,6 +89,14 @@ typedef struct VFIOHostDMAWindow { QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next; } VFIOHostDMAWindow; +typedef struct IOMMUFDBackend IOMMUFDBackend; + +typedef struct VFIOIOMMUFDContainer { + VFIOContainerBase bcontainer; + IOMMUFDBackend *be; + uint32_t ioas_id; +} VFIOIOMMUFDContainer; + typedef struct VFIODeviceOps VFIODeviceOps; typedef struct VFIODevice { @@ -134,7 +104,7 @@ typedef struct VFIODevice { QLIST_ENTRY(VFIODevice) container_next; QLIST_ENTRY(VFIODevice) global_next; struct VFIOGroup *group; - VFIOContainer *container; + VFIOContainerBase *bcontainer; char *sysfsdev; char *name; DeviceState *dev; @@ -154,6 +124,8 @@ typedef struct VFIODevice { OnOffAuto pre_copy_dirty_page_tracking; bool dirty_pages_supported; bool dirty_tracking; + int devid; + IOMMUFDBackend *iommufd; } VFIODevice; struct VFIODeviceOps { @@ -201,31 +173,10 @@ typedef struct VFIODisplay { } dmabuf; } VFIODisplay; -typedef struct { - unsigned long *bitmap; - hwaddr size; - hwaddr pages; -} VFIOBitmap; - VFIOAddressSpace *vfio_get_address_space(AddressSpace *as); void vfio_put_address_space(VFIOAddressSpace *space); -bool vfio_devices_all_running_and_saving(VFIOContainer *container); - -/* container->fd */ -int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, - ram_addr_t size, IOMMUTLBEntry *iotlb); -int vfio_dma_map(VFIOContainer *container, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly); -int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start); -int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap, - hwaddr iova, hwaddr size); /* SPAPR specific */ -int vfio_container_add_section_window(VFIOContainer *container, - MemoryRegionSection *section, - Error **errp); -void vfio_container_del_section_window(VFIOContainer *container, - MemoryRegionSection *section); int vfio_spapr_container_init(VFIOContainer *container, Error **errp); void vfio_spapr_container_deinit(VFIOContainer *container); @@ -254,12 +205,14 @@ void vfio_detach_device(VFIODevice *vbasedev); int vfio_kvm_device_add_fd(int fd, Error **errp); int vfio_kvm_device_del_fd(int fd, Error **errp); +int vfio_cpr_register_container(VFIOContainerBase *bcontainer, Error **errp); +void vfio_cpr_unregister_container(VFIOContainerBase *bcontainer); + extern const MemoryRegionOps vfio_region_ops; typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList; typedef 
QLIST_HEAD(VFIODeviceList, VFIODevice) VFIODeviceList; extern VFIOGroupList vfio_group_list; extern VFIODeviceList vfio_device_list; - extern const MemoryListener vfio_memory_listener; extern int vfio_kvm_device_fd; @@ -292,11 +245,19 @@ bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp); void vfio_migration_exit(VFIODevice *vbasedev); int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size); -bool vfio_devices_all_running_and_mig_active(VFIOContainer *container); -bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container); -int vfio_devices_query_dirty_bitmap(VFIOContainer *container, +bool +vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer); +bool +vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer); +int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer, VFIOBitmap *vbmap, hwaddr iova, hwaddr size); -int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, - uint64_t size, ram_addr_t ram_addr); +int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova, + uint64_t size, ram_addr_t ram_addr); + +/* Returns 0 on success, or a negative errno. */ +int vfio_device_get_name(VFIODevice *vbasedev, Error **errp); +void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp); +void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops, + DeviceState *dev, bool ram_discard); #endif /* HW_VFIO_VFIO_COMMON_H */ diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h new file mode 100644 index 00000000000..3582d5f97a3 --- /dev/null +++ b/include/hw/vfio/vfio-container-base.h @@ -0,0 +1,141 @@ +/* + * VFIO BASE CONTAINER + * + * Copyright (C) 2023 Intel Corporation. + * Copyright Red Hat, Inc. 
2023 + * + * Authors: Yi Liu + * Eric Auger + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_VFIO_CONTAINER_BASE_H +#define HW_VFIO_VFIO_CONTAINER_BASE_H + +#include "exec/memory.h" + +typedef struct VFIODevice VFIODevice; +typedef struct VFIOIOMMUClass VFIOIOMMUClass; + +typedef struct { + unsigned long *bitmap; + hwaddr size; + hwaddr pages; +} VFIOBitmap; + +typedef struct VFIOAddressSpace { + AddressSpace *as; + QLIST_HEAD(, VFIOContainerBase) containers; + QLIST_ENTRY(VFIOAddressSpace) list; +} VFIOAddressSpace; + +/* + * This is the base object for vfio container backends + */ +typedef struct VFIOContainerBase { + const VFIOIOMMUClass *ops; + VFIOAddressSpace *space; + MemoryListener listener; + Error *error; + bool initialized; + uint64_t dirty_pgsizes; + uint64_t max_dirty_bitmap_size; + unsigned long pgsizes; + unsigned int dma_max_mappings; + bool dirty_pages_supported; + QLIST_HEAD(, VFIOGuestIOMMU) giommu_list; + QLIST_HEAD(, VFIORamDiscardListener) vrdl_list; + QLIST_ENTRY(VFIOContainerBase) next; + QLIST_HEAD(, VFIODevice) device_list; + GList *iova_ranges; + NotifierWithReturn cpr_reboot_notifier; +} VFIOContainerBase; + +typedef struct VFIOGuestIOMMU { + VFIOContainerBase *bcontainer; + IOMMUMemoryRegion *iommu_mr; + hwaddr iommu_offset; + IOMMUNotifier n; + QLIST_ENTRY(VFIOGuestIOMMU) giommu_next; +} VFIOGuestIOMMU; + +typedef struct VFIORamDiscardListener { + VFIOContainerBase *bcontainer; + MemoryRegion *mr; + hwaddr offset_within_address_space; + hwaddr size; + uint64_t granularity; + RamDiscardListener listener; + QLIST_ENTRY(VFIORamDiscardListener) next; +} VFIORamDiscardListener; + +int vfio_container_dma_map(VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + void *vaddr, bool readonly); +int vfio_container_dma_unmap(VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb); +int vfio_container_add_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, + Error **errp); +void vfio_container_del_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section); +int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer, + bool start); +int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer, + VFIOBitmap *vbmap, + hwaddr iova, hwaddr size); + +void vfio_container_init(VFIOContainerBase *bcontainer, + VFIOAddressSpace *space, + const VFIOIOMMUClass *ops); +void vfio_container_destroy(VFIOContainerBase *bcontainer); + + +#define TYPE_VFIO_IOMMU "vfio-iommu" +#define TYPE_VFIO_IOMMU_LEGACY TYPE_VFIO_IOMMU "-legacy" +#define TYPE_VFIO_IOMMU_SPAPR TYPE_VFIO_IOMMU "-spapr" +#define TYPE_VFIO_IOMMU_IOMMUFD TYPE_VFIO_IOMMU "-iommufd" + +/* + * VFIOContainerBase is not an abstract QOM object because it felt + * unnecessary to expose all the IOMMU backends to the QEMU machine + * and human interface. However, we can still abstract the IOMMU + * backend handlers using a QOM interface class. This provides more + * flexibility when referencing the various implementations. 
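As an aside (not part of the patch): the vfio_container_dma_map()/vfio_container_dma_unmap() entry points declared above dispatch through whichever VFIOIOMMUClass ops were installed by vfio_container_init(). A minimal sketch of a caller, with a made-up helper name and assuming it is built inside the QEMU tree where the new header exists:

```c
#include "qemu/osdep.h"
#include "hw/vfio/vfio-container-base.h"

/* Hypothetical helper: map a host buffer read-only, then unmap it again. */
static int vfio_map_unmap_example(VFIOContainerBase *bcontainer,
                                  hwaddr iova, void *host_buf,
                                  ram_addr_t size)
{
    /* Dispatches to the backend's dma_map handler (legacy, sPAPR, iommufd) */
    int ret = vfio_container_dma_map(bcontainer, iova, size, host_buf, true);

    if (ret) {
        return ret; /* negative errno from the backend */
    }

    /* ... DMA would happen here ... */

    /* No IOMMUTLBEntry is involved for a plain unmap, so pass NULL */
    return vfio_container_dma_unmap(bcontainer, iova, size, NULL);
}
```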
+ */ +DECLARE_CLASS_CHECKERS(VFIOIOMMUClass, VFIO_IOMMU, TYPE_VFIO_IOMMU) + +struct VFIOIOMMUClass { + InterfaceClass parent_class; + + /* basic feature */ + int (*setup)(VFIOContainerBase *bcontainer, Error **errp); + int (*dma_map)(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + void *vaddr, bool readonly); + int (*dma_unmap)(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb); + int (*attach_device)(const char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp); + void (*detach_device)(VFIODevice *vbasedev); + /* migration feature */ + int (*set_dirty_page_tracking)(const VFIOContainerBase *bcontainer, + bool start); + int (*query_dirty_bitmap)(const VFIOContainerBase *bcontainer, + VFIOBitmap *vbmap, + hwaddr iova, hwaddr size); + /* PCI specific */ + int (*pci_hot_reset)(VFIODevice *vbasedev, bool single); + + /* SPAPR specific */ + int (*add_window)(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, + Error **errp); + void (*del_window)(VFIOContainerBase *bcontainer, + MemoryRegionSection *section); + void (*release)(VFIOContainerBase *bcontainer); +}; +#endif /* HW_VFIO_VFIO_CONTAINER_BASE_H */ diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h index a86d103f824..70c2e8ffeee 100644 --- a/include/hw/virtio/vhost-backend.h +++ b/include/hw/virtio/vhost-backend.h @@ -45,6 +45,8 @@ struct vhost_memory; struct vhost_vring_file; struct vhost_vring_state; struct vhost_vring_addr; +struct vhost_vring_worker; +struct vhost_worker_state; struct vhost_scsi_target; struct vhost_iotlb_msg; struct vhost_virtqueue; @@ -85,6 +87,14 @@ typedef int (*vhost_set_vring_err_op)(struct vhost_dev *dev, struct vhost_vring_file *file); typedef int (*vhost_set_vring_busyloop_timeout_op)(struct vhost_dev *dev, struct vhost_vring_state *r); +typedef int (*vhost_attach_vring_worker_op)(struct vhost_dev *dev, + struct vhost_vring_worker *worker); +typedef int (*vhost_get_vring_worker_op)(struct vhost_dev *dev, + struct vhost_vring_worker *worker); +typedef int (*vhost_new_worker_op)(struct vhost_dev *dev, + struct vhost_worker_state *worker); +typedef int (*vhost_free_worker_op)(struct vhost_dev *dev, + struct vhost_worker_state *worker); typedef int (*vhost_set_features_op)(struct vhost_dev *dev, uint64_t features); typedef int (*vhost_get_features_op)(struct vhost_dev *dev, @@ -172,6 +182,10 @@ typedef struct VhostOps { vhost_set_vring_call_op vhost_set_vring_call; vhost_set_vring_err_op vhost_set_vring_err; vhost_set_vring_busyloop_timeout_op vhost_set_vring_busyloop_timeout; + vhost_new_worker_op vhost_new_worker; + vhost_free_worker_op vhost_free_worker; + vhost_get_vring_worker_op vhost_get_vring_worker; + vhost_attach_vring_worker_op vhost_attach_vring_worker; vhost_set_features_op vhost_set_features; vhost_get_features_op vhost_get_features; vhost_set_backend_cap_op vhost_set_backend_cap; diff --git a/include/hw/virtio/vhost-user-device.h b/include/hw/virtio/vhost-user-base.h similarity index 71% rename from include/hw/virtio/vhost-user-device.h rename to include/hw/virtio/vhost-user-base.h index 3ddf88a146a..51d0968b893 100644 --- a/include/hw/virtio/vhost-user-device.h +++ b/include/hw/virtio/vhost-user-base.h @@ -6,8 +6,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef QEMU_VHOST_USER_DEVICE_H -#define QEMU_VHOST_USER_DEVICE_H +#ifndef QEMU_VHOST_USER_BASE_H +#define QEMU_VHOST_USER_BASE_H #include "hw/virtio/vhost.h" #include "hw/virtio/vhost-user.h" @@ -17,11 +17,13 @@ 
OBJECT_DECLARE_TYPE(VHostUserBase, VHostUserBaseClass, VHOST_USER_BASE) struct VHostUserBase { - VirtIODevice parent; + VirtIODevice parent_obj; + /* Properties */ CharBackend chardev; uint16_t virtio_id; uint32_t num_vqs; + uint32_t vq_size; /* can't exceed VIRTIO_QUEUE_MAX */ uint32_t config_size; /* State tracking */ VhostUserState vhost_user; @@ -31,16 +33,17 @@ struct VHostUserBase { bool connected; }; - /* needed so we can use the base realize after specialisation - tweaks */ +/* + * Needed so we can use the base realize after specialisation + * tweaks + */ struct VHostUserBaseClass { - /*< private >*/ VirtioDeviceClass parent_class; - /*< public >*/ + DeviceRealize parent_realize; }; -/* shared for the benefit of the derived pci class */ + #define TYPE_VHOST_USER_DEVICE "vhost-user-device" -#endif /* QEMU_VHOST_USER_DEVICE_H */ +#endif /* QEMU_VHOST_USER_BASE_H */ diff --git a/include/hw/virtio/vhost-user-gpio.h b/include/hw/virtio/vhost-user-gpio.h index a9d3f9b049f..5814a8400a0 100644 --- a/include/hw/virtio/vhost-user-gpio.h +++ b/include/hw/virtio/vhost-user-gpio.h @@ -12,34 +12,13 @@ #include "hw/virtio/virtio.h" #include "hw/virtio/vhost.h" #include "hw/virtio/vhost-user.h" -#include "standard-headers/linux/virtio_gpio.h" -#include "chardev/char-fe.h" +#include "hw/virtio/vhost-user-base.h" #define TYPE_VHOST_USER_GPIO "vhost-user-gpio-device" OBJECT_DECLARE_SIMPLE_TYPE(VHostUserGPIO, VHOST_USER_GPIO); struct VHostUserGPIO { - /*< private >*/ - VirtIODevice parent_obj; - CharBackend chardev; - struct virtio_gpio_config config; - struct vhost_virtqueue *vhost_vqs; - struct vhost_dev vhost_dev; - VhostUserState vhost_user; - VirtQueue *command_vq; - VirtQueue *interrupt_vq; - /** - * There are at least two steps of initialization of the - * vhost-user device. The first is a "connect" step and - * second is a "start" step. Make a separation between - * those initialization phases by using two fields. 
- * - * @connected: see vu_gpio_connect()/vu_gpio_disconnect() - * @started_vu: see vu_gpio_start()/vu_gpio_stop() - */ - bool connected; - bool started_vu; - /*< public >*/ + VHostUserBase parent_obj; }; #endif /* _QEMU_VHOST_USER_GPIO_H */ diff --git a/include/hw/virtio/vhost-user-i2c.h b/include/hw/virtio/vhost-user-i2c.h index 0f7acd40e3a..a9b5612ad01 100644 --- a/include/hw/virtio/vhost-user-i2c.h +++ b/include/hw/virtio/vhost-user-i2c.h @@ -9,23 +9,17 @@ #ifndef QEMU_VHOST_USER_I2C_H #define QEMU_VHOST_USER_I2C_H +#include "hw/virtio/virtio.h" #include "hw/virtio/vhost.h" #include "hw/virtio/vhost-user.h" +#include "hw/virtio/vhost-user-base.h" #define TYPE_VHOST_USER_I2C "vhost-user-i2c-device" + OBJECT_DECLARE_SIMPLE_TYPE(VHostUserI2C, VHOST_USER_I2C) struct VHostUserI2C { - VirtIODevice parent; - CharBackend chardev; - struct vhost_virtqueue *vhost_vq; - struct vhost_dev vhost_dev; - VhostUserState vhost_user; - VirtQueue *vq; - bool connected; + VHostUserBase parent_obj; }; -/* Virtio Feature bits */ -#define VIRTIO_I2C_F_ZERO_LENGTH_REQUEST 0 - #endif /* QEMU_VHOST_USER_I2C_H */ diff --git a/include/hw/virtio/vhost-user-rng.h b/include/hw/virtio/vhost-user-rng.h index ddd9f01eea6..10868c7de4d 100644 --- a/include/hw/virtio/vhost-user-rng.h +++ b/include/hw/virtio/vhost-user-rng.h @@ -12,22 +12,13 @@ #include "hw/virtio/virtio.h" #include "hw/virtio/vhost.h" #include "hw/virtio/vhost-user.h" -#include "chardev/char-fe.h" +#include "hw/virtio/vhost-user-base.h" #define TYPE_VHOST_USER_RNG "vhost-user-rng" OBJECT_DECLARE_SIMPLE_TYPE(VHostUserRNG, VHOST_USER_RNG) struct VHostUserRNG { - /*< private >*/ - VirtIODevice parent; - CharBackend chardev; - struct vhost_virtqueue *vhost_vq; - struct vhost_dev vhost_dev; - VhostUserState vhost_user; - VirtQueue *req_vq; - bool connected; - - /*< public >*/ + VHostUserBase parent_obj; }; #endif /* QEMU_VHOST_USER_RNG_H */ diff --git a/include/hw/virtio/vhost-user-snd.h b/include/hw/virtio/vhost-user-snd.h new file mode 100644 index 00000000000..f9260116a73 --- /dev/null +++ b/include/hw/virtio/vhost-user-snd.h @@ -0,0 +1,24 @@ +/* + * Vhost-user Sound virtio device + * + * Copyright (c) 2021 Mathieu Poirier + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef QEMU_VHOST_USER_SND_H +#define QEMU_VHOST_USER_SND_H + +#include "hw/virtio/virtio.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-user.h" +#include "hw/virtio/vhost-user-base.h" + +#define TYPE_VHOST_USER_SND "vhost-user-snd" +OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSound, VHOST_USER_SND) + +struct VHostUserSound { + VHostUserBase parent_obj; +}; + +#endif /* QEMU_VHOST_USER_SND_H */ diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h index 5407d54fd79..0a9575b469b 100644 --- a/include/hw/virtio/vhost-vdpa.h +++ b/include/hw/virtio/vhost-vdpa.h @@ -30,42 +30,61 @@ typedef struct VhostVDPAHostNotifier { void *addr; } VhostVDPAHostNotifier; -typedef struct vhost_vdpa { +typedef enum SVQTransitionState { + SVQ_TSTATE_DISABLING = -1, + SVQ_TSTATE_DONE, + SVQ_TSTATE_ENABLING +} SVQTransitionState; + +/* Info shared by all vhost_vdpa device models */ +typedef struct vhost_vdpa_shared { int device_fd; - int index; - uint32_t msg_type; - bool iotlb_batch_begin_sent; - uint32_t address_space_id; MemoryListener listener; struct vhost_vdpa_iova_range iova_range; - uint64_t acked_features; - bool shadow_vqs_enabled; + QLIST_HEAD(, vdpa_iommu) iommu_list; + + /* IOVA mapping used by the Shadow Virtqueue */ + VhostIOVATree *iova_tree; + + /* Copy of 
backend features */ + uint64_t backend_cap; + + bool iotlb_batch_begin_sent; + /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */ bool shadow_data; + + /* SVQ switching is in progress, or already completed? */ + SVQTransitionState svq_switching; +} VhostVDPAShared; + +typedef struct vhost_vdpa { + int index; + uint32_t address_space_id; + uint64_t acked_features; + bool shadow_vqs_enabled; /* Device suspended successfully */ bool suspended; - /* IOVA mapping used by the Shadow Virtqueue */ - VhostIOVATree *iova_tree; + VhostVDPAShared *shared; GPtrArray *shadow_vqs; const VhostShadowVirtqueueOps *shadow_vq_ops; void *shadow_vq_ops_opaque; struct vhost_dev *dev; Error *migration_blocker; VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX]; - QLIST_HEAD(, vdpa_iommu) iommu_list; IOMMUNotifier n; } VhostVDPA; int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range); int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx); -int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, +int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova, hwaddr size, void *vaddr, bool readonly); -int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, +int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova, hwaddr size); typedef struct vdpa_iommu { - struct vhost_vdpa *dev; + VhostVDPAShared *dev_shared; IOMMUMemoryRegion *iommu_mr; hwaddr iommu_offset; IOMMUNotifier n; diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h index 93c782101dd..75a74e8a995 100644 --- a/include/hw/virtio/vhost-vsock-common.h +++ b/include/hw/virtio/vhost-vsock-common.h @@ -11,6 +11,7 @@ #ifndef QEMU_VHOST_VSOCK_COMMON_H #define QEMU_VHOST_VSOCK_COMMON_H +#include "qapi/qapi-types-common.h" #include "hw/virtio/virtio.h" #include "hw/virtio/vhost.h" #include "qom/object.h" diff --git a/include/hw/virtio/virtio-acpi.h b/include/hw/virtio/virtio-acpi.h new file mode 100644 index 00000000000..cace2a315f4 --- /dev/null +++ b/include/hw/virtio/virtio-acpi.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ACPI support for virtio + */ + +#ifndef VIRTIO_ACPI_H +#define VIRTIO_ACPI_H + +#include "exec/hwaddr.h" + +void virtio_acpi_dsdt_add(Aml *scope, const hwaddr virtio_mmio_base, + const hwaddr virtio_mmio_size, uint32_t mmio_irq, + long int start_index, int num); + +#endif diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h index dafec432ce0..5c14110c4b1 100644 --- a/include/hw/virtio/virtio-blk.h +++ b/include/hw/virtio/virtio-blk.h @@ -21,6 +21,7 @@ #include "sysemu/block-backend.h" #include "sysemu/block-ram-registrar.h" #include "qom/object.h" +#include "qapi/qapi-types-virtio.h" #define TYPE_VIRTIO_BLK "virtio-blk-device" OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK) @@ -37,6 +38,7 @@ struct VirtIOBlkConf { BlockConf conf; IOThread *iothread; + IOThreadVirtQueueMappingList *iothread_vq_mapping_list; char *serial; uint32_t request_merging; uint16_t num_queues; @@ -48,20 +50,27 @@ struct VirtIOBlkConf bool x_enable_wce_if_config_wce; }; -struct VirtIOBlockDataPlane; - struct VirtIOBlockReq; struct VirtIOBlock { VirtIODevice parent_obj; BlockBackend *blk; - void *rq; + QemuMutex rq_lock; + struct VirtIOBlockReq *rq; /* protected by rq_lock */ VirtIOBlkConf conf; unsigned short sector_mask; bool original_wce; VMChangeStateEntry *change; - bool dataplane_disabled; - bool dataplane_started; - struct VirtIOBlockDataPlane *dataplane; + 
bool ioeventfd_disabled; + bool ioeventfd_started; + bool ioeventfd_starting; + bool ioeventfd_stopping; + + /* + * The AioContext for each virtqueue. The BlockDriverState will use the + * first element as its AioContext. + */ + AioContext **vq_aio_context; + uint64_t host_features; size_t config_size; BlockRAMRegistrar blk_ram_registrar; diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h index 584ba2ed734..ed44cdad6b3 100644 --- a/include/hw/virtio/virtio-gpu.h +++ b/include/hw/virtio/virtio-gpu.h @@ -81,6 +81,7 @@ struct virtio_gpu_scanout { uint32_t resource_id; struct virtio_gpu_update_cursor cursor; QEMUCursor *current_cursor; + struct virtio_gpu_framebuffer fb; }; struct virtio_gpu_requested_state { @@ -219,6 +220,9 @@ struct VirtIOGPUClass { void (*update_cursor_data)(VirtIOGPU *g, struct virtio_gpu_scanout *s, uint32_t resource_id); + void (*resource_destroy)(VirtIOGPU *g, + struct virtio_gpu_simple_resource *res, + Error **errp); }; struct VirtIOGPUGL { diff --git a/include/hw/virtio/virtio-input.h b/include/hw/virtio/virtio-input.h index a6c97036440..e69c0aeca38 100644 --- a/include/hw/virtio/virtio-input.h +++ b/include/hw/virtio/virtio-input.h @@ -1,6 +1,8 @@ #ifndef QEMU_VIRTIO_INPUT_H #define QEMU_VIRTIO_INPUT_H +#include "hw/virtio/vhost-user.h" +#include "hw/virtio/vhost-user-base.h" #include "ui/input.h" #include "sysemu/vhost-user-backend.h" @@ -97,9 +99,7 @@ struct VirtIOInputHost { }; struct VHostUserInput { - VirtIOInput parent_obj; - - VhostUserBackend *vhost; + VHostUserBase parent_obj; }; void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event); diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h index 781ebaea8fc..83a52cc446d 100644 --- a/include/hw/virtio/virtio-iommu.h +++ b/include/hw/virtio/virtio-iommu.h @@ -24,6 +24,7 @@ #include "hw/virtio/virtio.h" #include "hw/pci/pci.h" #include "qom/object.h" +#include "qapi/qapi-types-virtio.h" #define TYPE_VIRTIO_IOMMU "virtio-iommu-device" #define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-pci" @@ -66,6 +67,8 @@ struct VirtIOIOMMU { bool boot_bypass; Notifier machine_done; bool granule_frozen; + GranuleMode granule_mode; + uint8_t aw_bits; }; #endif diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h index 55977f01f0f..060c23c04d2 100644 --- a/include/hw/virtio/virtio-net.h +++ b/include/hw/virtio/virtio-net.h @@ -221,10 +221,12 @@ struct VirtIONet { DeviceListener primary_listener; QDict *primary_opts; bool primary_opts_from_json; - Notifier migration_state; + NotifierWithReturn migration_state; VirtioNetRssData rss_data; struct NetRxPkt *rx_pkt; struct EBPFRSSContext ebpf_rss; + uint32_t nr_ebpf_rss_fds; + char **ebpf_rss_fds; }; size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev, diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h index 779568ab5d2..7be01059185 100644 --- a/include/hw/virtio/virtio-scsi.h +++ b/include/hw/virtio/virtio-scsi.h @@ -51,6 +51,7 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig; struct VirtIOSCSIConf { uint32_t num_queues; uint32_t virtqueue_size; + bool worker_per_virtqueue; bool seg_max_adjust; uint32_t max_sectors; uint32_t cmd_per_lun; @@ -85,8 +86,9 @@ struct VirtIOSCSI { /* * TMFs deferred to main loop BH. These fields are protected by - * virtio_scsi_acquire(). + * tmf_bh_lock. 
*/ + QemuMutex tmf_bh_lock; QEMUBH *tmf_bh; QTAILQ_HEAD(, VirtIOSCSIReq) tmf_bh_list; @@ -100,20 +102,6 @@ struct VirtIOSCSI { uint32_t host_features; }; -static inline void virtio_scsi_acquire(VirtIOSCSI *s) -{ - if (s->ctx) { - aio_context_acquire(s->ctx); - } -} - -static inline void virtio_scsi_release(VirtIOSCSI *s) -{ - if (s->ctx) { - aio_context_release(s->ctx); - } -} - void virtio_scsi_common_realize(DeviceState *dev, VirtIOHandleOutput ctrl, VirtIOHandleOutput evt, diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h index c8f72850bc0..7d5ffdc145b 100644 --- a/include/hw/virtio/virtio.h +++ b/include/hw/virtio/virtio.h @@ -22,6 +22,7 @@ #include "standard-headers/linux/virtio_config.h" #include "standard-headers/linux/virtio_ring.h" #include "qom/object.h" +#include "block/aio.h" /* * A guest should never accept this. It implies negotiation is broken @@ -508,4 +509,10 @@ static inline bool virtio_device_disabled(VirtIODevice *vdev) bool virtio_legacy_allowed(VirtIODevice *vdev); bool virtio_legacy_check_disabled(VirtIODevice *vdev); +QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev, + QEMUBHFunc *cb, void *opaque, + const char *name); +#define virtio_bh_new_guarded(dev, cb, opaque) \ + virtio_bh_new_guarded_full((dev), (cb), (opaque), (stringify(cb))) + #endif diff --git a/include/hw/xen/xen-bus.h b/include/hw/xen/xen-bus.h index 334ddd1ff67..38d40afa379 100644 --- a/include/hw/xen/xen-bus.h +++ b/include/hw/xen/xen-bus.h @@ -75,7 +75,7 @@ struct XenBusClass { OBJECT_DECLARE_TYPE(XenBus, XenBusClass, XEN_BUS) -BusState *xen_bus_init(void); +void xen_bus_init(void); void xen_device_backend_set_state(XenDevice *xendev, enum xenbus_state state); diff --git a/include/hw/xen/xen-hvm-common.h b/include/hw/xen/xen-hvm-common.h index 4e9904f1a65..65a51aac2e0 100644 --- a/include/hw/xen/xen-hvm-common.h +++ b/include/hw/xen/xen-hvm-common.h @@ -1,7 +1,6 @@ #ifndef HW_XEN_HVM_COMMON_H #define HW_XEN_HVM_COMMON_H -#include "qemu/osdep.h" #include "qemu/units.h" #include "cpu.h" @@ -16,7 +15,7 @@ #include "qemu/error-report.h" #include -extern MemoryRegion ram_memory; +extern MemoryRegion xen_memory; extern MemoryListener xen_io_listener; extern DeviceListener xen_device_listener; diff --git a/include/hw/xen/xen-legacy-backend.h b/include/hw/xen/xen-legacy-backend.h index fc42146bc2a..2cca1747786 100644 --- a/include/hw/xen/xen-legacy-backend.h +++ b/include/hw/xen/xen-legacy-backend.h @@ -81,7 +81,6 @@ extern struct XenDevOps xen_usb_ops; /* xen-usb.c */ /* configuration (aka xenbus setup) */ void xen_config_cleanup(void); -int xen_config_dev_nic(NICInfo *nic); int xen_config_dev_vfb(int vdev, const char *type); int xen_config_dev_vkbd(int vdev); int xen_config_dev_console(int vdev); diff --git a/include/hw/xen/xen_igd.h b/include/hw/xen/xen_igd.h new file mode 100644 index 00000000000..7ffca06c109 --- /dev/null +++ b/include/hw/xen/xen_igd.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2007, Neocleus Corporation. + * Copyright (c) 2007, Intel Corporation. 
+ * + * SPDX-License-Identifier: GPL-2.0-only + * + * Alex Novik + * Allen Kay + * Guy Zana + */ +#ifndef XEN_IGD_H +#define XEN_IGD_H + +#include "hw/xen/xen-host-pci-device.h" + +typedef struct XenPCIPassthroughState XenPCIPassthroughState; + +bool xen_igd_gfx_pt_enabled(void); +void xen_igd_gfx_pt_set(bool value, Error **errp); + +uint32_t igd_read_opregion(XenPCIPassthroughState *s); +void xen_igd_reserve_slot(PCIBus *pci_bus); +void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val); +void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s, + XenHostPCIDevice *dev); + +static inline bool is_igd_vga_passthrough(XenHostPCIDevice *dev) +{ + return (xen_igd_gfx_pt_enabled() + && ((dev->class_code >> 0x8) == PCI_CLASS_DISPLAY_VGA)); +} + +#endif diff --git a/include/io/channel-file.h b/include/io/channel-file.h index 50e8eb11386..d373a4e44d9 100644 --- a/include/io/channel-file.h +++ b/include/io/channel-file.h @@ -68,6 +68,24 @@ struct QIOChannelFile { QIOChannelFile * qio_channel_file_new_fd(int fd); +/** + * qio_channel_file_new_dupfd: + * @fd: the file descriptor + * @errp: pointer to initialized error object + * + * Create a new IO channel object for a file represented by the @fd + * parameter. Like qio_channel_file_new_fd(), but the @fd is first + * duplicated with dup(). + * + * The channel will own the duplicated file descriptor and will take + * responsibility for closing it, the original FD is owned by the + * caller. + * + * Returns: the new channel object + */ +QIOChannelFile * +qio_channel_file_new_dupfd(int fd, Error **errp); + /** * qio_channel_file_new_path: * @path: the file path diff --git a/include/io/channel.h b/include/io/channel.h index 5f9dbaab65b..7986c49c713 100644 --- a/include/io/channel.h +++ b/include/io/channel.h @@ -44,6 +44,7 @@ enum QIOChannelFeature { QIO_CHANNEL_FEATURE_LISTEN, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY, QIO_CHANNEL_FEATURE_READ_MSG_PEEK, + QIO_CHANNEL_FEATURE_SEEKABLE, }; @@ -130,6 +131,16 @@ struct QIOChannelClass { Error **errp); /* Optional callbacks */ + ssize_t (*io_pwritev)(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + off_t offset, + Error **errp); + ssize_t (*io_preadv)(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + off_t offset, + Error **errp); int (*io_shutdown)(QIOChannel *ioc, QIOChannelShutdown how, Error **errp); @@ -528,6 +539,78 @@ void qio_channel_set_follow_coroutine_ctx(QIOChannel *ioc, bool enabled); int qio_channel_close(QIOChannel *ioc, Error **errp); +/** + * qio_channel_pwritev + * @ioc: the channel object + * @iov: the array of memory regions to write data from + * @niov: the length of the @iov array + * @offset: offset in the channel where writes should begin + * @errp: pointer to a NULL-initialized error object + * + * Not all implementations will support this facility, so may report + * an error. To avoid errors, the caller may check for the feature + * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method. 
+ * + * Behaves as qio_channel_writev_full, apart from not supporting + * sending of file handles as well as beginning the write at the + * passed @offset + * + */ +ssize_t qio_channel_pwritev(QIOChannel *ioc, const struct iovec *iov, + size_t niov, off_t offset, Error **errp); + +/** + * qio_channel_pwrite + * @ioc: the channel object + * @buf: the memory region to write data into + * @buflen: the number of bytes to @buf + * @offset: offset in the channel where writes should begin + * @errp: pointer to a NULL-initialized error object + * + * Not all implementations will support this facility, so may report + * an error. To avoid errors, the caller may check for the feature + * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method. + * + */ +ssize_t qio_channel_pwrite(QIOChannel *ioc, char *buf, size_t buflen, + off_t offset, Error **errp); + +/** + * qio_channel_preadv + * @ioc: the channel object + * @iov: the array of memory regions to read data into + * @niov: the length of the @iov array + * @offset: offset in the channel where writes should begin + * @errp: pointer to a NULL-initialized error object + * + * Not all implementations will support this facility, so may report + * an error. To avoid errors, the caller may check for the feature + * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method. + * + * Behaves as qio_channel_readv_full, apart from not supporting + * receiving of file handles as well as beginning the read at the + * passed @offset + * + */ +ssize_t qio_channel_preadv(QIOChannel *ioc, const struct iovec *iov, + size_t niov, off_t offset, Error **errp); + +/** + * qio_channel_pread + * @ioc: the channel object + * @buf: the memory region to write data into + * @buflen: the number of bytes to @buf + * @offset: offset in the channel where writes should begin + * @errp: pointer to a NULL-initialized error object + * + * Not all implementations will support this facility, so may report + * an error. To avoid errors, the caller may check for the feature + * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method. + * + */ +ssize_t qio_channel_pread(QIOChannel *ioc, char *buf, size_t buflen, + off_t offset, Error **errp); + /** * qio_channel_shutdown: * @ioc: the channel object diff --git a/include/io/task.h b/include/io/task.h index dc7d32ebd07..0b5342ee843 100644 --- a/include/io/task.h +++ b/include/io/task.h @@ -149,7 +149,7 @@ typedef void (*QIOTaskWorker)(QIOTask *task, * lookups) to be easily run non-blocking. Reporting the * results in the main thread context means that the caller * typically does not need to be concerned about thread - * safety wrt the QEMU global mutex. + * safety wrt the BQL. * * For example, the socket_listen() method will block the caller * while DNS lookups take place if given a name, instead of IP diff --git a/include/migration/client-options.h b/include/migration/client-options.h new file mode 100644 index 00000000000..59f4b55cf4f --- /dev/null +++ b/include/migration/client-options.h @@ -0,0 +1,25 @@ +/* + * QEMU public migration capabilities + * + * Copyright (c) 2012-2023 Red Hat Inc + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
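To illustrate the positioned-I/O helpers added above (this sketch is not part of the patch), callers are expected to gate on QIO_CHANNEL_FEATURE_SEEKABLE first; the function name below is hypothetical and the existing qio_channel_has_feature() helper is assumed:

```c
#include "qemu/osdep.h"
#include "io/channel.h"
#include "qapi/error.h"

/* Write a buffer at @offset and read it straight back from the same offset. */
static ssize_t write_then_read_back(QIOChannel *ioc, char *buf, size_t len,
                                    off_t offset, Error **errp)
{
    ssize_t ret;

    if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SEEKABLE)) {
        error_setg(errp, "channel does not support positioned I/O");
        return -1;
    }

    ret = qio_channel_pwrite(ioc, buf, len, offset, errp);
    if (ret < 0) {
        return ret;
    }

    return qio_channel_pread(ioc, buf, len, offset, errp);
}
```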
+ */ + +#ifndef QEMU_MIGRATION_CLIENT_OPTIONS_H +#define QEMU_MIGRATION_CLIENT_OPTIONS_H + +/* capabilities */ + +bool migrate_background_snapshot(void); +bool migrate_dirty_limit(void); +bool migrate_postcopy_ram(void); +bool migrate_switchover_ack(void); + +/* parameters */ + +MigMode migrate_mode(void); +uint64_t migrate_vcpu_dirty_limit_period(void); + +#endif diff --git a/include/migration/misc.h b/include/migration/misc.h index 1bc8902e6d2..c9e200f4eb8 100644 --- a/include/migration/misc.h +++ b/include/migration/misc.h @@ -17,6 +17,7 @@ #include "qemu/notify.h" #include "qapi/qapi-types-migration.h" #include "qapi/qapi-types-net.h" +#include "migration/client-options.h" /* migration/ram.c */ @@ -31,7 +32,6 @@ typedef enum PrecopyNotifyReason { typedef struct PrecopyNotifyData { enum PrecopyNotifyReason reason; - Error **errp; } PrecopyNotifyData; void precopy_infrastructure_init(void); @@ -60,20 +60,57 @@ void dump_vmstate_json_to_file(FILE *out_fp); void migration_object_init(void); void migration_shutdown(void); bool migration_is_idle(void); -bool migration_is_active(MigrationState *); -void migration_add_notifier(Notifier *notify, - void (*func)(Notifier *notifier, void *data)); -void migration_remove_notifier(Notifier *notify); -void migration_call_notifiers(MigrationState *s); -bool migration_in_setup(MigrationState *); -bool migration_has_finished(MigrationState *); -bool migration_has_failed(MigrationState *); -/* ...and after the device transmission */ -bool migration_in_postcopy_after_devices(MigrationState *); +bool migration_is_active(void); +bool migration_is_device(void); +bool migration_thread_is_self(void); +bool migration_is_setup_or_active(void); + +typedef enum MigrationEventType { + MIG_EVENT_PRECOPY_SETUP, + MIG_EVENT_PRECOPY_DONE, + MIG_EVENT_PRECOPY_FAILED, + MIG_EVENT_MAX +} MigrationEventType; + +typedef struct MigrationEvent { + MigrationEventType type; +} MigrationEvent; + +/* + * A MigrationNotifyFunc may return an error code and an Error object, + * but only when @e->type is MIG_EVENT_PRECOPY_SETUP. The code is an int + * to allow for different failure modes and recovery actions. + */ +typedef int (*MigrationNotifyFunc)(NotifierWithReturn *notify, + MigrationEvent *e, Error **errp); + +/* + * Register the notifier @notify to be called when a migration event occurs + * for MIG_MODE_NORMAL, as specified by the MigrationEvent passed to @func. + * Notifiers may receive events in any of the following orders: + * - MIG_EVENT_PRECOPY_SETUP -> MIG_EVENT_PRECOPY_DONE + * - MIG_EVENT_PRECOPY_SETUP -> MIG_EVENT_PRECOPY_FAILED + * - MIG_EVENT_PRECOPY_FAILED + */ +void migration_add_notifier(NotifierWithReturn *notify, + MigrationNotifyFunc func); + +/* + * Same as migration_add_notifier, but applies to be specified @mode. 
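For illustration only (not part of the patch), a client of the reworked NotifierWithReturn-based API might look like the sketch below; the names are hypothetical. As the comment above states, only a MIG_EVENT_PRECOPY_SETUP event may fail and report an error through errp:

```c
#include "qemu/osdep.h"
#include "migration/misc.h"

static NotifierWithReturn example_mig_notifier;

static int example_mig_hook(NotifierWithReturn *notify, MigrationEvent *e,
                            Error **errp)
{
    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        /* The only event type allowed to fail; errp may be set here */
        return 0;
    }
    /* DONE/FAILED are informational and must return success */
    return 0;
}

static void example_mig_register(void)
{
    migration_add_notifier(&example_mig_notifier, example_mig_hook);
}
```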
+ */ +void migration_add_notifier_mode(NotifierWithReturn *notify, + MigrationNotifyFunc func, MigMode mode); + +void migration_remove_notifier(NotifierWithReturn *notify); +bool migration_is_running(void); +void migration_file_set_error(int err); + /* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */ bool migration_in_incoming_postcopy(void); + /* True if incoming migration entered POSTCOPY_INCOMING_ADVISE */ bool migration_incoming_postcopy_advised(void); + /* True if background snapshot is active */ bool migration_in_bg_snapshot(void); diff --git a/include/migration/qemu-file-types.h b/include/migration/qemu-file-types.h index 9ba163f333e..adec5abc071 100644 --- a/include/migration/qemu-file-types.h +++ b/include/migration/qemu-file-types.h @@ -50,6 +50,8 @@ unsigned int qemu_get_be16(QEMUFile *f); unsigned int qemu_get_be32(QEMUFile *f); uint64_t qemu_get_be64(QEMUFile *f); +bool qemu_file_is_seekable(QEMUFile *f); + static inline void qemu_put_be64s(QEMUFile *f, const uint64_t *pv) { qemu_put_be64(f, *pv); diff --git a/include/migration/register.h b/include/migration/register.h index fed1d04a3c3..d7b70a8be68 100644 --- a/include/migration/register.h +++ b/include/migration/register.h @@ -16,76 +16,287 @@ #include "hw/vmstate-if.h" +/** + * struct SaveVMHandlers: handler structure to finely control + * migration of complex subsystems and devices, such as RAM, block and + * VFIO. + */ typedef struct SaveVMHandlers { - /* This runs inside the iothread lock. */ - SaveStateHandler *save_state; - /* - * save_prepare is called early, even before migration starts, and can be - * used to perform early checks. + /* The following handlers run inside the BQL. */ + + /** + * @save_state + * + * Saves state section on the source using the latest state format + * version. + * + * Legacy method. Should be deprecated when all users are ported + * to VMStateDescription. + * + * @f: QEMUFile where to send the data + * @opaque: data pointer passed to register_savevm_live() + */ + void (*save_state)(QEMUFile *f, void *opaque); + + /** + * @save_prepare + * + * Called early, even before migration starts, and can be used to + * perform early checks. + * + * @opaque: data pointer passed to register_savevm_live() + * @errp: pointer to Error*, to store an error if it happens. + * + * Returns zero to indicate success and negative for error */ int (*save_prepare)(void *opaque, Error **errp); + + /** + * @save_setup + * + * Initializes the data structures on the source and transmits + * first section containing information on the device + * + * @f: QEMUFile where to send the data + * @opaque: data pointer passed to register_savevm_live() + * + * Returns zero to indicate success and negative for error + */ int (*save_setup)(QEMUFile *f, void *opaque); + + /** + * @save_cleanup + * + * Uninitializes the data structures on the source + * + * @opaque: data pointer passed to register_savevm_live() + */ void (*save_cleanup)(void *opaque); + + /** + * @save_live_complete_postcopy + * + * Called at the end of postcopy for all postcopyable devices. + * + * @f: QEMUFile where to send the data + * @opaque: data pointer passed to register_savevm_live() + * + * Returns zero to indicate success and negative for error + */ int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque); + + /** + * @save_live_complete_precopy + * + * Transmits the last section for the device containing any + * remaining data at the end of a precopy phase. 
When postcopy is + * enabled, devices that support postcopy will skip this step, + * where the final data will be flushed at the end of postcopy via + * @save_live_complete_postcopy instead. + * + * @f: QEMUFile where to send the data + * @opaque: data pointer passed to register_savevm_live() + * + * Returns zero to indicate success and negative for error + */ int (*save_live_complete_precopy)(QEMUFile *f, void *opaque); - /* This runs both outside and inside the iothread lock. */ + /* This runs both outside and inside the BQL. */ + + /** + * @is_active + * + * Will skip a state section if not active + * + * @opaque: data pointer passed to register_savevm_live() + * + * Returns true if state section is active else false + */ bool (*is_active)(void *opaque); + + /** + * @has_postcopy + * + * Checks if a device supports postcopy + * + * @opaque: data pointer passed to register_savevm_live() + * + * Returns true for postcopy support else false + */ bool (*has_postcopy)(void *opaque); - /* is_active_iterate - * If it is not NULL then qemu_savevm_state_iterate will skip iteration if - * it returns false. For example, it is needed for only-postcopy-states, - * which needs to be handled by qemu_savevm_state_setup and - * qemu_savevm_state_pending, but do not need iterations until not in - * postcopy stage. + /** + * @is_active_iterate + * + * As #SaveVMHandlers.is_active(), will skip an inactive state + * section in qemu_savevm_state_iterate. + * + * For example, it is needed for only-postcopy-states, which needs + * to be handled by qemu_savevm_state_setup() and + * qemu_savevm_state_pending(), but do not need iterations until + * not in postcopy stage. + * + * @opaque: data pointer passed to register_savevm_live() + * + * Returns true if state section is active else false */ bool (*is_active_iterate)(void *opaque); - /* This runs outside the iothread lock in the migration case, and + /* This runs outside the BQL in the migration case, and * within the lock in the savevm case. The callback had better only * use data that is local to the migration thread or protected * by other locks. */ + + /** + * @save_live_iterate + * + * Should send a chunk of data until the point that stream + * bandwidth limits tell it to stop. Each call generates one + * section. + * + * @f: QEMUFile where to send the data + * @opaque: data pointer passed to register_savevm_live() + * + * Returns 0 to indicate that there is still more data to send, + * 1 that there is no more data to send and + * negative to indicate an error. + */ int (*save_live_iterate)(QEMUFile *f, void *opaque); - /* This runs outside the iothread lock! */ - /* Note for save_live_pending: - * must_precopy: - * - must be migrated in precopy or in stopped state - * - i.e. must be migrated before target start + /* This runs outside the BQL! */ + + /** + * @state_pending_estimate * - * can_postcopy: - * - can migrate in postcopy or in stopped state - * - i.e. can migrate after target start - * - some can also be migrated during precopy (RAM) - * - some must be migrated after source stops (block-dirty-bitmap) + * This estimates the remaining data to transfer * - * Sum of can_postcopy and must_postcopy is the whole amount of + * Sum of @can_postcopy and @must_postcopy is the whole amount of * pending data. + * + * @opaque: data pointer passed to register_savevm_live() + * @must_precopy: amount of data that must be migrated in precopy + * or in stopped state, i.e. that must be migrated + * before target start. 
+ * @can_postcopy: amount of data that can be migrated in postcopy + * or in stopped state, i.e. after target start. + * Some can also be migrated during precopy (RAM). + * Some must be migrated after source stops + * (block-dirty-bitmap) */ - /* This estimates the remaining data to transfer */ void (*state_pending_estimate)(void *opaque, uint64_t *must_precopy, uint64_t *can_postcopy); - /* This calculate the exact remaining data to transfer */ + + /** + * @state_pending_exact + * + * This calculates the exact remaining data to transfer + * + * Sum of @can_postcopy and @must_postcopy is the whole amount of + * pending data. + * + * @opaque: data pointer passed to register_savevm_live() + * @must_precopy: amount of data that must be migrated in precopy + * or in stopped state, i.e. that must be migrated + * before target start. + * @can_postcopy: amount of data that can be migrated in postcopy + * or in stopped state, i.e. after target start. + * Some can also be migrated during precopy (RAM). + * Some must be migrated after source stops + * (block-dirty-bitmap) + */ void (*state_pending_exact)(void *opaque, uint64_t *must_precopy, uint64_t *can_postcopy); - LoadStateHandler *load_state; + + /** + * @load_state + * + * Load sections generated by any of the save functions that + * generate sections. + * + * Legacy method. Should be deprecated when all users are ported + * to VMStateDescription. + * + * @f: QEMUFile where to receive the data + * @opaque: data pointer passed to register_savevm_live() + * @version_id: the maximum version_id supported + * + * Returns zero to indicate success and negative for error + */ + int (*load_state)(QEMUFile *f, void *opaque, int version_id); + + /** + * @load_setup + * + * Initializes the data structures on the destination. + * + * @f: QEMUFile where to receive the data + * @opaque: data pointer passed to register_savevm_live() + * + * Returns zero to indicate success and negative for error + */ int (*load_setup)(QEMUFile *f, void *opaque); + + /** + * @load_cleanup + * + * Uninitializes the data structures on the destination. + * + * @opaque: data pointer passed to register_savevm_live() + * + * Returns zero to indicate success and negative for error + */ int (*load_cleanup)(void *opaque); - /* Called when postcopy migration wants to resume from failure */ + + /** + * @resume_prepare + * + * Called when postcopy migration wants to resume from failure + * + * @s: Current migration state + * @opaque: data pointer passed to register_savevm_live() + * + * Returns zero to indicate success and negative for error + */ int (*resume_prepare)(MigrationState *s, void *opaque); - /* Checks if switchover ack should be used. Called only in dest */ + + /** + * @switchover_ack_needed + * + * Checks if switchover ack should be used. Called only on + * destination. 
+ * + * @opaque: data pointer passed to register_savevm_live() + * + * Returns true if switchover ack should be used and false + * otherwise + */ bool (*switchover_ack_needed)(void *opaque); } SaveVMHandlers; +/** + * register_savevm_live: Register a set of custom migration handlers + * + * @idstr: state section identifier + * @instance_id: instance id + * @version_id: version id supported + * @ops: SaveVMHandlers structure + * @opaque: data pointer passed to SaveVMHandlers handlers + */ int register_savevm_live(const char *idstr, uint32_t instance_id, int version_id, const SaveVMHandlers *ops, void *opaque); +/** + * unregister_savevm: Unregister custom migration handlers + * + * @obj: object associated with state section + * @idstr: state section identifier + * @opaque: data pointer passed to register_savevm_live() + */ void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque); #endif diff --git a/include/migration/snapshot.h b/include/migration/snapshot.h index e72083b117a..9e4dcaaa751 100644 --- a/include/migration/snapshot.h +++ b/include/migration/snapshot.h @@ -16,6 +16,7 @@ #define QEMU_MIGRATION_SNAPSHOT_H #include "qapi/qapi-builtin-types.h" +#include "qapi/qapi-types-run-state.h" /** * save_snapshot: Save an internal snapshot. @@ -61,4 +62,10 @@ bool delete_snapshot(const char *name, bool has_devices, strList *devices, Error **errp); +/** + * load_snapshot_resume: Restore runstate after loading snapshot. + * @state: state to restore + */ +void load_snapshot_resume(RunState state); + #endif diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h index 98219186316..294d2d84862 100644 --- a/include/migration/vmstate.h +++ b/include/migration/vmstate.h @@ -209,7 +209,7 @@ struct VMStateDescription { bool (*dev_unplug_pending)(void *opaque); const VMStateField *fields; - const VMStateDescription **subsections; + const VMStateDescription * const *subsections; }; extern const VMStateInfo vmstate_info_bool; diff --git a/include/net/filter.h b/include/net/filter.h index 27ffc630df4..f15f7932b29 100644 --- a/include/net/filter.h +++ b/include/net/filter.h @@ -9,7 +9,7 @@ #ifndef QEMU_NET_FILTER_H #define QEMU_NET_FILTER_H -#include "qapi/qapi-types-net.h" +#include "qapi/qapi-types-common.h" #include "qemu/queue.h" #include "qom/object.h" #include "net/queue.h" diff --git a/include/net/net.h b/include/net/net.h index ffbd2c8d565..b1f9b35fcca 100644 --- a/include/net/net.h +++ b/include/net/net.h @@ -203,11 +203,69 @@ void qemu_set_vnet_hdr_len(NetClientState *nc, int len); int qemu_set_vnet_le(NetClientState *nc, bool is_le); int qemu_set_vnet_be(NetClientState *nc, bool is_be); void qemu_macaddr_default_if_unset(MACAddr *macaddr); -int qemu_show_nic_models(const char *arg, const char *const *models); -void qemu_check_nic_model(NICInfo *nd, const char *model); -int qemu_find_nic_model(NICInfo *nd, const char * const *models, - const char *default_model); +/** + * qemu_find_nic_info: Obtain NIC configuration information + * @typename: Name of device object type + * @match_default: Match NIC configurations with no model specified + * @alias: Additional model string to match (for user convenience and + * backward compatibility). + * + * Search for a NIC configuration matching the NIC model constraints. 
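As a usage illustration of the handlers documented above (not part of this patch, names hypothetical), a minimal non-iterative client only needs the legacy save/load hooks and a register_savevm_live() call:

```c
#include "qemu/osdep.h"
#include "migration/register.h"
#include "migration/qemu-file-types.h"

static uint32_t example_counter;

static void example_save_state(QEMUFile *f, void *opaque)
{
    uint32_t *counter = opaque;

    qemu_put_be32(f, *counter);
}

static int example_load_state(QEMUFile *f, void *opaque, int version_id)
{
    uint32_t *counter = opaque;

    *counter = qemu_get_be32(f);
    return 0;
}

static const SaveVMHandlers example_handlers = {
    .save_state = example_save_state,
    .load_state = example_load_state,
};

static void example_register(void)
{
    /* idstr "example", instance 0, version 1 */
    register_savevm_live("example", 0, 1, &example_handlers, &example_counter);
}
```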
+ */ +NICInfo *qemu_find_nic_info(const char *typename, bool match_default, + const char *alias); +/** + * qemu_configure_nic_device: Apply NIC configuration to a given device + * @dev: Network device to be configured + * @match_default: Match NIC configurations with no model specified + * @alias: Additional model string to match + * + * Search for a NIC configuration for the provided device, using the + * additionally specified matching constraints. If found, apply the + * configuration using qdev_set_nic_properties() and return %true. + * + * This is used by platform code which creates the device anyway, + * regardless of whether there is a configuration for it. This tends + * to be platforms which ignore `--nodefaults` and create net devices + * anyway, for example because the Ethernet device on that board is + * always physically present. + */ +bool qemu_configure_nic_device(DeviceState *dev, bool match_default, + const char *alias); +/** + * qemu_create_nic_device: Create a NIC device if a configuration exists for it + * @typename: Object typename of network device + * @match_default: Match NIC configurations with no model specified + * @alias: Additional model string to match + * + * Search for a NIC configuration for the provided device type. If found, + * create an object of the corresponding type and return it. + */ +DeviceState *qemu_create_nic_device(const char *typename, bool match_default, + const char *alias); + +/* + * qemu_create_nic_bus_devices: Create configured NIC devices for a given bus + * @bus: Bus on which to create devices + * @parent_type: Object type for devices to be created (e.g. TYPE_PCI_DEVICE) + * @default_model: Object type name for default NIC model (or %NULL) + * @alias: Additional model string to replace, for user convenience + * @alias_target: Actual object type name to be used in place of @alias + * + * Instantiate dynamic NICs on a given bus, typically a PCI bus. This scans + * for available NIC configurations which either specify a model which is + * a child type of @parent_type, or which do not specify a model when + * @default_model is non-NULL. Each device is instantiated on the given @bus. + * + * A single substitution is supported, e.g. "xen" → "xen-net-device" for the + * Xen bus, or "virtio" → "virtio-net-pci" for PCI. This allows the user to + * specify a more understandable "model=" parameter on the command line, not + * only the real object typename. 
+ */ +void qemu_create_nic_bus_devices(BusState *bus, const char *parent_type, + const char *default_model, + const char *alias, const char *alias_target); void print_net_client(Monitor *mon, NetClientState *nc); void net_socket_rs_init(SocketReadState *rs, SocketReadStateFinalize *finalize, @@ -243,10 +301,6 @@ struct NICInfo { int nvectors; }; -extern int nb_nics; -extern NICInfo nd_table[MAX_NICS]; -extern const char *host_net_devices[]; - /* from net.c */ extern NetClientStateList net_clients; bool netdev_is_modern(const char *optstr); diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h index c37aba35e65..c6a5361a2ae 100644 --- a/include/net/vhost_net.h +++ b/include/net/vhost_net.h @@ -4,9 +4,6 @@ #include "net/net.h" #include "hw/virtio/vhost-backend.h" -#define VHOST_NET_INIT_FAILED \ - "vhost-net requested but could not be initialized" - struct vhost_net; typedef struct vhost_net VHostNetState; diff --git a/include/qapi/error.h b/include/qapi/error.h index f21a231bb1a..71f8fb2c50e 100644 --- a/include/qapi/error.h +++ b/include/qapi/error.h @@ -207,7 +207,7 @@ * * Without ERRP_GUARD(), use of the @errp parameter is restricted: * - It must not be dereferenced, because it may be null. - * - It should not be passed to error_prepend() or + * - It should not be passed to error_prepend(), error_vprepend(), or * error_append_hint(), because that doesn't work with &error_fatal. * ERRP_GUARD() lifts these restrictions. * diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h index 8dd9fcb071a..0c2689cf8ad 100644 --- a/include/qapi/qmp/qerror.h +++ b/include/qapi/qmp/qerror.h @@ -23,9 +23,6 @@ #define QERR_DEVICE_HAS_NO_MEDIUM \ "Device '%s' has no medium" -#define QERR_DEVICE_IN_USE \ - "Device '%s' is in use" - #define QERR_DEVICE_NO_HOTPLUG \ "Device '%s' does not support hotplugging" diff --git a/include/qapi/string-output-visitor.h b/include/qapi/string-output-visitor.h index 268dfe9986b..b1ee473b306 100644 --- a/include/qapi/string-output-visitor.h +++ b/include/qapi/string-output-visitor.h @@ -26,9 +26,9 @@ typedef struct StringOutputVisitor StringOutputVisitor; * If everything else succeeds, pass @result to visit_complete() to * collect the result of the visit. * - * The string output visitor does not implement support for visiting - * QAPI structs, alternates, null, or arbitrary QTypes. It also - * requires a non-null list argument to visit_start_list(). + * The string output visitor does not implement support for alternates, null, + * or arbitrary QTypes. Struct fields are not shown. It also requires a + * non-null list argument to visit_start_list(). */ Visitor *string_output_visitor_new(bool human, char **result); diff --git a/include/qapi/type-helpers.h b/include/qapi/type-helpers.h index be1f1815264..fc8352cdec0 100644 --- a/include/qapi/type-helpers.h +++ b/include/qapi/type-helpers.h @@ -12,3 +12,11 @@ #include "qapi/qapi-types-common.h" HumanReadableText *human_readable_text_from_str(GString *str); + +/* + * Produce and return a NULL-terminated array of strings from @list. + * The result is g_malloc()'d and all strings are g_strdup()'d. It + * can be freed with g_strfreev(), or by g_auto(GStrv) automatic + * cleanup. 
+ */ +char **strv_from_str_list(const strList *list); diff --git a/include/qapi/util.h b/include/qapi/util.h index 81a2b13a333..20dfea8a545 100644 --- a/include/qapi/util.h +++ b/include/qapi/util.h @@ -56,4 +56,17 @@ int parse_qapi_name(const char *name, bool complete); (tail) = &(*(tail))->next; \ } while (0) +/* + * For any GenericList @list, return its length. + */ +#define QAPI_LIST_LENGTH(list) \ + ({ \ + size_t _len = 0; \ + typeof(list) _tail; \ + for (_tail = list; _tail != NULL; _tail = _tail->next) { \ + _len++; \ + } \ + _len; \ + }) + #endif diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h index d53a84c9ba4..27b85d4700f 100644 --- a/include/qapi/visitor.h +++ b/include/qapi/visitor.h @@ -39,7 +39,7 @@ * limitations; see the documentation for each visitor for more * details on what it supports. Also, see visitor-impl.h for the * callback contracts implemented by each visitor, and - * docs/devel/qapi-code-gen.txt for more about the QAPI code + * docs/devel/qapi-code-gen.rst for more about the QAPI code * generator. * * All of the visitors are created via: diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h index f1d3d1702a9..99110abefb3 100644 --- a/include/qemu/atomic.h +++ b/include/qemu/atomic.h @@ -202,7 +202,7 @@ qatomic_xchg__nocheck(ptr, i); \ }) -/* Returns the eventual value, failed or not */ +/* Returns the old value of '*ptr' (whether the cmpxchg failed or not) */ #define qatomic_cmpxchg__nocheck(ptr, old, new) ({ \ typeof_strip_qual(*ptr) _old = (old); \ (void)__atomic_compare_exchange_n(ptr, &_old, new, false, \ diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h index cb3526d1f44..2c0a2fe7512 100644 --- a/include/qemu/bitops.h +++ b/include/qemu/bitops.h @@ -67,6 +67,19 @@ static inline void clear_bit(long nr, unsigned long *addr) *p &= ~mask; } +/** + * clear_bit_atomic - Clears a bit in memory atomically + * @nr: Bit to clear + * @addr: Address to start counting from + */ +static inline void clear_bit_atomic(long nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + + return qatomic_and(p, ~mask); +} + /** * change_bit - Toggle a bit in memory * @nr: Bit to change diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h index 933a66ee87e..bd67468e5e4 100644 --- a/include/qemu/bswap.h +++ b/include/qemu/bswap.h @@ -145,14 +145,14 @@ CPU_CONVERT(le, 64, uint64_t) */ #if HOST_BIG_ENDIAN # define const_le64(_x) \ - ((((_x) & 0x00000000000000ffU) << 56) | \ - (((_x) & 0x000000000000ff00U) << 40) | \ - (((_x) & 0x0000000000ff0000U) << 24) | \ - (((_x) & 0x00000000ff000000U) << 8) | \ - (((_x) & 0x000000ff00000000U) >> 8) | \ - (((_x) & 0x0000ff0000000000U) >> 24) | \ - (((_x) & 0x00ff000000000000U) >> 40) | \ - (((_x) & 0xff00000000000000U) >> 56)) + ((((_x) & 0x00000000000000ffULL) << 56) | \ + (((_x) & 0x000000000000ff00ULL) << 40) | \ + (((_x) & 0x0000000000ff0000ULL) << 24) | \ + (((_x) & 0x00000000ff000000ULL) << 8) | \ + (((_x) & 0x000000ff00000000ULL) >> 8) | \ + (((_x) & 0x0000ff0000000000ULL) >> 24) | \ + (((_x) & 0x00ff000000000000ULL) >> 40) | \ + (((_x) & 0xff00000000000000ULL) >> 56)) # define const_le32(_x) \ ((((_x) & 0x000000ffU) << 24) | \ (((_x) & 0x0000ff00U) << 8) | \ diff --git a/include/qemu/chardev_open.h b/include/qemu/chardev_open.h new file mode 100644 index 00000000000..64e8fcfdcb2 --- /dev/null +++ b/include/qemu/chardev_open.h @@ -0,0 +1,16 @@ +/* + * QEMU Chardev Helper + * + * Copyright (C) 2023 Intel Corporation. 
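An illustrative (non-patch) use of the two list helpers added above, for a caller that receives a QAPI strList; the function name is made up:

```c
#include "qemu/osdep.h"
#include "qapi/util.h"
#include "qapi/type-helpers.h"
#include "qapi/qapi-builtin-types.h"

static void strlist_example(const strList *names)
{
    size_t len = QAPI_LIST_LENGTH(names);
    g_auto(GStrv) strv = strv_from_str_list(names);

    /* The result is NULL-terminated, so the array has len + 1 slots */
    g_assert(strv[len] == NULL);
}
```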
+ * + * Authors: Yi Liu + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#ifndef QEMU_CHARDEV_OPEN_H +#define QEMU_CHARDEV_OPEN_H + +int open_cdev(const char *devpath, dev_t cdev); +#endif diff --git a/include/qemu/coroutine-core.h b/include/qemu/coroutine-core.h index 230bb565177..503bad6e0e6 100644 --- a/include/qemu/coroutine-core.h +++ b/include/qemu/coroutine-core.h @@ -22,7 +22,7 @@ * rather than callbacks, for operations that need to give up control while * waiting for events to complete. * - * These functions are re-entrant and may be used outside the global mutex. + * These functions are re-entrant and may be used outside the BQL. * * Functions that execute in coroutine context cannot be called * directly from normal functions. Use @coroutine_fn to mark such diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h index a65be6697f5..e6aff453017 100644 --- a/include/qemu/coroutine.h +++ b/include/qemu/coroutine.h @@ -26,7 +26,7 @@ * rather than callbacks, for operations that need to give up control while * waiting for events to complete. * - * These functions are re-entrant and may be used outside the global mutex. + * These functions are re-entrant and may be used outside the BQL. * * Functions that execute in coroutine context cannot be called * directly from normal functions. Use @coroutine_fn to mark such diff --git a/include/qemu/fifo8.h b/include/qemu/fifo8.h index 16be02f361f..c6295c6ff0c 100644 --- a/include/qemu/fifo8.h +++ b/include/qemu/fifo8.h @@ -71,7 +71,7 @@ uint8_t fifo8_pop(Fifo8 *fifo); * fifo8_pop_buf: * @fifo: FIFO to pop from * @max: maximum number of bytes to pop - * @num: actual number of returned bytes + * @numptr: pointer filled with number of bytes returned (can be NULL) * * Pop a number of elements from the FIFO up to a maximum of max. The buffer * containing the popped data is returned. This buffer points directly into @@ -82,16 +82,43 @@ uint8_t fifo8_pop(Fifo8 *fifo); * around in the ring buffer; in this case only a contiguous part of the data * is returned. * - * The number of valid bytes returned is populated in *num; will always return - * at least 1 byte. max must not be 0 or greater than the number of bytes in - * the FIFO. + * The number of valid bytes returned is populated in *numptr; will always + * return at least 1 byte. max must not be 0 or greater than the number of + * bytes in the FIFO. * * Clients are responsible for checking the availability of requested data * using fifo8_num_used(). * * Returns: A pointer to popped data. */ -const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num); +const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr); + +/** + * fifo8_peek_buf: read upto max bytes from the fifo + * @fifo: FIFO to read from + * @max: maximum number of bytes to peek + * @numptr: pointer filled with number of bytes returned (can be NULL) + * + * Peek into a number of elements from the FIFO up to a maximum of max. + * The buffer containing the data peeked into is returned. This buffer points + * directly into the FIFO backing store. Since data is invalidated once any + * of the fifo8_* APIs are called on the FIFO, it is the caller responsibility + * to access it before doing further API calls. + * + * The function may return fewer bytes than requested when the data wraps + * around in the ring buffer; in this case only a contiguous part of the data + * is returned. 
+ * + * The number of valid bytes returned is populated in *numptr; will always + * return at least 1 byte. max must not be 0 or greater than the number of + * bytes in the FIFO. + * + * Clients are responsible for checking the availability of requested data + * using fifo8_num_used(). + * + * Returns: A pointer to peekable data. + */ +const uint8_t *fifo8_peek_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr); /** * fifo8_reset: diff --git a/include/qemu/help-texts.h b/include/qemu/help-texts.h index d0359f82e08..353ab2ad8b0 100644 --- a/include/qemu/help-texts.h +++ b/include/qemu/help-texts.h @@ -2,7 +2,7 @@ #define QEMU_HELP_TEXTS_H /* Copyright string for -version arguments, About dialogs, etc */ -#define QEMU_COPYRIGHT "Copyright (c) 2003-2023 " \ +#define QEMU_COPYRIGHT "Copyright (c) 2003-2024 " \ "Fabrice Bellard and the QEMU Project developers" /* Bug reporting information for --help arguments, About dialogs, etc */ diff --git a/include/qemu/job.h b/include/qemu/job.h index e502787dd87..2b873f25768 100644 --- a/include/qemu/job.h +++ b/include/qemu/job.h @@ -67,8 +67,6 @@ typedef struct Job { /** * The completion function that will be called when the job completes. - * Called with AioContext lock held, since many callback implementations - * use bdrv_* functions that require to hold the lock. */ BlockCompletionFunc *cb; @@ -264,9 +262,6 @@ struct JobDriver { * * This callback will not be invoked if the job has already failed. * If it fails, abort and then clean will be called. - * - * Called with AioContext lock held, since many callbacs implementations - * use bdrv_* functions that require to hold the lock. */ int (*prepare)(Job *job); @@ -277,9 +272,6 @@ struct JobDriver { * * All jobs will complete with a call to either .commit() or .abort() but * never both. - * - * Called with AioContext lock held, since many callback implementations - * use bdrv_* functions that require to hold the lock. */ void (*commit)(Job *job); @@ -290,9 +282,6 @@ struct JobDriver { * * All jobs will complete with a call to either .commit() or .abort() but * never both. - * - * Called with AioContext lock held, since many callback implementations - * use bdrv_* functions that require to hold the lock. */ void (*abort)(Job *job); @@ -301,9 +290,6 @@ struct JobDriver { * .commit() or .abort(). Regardless of which callback is invoked after * completion, .clean() will always be called, even if the job does not * belong to a transaction group. - * - * Called with AioContext lock held, since many callbacs implementations - * use bdrv_* functions that require to hold the lock. */ void (*clean)(Job *job); @@ -318,17 +304,12 @@ struct JobDriver { * READY). * (If the callback is NULL, the job is assumed to terminate * without I/O.) - * - * Called with AioContext lock held, since many callback implementations - * use bdrv_* functions that require to hold the lock. */ bool (*cancel)(Job *job, bool force); /** * Called when the job is freed. - * Called with AioContext lock held, since many callback implementations - * use bdrv_* functions that require to hold the lock. */ void (*free)(Job *job); }; @@ -424,7 +405,6 @@ void job_ref_locked(Job *job); * Release a reference that was previously acquired with job_ref_locked() or * job_create(). If it's the last reference to the object, it will be freed. * - * Takes AioContext lock internally to invoke a job->driver callback. * Called with job lock held. */ void job_unref_locked(Job *job); @@ -503,7 +483,7 @@ void job_enter(Job *job); * * Called with job_mutex *not* held. 
*/ -void coroutine_fn job_pause_point(Job *job); +void coroutine_fn GRAPH_UNLOCKED job_pause_point(Job *job); /** * @job: The job that calls the function. diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h index 68e70e61aa5..5764db157c9 100644 --- a/include/qemu/main-loop.h +++ b/include/qemu/main-loop.h @@ -248,19 +248,19 @@ GSource *iohandler_get_g_source(void); AioContext *iohandler_get_aio_context(void); /** - * qemu_mutex_iothread_locked: Return lock status of the main loop mutex. + * bql_locked: Return lock status of the Big QEMU Lock (BQL) * - * The main loop mutex is the coarsest lock in QEMU, and as such it + * The Big QEMU Lock (BQL) is the coarsest lock in QEMU, and as such it * must always be taken outside other locks. This function helps * functions take different paths depending on whether the current - * thread is running within the main loop mutex. + * thread is running within the BQL. * * This function should never be used in the block layer, because * unit tests, block layer tools and qemu-storage-daemon do not * have a BQL. * Please instead refer to qemu_in_main_thread(). */ -bool qemu_mutex_iothread_locked(void); +bool bql_locked(void); /** * qemu_in_main_thread: return whether it's possible to safely access @@ -312,78 +312,76 @@ bool qemu_in_main_thread(void); } while (0) /** - * qemu_mutex_lock_iothread: Lock the main loop mutex. + * bql_lock: Lock the Big QEMU Lock (BQL). * - * This function locks the main loop mutex. The mutex is taken by + * This function locks the Big QEMU Lock (BQL). The lock is taken by * main() in vl.c and always taken except while waiting on - * external events (such as with select). The mutex should be taken + * external events (such as with select). The lock should be taken * by threads other than the main loop thread when calling * qemu_bh_new(), qemu_set_fd_handler() and basically all other * functions documented in this file. * - * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread + * NOTE: tools currently are single-threaded and bql_lock * is a no-op there. */ -#define qemu_mutex_lock_iothread() \ - qemu_mutex_lock_iothread_impl(__FILE__, __LINE__) -void qemu_mutex_lock_iothread_impl(const char *file, int line); +#define bql_lock() bql_lock_impl(__FILE__, __LINE__) +void bql_lock_impl(const char *file, int line); /** - * qemu_mutex_unlock_iothread: Unlock the main loop mutex. + * bql_unlock: Unlock the Big QEMU Lock (BQL). * - * This function unlocks the main loop mutex. The mutex is taken by + * This function unlocks the Big QEMU Lock. The lock is taken by * main() in vl.c and always taken except while waiting on - * external events (such as with select). The mutex should be unlocked + * external events (such as with select). The lock should be unlocked * as soon as possible by threads other than the main loop thread, * because it prevents the main loop from processing callbacks, * including timers and bottom halves. * - * NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread + * NOTE: tools currently are single-threaded and bql_unlock * is a no-op there. */ -void qemu_mutex_unlock_iothread(void); +void bql_unlock(void); /** - * QEMU_IOTHREAD_LOCK_GUARD + * BQL_LOCK_GUARD * - * Wrap a block of code in a conditional qemu_mutex_{lock,unlock}_iothread. + * Wrap a block of code in a conditional bql_{lock,unlock}. 
*/ -typedef struct IOThreadLockAuto IOThreadLockAuto; +typedef struct BQLLockAuto BQLLockAuto; -static inline IOThreadLockAuto *qemu_iothread_auto_lock(const char *file, - int line) +static inline BQLLockAuto *bql_auto_lock(const char *file, int line) { - if (qemu_mutex_iothread_locked()) { + if (bql_locked()) { return NULL; } - qemu_mutex_lock_iothread_impl(file, line); + bql_lock_impl(file, line); /* Anything non-NULL causes the cleanup function to be called */ - return (IOThreadLockAuto *)(uintptr_t)1; + return (BQLLockAuto *)(uintptr_t)1; } -static inline void qemu_iothread_auto_unlock(IOThreadLockAuto *l) +static inline void bql_auto_unlock(BQLLockAuto *l) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } -G_DEFINE_AUTOPTR_CLEANUP_FUNC(IOThreadLockAuto, qemu_iothread_auto_unlock) +G_DEFINE_AUTOPTR_CLEANUP_FUNC(BQLLockAuto, bql_auto_unlock) -#define QEMU_IOTHREAD_LOCK_GUARD() \ - g_autoptr(IOThreadLockAuto) _iothread_lock_auto __attribute__((unused)) \ - = qemu_iothread_auto_lock(__FILE__, __LINE__) +#define BQL_LOCK_GUARD() \ + g_autoptr(BQLLockAuto) _bql_lock_auto __attribute__((unused)) \ + = bql_auto_lock(__FILE__, __LINE__) /* - * qemu_cond_wait_iothread: Wait on condition for the main loop mutex + * qemu_cond_wait_bql: Wait on condition for the Big QEMU Lock (BQL) * - * This function atomically releases the main loop mutex and causes + * This function atomically releases the Big QEMU Lock (BQL) and causes * the calling thread to block on the condition. */ -void qemu_cond_wait_iothread(QemuCond *cond); +void qemu_cond_wait_bql(QemuCond *cond); /* - * qemu_cond_timedwait_iothread: like the previous, but with timeout + * qemu_cond_timedwait_bql: like the previous, but with timeout */ -void qemu_cond_timedwait_iothread(QemuCond *cond, int ms); +void qemu_cond_timedwait_bql(QemuCond *cond, int ms); /* internal interfaces */ diff --git a/include/qemu/notify.h b/include/qemu/notify.h index bcfa70fb2ed..abf18dbf595 100644 --- a/include/qemu/notify.h +++ b/include/qemu/notify.h @@ -45,12 +45,16 @@ bool notifier_list_empty(NotifierList *list); /* Same as Notifier but allows .notify() to return errors */ typedef struct NotifierWithReturn NotifierWithReturn; +/* Return int to allow for different failure modes and recovery actions */ +typedef int (*NotifierWithReturnFunc)(NotifierWithReturn *notifier, void *data, + Error **errp); + struct NotifierWithReturn { /** * Return 0 on success (next notifier will be invoked), otherwise * notifier_with_return_list_notify() will stop and return the value. 
*/ - int (*notify)(NotifierWithReturn *notifier, void *data); + NotifierWithReturnFunc notify; QLIST_ENTRY(NotifierWithReturn) node; }; @@ -69,6 +73,6 @@ void notifier_with_return_list_add(NotifierWithReturnList *list, void notifier_with_return_remove(NotifierWithReturn *notifier); int notifier_with_return_list_notify(NotifierWithReturnList *list, - void *data); + void *data, Error **errp); #endif diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index d30ba73eda2..c7053cdc2bc 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -508,11 +508,18 @@ void qemu_anon_ram_free(void *ptr, size_t size); #ifdef _WIN32 #define HAVE_CHARDEV_SERIAL 1 -#elif defined(__linux__) || defined(__sun__) || defined(__FreeBSD__) \ +#define HAVE_CHARDEV_PARALLEL 1 +#else +#if defined(__linux__) || defined(__sun__) || defined(__FreeBSD__) \ || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) \ || defined(__GLIBC__) || defined(__APPLE__) #define HAVE_CHARDEV_SERIAL 1 #endif +#if defined(__linux__) || defined(__FreeBSD__) \ + || defined(__FreeBSD_kernel__) || defined(__DragonFly__) +#define HAVE_CHARDEV_PARALLEL 1 +#endif +#endif #if defined(__HAIKU__) #define SIGIO SIGPOLL @@ -547,6 +554,14 @@ int madvise(char *, size_t, int); # define QEMU_VMALLOC_ALIGN (256 * 4096) #elif defined(__linux__) && defined(__sparc__) # define QEMU_VMALLOC_ALIGN MAX(qemu_real_host_page_size(), SHMLBA) +#elif defined(__linux__) && defined(__loongarch__) + /* + * For transparent hugepage optimization, it has better be huge page + * aligned. LoongArch host system supports two kinds of pagesize: 4K + * and 16K, here calculate huge page size from host page size + */ +# define QEMU_VMALLOC_ALIGN (qemu_real_host_page_size() * \ + qemu_real_host_page_size() / sizeof(long)) #else # define QEMU_VMALLOC_ALIGN qemu_real_host_page_size() #endif @@ -672,15 +687,33 @@ typedef struct ThreadContext ThreadContext; * @area: start address of the are to preallocate * @sz: the size of the area to preallocate * @max_threads: maximum number of threads to use + * @tc: prealloc context threads pointer, NULL if not in use + * @async: request asynchronous preallocation, requires @tc * @errp: returns an error if this function fails * * Preallocate memory (populate/prefault page tables writable) for the virtual * memory area starting at @area with the size of @sz. After a successful call, * each page in the area was faulted in writable at least once, for example, * after allocating file blocks for mapped files. + * + * When setting @async, allocation might be performed asynchronously. + * qemu_finish_async_prealloc_mem() must be called to finish any asynchronous + * preallocation. + * + * Return: true on success, else false setting @errp with error. */ -void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, - ThreadContext *tc, Error **errp); +bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, + ThreadContext *tc, bool async, Error **errp); + +/** + * qemu_finish_async_prealloc_mem: + * @errp: returns an error if this function fails + * + * Finish all outstanding asynchronous memory preallocation. + * + * Return: true on success, else false setting @errp with error. 
+ */ +bool qemu_finish_async_prealloc_mem(Error **errp); /** * qemu_get_pid_name: @@ -779,16 +812,6 @@ static inline int platform_does_not_support_system(const char *command) } #endif /* !HAVE_SYSTEM_FUNCTION */ -/** - * If the load average was unobtainable, -1 is returned - */ -#ifndef HAVE_GETLOADAVG_FUNCTION -static inline int getloadavg(double loadavg[], int nelem) -{ - return -1; -} -#endif /* !HAVE_GETLOADAVG_FUNCTION */ - #ifdef __cplusplus } #endif diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h index 7fdc3a4849f..12a96cea2a4 100644 --- a/include/qemu/plugin.h +++ b/include/qemu/plugin.h @@ -73,6 +73,7 @@ enum plugin_dyn_cb_type { enum plugin_dyn_cb_subtype { PLUGIN_CB_REGULAR, + PLUGIN_CB_REGULAR_R, PLUGIN_CB_INLINE, PLUGIN_N_CB_SUBTYPES, }; @@ -91,6 +92,7 @@ struct qemu_plugin_dyn_cb { /* fields specific to each dyn_cb type go here */ union { struct { + qemu_plugin_u64 entry; enum qemu_plugin_op op; uint64_t imm; } inline_insn; @@ -111,6 +113,12 @@ struct qemu_plugin_insn { bool mem_only; }; +/* A scoreboard is an array of values, indexed by vcpu_index */ +struct qemu_plugin_scoreboard { + GArray *data; + QLIST_ENTRY(qemu_plugin_scoreboard) entry; +}; + /* * qemu_plugin_insn allocate and cleanup functions. We don't expect to * cleanup many of these structures. They are reused for each fresh @@ -185,6 +193,19 @@ struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb, return insn; } +/** + * struct CPUPluginState - per-CPU state for plugins + * @event_mask: plugin event bitmap. Modified only via async work. + */ +struct CPUPluginState { + DECLARE_BITMAP(event_mask, QEMU_PLUGIN_EV_MAX); +}; + +/** + * qemu_plugin_create_vcpu_state: allocate plugin state + */ +CPUPluginState *qemu_plugin_create_vcpu_state(void); + void qemu_plugin_vcpu_init_hook(CPUState *cpu); void qemu_plugin_vcpu_exit_hook(CPUState *cpu); void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb); diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h index 4daab6efd29..4fc6c3739b2 100644 --- a/include/qemu/qemu-plugin.h +++ b/include/qemu/qemu-plugin.h @@ -11,6 +11,7 @@ #ifndef QEMU_QEMU_PLUGIN_H #define QEMU_QEMU_PLUGIN_H +#include #include #include #include @@ -50,11 +51,17 @@ typedef uint64_t qemu_plugin_id_t; * * The plugins export the API they were built against by exposing the * symbol qemu_plugin_version which can be checked. + * + * version 2: + * - removed qemu_plugin_n_vcpus and qemu_plugin_n_max_vcpus + * - Remove qemu_plugin_register_vcpu_{tb, insn, mem}_exec_inline. + * Those functions are replaced by *_per_vcpu variants, which guarantee + * thread-safety for operations. */ extern QEMU_PLUGIN_EXPORT int qemu_plugin_version; -#define QEMU_PLUGIN_VERSION 1 +#define QEMU_PLUGIN_VERSION 2 /** * struct qemu_info_t - system information for plugins @@ -219,6 +226,19 @@ void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id, struct qemu_plugin_tb; /** struct qemu_plugin_insn - Opaque handle for a translated instruction */ struct qemu_plugin_insn; +/** struct qemu_plugin_scoreboard - Opaque handle for a scoreboard */ +struct qemu_plugin_scoreboard; + +/** + * typedef qemu_plugin_u64 - uint64_t member of an entry in a scoreboard + * + * This field allows to access a specific uint64_t member in one given entry, + * located at a specified offset. Inline operations expect this as entry. 
+ */ +typedef struct { + struct qemu_plugin_scoreboard *score; + size_t offset; +} qemu_plugin_u64; /** * enum qemu_plugin_cb_flags - type of callback @@ -227,8 +247,8 @@ struct qemu_plugin_insn; * @QEMU_PLUGIN_CB_R_REGS: callback reads the CPU's regs * @QEMU_PLUGIN_CB_RW_REGS: callback reads and writes the CPU's regs * - * Note: currently unused, plugins cannot read or change system - * register state. + * Note: currently QEMU_PLUGIN_CB_RW_REGS is unused, plugins cannot change + * system register state. */ enum qemu_plugin_cb_flags { QEMU_PLUGIN_CB_NO_REGS, @@ -294,23 +314,20 @@ enum qemu_plugin_op { }; /** - * qemu_plugin_register_vcpu_tb_exec_inline() - execution inline op + * qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu() - execution inline op * @tb: the opaque qemu_plugin_tb handle for the translation * @op: the type of qemu_plugin_op (e.g. ADD_U64) - * @ptr: the target memory location for the op + * @entry: entry to run op * @imm: the op data (e.g. 1) * - * Insert an inline op to every time a translated unit executes. - * Useful if you just want to increment a single counter somewhere in - * memory. - * - * Note: ops are not atomic so in multi-threaded/multi-smp situations - * you will get inexact results. + * Insert an inline op on a given scoreboard entry. */ QEMU_PLUGIN_API -void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb, - enum qemu_plugin_op op, - void *ptr, uint64_t imm); +void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + struct qemu_plugin_tb *tb, + enum qemu_plugin_op op, + qemu_plugin_u64 entry, + uint64_t imm); /** * qemu_plugin_register_vcpu_insn_exec_cb() - register insn execution cb @@ -328,19 +345,20 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn, void *userdata); /** - * qemu_plugin_register_vcpu_insn_exec_inline() - insn execution inline op + * qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu() - insn exec inline op * @insn: the opaque qemu_plugin_insn handle for an instruction * @op: the type of qemu_plugin_op (e.g. ADD_U64) - * @ptr: the target memory location for the op + * @entry: entry to run op * @imm: the op data (e.g. 1) * - * Insert an inline op to every time an instruction executes. Useful - * if you just want to increment a single counter somewhere in memory. + * Insert an inline op to every time an instruction executes. */ QEMU_PLUGIN_API -void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn, - enum qemu_plugin_op op, - void *ptr, uint64_t imm); +void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu( + struct qemu_plugin_insn *insn, + enum qemu_plugin_op op, + qemu_plugin_u64 entry, + uint64_t imm); /** * qemu_plugin_tb_n_insns() - query helper for number of insns in TB @@ -550,24 +568,23 @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn, void *userdata); /** - * qemu_plugin_register_vcpu_mem_inline() - register an inline op to any memory access + * qemu_plugin_register_vcpu_mem_inline_per_vcpu() - inline op for mem access * @insn: handle for instruction to instrument * @rw: apply to reads, writes or both * @op: the op, of type qemu_plugin_op - * @ptr: pointer memory for the op + * @entry: entry to run op * @imm: immediate data for @op * * This registers a inline op every memory access generated by the - * instruction. This provides for a lightweight but not thread-safe - * way of counting the number of operations done. + * instruction. 
*/ QEMU_PLUGIN_API -void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn, - enum qemu_plugin_mem_rw rw, - enum qemu_plugin_op op, void *ptr, - uint64_t imm); - - +void qemu_plugin_register_vcpu_mem_inline_per_vcpu( + struct qemu_plugin_insn *insn, + enum qemu_plugin_mem_rw rw, + enum qemu_plugin_op op, + qemu_plugin_u64 entry, + uint64_t imm); typedef void (*qemu_plugin_vcpu_syscall_cb_t)(qemu_plugin_id_t id, unsigned int vcpu_index, @@ -643,11 +660,8 @@ QEMU_PLUGIN_API void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id, qemu_plugin_udata_cb_t cb, void *userdata); -/* returns -1 in user-mode */ -int qemu_plugin_n_vcpus(void); - -/* returns -1 in user-mode */ -int qemu_plugin_n_max_vcpus(void); +/* returns how many vcpus were started at this point */ +int qemu_plugin_num_vcpus(void); /** * qemu_plugin_outs() - output string via QEMU's logging system @@ -708,4 +722,119 @@ uint64_t qemu_plugin_end_code(void); QEMU_PLUGIN_API uint64_t qemu_plugin_entry_code(void); +/** struct qemu_plugin_register - Opaque handle for register access */ +struct qemu_plugin_register; + +/** + * typedef qemu_plugin_reg_descriptor - register descriptions + * + * @handle: opaque handle for retrieving value with qemu_plugin_read_register + * @name: register name + * @feature: optional feature descriptor, can be NULL + */ +typedef struct { + struct qemu_plugin_register *handle; + const char *name; + const char *feature; +} qemu_plugin_reg_descriptor; + +/** + * qemu_plugin_get_registers() - return register list for current vCPU + * + * Returns a potentially empty GArray of qemu_plugin_reg_descriptor. + * Caller frees the array (but not the const strings). + * + * Should be used from a qemu_plugin_register_vcpu_init_cb() callback + * after the vCPU is initialised, i.e. in the vCPU context. + */ +QEMU_PLUGIN_API +GArray *qemu_plugin_get_registers(void); + +/** + * qemu_plugin_read_register() - read register for current vCPU + * + * @handle: a @qemu_plugin_reg_handle handle + * @buf: A GByteArray for the data owned by the plugin + * + * This function is only available in a context that register read access is + * explicitly requested via the QEMU_PLUGIN_CB_R_REGS flag. + * + * Returns the size of the read register. The content of @buf is in target byte + * order. On failure returns -1. + */ +QEMU_PLUGIN_API +int qemu_plugin_read_register(struct qemu_plugin_register *handle, + GByteArray *buf); + +/** + * qemu_plugin_scoreboard_new() - alloc a new scoreboard + * + * @element_size: size (in bytes) for one entry + * + * Returns a pointer to a new scoreboard. It must be freed using + * qemu_plugin_scoreboard_free. + */ +QEMU_PLUGIN_API +struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size); + +/** + * qemu_plugin_scoreboard_free() - free a scoreboard + * @score: scoreboard to free + */ +QEMU_PLUGIN_API +void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score); + +/** + * qemu_plugin_scoreboard_find() - get pointer to an entry of a scoreboard + * @score: scoreboard to query + * @vcpu_index: entry index + * + * Returns address of entry of a scoreboard matching a given vcpu_index. This + * address can be modified later if scoreboard is resized. 
+ */ +QEMU_PLUGIN_API +void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score, + unsigned int vcpu_index); + +/* Macros to define a qemu_plugin_u64 */ +#define qemu_plugin_scoreboard_u64(score) \ + (qemu_plugin_u64) {score, 0} +#define qemu_plugin_scoreboard_u64_in_struct(score, type, member) \ + (qemu_plugin_u64) {score, offsetof(type, member)} + +/** + * qemu_plugin_u64_add() - add a value to a qemu_plugin_u64 for a given vcpu + * @entry: entry to query + * @vcpu_index: entry index + * @added: value to add + */ +QEMU_PLUGIN_API +void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index, + uint64_t added); + +/** + * qemu_plugin_u64_get() - get value of a qemu_plugin_u64 for a given vcpu + * @entry: entry to query + * @vcpu_index: entry index + */ +QEMU_PLUGIN_API +uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry, unsigned int vcpu_index); + +/** + * qemu_plugin_u64_set() - set value of a qemu_plugin_u64 for a given vcpu + * @entry: entry to query + * @vcpu_index: entry index + * @val: new value + */ +QEMU_PLUGIN_API +void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index, + uint64_t val); + +/** + * qemu_plugin_u64_sum() - return sum of all vcpu entries in a scoreboard + * @entry: entry to sum + */ +QEMU_PLUGIN_API +uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry); + #endif /* QEMU_QEMU_PLUGIN_H */ diff --git a/include/qemu/qtree.h b/include/qemu/qtree.h index 69fe74b50d0..dc2b14d2582 100644 --- a/include/qemu/qtree.h +++ b/include/qemu/qtree.h @@ -42,7 +42,6 @@ #ifndef QEMU_QTREE_H #define QEMU_QTREE_H -#include "qemu/osdep.h" #ifdef HAVE_GLIB_WITH_SLICE_ALLOCATOR diff --git a/include/qemu/thread.h b/include/qemu/thread.h index dd3822d7cee..fb74e21c08a 100644 --- a/include/qemu/thread.h +++ b/include/qemu/thread.h @@ -47,7 +47,7 @@ typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f, typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms, const char *f, int l); -extern QemuMutexLockFunc qemu_bql_mutex_lock_func; +extern QemuMutexLockFunc bql_mutex_lock_func; extern QemuMutexLockFunc qemu_mutex_lock_func; extern QemuMutexTrylockFunc qemu_mutex_trylock_func; extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func; diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h index 5abdbc38747..50c277cf0b4 100644 --- a/include/qemu/typedefs.h +++ b/include/qemu/typedefs.h @@ -42,6 +42,7 @@ typedef struct CompatProperty CompatProperty; typedef struct ConfidentialGuestSupport ConfidentialGuestSupport; typedef struct CPUAddressSpace CPUAddressSpace; typedef struct CPUArchState CPUArchState; +typedef struct CPUPluginState CPUPluginState; typedef struct CpuInfoFast CpuInfoFast; typedef struct CPUJumpCache CPUJumpCache; typedef struct CPUState CPUState; @@ -131,6 +132,7 @@ typedef struct Range Range; typedef struct ReservedRegion ReservedRegion; typedef struct SHPCDevice SHPCDevice; typedef struct SSIBus SSIBus; +typedef struct TCGCPUOps TCGCPUOps; typedef struct TCGHelperInfo TCGHelperInfo; typedef struct TranslationBlock TranslationBlock; typedef struct VirtIODevice VirtIODevice; @@ -149,8 +151,6 @@ typedef struct IRQState *qemu_irq; /* * Function types */ -typedef void SaveStateHandler(QEMUFile *f, void *opaque); -typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id); typedef void (*qemu_irq_handler)(void *opaque, int n, int level); #endif /* QEMU_TYPEDEFS_H */ diff --git a/include/qemu/uri.h b/include/qemu/uri.h index 1855b764f2a..255e61f452d 100644 --- a/include/qemu/uri.h +++ 
b/include/qemu/uri.h @@ -72,14 +72,10 @@ typedef struct URI { } URI; URI *uri_new(void); -char *uri_resolve(const char *URI, const char *base); -char *uri_resolve_relative(const char *URI, const char *base); URI *uri_parse(const char *str); URI *uri_parse_raw(const char *str, int raw); int uri_parse_into(URI *uri, const char *str); char *uri_to_string(URI *uri); -char *uri_string_escape(const char *str, const char *list); -char *uri_string_unescape(const char *str, int len, char *target); void uri_free(URI *uri); /* Single web service query parameter 'name=value'. */ diff --git a/include/qemu/yank.h b/include/qemu/yank.h index 19071509336..3d88af6996f 100644 --- a/include/qemu/yank.h +++ b/include/qemu/yank.h @@ -45,7 +45,7 @@ void yank_unregister_instance(const YankInstance *instance); * yank_register_function: Register a yank function * * This registers a yank function. All limitations of qmp oob commands apply - * to the yank function as well. See docs/devel/qapi-code-gen.txt under + * to the yank function as well. See docs/devel/qapi-code-gen.rst under * "An OOB-capable command handler must satisfy the following conditions". * * This function is thread-safe. diff --git a/include/qom/object.h b/include/qom/object.h index afccd24ca7a..13d3a655ddf 100644 --- a/include/qom/object.h +++ b/include/qom/object.h @@ -259,31 +259,23 @@ struct Object /** - * OBJECT_DEFINE_TYPE_EXTENDED: + * DO_OBJECT_DEFINE_TYPE_EXTENDED: * @ModuleObjName: the object name with initial caps * @module_obj_name: the object name in lowercase with underscore separators * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore * separators * @ABSTRACT: boolean flag to indicate whether the object can be instantiated + * @CLASS_SIZE: size of the type's class * @...: list of initializers for "InterfaceInfo" to declare implemented interfaces * - * This macro is typically used in a source file, and will: - * - * - declare prototypes for _finalize, _class_init and _init methods - * - declare the TypeInfo struct instance - * - provide the constructor to register the type - * - * After using this macro, implementations of the _finalize, _class_init, - * and _init methods need to be written. Any of these can be zero-line - * no-op impls if no special logic is required for a given type. - * - * This macro should rarely be used, instead one of the more specialized - * macros is usually a better choice. + * This is the base macro used to implement all the OBJECT_DEFINE_* + * macros. It should never be used directly in a source file. */ -#define OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \ - MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \ - ABSTRACT, ...) \ +#define DO_OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \ + MODULE_OBJ_NAME, \ + PARENT_MODULE_OBJ_NAME, \ + ABSTRACT, CLASS_SIZE, ...) 
\ static void \ module_obj_name##_finalize(Object *obj); \ static void \ @@ -298,7 +290,7 @@ struct Object .instance_align = __alignof__(ModuleObjName), \ .instance_init = module_obj_name##_init, \ .instance_finalize = module_obj_name##_finalize, \ - .class_size = sizeof(ModuleObjName##Class), \ + .class_size = CLASS_SIZE, \ .class_init = module_obj_name##_class_init, \ .abstract = ABSTRACT, \ .interfaces = (InterfaceInfo[]) { __VA_ARGS__ } , \ @@ -311,6 +303,37 @@ struct Object } \ type_init(module_obj_name##_register_types); +/** + * OBJECT_DEFINE_TYPE_EXTENDED: + * @ModuleObjName: the object name with initial caps + * @module_obj_name: the object name in lowercase with underscore separators + * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators + * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore + * separators + * @ABSTRACT: boolean flag to indicate whether the object can be instantiated + * @...: list of initializers for "InterfaceInfo" to declare implemented interfaces + * + * This macro is typically used in a source file, and will: + * + * - declare prototypes for _finalize, _class_init and _init methods + * - declare the TypeInfo struct instance + * - provide the constructor to register the type + * + * After using this macro, implementations of the _finalize, _class_init, + * and _init methods need to be written. Any of these can be zero-line + * no-op impls if no special logic is required for a given type. + * + * This macro should rarely be used, instead one of the more specialized + * macros is usually a better choice. + */ +#define OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \ + MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \ + ABSTRACT, ...) \ + DO_OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \ + MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \ + ABSTRACT, sizeof(ModuleObjName##Class), \ + __VA_ARGS__) + /** * OBJECT_DEFINE_TYPE: * @ModuleObjName: the object name with initial caps @@ -368,6 +391,45 @@ struct Object MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \ true, { NULL }) +/** + * OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES: + * @ModuleObjName: the object name with initial caps + * @module_obj_name: the object name in lowercase with underscore separators + * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators + * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore + * separators + * + * This is a variant of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable for + * the case of a non-abstract type, with interfaces, and with no requirement + * for a class struct. + */ +#define OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(ModuleObjName, \ + module_obj_name, \ + MODULE_OBJ_NAME, \ + PARENT_MODULE_OBJ_NAME, ...) \ + DO_OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \ + MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \ + false, 0, __VA_ARGS__) + +/** + * OBJECT_DEFINE_SIMPLE_TYPE: + * @ModuleObjName: the object name with initial caps + * @module_obj_name: the object name in lowercase with underscore separators + * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators + * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore + * separators + * + * This is a variant of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable for + * the common case of a non-abstract type, without any interfaces, and with + * no requirement for a class struct. 
If you declared your type with + * OBJECT_DECLARE_SIMPLE_TYPE then this is probably the right choice for + * defining it. + */ +#define OBJECT_DEFINE_SIMPLE_TYPE(ModuleObjName, module_obj_name, \ + MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME) \ + OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(ModuleObjName, module_obj_name, \ + MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, { NULL }) + /** * struct TypeInfo: * @name: The name of the type. @@ -1550,6 +1612,19 @@ Object *object_resolve_path(const char *path, bool *ambiguous); Object *object_resolve_path_type(const char *path, const char *typename, bool *ambiguous); +/** + * object_resolve_type_unambiguous: + * @typename: the type to look for + * @errp: pointer to error object + * + * Return the only object in the QOM tree of type @typename. + * If no match or more than one match is found, an error is + * returned. + * + * Returns: The matched object or NULL on path lookup failure. + */ +Object *object_resolve_type_unambiguous(const char *typename, Error **errp); + /** * object_resolve_path_at: * @parent: the object in which to resolve the path diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h index 72279f4d25d..b72917073d8 100644 --- a/include/standard-headers/drm/drm_fourcc.h +++ b/include/standard-headers/drm/drm_fourcc.h @@ -53,7 +53,7 @@ extern "C" { * Format modifiers may change any property of the buffer, including the number * of planes and/or the required allocation size. Format modifiers are * vendor-namespaced, and as such the relationship between a fourcc code and a - * modifier is specific to the modifer being used. For example, some modifiers + * modifier is specific to the modifier being used. For example, some modifiers * may preserve meaning - such as number of planes - from the fourcc code, * whereas others may not. * @@ -78,7 +78,7 @@ extern "C" { * format. * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users * see modifiers as opaque tokens they can check for equality and intersect. - * These users musn't need to know to reason about the modifier value + * These users mustn't need to know to reason about the modifier value * (i.e. they are not expected to extract information out of the modifier). * * Vendors should document their modifier usage in as much detail as @@ -322,6 +322,8 @@ extern "C" { * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian */ #define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */ /* * 2 plane YCbCr MSB aligned @@ -537,7 +539,7 @@ extern "C" { * This is a tiled layout using 4Kb tiles in row-major layout. * Within the tile pixels are laid out in 16 256 byte units / sub-tiles which * are arranged in four groups (two wide, two high) with column-major layout. - * Each group therefore consits out of four 256 byte units, which are also laid + * Each group therefore consists out of four 256 byte units, which are also laid * out as 2x2 column-major. * 256 byte units are made out of four 64 byte blocks of pixels, producing * either a square block or a 2:1 unit. 
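As a usage sketch for the QOM additions a few hunks above (OBJECT_DEFINE_SIMPLE_TYPE and object_resolve_type_unambiguous()): the names DemoWidget, TYPE_DEMO_WIDGET and demo_widget_find below are hypothetical and not part of this patch; the structure simply follows what the macro documentation promises.

```c
/* Hypothetical example -- not part of this patch. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/object.h"
#include "hw/qdev-core.h"

#define TYPE_DEMO_WIDGET "demo-widget"
OBJECT_DECLARE_SIMPLE_TYPE(DemoWidget, DEMO_WIDGET)

struct DemoWidget {
    DeviceState parent_obj;
    uint32_t counter;
};

/*
 * No class struct is needed, so OBJECT_DEFINE_SIMPLE_TYPE (class_size 0,
 * no extra interfaces) replaces the old OBJECT_DEFINE_TYPE boilerplate.
 * The macro declares the _init/_finalize/_class_init prototypes itself.
 */
OBJECT_DEFINE_SIMPLE_TYPE(DemoWidget, demo_widget, DEMO_WIDGET, DEVICE)

static void demo_widget_init(Object *obj) { }
static void demo_widget_finalize(Object *obj) { }
static void demo_widget_class_init(ObjectClass *oc, void *data) { }

/* If at most one such device can exist, the new helper replaces a
 * hand-rolled QOM tree walk: */
static DemoWidget *demo_widget_find(Error **errp)
{
    return DEMO_WIDGET(object_resolve_type_unambiguous(TYPE_DEMO_WIDGET, errp));
}
```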
@@ -1100,7 +1102,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) */ /* - * The top 4 bits (out of the 56 bits alloted for specifying vendor specific + * The top 4 bits (out of the 56 bits allotted for specifying vendor specific * modifiers) denote the category for modifiers. Currently we have three * categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of * sixteen different categories. @@ -1416,7 +1418,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) * Amlogic FBC Memory Saving mode * * Indicates the storage is packed when pixel size is multiple of word - * boudaries, i.e. 8bit should be stored in this mode to save allocation + * boundaries, i.e. 8bit should be stored in this mode to save allocation * memory. * * This mode reduces body layout to 3072 bytes per 64x32 superblock with diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h index 99fcddf04f8..dfb54eff6f7 100644 --- a/include/standard-headers/linux/ethtool.h +++ b/include/standard-headers/linux/ethtool.h @@ -1266,6 +1266,8 @@ struct ethtool_rxfh_indir { * hardware hash key. * @hfunc: Defines the current RSS hash function used by HW (or to be set to). * Valid values are one of the %ETH_RSS_HASH_*. + * @input_xfrm: Defines how the input data is transformed. Valid values are one + * of %RXH_XFRM_*. * @rsvd8: Reserved for future use; see the note on reserved space. * @rsvd32: Reserved for future use; see the note on reserved space. * @rss_config: RX ring/queue index for each hash value i.e., indirection table @@ -1285,7 +1287,8 @@ struct ethtool_rxfh { uint32_t indir_size; uint32_t key_size; uint8_t hfunc; - uint8_t rsvd8[3]; + uint8_t input_xfrm; + uint8_t rsvd8[2]; uint32_t rsvd32; uint32_t rss_config[]; }; @@ -1992,6 +1995,15 @@ static inline int ethtool_validate_duplex(uint8_t duplex) #define WOL_MODE_COUNT 8 +/* RSS hash function data + * XOR the corresponding source and destination fields of each specified + * protocol. Both copies of the XOR'ed fields are fed into the RSS and RXHASH + * calculation. Note that this XORing reduces the input set entropy and could + * be exploited to reduce the RSS queue spread. + */ +#define RXH_XFRM_SYM_XOR (1 << 0) +#define RXH_XFRM_NO_CHANGE 0xff + /* L2-L4 network traffic flow types */ #define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ #define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ @@ -2128,18 +2140,6 @@ enum ethtool_reset_flags { * refused. For drivers: ignore this field (use kernel's * __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will * be overwritten by kernel. - * @supported: Bitmap with each bit meaning given by - * %ethtool_link_mode_bit_indices for the link modes, physical - * connectors and other link features for which the interface - * supports autonegotiation or auto-detection. Read-only. - * @advertising: Bitmap with each bit meaning given by - * %ethtool_link_mode_bit_indices for the link modes, physical - * connectors and other link features that are advertised through - * autonegotiation or enabled for auto-detection. - * @lp_advertising: Bitmap with each bit meaning given by - * %ethtool_link_mode_bit_indices for the link modes, and other - * link features that the link partner advertised through - * autonegotiation; 0 if unknown or not applicable. Read-only. * @transceiver: Used to distinguish different possible PHY types, * reported consistently by PHYLIB. Read-only. * @master_slave_cfg: Master/slave port mode. 
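Looking back at the qemu-plugin.h hunks earlier in this patch: the v2 API replaces the raw-pointer inline ops with scoreboards indexed by vcpu_index, so per-vCPU counters no longer race. The sketch below shows how a plugin might count executed translation blocks with the new calls; the install/atexit skeleton is the usual plugin boilerplate (an assumption modeled on the contrib plugins), while the scoreboard and inline-op calls are the ones declared above.

```c
/* Illustrative sketch -- not code from this patch. */
#include <glib.h>
#include <inttypes.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static struct qemu_plugin_scoreboard *counts; /* one uint64_t per vCPU */
static qemu_plugin_u64 tb_count;              /* member at offset 0 */

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    /* Each vCPU bumps its own scoreboard slot, so no atomics are needed. */
    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_ADD_U64, tb_count, 1);
}

static void plugin_exit(qemu_plugin_id_t id, void *userdata)
{
    g_autoptr(GString) out = g_string_new(NULL);

    for (int i = 0; i < qemu_plugin_num_vcpus(); i++) {
        g_string_append_printf(out, "vcpu %d: %" PRIu64 " TBs\n",
                               i, qemu_plugin_u64_get(tb_count, i));
    }
    g_string_append_printf(out, "total: %" PRIu64 " TBs\n",
                           qemu_plugin_u64_sum(tb_count));
    qemu_plugin_outs(out->str);
    qemu_plugin_scoreboard_free(counts);
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    counts = qemu_plugin_scoreboard_new(sizeof(uint64_t));
    tb_count = qemu_plugin_scoreboard_u64(counts);
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}
```

Because a scoreboard may be resized when vCPUs appear, entries are always reached through a qemu_plugin_u64 or qemu_plugin_scoreboard_find() rather than a cached pointer.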
@@ -2181,6 +2181,21 @@ enum ethtool_reset_flags { * %set_link_ksettings() should validate all fields other than @cmd * and @link_mode_masks_nwords that are not described as read-only or * deprecated, and must ignore all fields described as read-only. + * + * @link_mode_masks is divided into three bitfields, each of length + * @link_mode_masks_nwords: + * - supported: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, physical + * connectors and other link features for which the interface + * supports autonegotiation or auto-detection. Read-only. + * - advertising: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, physical + * connectors and other link features that are advertised through + * autonegotiation or enabled for auto-detection. + * - lp_advertising: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, and other + * link features that the link partner advertised through + * autonegotiation; 0 if unknown or not applicable. Read-only. */ struct ethtool_link_settings { uint32_t cmd; diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h index 6b9793842c9..fc0dcd10aed 100644 --- a/include/standard-headers/linux/fuse.h +++ b/include/standard-headers/linux/fuse.h @@ -209,7 +209,7 @@ * - add FUSE_HAS_EXPIRE_ONLY * * 7.39 - * - add FUSE_DIRECT_IO_RELAX + * - add FUSE_DIRECT_IO_ALLOW_MMAP * - add FUSE_STATX and related structures */ @@ -405,8 +405,7 @@ struct fuse_file_lock { * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir, * symlink and mknod (single group that matches parent) * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation - * FUSE_DIRECT_IO_RELAX: relax restrictions in FOPEN_DIRECT_IO mode, for now - * allow shared mmap + * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. 
*/ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -445,7 +444,10 @@ struct fuse_file_lock { #define FUSE_HAS_INODE_DAX (1ULL << 33) #define FUSE_CREATE_SUPP_GROUP (1ULL << 34) #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) -#define FUSE_DIRECT_IO_RELAX (1ULL << 36) +#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) + +/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ +#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP /** * CUSE INIT request/reply flags diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h index e5f558d9649..a39193213ff 100644 --- a/include/standard-headers/linux/pci_regs.h +++ b/include/standard-headers/linux/pci_regs.h @@ -80,6 +80,7 @@ #define PCI_HEADER_TYPE_NORMAL 0 #define PCI_HEADER_TYPE_BRIDGE 1 #define PCI_HEADER_TYPE_CARDBUS 2 +#define PCI_HEADER_TYPE_MFD 0x80 /* Multi-Function Device (possible) */ #define PCI_BIST 0x0f /* 8 bits */ #define PCI_BIST_CODE_MASK 0x0f /* Return result */ @@ -637,6 +638,7 @@ #define PCI_EXP_RTCAP 0x1e /* Root Capabilities */ #define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */ #define PCI_EXP_RTSTA 0x20 /* Root Status */ +#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */ #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ #define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ /* @@ -930,12 +932,13 @@ /* Process Address Space ID */ #define PCI_PASID_CAP 0x04 /* PASID feature register */ -#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */ -#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */ +#define PCI_PASID_CAP_EXEC 0x0002 /* Exec permissions Supported */ +#define PCI_PASID_CAP_PRIV 0x0004 /* Privilege Mode Supported */ +#define PCI_PASID_CAP_WIDTH 0x1f00 #define PCI_PASID_CTRL 0x06 /* PASID control register */ -#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */ -#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */ -#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */ +#define PCI_PASID_CTRL_ENABLE 0x0001 /* Enable bit */ +#define PCI_PASID_CTRL_EXEC 0x0002 /* Exec permissions Enable */ +#define PCI_PASID_CTRL_PRIV 0x0004 /* Privilege Mode Enable */ #define PCI_EXT_CAP_PASID_SIZEOF 8 /* Single Root I/O Virtualization */ @@ -975,6 +978,8 @@ #define PCI_LTR_VALUE_MASK 0x000003ff #define PCI_LTR_SCALE_MASK 0x00001c00 #define PCI_LTR_SCALE_SHIFT 10 +#define PCI_LTR_NOSNOOP_VALUE 0x03ff0000 /* Max No-Snoop Latency Value */ +#define PCI_LTR_NOSNOOP_SCALE 0x1c000000 /* Scale for Max Value */ #define PCI_EXT_CAP_LTR_SIZEOF 8 /* Access Control Service */ @@ -1042,9 +1047,16 @@ #define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */ #define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR 0x0000 /* Uncorrectable error */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE 0x0002 /* Rcvd ERR_NONFATAL */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE 0x0004 /* Rcvd ERR_FATAL */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT 0x0006 /* Reason in Trig Reason Extension field */ #define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */ #define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO 0x0000 /* RP PIO error */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER 0x0020 /* DPC SW Trigger bit */ +#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */ #define 
PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */ @@ -1088,6 +1100,8 @@ #define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */ #define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */ #define PCI_L1SS_CTL2 0x0c /* Control 2 Register */ +#define PCI_L1SS_CTL2_T_PWR_ON_SCALE 0x00000003 /* T_POWER_ON Scale */ +#define PCI_L1SS_CTL2_T_PWR_ON_VALUE 0x000000f8 /* T_POWER_ON Value */ /* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */ #define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */ diff --git a/include/standard-headers/linux/vhost_types.h b/include/standard-headers/linux/vhost_types.h index 5ad07e134ae..fd54044936f 100644 --- a/include/standard-headers/linux/vhost_types.h +++ b/include/standard-headers/linux/vhost_types.h @@ -185,5 +185,12 @@ struct vhost_vdpa_iova_range { * DRIVER_OK */ #define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6 +/* Device may expose the virtqueue's descriptor area, driver area and + * device area to a different group for ASID binding than where its + * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID. + */ +#define VHOST_BACKEND_F_DESC_ASID 0x7 +/* IOTLB don't flush memory mapping across device reset */ +#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8 #endif diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h index 8a7d0dc8b00..45be0fa1bcd 100644 --- a/include/standard-headers/linux/virtio_config.h +++ b/include/standard-headers/linux/virtio_config.h @@ -52,7 +52,7 @@ * rest are per-device feature bits. */ #define VIRTIO_TRANSPORT_F_START 28 -#define VIRTIO_TRANSPORT_F_END 41 +#define VIRTIO_TRANSPORT_F_END 42 #ifndef VIRTIO_CONFIG_NO_LEGACY /* Do we get callbacks when the ring is completely used, even if we've @@ -103,8 +103,19 @@ */ #define VIRTIO_F_NOTIFICATION_DATA 38 +/* This feature indicates that the driver uses the data provided by the device + * as a virtqueue identifier in available buffer notifications. + */ +#define VIRTIO_F_NOTIF_CONFIG_DATA 39 + /* * This feature indicates that the driver can reset a queue individually. */ #define VIRTIO_F_RING_RESET 40 + +/* + * This feature indicates that the device support administration virtqueues. + */ +#define VIRTIO_F_ADMIN_VQ 41 + #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h index be912cfc957..3e2bc2c97e6 100644 --- a/include/standard-headers/linux/virtio_pci.h +++ b/include/standard-headers/linux/virtio_pci.h @@ -166,6 +166,20 @@ struct virtio_pci_common_cfg { uint32_t queue_used_hi; /* read-write */ }; +/* + * Warning: do not use sizeof on this: use offsetofend for + * specific fields you need. + */ +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + + uint16_t queue_notify_data; /* read-write */ + uint16_t queue_reset; /* read-write */ + + uint16_t admin_queue_index; /* read-only */ + uint16_t admin_queue_num; /* read-only */ +}; + /* Fields in VIRTIO_PCI_CAP_PCI_CFG: */ struct virtio_pci_cfg_cap { struct virtio_pci_cap cap; @@ -204,7 +218,72 @@ struct virtio_pci_cfg_cap { #define VIRTIO_PCI_COMMON_Q_USEDHI 52 #define VIRTIO_PCI_COMMON_Q_NDATA 56 #define VIRTIO_PCI_COMMON_Q_RESET 58 +#define VIRTIO_PCI_COMMON_ADM_Q_IDX 60 +#define VIRTIO_PCI_COMMON_ADM_Q_NUM 62 #endif /* VIRTIO_PCI_NO_MODERN */ +/* Admin command status. */ +#define VIRTIO_ADMIN_STATUS_OK 0 + +/* Admin command opcode. 
*/ +#define VIRTIO_ADMIN_CMD_LIST_QUERY 0x0 +#define VIRTIO_ADMIN_CMD_LIST_USE 0x1 + +/* Admin command group type. */ +#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1 + +/* Transitional device admin command. */ +#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE 0x2 +#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ 0x3 +#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE 0x4 +#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5 +#define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6 + +struct QEMU_PACKED virtio_admin_cmd_hdr { + uint16_t opcode; + /* + * 1 - SR-IOV + * 2-65535 - reserved + */ + uint16_t group_type; + /* Unused, reserved for future extensions. */ + uint8_t reserved1[12]; + uint64_t group_member_id; +}; + +struct QEMU_PACKED virtio_admin_cmd_status { + uint16_t status; + uint16_t status_qualifier; + /* Unused, reserved for future extensions. */ + uint8_t reserved2[4]; +}; + +struct QEMU_PACKED virtio_admin_cmd_legacy_wr_data { + uint8_t offset; /* Starting offset of the register(s) to write. */ + uint8_t reserved[7]; + uint8_t registers[]; +}; + +struct QEMU_PACKED virtio_admin_cmd_legacy_rd_data { + uint8_t offset; /* Starting offset of the register(s) to read. */ +}; + +#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END 0 +#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_DEV 0x1 +#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM 0x2 + +#define VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO 4 + +struct QEMU_PACKED virtio_admin_cmd_notify_info_data { + uint8_t flags; /* 0 = end of list, 1 = owner device, 2 = member device */ + uint8_t bar; /* BAR of the member or the owner device */ + uint8_t padding[6]; + uint64_t offset; /* Offset within bar. */ +}; + +struct virtio_admin_cmd_notify_info_result { + struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO]; +}; + #endif diff --git a/include/standard-headers/linux/virtio_pmem.h b/include/standard-headers/linux/virtio_pmem.h index fc029de7988..1a2576d0178 100644 --- a/include/standard-headers/linux/virtio_pmem.h +++ b/include/standard-headers/linux/virtio_pmem.h @@ -14,6 +14,13 @@ #include "standard-headers/linux/virtio_ids.h" #include "standard-headers/linux/virtio_config.h" +/* Feature bits */ +/* guest physical address range will be indicated as shared memory region 0 */ +#define VIRTIO_PMEM_F_SHMEM_REGION 0 + +/* shmid of the shared memory region corresponding to the pmem */ +#define VIRTIO_PMEM_SHMEM_REGION_ID 0 + struct virtio_pmem_config { uint64_t start; uint64_t size; diff --git a/include/sysemu/cpu-timers.h b/include/sysemu/cpu-timers.h index 2e786fe7fb1..d86738a378d 100644 --- a/include/sysemu/cpu-timers.h +++ b/include/sysemu/cpu-timers.h @@ -17,18 +17,24 @@ void cpu_timers_init(void); /* icount - Instruction Counter API */ -/* - * icount enablement state: +/** + * ICountMode: icount enablement state: * - * 0 = Disabled - Do not count executed instructions. - * 1 = Enabled - Fixed conversion of insn to ns via "shift" option - * 2 = Enabled - Runtime adaptive algorithm to compute shift + * @ICOUNT_DISABLED: Disabled - Do not count executed instructions. 
+ * @ICOUNT_PRECISE: Enabled - Fixed conversion of insn to ns via "shift" option + * @ICOUNT_ADAPTATIVE: Enabled - Runtime adaptive algorithm to compute shift */ -#ifdef CONFIG_TCG -extern int use_icount; +typedef enum { + ICOUNT_DISABLED = 0, + ICOUNT_PRECISE, + ICOUNT_ADAPTATIVE, +} ICountMode; + +#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) +extern ICountMode use_icount; #define icount_enabled() (use_icount) #else -#define icount_enabled() 0 +#define icount_enabled() ICOUNT_DISABLED #endif /* @@ -50,8 +56,14 @@ int64_t icount_get(void); */ int64_t icount_to_ns(int64_t icount); -/* configure the icount options, including "shift" */ -void icount_configure(QemuOpts *opts, Error **errp); +/** + * icount_configure: configure the icount options, including "shift" + * @opts: Options to parse + * @errp: pointer to a NULL-initialized error object + * + * Return: true on success, else false setting @errp with error + */ +bool icount_configure(QemuOpts *opts, Error **errp); /* used by tcg vcpu thread to calc icount budget */ int64_t icount_round(int64_t count); diff --git a/include/sysemu/hostmem.h b/include/sysemu/hostmem.h index 39326f1d4f9..0e411aaa29e 100644 --- a/include/sysemu/hostmem.h +++ b/include/sysemu/hostmem.h @@ -47,7 +47,15 @@ OBJECT_DECLARE_TYPE(HostMemoryBackend, HostMemoryBackendClass, struct HostMemoryBackendClass { ObjectClass parent_class; - void (*alloc)(HostMemoryBackend *backend, Error **errp); + /** + * alloc: Allocate memory from backend. + * + * @backend: the #HostMemoryBackend. + * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. + */ + bool (*alloc)(HostMemoryBackend *backend, Error **errp); }; /** diff --git a/include/sysemu/iommufd.h b/include/sysemu/iommufd.h new file mode 100644 index 00000000000..9af27ebd6cc --- /dev/null +++ b/include/sysemu/iommufd.h @@ -0,0 +1,36 @@ +#ifndef SYSEMU_IOMMUFD_H +#define SYSEMU_IOMMUFD_H + +#include "qom/object.h" +#include "exec/hwaddr.h" +#include "exec/cpu-common.h" + +#define TYPE_IOMMUFD_BACKEND "iommufd" +OBJECT_DECLARE_TYPE(IOMMUFDBackend, IOMMUFDBackendClass, IOMMUFD_BACKEND) + +struct IOMMUFDBackendClass { + ObjectClass parent_class; +}; + +struct IOMMUFDBackend { + Object parent; + + /*< protected >*/ + int fd; /* /dev/iommu file descriptor */ + bool owned; /* is the /dev/iommu opened internally */ + uint32_t users; + + /*< public >*/ +}; + +int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp); +void iommufd_backend_disconnect(IOMMUFDBackend *be); + +int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id, + Error **errp); +void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id); +int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova, + ram_addr_t size, void *vaddr, bool readonly); +int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id, + hwaddr iova, ram_addr_t size); +#endif diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index d6148781642..fad9a7e8ff3 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -538,4 +538,10 @@ bool kvm_arch_cpu_check_are_resettable(void); bool kvm_dirty_ring_enabled(void); uint32_t kvm_dirty_ring_size(void); + +/** + * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page + * reported for the VM. 
+ */ +bool kvm_hwpoisoned_mem(void); #endif diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h index fd846394be1..882e37e12c5 100644 --- a/include/sysemu/kvm_int.h +++ b/include/sysemu/kvm_int.h @@ -120,6 +120,7 @@ struct KVMState uint32_t xen_caps; uint16_t xen_gnttab_max_frames; uint16_t xen_evtchn_max_pirq; + char *device; }; void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h index 4173ef2afa7..825cfe86bc1 100644 --- a/include/sysemu/numa.h +++ b/include/sysemu/numa.h @@ -41,6 +41,7 @@ struct NodeInfo { struct HostMemoryBackend *node_memdev; bool present; bool has_cpu; + bool has_gi; uint8_t lb_info_provided; uint16_t initiator; uint8_t distance[MAX_NODES]; diff --git a/include/sysemu/os-posix.h b/include/sysemu/os-posix.h index dff32ae185a..b881ac6c6f7 100644 --- a/include/sysemu/os-posix.h +++ b/include/sysemu/os-posix.h @@ -51,6 +51,7 @@ bool is_daemonized(void); void os_daemonize(void); bool os_set_runas(const char *user_id); void os_set_chroot(const char *path); +void os_setup_limits(void); void os_setup_post(void); int os_mlock(void); diff --git a/include/sysemu/os-win32.h b/include/sysemu/os-win32.h index 1047d260cbd..b82a5d3ad93 100644 --- a/include/sysemu/os-win32.h +++ b/include/sysemu/os-win32.h @@ -128,6 +128,11 @@ static inline int os_mlock(void) return -ENOSYS; } +static inline void os_setup_limits(void) +{ + return; +} + #define fsync _commit #if !defined(lseek) diff --git a/include/sysemu/qtest.h b/include/sysemu/qtest.h index 85f05b0e46a..b5d5fd34637 100644 --- a/include/sysemu/qtest.h +++ b/include/sysemu/qtest.h @@ -23,6 +23,7 @@ static inline bool qtest_enabled(void) return qtest_allowed; } +#ifndef CONFIG_USER_ONLY void qtest_send_prefix(CharBackend *chr); void G_GNUC_PRINTF(2, 3) qtest_sendf(CharBackend *chr, const char *fmt, ...); void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words)); @@ -35,5 +36,6 @@ void qtest_server_set_send_handler(void (*send)(void *, const char *), void qtest_server_inproc_recv(void *opaque, const char *buf); int64_t qtest_get_virtual_clock(void); +#endif #endif diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h index 08aae5869fc..f229b2109c9 100644 --- a/include/sysemu/replay.h +++ b/include/sysemu/replay.h @@ -1,6 +1,3 @@ -#ifndef SYSEMU_REPLAY_H -#define SYSEMU_REPLAY_H - /* * QEMU replay (system interface) * @@ -11,6 +8,12 @@ * See the COPYING file in the top-level directory. * */ +#ifndef SYSEMU_REPLAY_H +#define SYSEMU_REPLAY_H + +#ifdef CONFIG_USER_ONLY +#error Cannot include this header from user emulation +#endif #include "exec/replay-core.h" #include "qapi/qapi-types-misc.h" @@ -70,6 +73,11 @@ int replay_get_instructions(void); /*! Updates instructions counter in replay mode. */ void replay_account_executed_instructions(void); +/** + * replay_can_wait: check if we should pause for wait-io + */ +bool replay_can_wait(void); + /* Processing clocks and other time sources */ /*! Save the specified clock */ @@ -79,12 +87,14 @@ int64_t replay_save_clock(ReplayClockKind kind, int64_t clock, int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount); /*! Saves or reads the clock depending on the current replay mode. */ #define REPLAY_CLOCK(clock, value) \ + !icount_enabled() ? (value) : \ (replay_mode == REPLAY_MODE_PLAY \ ? replay_read_clock((clock), icount_get_raw()) \ : replay_mode == REPLAY_MODE_RECORD \ ? 
replay_save_clock((clock), (value), icount_get_raw()) \ : (value)) #define REPLAY_CLOCK_LOCKED(clock, value) \ + !icount_enabled() ? (value) : \ (replay_mode == REPLAY_MODE_PLAY \ ? replay_read_clock((clock), icount_get_raw_locked()) \ : replay_mode == REPLAY_MODE_RECORD \ diff --git a/include/sysemu/reset.h b/include/sysemu/reset.h index 609e4d50c26..ae436044a92 100644 --- a/include/sysemu/reset.h +++ b/include/sysemu/reset.h @@ -1,3 +1,29 @@ +/* + * Reset handlers. + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2016 Red Hat, Inc. + * Copyright (c) 2024 Linaro, Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + #ifndef QEMU_SYSEMU_RESET_H #define QEMU_SYSEMU_RESET_H @@ -5,9 +31,96 @@ typedef void QEMUResetHandler(void *opaque); +/** + * qemu_register_resettable: Register an object to be reset + * @obj: object to be reset: it must implement the Resettable interface + * + * Register @obj on the list of objects which will be reset when the + * simulation is reset. These objects will be reset in the order + * they were added, using the three-phase Resettable protocol, + * so first all objects go through the enter phase, then all objects + * go through the hold phase, and then finally all go through the + * exit phase. + * + * It is not permitted to register or unregister reset functions or + * resettable objects from within any of the reset phase methods of @obj. + * + * We assume that the caller holds the BQL. + */ +void qemu_register_resettable(Object *obj); + +/** + * qemu_unregister_resettable: Unregister an object to be reset + * @obj: object to unregister + * + * Remove @obj from the list of objects which are reset when the + * simulation is reset. It must have been previously added to + * the list via qemu_register_resettable(). + * + * We assume that the caller holds the BQL. + */ +void qemu_unregister_resettable(Object *obj); + +/** + * qemu_register_reset: Register a callback for system reset + * @func: function to call + * @opaque: opaque data to pass to @func + * + * Register @func on the list of functions which are called when the + * entire system is reset. Functions registered with this API and + * Resettable objects registered with qemu_register_resettable() are + * handled together, in the order in which they were registered. + * Functions registered with this API are called in the 'hold' phase + * of the 3-phase reset. 
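+ *
+ * A minimal, hypothetical usage sketch ("mydev_reset" and "MyDevState"
+ * are illustrative names only):
+ *
+ *   static void mydev_reset(void *opaque)
+ *   {
+ *       MyDevState *s = opaque;
+ *
+ *       s->counter = 0;    // put the device back into its cold-boot state
+ *   }
+ *
+ *   qemu_register_reset(mydev_reset, s);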
+ * + * In general this function should not be used in new code where possible; + * for instance, device model reset is better accomplished using the + * methods on DeviceState. + * + * It is not permitted to register or unregister reset functions or + * resettable objects from within the @func callback. + * + * We assume that the caller holds the BQL. + */ void qemu_register_reset(QEMUResetHandler *func, void *opaque); + +/** + * qemu_register_reset_nosnapshotload: Register a callback for system reset + * @func: function to call + * @opaque: opaque data to pass to @func + * + * This is the same as qemu_register_reset(), except that @func is + * not called if the reason that the system is being reset is to + * put it into a clean state prior to loading a snapshot (i.e. for + * SHUTDOWN_CAUSE_SNAPSHOT_LOAD). + */ void qemu_register_reset_nosnapshotload(QEMUResetHandler *func, void *opaque); + +/** + * qemu_unregister_reset: Unregister a system reset callback + * @func: function registered with qemu_register_reset() + * @opaque: the same opaque data that was passed to qemu_register_reset() + * + * Undo the effects of a qemu_register_reset(). The @func and @opaque + * must both match the arguments originally used with qemu_register_reset(). + * + * We assume that the caller holds the BQL. + */ void qemu_unregister_reset(QEMUResetHandler *func, void *opaque); + +/** + * qemu_devices_reset: Perform a complete system reset + * @reason: reason for the reset + * + * This function performs the low-level work needed to do a complete reset + * of the system (calling all the callbacks registered with + * qemu_register_reset() and resetting all the Resettable objects registered + * with qemu_register_resettable()). It should only be called by the code in a + * MachineClass reset method. + * + * If you want to trigger a system reset from, for instance, a device + * model, don't use this function. Use qemu_system_reset_request(). + */ void qemu_devices_reset(ShutdownCause reason); #endif diff --git a/include/sysemu/runstate.h b/include/sysemu/runstate.h index c8c2bd8a61b..0117d243c4e 100644 --- a/include/sysemu/runstate.h +++ b/include/sysemu/runstate.h @@ -40,6 +40,15 @@ static inline bool shutdown_caused_by_guest(ShutdownCause cause) return cause >= SHUTDOWN_CAUSE_GUEST_SHUTDOWN; } +/* + * In a "live" state, the vcpu clock is ticking, and the runstate notifiers + * think we are running. + */ +static inline bool runstate_is_live(RunState state) +{ + return state == RUN_STATE_RUNNING || state == RUN_STATE_SUSPENDED; +} + void vm_start(void); /** @@ -48,9 +57,20 @@ void vm_start(void); * @step_pending: whether any of the CPUs is about to be single-stepped by gdb */ int vm_prepare_start(bool step_pending); + +/** + * vm_resume: If @state is a live state, start the vm and set the state, + * else just set the state. 
+ * + * @state: the state to restore + */ +void vm_resume(RunState state); + int vm_stop(RunState state); int vm_stop_force_state(RunState state); int vm_shutdown(void); +void vm_set_suspended(bool suspended); +bool vm_get_suspended(void); typedef enum WakeupReason { /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */ diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h index 73a37949c24..eb1dc1e4eda 100644 --- a/include/sysemu/sysemu.h +++ b/include/sysemu/sysemu.h @@ -41,7 +41,6 @@ extern int graphic_height; extern int graphic_depth; extern int display_opengl; extern const char *keyboard_layout; -extern int win2k_install_hack; extern int graphic_rotate; extern int old_param; extern uint8_t *boot_splash_filedata; diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h index c8e7c2f6cf5..10c2e3082a2 100644 --- a/include/sysemu/xen-mapcache.h +++ b/include/sysemu/xen-mapcache.h @@ -10,10 +10,11 @@ #define XEN_MAPCACHE_H #include "exec/cpu-common.h" +#include "sysemu/xen.h" typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr phys_offset, ram_addr_t size); -#ifdef CONFIG_XEN +#ifdef CONFIG_XEN_IS_POSSIBLE void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque); diff --git a/include/sysemu/xen.h b/include/sysemu/xen.h index bc13ad56924..a9f591f26dd 100644 --- a/include/sysemu/xen.h +++ b/include/sysemu/xen.h @@ -10,6 +10,10 @@ #ifndef SYSEMU_XEN_H #define SYSEMU_XEN_H +#ifdef CONFIG_USER_ONLY +#error Cannot include sysemu/xen.h from user emulation +#endif + #include "exec/cpu-common.h" #ifdef NEED_CPU_H @@ -26,16 +30,13 @@ extern bool xen_allowed; #define xen_enabled() (xen_allowed) -#ifndef CONFIG_USER_ONLY void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length); void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, struct MemoryRegion *mr, Error **errp); -#endif #else /* !CONFIG_XEN_IS_POSSIBLE */ #define xen_enabled() 0 -#ifndef CONFIG_USER_ONLY static inline void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) { /* nothing */ @@ -45,7 +46,6 @@ static inline void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, { g_assert_not_reached(); } -#endif #endif /* CONFIG_XEN_IS_POSSIBLE */ diff --git a/accel/tcg/debuginfo.h b/include/tcg/debuginfo.h similarity index 96% rename from accel/tcg/debuginfo.h rename to include/tcg/debuginfo.h index f064e1c144b..858535b5da5 100644 --- a/accel/tcg/debuginfo.h +++ b/include/tcg/debuginfo.h @@ -4,8 +4,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef ACCEL_TCG_DEBUGINFO_H -#define ACCEL_TCG_DEBUGINFO_H +#ifndef TCG_DEBUGINFO_H +#define TCG_DEBUGINFO_H #include "qemu/bitops.h" diff --git a/accel/tcg/perf.h b/include/tcg/perf.h similarity index 95% rename from accel/tcg/perf.h rename to include/tcg/perf.h index f92dd52c699..c96b5920a3f 100644 --- a/accel/tcg/perf.h +++ b/include/tcg/perf.h @@ -4,8 +4,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef ACCEL_TCG_PERF_H -#define ACCEL_TCG_PERF_H +#ifndef TCG_PERF_H +#define TCG_PERF_H #if defined(CONFIG_TCG) && defined(CONFIG_LINUX) /* Start writing perf-.map. */ diff --git a/include/tcg/tcg-cond.h b/include/tcg/tcg-cond.h index 2a38a386d47..5cadbd6ff2c 100644 --- a/include/tcg/tcg-cond.h +++ b/include/tcg/tcg-cond.h @@ -29,26 +29,34 @@ * Conditions. Note that these are laid out for easy manipulation by * the functions below: * bit 0 is used for inverting; - * bit 1 is signed, - * bit 2 is unsigned, - * bit 3 is used with bit 0 for swapping signed/unsigned. + * bit 1 is used for conditions that need swapping (signed/unsigned). 
+ * bit 2 is used with bit 1 for swapping. + * bit 3 is used for unsigned conditions. */ typedef enum { /* non-signed */ TCG_COND_NEVER = 0 | 0 | 0 | 0, TCG_COND_ALWAYS = 0 | 0 | 0 | 1, + + /* equality */ TCG_COND_EQ = 8 | 0 | 0 | 0, TCG_COND_NE = 8 | 0 | 0 | 1, + + /* "test" i.e. and then compare vs 0 */ + TCG_COND_TSTEQ = 8 | 4 | 0 | 0, + TCG_COND_TSTNE = 8 | 4 | 0 | 1, + /* signed */ TCG_COND_LT = 0 | 0 | 2 | 0, TCG_COND_GE = 0 | 0 | 2 | 1, - TCG_COND_LE = 8 | 0 | 2 | 0, - TCG_COND_GT = 8 | 0 | 2 | 1, + TCG_COND_GT = 0 | 4 | 2 | 0, + TCG_COND_LE = 0 | 4 | 2 | 1, + /* unsigned */ - TCG_COND_LTU = 0 | 4 | 0 | 0, - TCG_COND_GEU = 0 | 4 | 0 | 1, - TCG_COND_LEU = 8 | 4 | 0 | 0, - TCG_COND_GTU = 8 | 4 | 0 | 1, + TCG_COND_LTU = 8 | 0 | 2 | 0, + TCG_COND_GEU = 8 | 0 | 2 | 1, + TCG_COND_GTU = 8 | 4 | 2 | 0, + TCG_COND_LEU = 8 | 4 | 2 | 1, } TCGCond; /* Invert the sense of the comparison. */ @@ -60,25 +68,49 @@ static inline TCGCond tcg_invert_cond(TCGCond c) /* Swap the operands in a comparison. */ static inline TCGCond tcg_swap_cond(TCGCond c) { - return c & 6 ? (TCGCond)(c ^ 9) : c; + return (TCGCond)(c ^ ((c & 2) << 1)); +} + +/* Must a comparison be considered signed? */ +static inline bool is_signed_cond(TCGCond c) +{ + return (c & (8 | 2)) == 2; +} + +/* Must a comparison be considered unsigned? */ +static inline bool is_unsigned_cond(TCGCond c) +{ + return (c & (8 | 2)) == (8 | 2); +} + +/* Must a comparison be considered a test? */ +static inline bool is_tst_cond(TCGCond c) +{ + return (c | 1) == TCG_COND_TSTNE; } /* Create an "unsigned" version of a "signed" comparison. */ static inline TCGCond tcg_unsigned_cond(TCGCond c) { - return c & 2 ? (TCGCond)(c ^ 6) : c; + return is_signed_cond(c) ? (TCGCond)(c + 8) : c; } /* Create a "signed" version of an "unsigned" comparison. */ static inline TCGCond tcg_signed_cond(TCGCond c) { - return c & 4 ? (TCGCond)(c ^ 6) : c; + return is_unsigned_cond(c) ? (TCGCond)(c - 8) : c; } -/* Must a comparison be considered unsigned? */ -static inline bool is_unsigned_cond(TCGCond c) +/* Create the eq/ne version of a tsteq/tstne comparison. */ +static inline TCGCond tcg_tst_eqne_cond(TCGCond c) +{ + return is_tst_cond(c) ? (TCGCond)(c - 4) : c; +} + +/* Create the lt/ge version of a tstne/tsteq comparison of the sign. */ +static inline TCGCond tcg_tst_ltge_cond(TCGCond c) { - return (c & 4) != 0; + return is_tst_cond(c) ? (TCGCond)(c ^ 0xf) : c; } /* @@ -92,7 +124,7 @@ static inline TCGCond tcg_high_cond(TCGCond c) case TCG_COND_LE: case TCG_COND_GEU: case TCG_COND_LEU: - return (TCGCond)(c ^ 8); + return (TCGCond)(c ^ (4 | 1)); default: return c; } diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index a7bb7afc4fc..e37ef17c4f3 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -560,6 +560,12 @@ struct TCGContext { QTAILQ_HEAD(, TCGOp) ops, free_ops; QSIMPLEQ_HEAD(, TCGLabel) labels; + /* + * When clear, new ops are added to the tail of @ops. + * When set, new ops are added in front of @emit_before_op. + */ + TCGOp *emit_before_op; + /* Tells which temporary holds a given register. 
It does not take into account fixed registers */ TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS]; diff --git a/include/ui/console.h b/include/ui/console.h index a4a49ffc640..0bc7a00ac0b 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -413,6 +413,7 @@ void qemu_console_early_init(void); void qemu_console_set_display_gl_ctx(QemuConsole *con, DisplayGLCtx *ctx); +QemuConsole *qemu_console_lookup_default(void); QemuConsole *qemu_console_lookup_by_index(unsigned int index); QemuConsole *qemu_console_lookup_by_device(DeviceState *dev, uint32_t head); QemuConsole *qemu_console_lookup_by_device_name(const char *device_id, @@ -432,7 +433,6 @@ int qemu_console_get_window_id(QemuConsole *con); /* Set the low-level window id for the console */ void qemu_console_set_window_id(QemuConsole *con, int window_id); -void console_select(unsigned int index); void qemu_console_resize(QemuConsole *con, int width, int height); DisplaySurface *qemu_console_surface(QemuConsole *con); void coroutine_fn qemu_console_co_wait_update(QemuConsole *con); diff --git a/include/ui/kbd-state.h b/include/ui/kbd-state.h index fb79776128c..1f37b932eb6 100644 --- a/include/ui/kbd-state.h +++ b/include/ui/kbd-state.h @@ -99,4 +99,15 @@ bool qkbd_state_modifier_get(QKbdState *kbd, QKbdModifier mod); */ void qkbd_state_lift_all_keys(QKbdState *kbd); +/** + * qkbd_state_switch_console: Switch console. + * + * This sends key up events to the previous console for all keys which are in + * down state to prevent keys being stuck, and remembers the new console. + * + * @kbd: state tracker state. + * @con: new QemuConsole for this state tracker. + */ +void qkbd_state_switch_console(QKbdState *kbd, QemuConsole *con); + #endif /* QEMU_UI_KBD_STATE_H */ diff --git a/include/ui/rect.h b/include/ui/rect.h index 68f05d78a8e..7ebf47ebcdc 100644 --- a/include/ui/rect.h +++ b/include/ui/rect.h @@ -4,8 +4,6 @@ #ifndef QEMU_RECT_H #define QEMU_RECT_H -#include -#include typedef struct QemuRect { int16_t x; diff --git a/include/user/safe-syscall.h b/include/user/safe-syscall.h index 27b71cdbd8e..aa075f4d5cd 100644 --- a/include/user/safe-syscall.h +++ b/include/user/safe-syscall.h @@ -134,7 +134,7 @@ extern char safe_syscall_start[]; extern char safe_syscall_end[]; #define safe_syscall(...) \ - safe_syscall_base(&((TaskState *)thread_cpu->opaque)->signal_pending, \ + safe_syscall_base(&get_task_state(thread_cpu)->signal_pending, \ __VA_ARGS__) #endif diff --git a/include/user/syscall-trace.h b/include/user/syscall-trace.h index 557f881a79b..b48b2b2d0ae 100644 --- a/include/user/syscall-trace.h +++ b/include/user/syscall-trace.h @@ -11,6 +11,7 @@ #define SYSCALL_TRACE_H #include "exec/user/abitypes.h" +#include "gdbstub/user.h" #include "qemu/plugin.h" #include "trace/trace-root.h" @@ -20,7 +21,7 @@ * could potentially unify the -strace code here as well. 
*/ -static inline void record_syscall_start(void *cpu, int num, +static inline void record_syscall_start(CPUState *cpu, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, @@ -29,11 +30,13 @@ static inline void record_syscall_start(void *cpu, int num, qemu_plugin_vcpu_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + gdb_syscall_entry(cpu, num); } -static inline void record_syscall_return(void *cpu, int num, abi_long ret) +static inline void record_syscall_return(CPUState *cpu, int num, abi_long ret) { qemu_plugin_vcpu_syscall_ret(cpu, num, ret); + gdb_syscall_return(cpu, num); } diff --git a/io/channel-file.c b/io/channel-file.c index 4a12c618860..6436cfb6ae4 100644 --- a/io/channel-file.c +++ b/io/channel-file.c @@ -36,11 +36,27 @@ qio_channel_file_new_fd(int fd) ioc->fd = fd; + if (lseek(fd, 0, SEEK_CUR) != (off_t)-1) { + qio_channel_set_feature(QIO_CHANNEL(ioc), QIO_CHANNEL_FEATURE_SEEKABLE); + } + trace_qio_channel_file_new_fd(ioc, fd); return ioc; } +QIOChannelFile * +qio_channel_file_new_dupfd(int fd, Error **errp) +{ + int newfd = dup(fd); + + if (newfd < 0) { + error_setg_errno(errp, errno, "Could not dup FD %d", fd); + return NULL; + } + + return qio_channel_file_new_fd(newfd); +} QIOChannelFile * qio_channel_file_new_path(const char *path, @@ -60,6 +76,10 @@ qio_channel_file_new_path(const char *path, return NULL; } + if (lseek(ioc->fd, 0, SEEK_CUR) != (off_t)-1) { + qio_channel_set_feature(QIO_CHANNEL(ioc), QIO_CHANNEL_FEATURE_SEEKABLE); + } + trace_qio_channel_file_new_path(ioc, path, flags, mode, ioc->fd); return ioc; @@ -138,6 +158,58 @@ static ssize_t qio_channel_file_writev(QIOChannel *ioc, return ret; } +#ifdef CONFIG_PREADV +static ssize_t qio_channel_file_preadv(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + off_t offset, + Error **errp) +{ + QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc); + ssize_t ret; + + retry: + ret = preadv(fioc->fd, iov, niov, offset); + if (ret < 0) { + if (errno == EAGAIN) { + return QIO_CHANNEL_ERR_BLOCK; + } + if (errno == EINTR) { + goto retry; + } + + error_setg_errno(errp, errno, "Unable to read from file"); + return -1; + } + + return ret; +} + +static ssize_t qio_channel_file_pwritev(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + off_t offset, + Error **errp) +{ + QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc); + ssize_t ret; + + retry: + ret = pwritev(fioc->fd, iov, niov, offset); + if (ret <= 0) { + if (errno == EAGAIN) { + return QIO_CHANNEL_ERR_BLOCK; + } + if (errno == EINTR) { + goto retry; + } + error_setg_errno(errp, errno, "Unable to write to file"); + return -1; + } + return ret; +} +#endif /* CONFIG_PREADV */ + static int qio_channel_file_set_blocking(QIOChannel *ioc, bool enabled, Error **errp) @@ -223,6 +295,10 @@ static void qio_channel_file_class_init(ObjectClass *klass, ioc_klass->io_writev = qio_channel_file_writev; ioc_klass->io_readv = qio_channel_file_readv; ioc_klass->io_set_blocking = qio_channel_file_set_blocking; +#ifdef CONFIG_PREADV + ioc_klass->io_pwritev = qio_channel_file_pwritev; + ioc_klass->io_preadv = qio_channel_file_preadv; +#endif ioc_klass->io_seek = qio_channel_file_seek; ioc_klass->io_close = qio_channel_file_close; ioc_klass->io_create_watch = qio_channel_file_create_watch; diff --git a/io/channel-tls.c b/io/channel-tls.c index 58fe1aceeea..1d9c9c72bfb 100644 --- a/io/channel-tls.c +++ b/io/channel-tls.c @@ -381,6 +381,7 @@ static int qio_channel_tls_close(QIOChannel *ioc, QIOChannelTLS *tioc = 
QIO_CHANNEL_TLS(ioc); if (tioc->hs_ioc_tag) { + trace_qio_channel_tls_handshake_cancel(ioc); g_clear_handle_id(&tioc->hs_ioc_tag, g_source_remove); } diff --git a/io/channel.c b/io/channel.c index 86c5834510f..a1f12f8e909 100644 --- a/io/channel.c +++ b/io/channel.c @@ -454,6 +454,64 @@ GSource *qio_channel_add_watch_source(QIOChannel *ioc, } +ssize_t qio_channel_pwritev(QIOChannel *ioc, const struct iovec *iov, + size_t niov, off_t offset, Error **errp) +{ + QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc); + + if (!klass->io_pwritev) { + error_setg(errp, "Channel does not support pwritev"); + return -1; + } + + if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SEEKABLE)) { + error_setg_errno(errp, EINVAL, "Requested channel is not seekable"); + return -1; + } + + return klass->io_pwritev(ioc, iov, niov, offset, errp); +} + +ssize_t qio_channel_pwrite(QIOChannel *ioc, char *buf, size_t buflen, + off_t offset, Error **errp) +{ + struct iovec iov = { + .iov_base = buf, + .iov_len = buflen + }; + + return qio_channel_pwritev(ioc, &iov, 1, offset, errp); +} + +ssize_t qio_channel_preadv(QIOChannel *ioc, const struct iovec *iov, + size_t niov, off_t offset, Error **errp) +{ + QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc); + + if (!klass->io_preadv) { + error_setg(errp, "Channel does not support preadv"); + return -1; + } + + if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SEEKABLE)) { + error_setg_errno(errp, EINVAL, "Requested channel is not seekable"); + return -1; + } + + return klass->io_preadv(ioc, iov, niov, offset, errp); +} + +ssize_t qio_channel_pread(QIOChannel *ioc, char *buf, size_t buflen, + off_t offset, Error **errp) +{ + struct iovec iov = { + .iov_base = buf, + .iov_len = buflen + }; + + return qio_channel_preadv(ioc, &iov, 1, offset, errp); +} + int qio_channel_shutdown(QIOChannel *ioc, QIOChannelShutdown how, Error **errp) diff --git a/io/trace-events b/io/trace-events index 3cc5cf1efdf..d4c0f84a9a2 100644 --- a/io/trace-events +++ b/io/trace-events @@ -43,6 +43,7 @@ qio_channel_tls_handshake_start(void *ioc) "TLS handshake start ioc=%p" qio_channel_tls_handshake_pending(void *ioc, int status) "TLS handshake pending ioc=%p status=%d" qio_channel_tls_handshake_fail(void *ioc) "TLS handshake fail ioc=%p" qio_channel_tls_handshake_complete(void *ioc) "TLS handshake complete ioc=%p" +qio_channel_tls_handshake_cancel(void *ioc) "TLS handshake cancel ioc=%p" qio_channel_tls_credentials_allow(void *ioc) "TLS credentials allow ioc=%p" qio_channel_tls_credentials_deny(void *ioc) "TLS credentials deny ioc=%p" diff --git a/iothread.c b/iothread.c index b753286414a..e1e9e047365 100644 --- a/iothread.c +++ b/iothread.c @@ -170,8 +170,7 @@ static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp) } aio_context_set_aio_params(iothread->ctx, - iothread->parent_obj.aio_max_batch, - errp); + iothread->parent_obj.aio_max_batch); aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min, base->thread_pool_max, errp); @@ -405,6 +404,5 @@ IOThread *iothread_by_id(const char *id) bool qemu_in_iothread(void) { - return qemu_get_current_aio_context() == qemu_get_aio_context() ? 
- false : true; + return qemu_get_current_aio_context() != qemu_get_aio_context(); } diff --git a/job.c b/job.c index 99a2e54b54a..660ce22c56b 100644 --- a/job.c +++ b/job.c @@ -464,12 +464,8 @@ void job_unref_locked(Job *job) assert(!job->txn); if (job->driver->free) { - AioContext *aio_context = job->aio_context; job_unlock(); - /* FIXME: aiocontext lock is required because cb calls blk_unref */ - aio_context_acquire(aio_context); job->driver->free(job); - aio_context_release(aio_context); job_lock(); } @@ -840,12 +836,10 @@ static void job_clean(Job *job) /* * Called with job_mutex held, but releases it temporarily. - * Takes AioContext lock internally to invoke a job->driver callback. */ static int job_finalize_single_locked(Job *job) { int job_ret; - AioContext *ctx = job->aio_context; assert(job_is_completed_locked(job)); @@ -854,7 +848,6 @@ static int job_finalize_single_locked(Job *job) job_ret = job->ret; job_unlock(); - aio_context_acquire(ctx); if (!job_ret) { job_commit(job); @@ -867,7 +860,6 @@ static int job_finalize_single_locked(Job *job) job->cb(job->opaque, job_ret); } - aio_context_release(ctx); job_lock(); /* Emit events only if we actually started */ @@ -886,17 +878,13 @@ static int job_finalize_single_locked(Job *job) /* * Called with job_mutex held, but releases it temporarily. - * Takes AioContext lock internally to invoke a job->driver callback. */ static void job_cancel_async_locked(Job *job, bool force) { - AioContext *ctx = job->aio_context; GLOBAL_STATE_CODE(); if (job->driver->cancel) { job_unlock(); - aio_context_acquire(ctx); force = job->driver->cancel(job, force); - aio_context_release(ctx); job_lock(); } else { /* No .cancel() means the job will behave as if force-cancelled */ @@ -931,7 +919,6 @@ static void job_cancel_async_locked(Job *job, bool force) /* * Called with job_mutex held, but releases it temporarily. - * Takes AioContext lock internally to invoke a job->driver callback. */ static void job_completed_txn_abort_locked(Job *job) { @@ -979,15 +966,12 @@ static void job_completed_txn_abort_locked(Job *job) static int job_prepare_locked(Job *job) { int ret; - AioContext *ctx = job->aio_context; GLOBAL_STATE_CODE(); if (job->ret == 0 && job->driver->prepare) { job_unlock(); - aio_context_acquire(ctx); ret = job->driver->prepare(job); - aio_context_release(ctx); job_lock(); job->ret = ret; job_update_rc_locked(job); diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h index 38e5957526c..c59ea55cd8e 100644 --- a/linux-headers/asm-arm64/kvm.h +++ b/linux-headers/asm-arm64/kvm.h @@ -491,6 +491,38 @@ struct kvm_smccc_filter { #define KVM_HYPERCALL_EXIT_SMC (1U << 0) #define KVM_HYPERCALL_EXIT_16BIT (1U << 1) +/* + * Get feature ID registers userspace writable mask. + * + * From DDI0487J.a, D19.2.66 ("ID_AA64MMFR2_EL1, AArch64 Memory Model + * Feature Register 2"): + * + * "The Feature ID space is defined as the System register space in + * AArch64 with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7}, + * op2=={0-7}." + * + * This covers all currently known R/O registers that indicate + * anything useful feature wise, including the ID registers. + * + * If we ever need to introduce a new range, it will be described as + * such in the range field. 
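+ *
+ * As a worked example (illustration only): ID_AA64PFR0_EL1 is encoded
+ * as op0==3, op1==0, CRn==0, CRm==4, op2==0, so
+ * KVM_ARM_FEATURE_ID_RANGE_IDX(3, 0, 0, 4, 0) evaluates to
+ * (0 << 6) | (4 << 3) | 0 == 32, i.e. entry 32 of the mask array
+ * described by struct reg_mask_range below.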
+ */ +#define KVM_ARM_FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2) \ + ({ \ + __u64 __op1 = (op1) & 3; \ + __op1 -= (__op1 == 3); \ + (__op1 << 6 | ((crm) & 7) << 3 | (op2)); \ + }) + +#define KVM_ARM_FEATURE_ID_RANGE 0 +#define KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8) + +struct reg_mask_range { + __u64 addr; /* Pointer to mask array */ + __u32 range; /* Requested range */ + __u32 reserved[13]; +}; + #endif #endif /* __ARM_KVM_H__ */ diff --git a/linux-headers/asm-generic/unistd.h b/linux-headers/asm-generic/unistd.h index abe087c53b4..75f00965ab1 100644 --- a/linux-headers/asm-generic/unistd.h +++ b/linux-headers/asm-generic/unistd.h @@ -71,7 +71,7 @@ __SYSCALL(__NR_fremovexattr, sys_fremovexattr) #define __NR_getcwd 17 __SYSCALL(__NR_getcwd, sys_getcwd) #define __NR_lookup_dcookie 18 -__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie) +__SYSCALL(__NR_lookup_dcookie, sys_ni_syscall) #define __NR_eventfd2 19 __SYSCALL(__NR_eventfd2, sys_eventfd2) #define __NR_epoll_create1 20 @@ -816,15 +816,34 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease) __SYSCALL(__NR_futex_waitv, sys_futex_waitv) #define __NR_set_mempolicy_home_node 450 __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) - #define __NR_cachestat 451 __SYSCALL(__NR_cachestat, sys_cachestat) - #define __NR_fchmodat2 452 __SYSCALL(__NR_fchmodat2, sys_fchmodat2) +#define __NR_map_shadow_stack 453 +__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack) +#define __NR_futex_wake 454 +__SYSCALL(__NR_futex_wake, sys_futex_wake) +#define __NR_futex_wait 455 +__SYSCALL(__NR_futex_wait, sys_futex_wait) +#define __NR_futex_requeue 456 +__SYSCALL(__NR_futex_requeue, sys_futex_requeue) + +#define __NR_statmount 457 +__SYSCALL(__NR_statmount, sys_statmount) + +#define __NR_listmount 458 +__SYSCALL(__NR_listmount, sys_listmount) + +#define __NR_lsm_get_self_attr 459 +__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr) +#define __NR_lsm_set_self_attr 460 +__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr) +#define __NR_lsm_list_modules 461 +__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules) #undef __NR_syscalls -#define __NR_syscalls 453 +#define __NR_syscalls 462 /* * 32 bit systems traditionally used different diff --git a/linux-headers/asm-loongarch/bitsperlong.h b/linux-headers/asm-loongarch/bitsperlong.h new file mode 100644 index 00000000000..6dc0bb0c13b --- /dev/null +++ b/linux-headers/asm-loongarch/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/linux-headers/asm-loongarch/kvm.h b/linux-headers/asm-loongarch/kvm.h new file mode 100644 index 00000000000..923d0bd3829 --- /dev/null +++ b/linux-headers/asm-loongarch/kvm.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __UAPI_ASM_LOONGARCH_KVM_H +#define __UAPI_ASM_LOONGARCH_KVM_H + +#include + +/* + * KVM LoongArch specific structures and definitions. + * + * Some parts derived from the x86 version of this file. 
+ */ + +#define __KVM_HAVE_READONLY_MEM + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_DIRTY_LOG_PAGE_OFFSET 64 + +/* + * for KVM_GET_REGS and KVM_SET_REGS + */ +struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + __u64 gpr[32]; + __u64 pc; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + */ +struct kvm_fpu { + __u32 fcsr; + __u64 fcc; /* 8x8 */ + struct kvm_fpureg { + __u64 val64[4]; + } fpr[32]; +}; + +/* + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various + * registers. The id field is broken down as follows: + * + * bits[63..52] - As per linux/kvm.h + * bits[51..32] - Must be zero. + * bits[31..16] - Register set. + * + * Register set = 0: GP registers from kvm_regs (see definitions below). + * + * Register set = 1: CSR registers. + * + * Register set = 2: KVM specific registers (see definitions below). + * + * Register set = 3: FPU / SIMD registers (see definitions below). + * + * Other sets registers may be added in the future. Each set would + * have its own identifier in bits[31..16]. + */ + +#define KVM_REG_LOONGARCH_GPR (KVM_REG_LOONGARCH | 0x00000ULL) +#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x10000ULL) +#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL) +#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL) +#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL) +#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL) +#define KVM_CSR_IDX_MASK 0x7fff +#define KVM_CPUCFG_IDX_MASK 0x7fff + +/* + * KVM_REG_LOONGARCH_KVM - KVM specific control registers. + */ + +#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2) + +#define LOONGARCH_REG_SHIFT 3 +#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) +#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) +#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) +#define KVM_LOONGARCH_VCPU_CPUCFG 0 + +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct kvm_iocsr_entry { + __u32 addr; + __u32 pad; + __u64 data; +}; + +#define KVM_NR_IRQCHIPS 1 +#define KVM_IRQCHIP_NUM_PINS 64 +#define KVM_MAX_CORES 256 + +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */ diff --git a/linux-headers/asm-loongarch/mman.h b/linux-headers/asm-loongarch/mman.h new file mode 100644 index 00000000000..8eebf89f5ab --- /dev/null +++ b/linux-headers/asm-loongarch/mman.h @@ -0,0 +1 @@ +#include diff --git a/linux-headers/asm-loongarch/unistd.h b/linux-headers/asm-loongarch/unistd.h new file mode 100644 index 00000000000..fcb668984f0 --- /dev/null +++ b/linux-headers/asm-loongarch/unistd.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 + +#include diff --git a/linux-headers/asm-mips/mman.h b/linux-headers/asm-mips/mman.h index c6e1fc77c99..9c48d9a21aa 100644 --- a/linux-headers/asm-mips/mman.h +++ b/linux-headers/asm-mips/mman.h @@ -88,7 +88,7 @@ #define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ #define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ -#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump, +#define MADV_DONTDUMP 16 /* Explicitly exclude from core dump, overrides the 
coredump filter bits */ #define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ diff --git a/linux-headers/asm-mips/unistd_n32.h b/linux-headers/asm-mips/unistd_n32.h index 46d8500654c..ce2e050a9ba 100644 --- a/linux-headers/asm-mips/unistd_n32.h +++ b/linux-headers/asm-mips/unistd_n32.h @@ -381,5 +381,14 @@ #define __NR_set_mempolicy_home_node (__NR_Linux + 450) #define __NR_cachestat (__NR_Linux + 451) #define __NR_fchmodat2 (__NR_Linux + 452) +#define __NR_map_shadow_stack (__NR_Linux + 453) +#define __NR_futex_wake (__NR_Linux + 454) +#define __NR_futex_wait (__NR_Linux + 455) +#define __NR_futex_requeue (__NR_Linux + 456) +#define __NR_statmount (__NR_Linux + 457) +#define __NR_listmount (__NR_Linux + 458) +#define __NR_lsm_get_self_attr (__NR_Linux + 459) +#define __NR_lsm_set_self_attr (__NR_Linux + 460) +#define __NR_lsm_list_modules (__NR_Linux + 461) #endif /* _ASM_UNISTD_N32_H */ diff --git a/linux-headers/asm-mips/unistd_n64.h b/linux-headers/asm-mips/unistd_n64.h index c2f7ac673bb..5bfb3733ffd 100644 --- a/linux-headers/asm-mips/unistd_n64.h +++ b/linux-headers/asm-mips/unistd_n64.h @@ -357,5 +357,14 @@ #define __NR_set_mempolicy_home_node (__NR_Linux + 450) #define __NR_cachestat (__NR_Linux + 451) #define __NR_fchmodat2 (__NR_Linux + 452) +#define __NR_map_shadow_stack (__NR_Linux + 453) +#define __NR_futex_wake (__NR_Linux + 454) +#define __NR_futex_wait (__NR_Linux + 455) +#define __NR_futex_requeue (__NR_Linux + 456) +#define __NR_statmount (__NR_Linux + 457) +#define __NR_listmount (__NR_Linux + 458) +#define __NR_lsm_get_self_attr (__NR_Linux + 459) +#define __NR_lsm_set_self_attr (__NR_Linux + 460) +#define __NR_lsm_list_modules (__NR_Linux + 461) #endif /* _ASM_UNISTD_N64_H */ diff --git a/linux-headers/asm-mips/unistd_o32.h b/linux-headers/asm-mips/unistd_o32.h index 757c68f2add..02eaecd020e 100644 --- a/linux-headers/asm-mips/unistd_o32.h +++ b/linux-headers/asm-mips/unistd_o32.h @@ -427,5 +427,14 @@ #define __NR_set_mempolicy_home_node (__NR_Linux + 450) #define __NR_cachestat (__NR_Linux + 451) #define __NR_fchmodat2 (__NR_Linux + 452) +#define __NR_map_shadow_stack (__NR_Linux + 453) +#define __NR_futex_wake (__NR_Linux + 454) +#define __NR_futex_wait (__NR_Linux + 455) +#define __NR_futex_requeue (__NR_Linux + 456) +#define __NR_statmount (__NR_Linux + 457) +#define __NR_listmount (__NR_Linux + 458) +#define __NR_lsm_get_self_attr (__NR_Linux + 459) +#define __NR_lsm_set_self_attr (__NR_Linux + 460) +#define __NR_lsm_list_modules (__NR_Linux + 461) #endif /* _ASM_UNISTD_O32_H */ diff --git a/linux-headers/asm-powerpc/unistd_32.h b/linux-headers/asm-powerpc/unistd_32.h index 8ef94bbac13..bbab08d6ec2 100644 --- a/linux-headers/asm-powerpc/unistd_32.h +++ b/linux-headers/asm-powerpc/unistd_32.h @@ -434,6 +434,15 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 +#define __NR_statmount 457 +#define __NR_listmount 458 +#define __NR_lsm_get_self_attr 459 +#define __NR_lsm_set_self_attr 460 +#define __NR_lsm_list_modules 461 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-powerpc/unistd_64.h b/linux-headers/asm-powerpc/unistd_64.h index 0e7ee43e884..af34cde70f2 100644 --- a/linux-headers/asm-powerpc/unistd_64.h +++ b/linux-headers/asm-powerpc/unistd_64.h @@ -406,6 +406,15 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define 
__NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 +#define __NR_statmount 457 +#define __NR_listmount 458 +#define __NR_lsm_get_self_attr 459 +#define __NR_lsm_set_self_attr 460 +#define __NR_lsm_list_modules 461 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h index 992c5e40710..7499e88a947 100644 --- a/linux-headers/asm-riscv/kvm.h +++ b/linux-headers/asm-riscv/kvm.h @@ -80,6 +80,7 @@ struct kvm_riscv_csr { unsigned long sip; unsigned long satp; unsigned long scounteren; + unsigned long senvcfg; }; /* AIA CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ @@ -93,6 +94,11 @@ struct kvm_riscv_aia_csr { unsigned long iprio2h; }; +/* Smstateen CSR for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_smstateen_csr { + unsigned long sstateen0; +}; + /* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ struct kvm_riscv_timer { __u64 frequency; @@ -131,6 +137,35 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_ZICSR, KVM_RISCV_ISA_EXT_ZIFENCEI, KVM_RISCV_ISA_EXT_ZIHPM, + KVM_RISCV_ISA_EXT_SMSTATEEN, + KVM_RISCV_ISA_EXT_ZICOND, + KVM_RISCV_ISA_EXT_ZBC, + KVM_RISCV_ISA_EXT_ZBKB, + KVM_RISCV_ISA_EXT_ZBKC, + KVM_RISCV_ISA_EXT_ZBKX, + KVM_RISCV_ISA_EXT_ZKND, + KVM_RISCV_ISA_EXT_ZKNE, + KVM_RISCV_ISA_EXT_ZKNH, + KVM_RISCV_ISA_EXT_ZKR, + KVM_RISCV_ISA_EXT_ZKSED, + KVM_RISCV_ISA_EXT_ZKSH, + KVM_RISCV_ISA_EXT_ZKT, + KVM_RISCV_ISA_EXT_ZVBB, + KVM_RISCV_ISA_EXT_ZVBC, + KVM_RISCV_ISA_EXT_ZVKB, + KVM_RISCV_ISA_EXT_ZVKG, + KVM_RISCV_ISA_EXT_ZVKNED, + KVM_RISCV_ISA_EXT_ZVKNHA, + KVM_RISCV_ISA_EXT_ZVKNHB, + KVM_RISCV_ISA_EXT_ZVKSED, + KVM_RISCV_ISA_EXT_ZVKSH, + KVM_RISCV_ISA_EXT_ZVKT, + KVM_RISCV_ISA_EXT_ZFH, + KVM_RISCV_ISA_EXT_ZFHMIN, + KVM_RISCV_ISA_EXT_ZIHINTNTL, + KVM_RISCV_ISA_EXT_ZVFH, + KVM_RISCV_ISA_EXT_ZVFHMIN, + KVM_RISCV_ISA_EXT_ZFA, KVM_RISCV_ISA_EXT_MAX, }; @@ -148,9 +183,17 @@ enum KVM_RISCV_SBI_EXT_ID { KVM_RISCV_SBI_EXT_PMU, KVM_RISCV_SBI_EXT_EXPERIMENTAL, KVM_RISCV_SBI_EXT_VENDOR, + KVM_RISCV_SBI_EXT_DBCN, + KVM_RISCV_SBI_EXT_STA, KVM_RISCV_SBI_EXT_MAX, }; +/* SBI STA extension registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_sbi_sta { + unsigned long shmem_lo; + unsigned long shmem_hi; +}; + /* Possible states for kvm_riscv_timer */ #define KVM_RISCV_TIMER_STATE_OFF 0 #define KVM_RISCV_TIMER_STATE_ON 1 @@ -178,10 +221,13 @@ enum KVM_RISCV_SBI_EXT_ID { #define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT) #define KVM_REG_RISCV_CSR_GENERAL (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) #define KVM_REG_RISCV_CSR_AIA (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_CSR_SMSTATEEN (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT) #define KVM_REG_RISCV_CSR_REG(name) \ (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long)) #define KVM_REG_RISCV_CSR_AIA_REG(name) \ (offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long)) +#define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name) \ + (offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long)) /* Timer registers are mapped as type 4 */ #define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT) @@ -229,6 +275,12 @@ enum KVM_RISCV_SBI_EXT_ID { #define KVM_REG_RISCV_VECTOR_REG(n) \ ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long)) +/* Registers for specific SBI extensions are mapped as type 10 */ +#define KVM_REG_RISCV_SBI_STATE (0x0a << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_SBI_STA (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define 
KVM_REG_RISCV_SBI_STA_REG(name) \ + (offsetof(struct kvm_riscv_sbi_sta, name) / sizeof(unsigned long)) + /* Device Control API: RISC-V AIA */ #define KVM_DEV_RISCV_APLIC_ALIGN 0x1000 #define KVM_DEV_RISCV_APLIC_SIZE 0x4000 diff --git a/linux-headers/asm-riscv/ptrace.h b/linux-headers/asm-riscv/ptrace.h new file mode 100644 index 00000000000..1e3166caca8 --- /dev/null +++ b/linux-headers/asm-riscv/ptrace.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_PTRACE_H +#define _ASM_RISCV_PTRACE_H + +#ifndef __ASSEMBLY__ + +#include + +#define PTRACE_GETFDPIC 33 + +#define PTRACE_GETFDPIC_EXEC 0 +#define PTRACE_GETFDPIC_INTERP 1 + +/* + * User-mode register state for core dumps, ptrace, sigcontext + * + * This decouples struct pt_regs from the userspace ABI. + * struct user_regs_struct must form a prefix of struct pt_regs. + */ +struct user_regs_struct { + unsigned long pc; + unsigned long ra; + unsigned long sp; + unsigned long gp; + unsigned long tp; + unsigned long t0; + unsigned long t1; + unsigned long t2; + unsigned long s0; + unsigned long s1; + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; + unsigned long s2; + unsigned long s3; + unsigned long s4; + unsigned long s5; + unsigned long s6; + unsigned long s7; + unsigned long s8; + unsigned long s9; + unsigned long s10; + unsigned long s11; + unsigned long t3; + unsigned long t4; + unsigned long t5; + unsigned long t6; +}; + +struct __riscv_f_ext_state { + __u32 f[32]; + __u32 fcsr; +}; + +struct __riscv_d_ext_state { + __u64 f[32]; + __u32 fcsr; +}; + +struct __riscv_q_ext_state { + __u64 f[64] __attribute__((aligned(16))); + __u32 fcsr; + /* + * Reserved for expansion of sigcontext structure. Currently zeroed + * upon signal, and must be zero upon sigreturn. + */ + __u32 reserved[3]; +}; + +struct __riscv_ctx_hdr { + __u32 magic; + __u32 size; +}; + +struct __riscv_extra_ext_header { + __u32 __padding[129] __attribute__((aligned(16))); + /* + * Reserved for expansion of sigcontext structure. Currently zeroed + * upon signal, and must be zero upon sigreturn. + */ + __u32 reserved; + struct __riscv_ctx_hdr hdr; +}; + +union __riscv_fp_state { + struct __riscv_f_ext_state f; + struct __riscv_d_ext_state d; + struct __riscv_q_ext_state q; +}; + +struct __riscv_v_ext_state { + unsigned long vstart; + unsigned long vl; + unsigned long vtype; + unsigned long vcsr; + unsigned long vlenb; + void *datap; + /* + * In signal handler, datap will be set a correct user stack offset + * and vector registers will be copied to the address of datap + * pointer. 
+ */ +}; + +struct __riscv_v_regset_state { + unsigned long vstart; + unsigned long vl; + unsigned long vtype; + unsigned long vcsr; + unsigned long vlenb; + char vreg[]; +}; + +/* + * According to spec: The number of bits in a single vector register, + * VLEN >= ELEN, which must be a power of 2, and must be no greater than + * 2^16 = 65536bits = 8192bytes + */ +#define RISCV_MAX_VLENB (8192) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_RISCV_PTRACE_H */ diff --git a/linux-headers/asm-s390/unistd_32.h b/linux-headers/asm-s390/unistd_32.h index 716fa368ca7..a3ece69d824 100644 --- a/linux-headers/asm-s390/unistd_32.h +++ b/linux-headers/asm-s390/unistd_32.h @@ -425,5 +425,14 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 +#define __NR_statmount 457 +#define __NR_listmount 458 +#define __NR_lsm_get_self_attr 459 +#define __NR_lsm_set_self_attr 460 +#define __NR_lsm_list_modules 461 #endif /* _ASM_S390_UNISTD_32_H */ diff --git a/linux-headers/asm-s390/unistd_64.h b/linux-headers/asm-s390/unistd_64.h index b2a11b1d139..8c5fd93495c 100644 --- a/linux-headers/asm-s390/unistd_64.h +++ b/linux-headers/asm-s390/unistd_64.h @@ -373,5 +373,14 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 +#define __NR_statmount 457 +#define __NR_listmount 458 +#define __NR_lsm_get_self_attr 459 +#define __NR_lsm_set_self_attr 460 +#define __NR_lsm_list_modules 461 #endif /* _ASM_S390_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h index 2b3a8f7bd2c..003fb745347 100644 --- a/linux-headers/asm-x86/kvm.h +++ b/linux-headers/asm-x86/kvm.h @@ -560,4 +560,7 @@ struct kvm_pmu_event_filter { /* x86-specific KVM_EXIT_HYPERCALL flags. 
*/ #define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0) +#define KVM_X86_DEFAULT_VM 0 +#define KVM_X86_SW_PROTECTED_VM 1 + #endif /* _ASM_X86_KVM_H */ diff --git a/linux-headers/asm-x86/unistd_32.h b/linux-headers/asm-x86/unistd_32.h index d749ad1c24e..5c9c329e939 100644 --- a/linux-headers/asm-x86/unistd_32.h +++ b/linux-headers/asm-x86/unistd_32.h @@ -443,6 +443,15 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 +#define __NR_statmount 457 +#define __NR_listmount 458 +#define __NR_lsm_get_self_attr 459 +#define __NR_lsm_set_self_attr 460 +#define __NR_lsm_list_modules 461 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-x86/unistd_64.h b/linux-headers/asm-x86/unistd_64.h index cea67282ebf..d9aab7ae87d 100644 --- a/linux-headers/asm-x86/unistd_64.h +++ b/linux-headers/asm-x86/unistd_64.h @@ -366,6 +366,14 @@ #define __NR_cachestat 451 #define __NR_fchmodat2 452 #define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 +#define __NR_statmount 457 +#define __NR_listmount 458 +#define __NR_lsm_get_self_attr 459 +#define __NR_lsm_set_self_attr 460 +#define __NR_lsm_list_modules 461 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/unistd_x32.h b/linux-headers/asm-x86/unistd_x32.h index 5b2e79bf4c4..63cdd1ee43d 100644 --- a/linux-headers/asm-x86/unistd_x32.h +++ b/linux-headers/asm-x86/unistd_x32.h @@ -318,6 +318,14 @@ #define __NR_set_mempolicy_home_node (__X32_SYSCALL_BIT + 450) #define __NR_cachestat (__X32_SYSCALL_BIT + 451) #define __NR_fchmodat2 (__X32_SYSCALL_BIT + 452) +#define __NR_futex_wake (__X32_SYSCALL_BIT + 454) +#define __NR_futex_wait (__X32_SYSCALL_BIT + 455) +#define __NR_futex_requeue (__X32_SYSCALL_BIT + 456) +#define __NR_statmount (__X32_SYSCALL_BIT + 457) +#define __NR_listmount (__X32_SYSCALL_BIT + 458) +#define __NR_lsm_get_self_attr (__X32_SYSCALL_BIT + 459) +#define __NR_lsm_set_self_attr (__X32_SYSCALL_BIT + 460) +#define __NR_lsm_list_modules (__X32_SYSCALL_BIT + 461) #define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512) #define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513) #define __NR_ioctl (__X32_SYSCALL_BIT + 514) diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h index 218bf7ac98d..72e8f4b9dd0 100644 --- a/linux-headers/linux/iommufd.h +++ b/linux-headers/linux/iommufd.h @@ -47,6 +47,9 @@ enum { IOMMUFD_CMD_VFIO_IOAS, IOMMUFD_CMD_HWPT_ALLOC, IOMMUFD_CMD_GET_HW_INFO, + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, + IOMMUFD_CMD_HWPT_INVALIDATE, }; /** @@ -347,20 +350,86 @@ struct iommu_vfio_ioas { }; #define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS) +/** + * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation + * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as + * the parent HWPT in a nesting configuration. 
+ * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is + * enforced on device attachment + */ +enum iommufd_hwpt_alloc_flags { + IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, +}; + +/** + * enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table + * entry attributes + * @IOMMU_VTD_S1_SRE: Supervisor request + * @IOMMU_VTD_S1_EAFE: Extended access enable + * @IOMMU_VTD_S1_WPE: Write protect enable + */ +enum iommu_hwpt_vtd_s1_flags { + IOMMU_VTD_S1_SRE = 1 << 0, + IOMMU_VTD_S1_EAFE = 1 << 1, + IOMMU_VTD_S1_WPE = 1 << 2, +}; + +/** + * struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table + * info (IOMMU_HWPT_DATA_VTD_S1) + * @flags: Combination of enum iommu_hwpt_vtd_s1_flags + * @pgtbl_addr: The base address of the stage-1 page table. + * @addr_width: The address width of the stage-1 page table + * @__reserved: Must be 0 + */ +struct iommu_hwpt_vtd_s1 { + __aligned_u64 flags; + __aligned_u64 pgtbl_addr; + __u32 addr_width; + __u32 __reserved; +}; + +/** + * enum iommu_hwpt_data_type - IOMMU HWPT Data Type + * @IOMMU_HWPT_DATA_NONE: no data + * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table + */ +enum iommu_hwpt_data_type { + IOMMU_HWPT_DATA_NONE, + IOMMU_HWPT_DATA_VTD_S1, +}; + /** * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC) * @size: sizeof(struct iommu_hwpt_alloc) - * @flags: Must be 0 + * @flags: Combination of enum iommufd_hwpt_alloc_flags * @dev_id: The device to allocate this HWPT for - * @pt_id: The IOAS to connect this HWPT to + * @pt_id: The IOAS or HWPT to connect this HWPT to * @out_hwpt_id: The ID of the new HWPT * @__reserved: Must be 0 + * @data_type: One of enum iommu_hwpt_data_type + * @data_len: Length of the type specific data + * @data_uptr: User pointer to the type specific data * * Explicitly allocate a hardware page table object. This is the same object * type that is returned by iommufd_device_attach() and represents the * underlying iommu driver's iommu_domain kernel object. * - * A HWPT will be created with the IOVA mappings from the given IOAS. + * A kernel-managed HWPT will be created with the mappings from the given + * IOAS via the @pt_id. The @data_type for this allocation must be set to + * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a + * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags. + * + * A user-managed nested HWPT will be created from a given parent HWPT via + * @pt_id, in which the parent HWPT must be allocated previously via the + * same ioctl from a given IOAS (@pt_id). In this case, the @data_type + * must be set to a pre-defined type corresponding to an I/O page table + * type supported by the underlying IOMMU hardware. + * + * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and + * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr + * must be given. */ struct iommu_hwpt_alloc { __u32 size; @@ -369,13 +438,26 @@ struct iommu_hwpt_alloc { __u32 pt_id; __u32 out_hwpt_id; __u32 __reserved; + __u32 data_type; + __u32 data_len; + __aligned_u64 data_uptr; }; #define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC) +/** + * enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info + * @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings + * on a nested_parent domain. 
+ * https://www.intel.com/content/www/us/en/content-details/772415/content-details.html + */ +enum iommu_hw_info_vtd_flags { + IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0, +}; + /** * struct iommu_hw_info_vtd - Intel VT-d hardware information * - * @flags: Must be 0 + * @flags: Combination of enum iommu_hw_info_vtd_flags * @__reserved: Must be 0 * * @cap_reg: Value of Intel VT-d capability register defined in VT-d spec @@ -404,6 +486,20 @@ enum iommu_hw_info_type { IOMMU_HW_INFO_TYPE_INTEL_VTD, }; +/** + * enum iommufd_hw_capabilities + * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking + * If available, it means the following APIs + * are supported: + * + * IOMMU_HWPT_GET_DIRTY_BITMAP + * IOMMU_HWPT_SET_DIRTY_TRACKING + * + */ +enum iommufd_hw_capabilities { + IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0, +}; + /** * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO) * @size: sizeof(struct iommu_hw_info) @@ -415,6 +511,8 @@ enum iommu_hw_info_type { * the iommu type specific hardware information data * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. + * @out_capabilities: Output the generic iommu capability info type as defined + * in the enum iommu_hw_capabilities. * @__reserved: Must be 0 * * Query an iommu type specific hardware information data from an iommu behind @@ -439,6 +537,159 @@ struct iommu_hw_info { __aligned_u64 data_uptr; __u32 out_data_type; __u32 __reserved; + __aligned_u64 out_capabilities; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) + +/* + * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty + * tracking + * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking + */ +enum iommufd_hwpt_set_dirty_tracking_flags { + IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1, +}; + +/** + * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING) + * @size: sizeof(struct iommu_hwpt_set_dirty_tracking) + * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @__reserved: Must be 0 + * + * Toggle dirty tracking on an HW pagetable. + */ +struct iommu_hwpt_set_dirty_tracking { + __u32 size; + __u32 flags; + __u32 hwpt_id; + __u32 __reserved; +}; +#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING) + +/** + * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits + * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing + * any dirty bits metadata. This flag + * can be passed in the expectation + * where the next operation is an unmap + * of the same IOVA range. + * + */ +enum iommufd_hwpt_get_dirty_bitmap_flags { + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1, +}; + +/** + * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP) + * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap) + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags + * @__reserved: Must be 0 + * @iova: base IOVA of the bitmap first bit + * @length: IOVA range size + * @page_size: page size granularity of each bit in the bitmap + * @data: bitmap where to set the dirty bits. The bitmap bits each + * represent a page_size which you deviate from an arbitrary iova. 
+ * + * Checking a given IOVA is dirty: + * + * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64)) + * + * Walk the IOMMU pagetables for a given IOVA range to return a bitmap + * with the dirty IOVAs. In doing so it will also by default clear any + * dirty bit metadata set in the IOPTE. + */ +struct iommu_hwpt_get_dirty_bitmap { + __u32 size; + __u32 hwpt_id; + __u32 flags; + __u32 __reserved; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 data; +}; +#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP) + +/** + * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation + * Data Type + * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1 + */ +enum iommu_hwpt_invalidate_data_type { + IOMMU_HWPT_INVALIDATE_DATA_VTD_S1, +}; + +/** + * enum iommu_hwpt_vtd_s1_invalidate_flags - Flags for Intel VT-d + * stage-1 cache invalidation + * @IOMMU_VTD_INV_FLAGS_LEAF: Indicates whether the invalidation applies + * to all-levels page structure cache or just + * the leaf PTE cache. + */ +enum iommu_hwpt_vtd_s1_invalidate_flags { + IOMMU_VTD_INV_FLAGS_LEAF = 1 << 0, +}; + +/** + * struct iommu_hwpt_vtd_s1_invalidate - Intel VT-d cache invalidation + * (IOMMU_HWPT_INVALIDATE_DATA_VTD_S1) + * @addr: The start address of the range to be invalidated. It needs to + * be 4KB aligned. + * @npages: Number of contiguous 4K pages to be invalidated. + * @flags: Combination of enum iommu_hwpt_vtd_s1_invalidate_flags + * @__reserved: Must be 0 + * + * The Intel VT-d specific invalidation data for user-managed stage-1 cache + * invalidation in nested translation. Userspace uses this structure to + * tell the impacted cache scope after modifying the stage-1 page table. + * + * Invalidating all the caches related to the page table by setting @addr + * to be 0 and @npages to be U64_MAX. + * + * The device TLB will be invalidated automatically if ATS is enabled. + */ +struct iommu_hwpt_vtd_s1_invalidate { + __aligned_u64 addr; + __aligned_u64 npages; + __u32 flags; + __u32 __reserved; +}; + +/** + * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE) + * @size: sizeof(struct iommu_hwpt_invalidate) + * @hwpt_id: ID of a nested HWPT for cache invalidation + * @data_uptr: User pointer to an array of driver-specific cache invalidation + * data. + * @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data + * type of all the entries in the invalidation request array. It + * should be a type supported by the hwpt pointed by @hwpt_id. + * @entry_len: Length (in bytes) of a request entry in the request array + * @entry_num: Input the number of cache invalidation requests in the array. + * Output the number of requests successfully handled by kernel. + * @__reserved: Must be 0. + * + * Invalidate the iommu cache for user-managed page table. Modifications on a + * user-managed page table should be followed by this operation to sync cache. + * Each ioctl can support one or more cache invalidation requests in the array + * that has a total size of @entry_len * @entry_num. + * + * An empty invalidation request array by setting @entry_num==0 is allowed, and + * @entry_len and @data_uptr would be ignored in this case. This can be used to + * check if the given @data_type is supported or not by kernel. 
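+ *
+ * A minimal sketch of a single-entry request, assuming a VT-d stage-1
+ * nested HWPT (all values illustrative, "nested_hwpt_id" and "iommufd"
+ * are hypothetical):
+ *
+ *   struct iommu_hwpt_vtd_s1_invalidate inv = {
+ *       .addr = 0, .npages = UINT64_MAX,   // flush everything, per above
+ *   };
+ *   struct iommu_hwpt_invalidate cmd = {
+ *       .size = sizeof(cmd),
+ *       .hwpt_id = nested_hwpt_id,
+ *       .data_uptr = (uintptr_t)&inv,
+ *       .data_type = IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+ *       .entry_len = sizeof(inv),
+ *       .entry_num = 1,
+ *   };
+ *   ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd);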
+ */ +struct iommu_hwpt_invalidate { + __u32 size; + __u32 hwpt_id; + __aligned_u64 data_uptr; + __u32 data_type; + __u32 entry_len; + __u32 entry_num; + __u32 __reserved; +}; +#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE) #endif diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index 0d74ee999aa..17839229b2a 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -16,76 +16,6 @@ #define KVM_API_VERSION 12 -/* *** Deprecated interfaces *** */ - -#define KVM_TRC_SHIFT 16 - -#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT) -#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) - -#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01) -#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02) -#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01) - -#define KVM_TRC_HEAD_SIZE 12 -#define KVM_TRC_CYCLE_SIZE 8 -#define KVM_TRC_EXTRA_MAX 7 - -#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) -#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) -#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04) -#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05) -#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06) -#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07) -#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08) -#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09) -#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A) -#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B) -#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C) -#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D) -#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E) -#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F) -#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10) -#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11) -#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) -#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) -#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) -#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) -#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16) -#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17) -#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) -#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) - -struct kvm_user_trace_setup { - __u32 buf_size; - __u32 buf_nr; -}; - -#define __KVM_DEPRECATED_MAIN_W_0x06 \ - _IOW(KVMIO, 0x06, struct kvm_user_trace_setup) -#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07) -#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08) - -#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq) - -struct kvm_breakpoint { - __u32 enabled; - __u32 padding; - __u64 address; -}; - -struct kvm_debug_guest { - __u32 enabled; - __u32 pad; - struct kvm_breakpoint breakpoints[4]; - __u32 singlestep; -}; - -#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest) - -/* *** End of deprecated interfaces *** */ - - /* for KVM_SET_USER_MEMORY_REGION */ struct kvm_userspace_memory_region { __u32 slot; @@ -95,6 +25,19 @@ struct kvm_userspace_memory_region { __u64 userspace_addr; /* start of the userspace allocated memory */ }; +/* for KVM_SET_USER_MEMORY_REGION2 */ +struct kvm_userspace_memory_region2 { + __u32 slot; + __u32 flags; + __u64 guest_phys_addr; + __u64 memory_size; + __u64 userspace_addr; + __u64 guest_memfd_offset; + __u32 guest_memfd; + __u32 pad1; + __u64 pad2[14]; +}; + /* * The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for * userspace, other bits are reserved for kvm internal use which are defined @@ -102,6 +45,7 @@ struct kvm_userspace_memory_region { */ #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) #define KVM_MEM_READONLY (1UL 
<< 1) +#define KVM_MEM_GUEST_MEMFD (1UL << 2) /* for KVM_IRQ_LINE */ struct kvm_irq_level { @@ -264,6 +208,8 @@ struct kvm_xen_exit { #define KVM_EXIT_RISCV_SBI 35 #define KVM_EXIT_RISCV_CSR 36 #define KVM_EXIT_NOTIFY 37 +#define KVM_EXIT_LOONGARCH_IOCSR 38 +#define KVM_EXIT_MEMORY_FAULT 39 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -336,6 +282,13 @@ struct kvm_run { __u32 len; __u8 is_write; } mmio; + /* KVM_EXIT_LOONGARCH_IOCSR */ + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } iocsr_io; /* KVM_EXIT_HYPERCALL */ struct { __u64 nr; @@ -506,6 +459,13 @@ struct kvm_run { #define KVM_NOTIFY_CONTEXT_INVALID (1 << 0) __u32 flags; } notify; + /* KVM_EXIT_MEMORY_FAULT */ + struct { +#define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3) + __u64 flags; + __u64 gpa; + __u64 size; + } memory_fault; /* Fix the size of the union. */ char padding[256]; }; @@ -933,9 +893,6 @@ struct kvm_ppc_resize_hpt { */ #define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2) -#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06 -#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 -#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) #define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list) @@ -1188,6 +1145,12 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_COUNTER_OFFSET 227 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230 +#define KVM_CAP_USER_MEMORY2 231 +#define KVM_CAP_MEMORY_FAULT_INFO 232 +#define KVM_CAP_MEMORY_ATTRIBUTES 233 +#define KVM_CAP_GUEST_MEMFD 234 +#define KVM_CAP_VM_TYPES 235 #ifdef KVM_CAP_IRQ_ROUTING @@ -1278,6 +1241,7 @@ struct kvm_x86_mce { #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4) #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5) #define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6) +#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7) struct kvm_xen_hvm_config { __u32 flags; @@ -1358,6 +1322,7 @@ struct kvm_dirty_tlb { #define KVM_REG_ARM64 0x6000000000000000ULL #define KVM_REG_MIPS 0x7000000000000000ULL #define KVM_REG_RISCV 0x8000000000000000ULL +#define KVM_REG_LOONGARCH 0x9000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL @@ -1469,6 +1434,8 @@ struct kvm_vfio_spapr_tce { struct kvm_userspace_memory_region) #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64) +#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \ + struct kvm_userspace_memory_region2) /* enable ucontrol for s390 */ struct kvm_s390_ucas_mapping { @@ -1493,20 +1460,8 @@ struct kvm_s390_ucas_mapping { _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) #define KVM_UNREGISTER_COALESCED_MMIO \ _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) -#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ - struct kvm_assigned_pci_dev) #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) -/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */ -#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70 -#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq) #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) -#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ - struct kvm_assigned_pci_dev) -#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \ - struct kvm_assigned_msix_nr) -#define KVM_ASSIGN_SET_MSIX_ENTRY 
_IOW(KVMIO, 0x74, \ - struct kvm_assigned_msix_entry) -#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq) #define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd) #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config) #define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78) @@ -1523,9 +1478,6 @@ struct kvm_s390_ucas_mapping { * KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */ #define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2) #define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3) -/* Available with KVM_CAP_PCI_2_3 */ -#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \ - struct kvm_assigned_pci_dev) /* Available with KVM_CAP_SIGNAL_MSI */ #define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi) /* Available with KVM_CAP_PPC_GET_SMMU_INFO */ @@ -1558,6 +1510,7 @@ struct kvm_s390_ucas_mapping { #define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags) /* Available with KVM_CAP_COUNTER_OFFSET */ #define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset) +#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) @@ -1577,8 +1530,6 @@ struct kvm_s390_ucas_mapping { #define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs) #define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation) #define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt) -/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */ -#define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87 #define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs) #define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs) #define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid) @@ -2252,4 +2203,24 @@ struct kvm_s390_zpci_op { /* flags for kvm_s390_zpci_op->u.reg_aen.flags */ #define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) +/* Available with KVM_CAP_MEMORY_ATTRIBUTES */ +#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes) + +struct kvm_memory_attributes { + __u64 address; + __u64 size; + __u64 attributes; + __u64 flags; +}; + +#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3) + +#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd) + +struct kvm_create_guest_memfd { + __u64 size; + __u64 flags; + __u64 reserved[6]; +}; + #endif /* __LINUX_KVM_H */ diff --git a/linux-headers/linux/psp-sev.h b/linux-headers/linux/psp-sev.h index 12ccb70099d..bcb21339ee3 100644 --- a/linux-headers/linux/psp-sev.h +++ b/linux-headers/linux/psp-sev.h @@ -68,6 +68,7 @@ typedef enum { SEV_RET_INVALID_PARAM, SEV_RET_RESOURCE_LIMIT, SEV_RET_SECURE_DATA_INVALID, + SEV_RET_INVALID_KEY = 0x27, SEV_RET_MAX, } sev_ret_code; diff --git a/linux-headers/linux/stddef.h b/linux-headers/linux/stddef.h index 9bb07083ac8..bf9749dd142 100644 --- a/linux-headers/linux/stddef.h +++ b/linux-headers/linux/stddef.h @@ -27,8 +27,13 @@ union { \ struct { MEMBERS } ATTRS; \ struct TAG { MEMBERS } ATTRS NAME; \ - } + } ATTRS +#ifdef __cplusplus +/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. 
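Returning to the kvm.h additions above: the new guest_memfd flow pairs KVM_CREATE_GUEST_MEMFD with a KVM_SET_USER_MEMORY_REGION2 slot carrying KVM_MEM_GUEST_MEMFD. A hedged sketch, assuming a kernel and VM type that advertise KVM_CAP_GUEST_MEMFD and KVM_CAP_USER_MEMORY2 (vm_fd, slot, gpa, size and hva are placeholders; error handling is minimal):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int map_guest_memfd_slot(int vm_fd, uint32_t slot, uint64_t gpa,
                                uint64_t size, void *hva)
{
    struct kvm_create_guest_memfd gmem = { .size = size };
    struct kvm_userspace_memory_region2 region;
    int gmem_fd;

    gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
    if (gmem_fd < 0) {
        return -1;
    }

    region = (struct kvm_userspace_memory_region2) {
        .slot = slot,
        .flags = KVM_MEM_GUEST_MEMFD,
        .guest_phys_addr = gpa,
        .memory_size = size,
        .userspace_addr = (uintptr_t)hva,  /* shared (non-private) backing */
        .guest_memfd = gmem_fd,
        .guest_memfd_offset = 0,
    };

    return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
}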
*/ +#define __DECLARE_FLEX_ARRAY(T, member) \ + T member[0] +#else /** * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union * @@ -49,3 +54,5 @@ #ifndef __counted_by #define __counted_by(m) #endif + +#endif /* _LINUX_STDDEF_H */ diff --git a/linux-headers/linux/userfaultfd.h b/linux-headers/linux/userfaultfd.h index 59978fbaae3..4283de22d5b 100644 --- a/linux-headers/linux/userfaultfd.h +++ b/linux-headers/linux/userfaultfd.h @@ -40,7 +40,9 @@ UFFD_FEATURE_EXACT_ADDRESS | \ UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \ UFFD_FEATURE_WP_UNPOPULATED | \ - UFFD_FEATURE_POISON) + UFFD_FEATURE_POISON | \ + UFFD_FEATURE_WP_ASYNC | \ + UFFD_FEATURE_MOVE) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -49,6 +51,7 @@ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY | \ (__u64)1 << _UFFDIO_ZEROPAGE | \ + (__u64)1 << _UFFDIO_MOVE | \ (__u64)1 << _UFFDIO_WRITEPROTECT | \ (__u64)1 << _UFFDIO_CONTINUE | \ (__u64)1 << _UFFDIO_POISON) @@ -72,6 +75,7 @@ #define _UFFDIO_WAKE (0x02) #define _UFFDIO_COPY (0x03) #define _UFFDIO_ZEROPAGE (0x04) +#define _UFFDIO_MOVE (0x05) #define _UFFDIO_WRITEPROTECT (0x06) #define _UFFDIO_CONTINUE (0x07) #define _UFFDIO_POISON (0x08) @@ -91,6 +95,8 @@ struct uffdio_copy) #define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \ struct uffdio_zeropage) +#define UFFDIO_MOVE _IOWR(UFFDIO, _UFFDIO_MOVE, \ + struct uffdio_move) #define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \ struct uffdio_writeprotect) #define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \ @@ -216,6 +222,14 @@ struct uffdio_api { * (i.e. empty ptes). This will be the default behavior for shmem * & hugetlbfs, so this flag only affects anonymous memory behavior * when userfault write-protection mode is registered. + * + * UFFD_FEATURE_WP_ASYNC indicates that userfaultfd write-protection + * asynchronous mode is supported in which the write fault is + * automatically resolved and write-protection is un-set. + * It implies UFFD_FEATURE_WP_UNPOPULATED. + * + * UFFD_FEATURE_MOVE indicates that the kernel supports moving an + * existing page contents from userspace. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -232,6 +246,8 @@ struct uffdio_api { #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12) #define UFFD_FEATURE_WP_UNPOPULATED (1<<13) #define UFFD_FEATURE_POISON (1<<14) +#define UFFD_FEATURE_WP_ASYNC (1<<15) +#define UFFD_FEATURE_MOVE (1<<16) __u64 features; __u64 ioctls; @@ -340,6 +356,24 @@ struct uffdio_poison { __s64 updated; }; +struct uffdio_move { + __u64 dst; + __u64 src; + __u64 len; + /* + * Especially if used to atomically remove memory from the + * address space the wake on the dst range is not needed. + */ +#define UFFDIO_MOVE_MODE_DONTWAKE ((__u64)1<<0) +#define UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES ((__u64)1<<1) + __u64 mode; + /* + * "move" is written by the ioctl and must be at the end: the + * copy_from_user will not read the last 8 bytes. + */ + __s64 move; +}; + /* * Flags for the userfaultfd(2) system call itself. 
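To make the UFFDIO_MOVE plumbing introduced above concrete, a small sketch of moving an existing range, assuming a userfaultfd on which UFFD_FEATURE_MOVE has been negotiated via UFFDIO_API and the ranges are registered as required (uffd, src, dst and len are placeholders; this is an illustration, not part of the synced header):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int64_t uffd_move(int uffd, uint64_t src, uint64_t dst, uint64_t len)
{
    struct uffdio_move mv = {
        .dst = dst,
        .src = src,
        .len = len,
        .mode = 0,   /* or UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES */
    };

    if (ioctl(uffd, UFFDIO_MOVE, &mv) < 0) {
        return -1;
    }
    /* 'move' is written back by the kernel, per the struct comment above. */
    return mv.move;
}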
*/ diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h index acf72b4999f..b4be37b2255 100644 --- a/linux-headers/linux/vfio.h +++ b/linux-headers/linux/vfio.h @@ -277,8 +277,8 @@ struct vfio_region_info { #define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */ __u32 index; /* Region index */ __u32 cap_offset; /* Offset within info struct of first cap */ - __u64 size; /* Region size (bytes) */ - __u64 offset; /* Region offset from start of device fd */ + __aligned_u64 size; /* Region size (bytes) */ + __aligned_u64 offset; /* Region offset from start of device fd */ }; #define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8) @@ -294,8 +294,8 @@ struct vfio_region_info { #define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1 struct vfio_region_sparse_mmap_area { - __u64 offset; /* Offset of mmap'able area within region */ - __u64 size; /* Size of mmap'able area */ + __aligned_u64 offset; /* Offset of mmap'able area within region */ + __aligned_u64 size; /* Size of mmap'able area */ }; struct vfio_region_info_cap_sparse_mmap { @@ -450,9 +450,9 @@ struct vfio_device_migration_info { VFIO_DEVICE_STATE_V1_RESUMING) __u32 reserved; - __u64 pending_bytes; - __u64 data_offset; - __u64 data_size; + __aligned_u64 pending_bytes; + __aligned_u64 data_offset; + __aligned_u64 data_size; }; /* @@ -476,7 +476,7 @@ struct vfio_device_migration_info { struct vfio_region_info_cap_nvlink2_ssatgt { struct vfio_info_cap_header header; - __u64 tgt; + __aligned_u64 tgt; }; /* @@ -816,7 +816,7 @@ struct vfio_device_gfx_plane_info { __u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */ /* out */ __u32 drm_format; /* drm format of plane */ - __u64 drm_format_mod; /* tiled mode */ + __aligned_u64 drm_format_mod; /* tiled mode */ __u32 width; /* width of plane */ __u32 height; /* height of plane */ __u32 stride; /* stride of plane */ @@ -829,6 +829,7 @@ struct vfio_device_gfx_plane_info { __u32 region_index; /* region index */ __u32 dmabuf_id; /* dma-buf id */ }; + __u32 reserved; }; #define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14) @@ -863,9 +864,10 @@ struct vfio_device_ioeventfd { #define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */ #define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */ #define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf) - __u64 offset; /* device fd offset of write */ - __u64 data; /* data to be written */ + __aligned_u64 offset; /* device fd offset of write */ + __aligned_u64 data; /* data to be written */ __s32 fd; /* -1 for de-assignment */ + __u32 reserved; }; #define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16) @@ -1217,6 +1219,7 @@ enum vfio_device_mig_state { VFIO_DEVICE_STATE_RUNNING_P2P = 5, VFIO_DEVICE_STATE_PRE_COPY = 6, VFIO_DEVICE_STATE_PRE_COPY_P2P = 7, + VFIO_DEVICE_STATE_NR, }; /** @@ -1434,6 +1437,27 @@ struct vfio_device_feature_mig_data_size { #define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9 +/** + * Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device + * based on the operation specified in op flag. + * + * The functionality is incorporated for devices that needs bus master control, + * but the in-band device interface lacks the support. Consequently, it is not + * applicable to PCI devices, as bus master control for PCI devices is managed + * in-band through the configuration space. At present, this feature is supported + * only for CDX devices. + * When the device's BUS MASTER setting is configured as CLEAR, it will result in + * blocking all incoming DMA requests from the device. 
On the other hand, configuring + * the device's BUS MASTER setting as SET (enable) will grant the device the + * capability to perform DMA to the host memory. + */ +struct vfio_device_feature_bus_master { + __u32 op; +#define VFIO_DEVICE_FEATURE_CLEAR_MASTER 0 /* Clear Bus Master */ +#define VFIO_DEVICE_FEATURE_SET_MASTER 1 /* Set Bus Master */ +}; +#define VFIO_DEVICE_FEATURE_BUS_MASTER 10 + /* -------- API for Type1 VFIO IOMMU -------- */ /** @@ -1449,7 +1473,7 @@ struct vfio_iommu_type1_info { __u32 flags; #define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */ #define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */ - __u64 iova_pgsizes; /* Bitmap of supported page sizes */ + __aligned_u64 iova_pgsizes; /* Bitmap of supported page sizes */ __u32 cap_offset; /* Offset within info struct of first cap */ __u32 pad; }; diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h index f5c48b61ab6..649560c685f 100644 --- a/linux-headers/linux/vhost.h +++ b/linux-headers/linux/vhost.h @@ -219,4 +219,12 @@ */ #define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E) +/* Get the group for the descriptor table including driver & device areas + * of a virtqueue: read index, write group in num. + * The virtqueue index is stored in the index field of vhost_vring_state. + * The group ID of the descriptor table for this specific virtqueue + * is returned via num field of vhost_vring_state. + */ +#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \ + struct vhost_vring_state) #endif diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c index 8c20dc8a39a..71cdc8be50c 100644 --- a/linux-user/aarch64/cpu_loop.c +++ b/linux-user/aarch64/cpu_loop.c @@ -189,7 +189,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) { ARMCPU *cpu = env_archcpu(env); CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); struct image_info *info = ts->info; int i; diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c index a1e22d526d8..bc7a13800da 100644 --- a/linux-user/aarch64/signal.c +++ b/linux-user/aarch64/signal.c @@ -670,7 +670,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, aarch64_set_svcr(env, 0, R_SVCR_SM_MASK | R_SVCR_ZA_MASK); if (info) { - tswap_siginfo(&frame->info, info); + frame->info = *info; env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info); env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc); } diff --git a/linux-user/alpha/signal.c b/linux-user/alpha/signal.c index 4ec42994d4a..896c2c148a1 100644 --- a/linux-user/alpha/signal.c +++ b/linux-user/alpha/signal.c @@ -173,7 +173,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, goto give_sigsegv; } - tswap_siginfo(&frame->info, info); + frame->info = *info; __put_user(0, &frame->uc.tuc_flags); __put_user(0, &frame->uc.tuc_link); diff --git a/linux-user/alpha/target_elf.h b/linux-user/alpha/target_elf.h index 344e9f4d395..b77d638f6d4 100644 --- a/linux-user/alpha/target_elf.h +++ b/linux-user/alpha/target_elf.h @@ -9,6 +9,6 @@ #define ALPHA_TARGET_ELF_H static inline const char *cpu_get_model(uint32_t eflags) { - return "any"; + return "ev67"; } #endif diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c index b404117ff30..db1a41e27f4 100644 --- a/linux-user/arm/cpu_loop.c +++ b/linux-user/arm/cpu_loop.c @@ -263,7 +263,7 @@ static bool insn_is_linux_bkpt(uint32_t opcode, bool is_thumb) static bool 
emulate_arm_fpa11(CPUARMState *env, uint32_t opcode) { - TaskState *ts = env_cpu(env)->opaque; + TaskState *ts = get_task_state(env_cpu(env)); int rc = EmulateAll(opcode, &ts->fpa, env); int raise, enabled; @@ -514,7 +514,7 @@ void cpu_loop(CPUARMState *env) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) { CPUState *cpu = env_cpu(env); - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); struct image_info *info = ts->info; int i; diff --git a/linux-user/arm/signal.c b/linux-user/arm/signal.c index f77f692c63f..8db1c4b2338 100644 --- a/linux-user/arm/signal.c +++ b/linux-user/arm/signal.c @@ -177,7 +177,7 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, int usig, abi_ulong handler = 0; abi_ulong handler_fdpic_GOT = 0; abi_ulong retcode; - bool is_fdpic = info_is_fdpic(((TaskState *)thread_cpu->opaque)->info); + bool is_fdpic = info_is_fdpic(get_task_state(thread_cpu)->info); bool is_rt = ka->sa_flags & TARGET_SA_SIGINFO; bool thumb; @@ -357,7 +357,7 @@ void setup_rt_frame(int usig, struct target_sigaction *ka, info_addr = frame_addr + offsetof(struct rt_sigframe, info); uc_addr = frame_addr + offsetof(struct rt_sigframe, sig.uc); - tswap_siginfo(&frame->info, info); + frame->info = *info; setup_sigframe(&frame->sig.uc, set, env); diff --git a/linux-user/cris/cpu_loop.c b/linux-user/cris/cpu_loop.c index 01e6ff16fc9..04c9086b6dc 100644 --- a/linux-user/cris/cpu_loop.c +++ b/linux-user/cris/cpu_loop.c @@ -72,7 +72,7 @@ void cpu_loop(CPUCRISState *env) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) { CPUState *cpu = env_cpu(env); - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); struct image_info *info = ts->info; env->regs[0] = regs->r0; diff --git a/linux-user/elfload.c b/linux-user/elfload.c index cf9e74468b1..60cf55b36cd 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -2,6 +2,7 @@ #include "qemu/osdep.h" #include +#include #include #include @@ -21,7 +22,7 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "target_signal.h" -#include "accel/tcg/debuginfo.h" +#include "tcg/debuginfo.h" #ifdef TARGET_ARM #include "target/arm/cpu-features.h" @@ -459,6 +460,7 @@ enum { static bool init_guest_commpage(void) { ARMCPU *cpu = ARM_CPU(thread_cpu); + int host_page_size = qemu_real_host_page_size(); abi_ptr commpage; void *want; void *addr; @@ -471,10 +473,12 @@ static bool init_guest_commpage(void) return true; } - commpage = HI_COMMPAGE & -qemu_host_page_size; + commpage = HI_COMMPAGE & -host_page_size; want = g2h_untagged(commpage); - addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE, - MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); + addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE | + (commpage < reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE), + -1, 0); if (addr == MAP_FAILED) { perror("Allocating guest commpage"); @@ -487,12 +491,12 @@ static bool init_guest_commpage(void) /* Set kernel helper versions; rest of page is 0. 
*/ __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu)); - if (mprotect(addr, qemu_host_page_size, PROT_READ)) { + if (mprotect(addr, host_page_size, PROT_READ)) { perror("Protecting guest commpage"); exit(EXIT_FAILURE); } - page_set_flags(commpage, commpage | ~qemu_host_page_mask, + page_set_flags(commpage, commpage | (host_page_size - 1), PAGE_READ | PAGE_EXEC | PAGE_VALID); return true; } @@ -1531,10 +1535,14 @@ static bool init_guest_commpage(void) 0x3a, 0x68, 0x3b, 0x00, /* trap 0 */ }; - void *want = g2h_untagged(LO_COMMPAGE & -qemu_host_page_size); - void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE, - MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); + int host_page_size = qemu_real_host_page_size(); + void *want, *addr; + want = g2h_untagged(LO_COMMPAGE & -host_page_size); + addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE | + (reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE), + -1, 0); if (addr == MAP_FAILED) { perror("Allocating guest commpage"); exit(EXIT_FAILURE); @@ -1543,9 +1551,9 @@ static bool init_guest_commpage(void) return false; } - memcpy(addr, kuser_page, sizeof(kuser_page)); + memcpy(g2h_untagged(LO_COMMPAGE), kuser_page, sizeof(kuser_page)); - if (mprotect(addr, qemu_host_page_size, PROT_READ)) { + if (mprotect(addr, host_page_size, PROT_READ)) { perror("Protecting guest commpage"); exit(EXIT_FAILURE); } @@ -1969,16 +1977,20 @@ static inline void init_thread(struct target_pt_regs *regs, static bool init_guest_commpage(void) { - void *want = g2h_untagged(LO_COMMPAGE); - void *addr = mmap(want, qemu_host_page_size, PROT_NONE, - MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); + /* If reserved_va, then we have already mapped 0 page on the host. */ + if (!reserved_va) { + void *want, *addr; - if (addr == MAP_FAILED) { - perror("Allocating guest commpage"); - exit(EXIT_FAILURE); - } - if (addr != want) { - return false; + want = g2h_untagged(LO_COMMPAGE); + addr = mmap(want, TARGET_PAGE_SIZE, PROT_NONE, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0); + if (addr == MAP_FAILED) { + perror("Allocating guest commpage"); + exit(EXIT_FAILURE); + } + if (addr != want) { + return false; + } } /* @@ -2678,13 +2690,7 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff)); NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr))); NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum)); - if ((info->alignment & ~qemu_host_page_mask) != 0) { - /* Target doesn't support host page size alignment */ - NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); - } else { - NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE, - qemu_host_page_size))); - } + NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0)); NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); NEW_AUX_ENT(AT_ENTRY, info->entry); @@ -2892,7 +2898,7 @@ static bool pgb_addr_set(PGBAddrs *ga, abi_ulong guest_loaddr, /* Add any HI_COMMPAGE not covered by reserved_va. */ if (reserved_va < HI_COMMPAGE) { - ga->bounds[n][0] = HI_COMMPAGE & qemu_host_page_mask; + ga->bounds[n][0] = HI_COMMPAGE & qemu_real_host_page_mask(); ga->bounds[n][1] = HI_COMMPAGE + TARGET_PAGE_SIZE - 1; n++; } @@ -3016,8 +3022,6 @@ static void pgb_dynamic(const char *image_name, uintptr_t guest_loaddr, uintptr_t brk, ret; PGBAddrs ga; - assert(QEMU_IS_ALIGNED(guest_loaddr, align)); - /* Try the identity map first. 
*/ if (pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, true)) { brk = (uintptr_t)sbrk(0); @@ -3074,7 +3078,7 @@ void probe_guest_base(const char *image_name, abi_ulong guest_loaddr, abi_ulong guest_hiaddr) { /* In order to use host shmat, we must be able to honor SHMLBA. */ - uintptr_t align = MAX(SHMLBA, qemu_host_page_size); + uintptr_t align = MAX(SHMLBA, TARGET_PAGE_SIZE); /* Sanity check the guest binary. */ if (reserved_va) { @@ -3911,8 +3915,9 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) and some applications "depend" upon this behavior. Since we do not have the power to recompile these, we emulate the SVr4 behavior. Sigh. */ - target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC, - MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + target_mmap(0, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC, + MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS, + -1, 0); } #ifdef TARGET_MIPS info->interp_fp_abi = interp_info.fp_abi; @@ -3962,6 +3967,8 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) } #ifdef USE_ELF_CORE_DUMP +#include "exec/translate-all.h" + /* * Definitions to generate Intel SVR4-like core files. * These mostly have the same names as the SVR4 types with "target_elf_" @@ -4001,18 +4008,6 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) * Example for ARM target is provided in this file. */ -/* An ELF note in memory */ -struct memelfnote { - const char *name; - size_t namesz; - size_t namesz_rounded; - int type; - size_t datasz; - size_t datasz_rounded; - void *data; - size_t notesz; -}; - struct target_elf_siginfo { abi_int si_signo; /* signal number */ abi_int si_code; /* extra code */ @@ -4052,77 +4047,6 @@ struct target_elf_prpsinfo { char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; -/* Here is the structure in which status of each thread is captured. */ -struct elf_thread_status { - QTAILQ_ENTRY(elf_thread_status) ets_link; - struct target_elf_prstatus prstatus; /* NT_PRSTATUS */ -#if 0 - elf_fpregset_t fpu; /* NT_PRFPREG */ - struct task_struct *thread; - elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */ -#endif - struct memelfnote notes[1]; - int num_notes; -}; - -struct elf_note_info { - struct memelfnote *notes; - struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */ - struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */ - - QTAILQ_HEAD(, elf_thread_status) thread_list; -#if 0 - /* - * Current version of ELF coredump doesn't support - * dumping fp regs etc. - */ - elf_fpregset_t *fpu; - elf_fpxregset_t *xfpu; - int thread_status_size; -#endif - int notes_size; - int numnote; -}; - -struct vm_area_struct { - target_ulong vma_start; /* start vaddr of memory region */ - target_ulong vma_end; /* end vaddr of memory region */ - abi_ulong vma_flags; /* protection etc. 
flags for the region */ - QTAILQ_ENTRY(vm_area_struct) vma_link; -}; - -struct mm_struct { - QTAILQ_HEAD(, vm_area_struct) mm_mmap; - int mm_count; /* number of mappings */ -}; - -static struct mm_struct *vma_init(void); -static void vma_delete(struct mm_struct *); -static int vma_add_mapping(struct mm_struct *, target_ulong, - target_ulong, abi_ulong); -static int vma_get_mapping_count(const struct mm_struct *); -static struct vm_area_struct *vma_first(const struct mm_struct *); -static struct vm_area_struct *vma_next(struct vm_area_struct *); -static abi_ulong vma_dump_size(const struct vm_area_struct *); -static int vma_walker(void *priv, target_ulong start, target_ulong end, - unsigned long flags); - -static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t); -static void fill_note(struct memelfnote *, const char *, int, - unsigned int, void *); -static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int); -static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *); -static void fill_auxv_note(struct memelfnote *, const TaskState *); -static void fill_elf_note_phdr(struct elf_phdr *, int, off_t); -static size_t note_size(const struct memelfnote *); -static void free_note_info(struct elf_note_info *); -static int fill_note_info(struct elf_note_info *, long, const CPUArchState *); -static void fill_thread_info(struct elf_note_info *, const CPUArchState *); - -static int dump_write(int, const void *, size_t); -static int write_note(struct memelfnote *, int); -static int write_note_info(struct elf_note_info *, int); - #ifdef BSWAP_NEEDED static void bswap_prstatus(struct target_elf_prstatus *prstatus) { @@ -4164,146 +4088,67 @@ static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {} static inline void bswap_note(struct elf_note *en) { } #endif /* BSWAP_NEEDED */ -/* - * Minimal support for linux memory regions. These are needed - * when we are finding out what memory exactly belongs to - * emulated process. No locks needed here, as long as - * thread that received the signal is stopped. - */ - -static struct mm_struct *vma_init(void) -{ - struct mm_struct *mm; - - if ((mm = g_malloc(sizeof (*mm))) == NULL) - return (NULL); - - mm->mm_count = 0; - QTAILQ_INIT(&mm->mm_mmap); - - return (mm); -} - -static void vma_delete(struct mm_struct *mm) -{ - struct vm_area_struct *vma; - - while ((vma = vma_first(mm)) != NULL) { - QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link); - g_free(vma); - } - g_free(mm); -} - -static int vma_add_mapping(struct mm_struct *mm, target_ulong start, - target_ulong end, abi_ulong flags) -{ - struct vm_area_struct *vma; - - if ((vma = g_malloc0(sizeof (*vma))) == NULL) - return (-1); - - vma->vma_start = start; - vma->vma_end = end; - vma->vma_flags = flags; - - QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link); - mm->mm_count++; - - return (0); -} - -static struct vm_area_struct *vma_first(const struct mm_struct *mm) -{ - return (QTAILQ_FIRST(&mm->mm_mmap)); -} - -static struct vm_area_struct *vma_next(struct vm_area_struct *vma) -{ - return (QTAILQ_NEXT(vma, vma_link)); -} - -static int vma_get_mapping_count(const struct mm_struct *mm) -{ - return (mm->mm_count); -} - /* * Calculate file (dump) size of given memory region. 
*/ -static abi_ulong vma_dump_size(const struct vm_area_struct *vma) +static size_t vma_dump_size(target_ulong start, target_ulong end, + unsigned long flags) { - /* if we cannot even read the first page, skip it */ - if (!access_ok_untagged(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE)) - return (0); + /* The area must be readable. */ + if (!(flags & PAGE_READ)) { + return 0; + } /* * Usually we don't dump executable pages as they contain * non-writable code that debugger can read directly from - * target library etc. However, thread stacks are marked - * also executable so we read in first page of given region - * and check whether it contains elf header. If there is - * no elf header, we dump it. + * target library etc. If there is no elf header, we dump it. */ - if (vma->vma_flags & PROT_EXEC) { - char page[TARGET_PAGE_SIZE]; - - if (copy_from_user(page, vma->vma_start, sizeof (page))) { - return 0; - } - if ((page[EI_MAG0] == ELFMAG0) && - (page[EI_MAG1] == ELFMAG1) && - (page[EI_MAG2] == ELFMAG2) && - (page[EI_MAG3] == ELFMAG3)) { - /* - * Mappings are possibly from ELF binary. Don't dump - * them. - */ - return (0); - } + if (!(flags & PAGE_WRITE_ORG) && + (flags & PAGE_EXEC) && + memcmp(g2h_untagged(start), ELFMAG, SELFMAG) == 0) { + return 0; } - return (vma->vma_end - vma->vma_start); + return end - start; } -static int vma_walker(void *priv, target_ulong start, target_ulong end, - unsigned long flags) +static size_t size_note(const char *name, size_t datasz) { - struct mm_struct *mm = (struct mm_struct *)priv; + size_t namesz = strlen(name) + 1; - vma_add_mapping(mm, start, end, flags); - return (0); + namesz = ROUND_UP(namesz, 4); + datasz = ROUND_UP(datasz, 4); + + return sizeof(struct elf_note) + namesz + datasz; } -static void fill_note(struct memelfnote *note, const char *name, int type, - unsigned int sz, void *data) +static void *fill_note(void **pptr, int type, const char *name, size_t datasz) { - unsigned int namesz; + void *ptr = *pptr; + struct elf_note *n = ptr; + size_t namesz = strlen(name) + 1; - namesz = strlen(name) + 1; - note->name = name; - note->namesz = namesz; - note->namesz_rounded = roundup(namesz, sizeof (int32_t)); - note->type = type; - note->datasz = sz; - note->datasz_rounded = roundup(sz, sizeof (int32_t)); + n->n_namesz = namesz; + n->n_descsz = datasz; + n->n_type = type; + bswap_note(n); - note->data = data; + ptr += sizeof(*n); + memcpy(ptr, name, namesz); - /* - * We calculate rounded up note size here as specified by - * ELF document. 
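For reference, the rounded-up note size that the removed comment mentions and that the new size_note()/fill_note() helpers compute works out as follows (illustrative; struct elf_note is the standard three-word, 12-byte note header for both ELF classes):

/*
 * One "CORE" note as emitted by fill_note():
 *
 *   note header          12 bytes  (n_namesz = 5, n_descsz = datasz, n_type)
 *   "CORE\0" + padding    8 bytes  (namesz 5 rounded up to a 4-byte multiple)
 *   payload + padding     ROUND_UP(datasz, 4) bytes
 *
 * i.e. size_note("CORE", datasz) == 12 + 8 + ROUND_UP(datasz, 4).
 */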
- */ - note->notesz = sizeof (struct elf_note) + - note->namesz_rounded + note->datasz_rounded; + namesz = ROUND_UP(namesz, 4); + datasz = ROUND_UP(datasz, 4); + + *pptr = ptr + namesz + datasz; + return ptr + namesz; } static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine, uint32_t flags) { - (void) memset(elf, 0, sizeof(*elf)); + memcpy(elf->e_ident, ELFMAG, SELFMAG); - (void) memcpy(elf->e_ident, ELFMAG, SELFMAG); elf->e_ident[EI_CLASS] = ELF_CLASS; elf->e_ident[EI_DATA] = ELF_DATA; elf->e_ident[EI_VERSION] = EV_CURRENT; @@ -4321,95 +4166,79 @@ static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine, bswap_ehdr(elf); } -static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset) +static void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, off_t offset) { phdr->p_type = PT_NOTE; phdr->p_offset = offset; - phdr->p_vaddr = 0; - phdr->p_paddr = 0; phdr->p_filesz = sz; - phdr->p_memsz = 0; - phdr->p_flags = 0; - phdr->p_align = 0; bswap_phdr(phdr, 1); } -static size_t note_size(const struct memelfnote *note) +static void fill_prstatus_note(void *data, const TaskState *ts, + CPUState *cpu, int signr) { - return (note->notesz); -} + /* + * Because note memory is only aligned to 4, and target_elf_prstatus + * may well have higher alignment requirements, fill locally and + * memcpy to the destination afterward. + */ + struct target_elf_prstatus prstatus = { + .pr_info.si_signo = signr, + .pr_cursig = signr, + .pr_pid = ts->ts_tid, + .pr_ppid = getppid(), + .pr_pgrp = getpgrp(), + .pr_sid = getsid(0), + }; -static void fill_prstatus(struct target_elf_prstatus *prstatus, - const TaskState *ts, int signr) -{ - (void) memset(prstatus, 0, sizeof (*prstatus)); - prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; - prstatus->pr_pid = ts->ts_tid; - prstatus->pr_ppid = getppid(); - prstatus->pr_pgrp = getpgrp(); - prstatus->pr_sid = getsid(0); - - bswap_prstatus(prstatus); + elf_core_copy_regs(&prstatus.pr_reg, cpu_env(cpu)); + bswap_prstatus(&prstatus); + memcpy(data, &prstatus, sizeof(prstatus)); } -static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts) +static void fill_prpsinfo_note(void *data, const TaskState *ts) { + /* + * Because note memory is only aligned to 4, and target_elf_prpsinfo + * may well have higher alignment requirements, fill locally and + * memcpy to the destination afterward. 
+ */ + struct target_elf_prpsinfo psinfo = { + .pr_pid = getpid(), + .pr_ppid = getppid(), + .pr_pgrp = getpgrp(), + .pr_sid = getsid(0), + .pr_uid = getuid(), + .pr_gid = getgid(), + }; char *base_filename; - unsigned int i, len; - - (void) memset(psinfo, 0, sizeof (*psinfo)); + size_t len; len = ts->info->env_strings - ts->info->arg_strings; - if (len >= ELF_PRARGSZ) - len = ELF_PRARGSZ - 1; - if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_strings, len)) { - return -EFAULT; - } - for (i = 0; i < len; i++) - if (psinfo->pr_psargs[i] == 0) - psinfo->pr_psargs[i] = ' '; - psinfo->pr_psargs[len] = 0; - - psinfo->pr_pid = getpid(); - psinfo->pr_ppid = getppid(); - psinfo->pr_pgrp = getpgrp(); - psinfo->pr_sid = getsid(0); - psinfo->pr_uid = getuid(); - psinfo->pr_gid = getgid(); + len = MIN(len, ELF_PRARGSZ); + memcpy(&psinfo.pr_psargs, g2h_untagged(ts->info->arg_strings), len); + for (size_t i = 0; i < len; i++) { + if (psinfo.pr_psargs[i] == 0) { + psinfo.pr_psargs[i] = ' '; + } + } base_filename = g_path_get_basename(ts->bprm->filename); /* * Using strncpy here is fine: at max-length, * this field is not NUL-terminated. */ - (void) strncpy(psinfo->pr_fname, base_filename, - sizeof(psinfo->pr_fname)); - + strncpy(psinfo.pr_fname, base_filename, sizeof(psinfo.pr_fname)); g_free(base_filename); - bswap_psinfo(psinfo); - return (0); + + bswap_psinfo(&psinfo); + memcpy(data, &psinfo, sizeof(psinfo)); } -static void fill_auxv_note(struct memelfnote *note, const TaskState *ts) +static void fill_auxv_note(void *data, const TaskState *ts) { - elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv; - elf_addr_t orig_auxv = auxv; - void *ptr; - int len = ts->info->auxv_len; - - /* - * Auxiliary vector is stored in target process stack. It contains - * {type, value} pairs that we need to dump into note. This is not - * strictly necessary but we do it here for sake of completeness. - */ - - /* read in whole auxv vector and copy it to memelfnote */ - ptr = lock_user(VERIFY_READ, orig_auxv, len, 0); - if (ptr != NULL) { - fill_note(note, "CORE", NT_AUXV, len, ptr); - unlock_user(ptr, auxv, len); - } + memcpy(data, g2h_untagged(ts->info->saved_auxv), ts->info->auxv_len); } /* @@ -4433,27 +4262,9 @@ static int dump_write(int fd, const void *ptr, size_t size) { const char *bufp = (const char *)ptr; ssize_t bytes_written, bytes_left; - struct rlimit dumpsize; - off_t pos; bytes_written = 0; - getrlimit(RLIMIT_CORE, &dumpsize); - if ((pos = lseek(fd, 0, SEEK_CUR))==-1) { - if (errno == ESPIPE) { /* not a seekable stream */ - bytes_left = size; - } else { - return pos; - } - } else { - if (dumpsize.rlim_cur <= pos) { - return -1; - } else if (dumpsize.rlim_cur == RLIM_INFINITY) { - bytes_left = size; - } else { - size_t limit_left=dumpsize.rlim_cur - pos; - bytes_left = limit_left >= size ? 
size : limit_left ; - } - } + bytes_left = size; /* * In normal conditions, single write(2) should do but @@ -4475,135 +4286,76 @@ static int dump_write(int fd, const void *ptr, size_t size) return (0); } -static int write_note(struct memelfnote *men, int fd) +static int wmr_page_unprotect_regions(void *opaque, target_ulong start, + target_ulong end, unsigned long flags) { - struct elf_note en; - - en.n_namesz = men->namesz; - en.n_type = men->type; - en.n_descsz = men->datasz; - - bswap_note(&en); + if ((flags & (PAGE_WRITE | PAGE_WRITE_ORG)) == PAGE_WRITE_ORG) { + size_t step = MAX(TARGET_PAGE_SIZE, qemu_real_host_page_size()); - if (dump_write(fd, &en, sizeof(en)) != 0) - return (-1); - if (dump_write(fd, men->name, men->namesz_rounded) != 0) - return (-1); - if (dump_write(fd, men->data, men->datasz_rounded) != 0) - return (-1); - - return (0); -} - -static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env) -{ - CPUState *cpu = env_cpu((CPUArchState *)env); - TaskState *ts = (TaskState *)cpu->opaque; - struct elf_thread_status *ets; - - ets = g_malloc0(sizeof (*ets)); - ets->num_notes = 1; /* only prstatus is dumped */ - fill_prstatus(&ets->prstatus, ts, 0); - elf_core_copy_regs(&ets->prstatus.pr_reg, env); - fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus), - &ets->prstatus); - - QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link); - - info->notes_size += note_size(&ets->notes[0]); + while (1) { + page_unprotect(start, 0); + if (end - start <= step) { + break; + } + start += step; + } + } + return 0; } -static void init_note_info(struct elf_note_info *info) -{ - /* Initialize the elf_note_info structure so that it is at - * least safe to call free_note_info() on it. Must be - * called before calling fill_note_info(). - */ - memset(info, 0, sizeof (*info)); - QTAILQ_INIT(&info->thread_list); -} +typedef struct { + unsigned count; + size_t size; +} CountAndSizeRegions; -static int fill_note_info(struct elf_note_info *info, - long signr, const CPUArchState *env) +static int wmr_count_and_size_regions(void *opaque, target_ulong start, + target_ulong end, unsigned long flags) { -#define NUMNOTES 3 - CPUState *cpu = env_cpu((CPUArchState *)env); - TaskState *ts = (TaskState *)cpu->opaque; - int i; - - info->notes = g_new0(struct memelfnote, NUMNOTES); - if (info->notes == NULL) - return (-ENOMEM); - info->prstatus = g_malloc0(sizeof (*info->prstatus)); - if (info->prstatus == NULL) - return (-ENOMEM); - info->psinfo = g_malloc0(sizeof (*info->psinfo)); - if (info->prstatus == NULL) - return (-ENOMEM); + CountAndSizeRegions *css = opaque; - /* - * First fill in status (and registers) of current thread - * including process info & aux vector. 
- */ - fill_prstatus(info->prstatus, ts, signr); - elf_core_copy_regs(&info->prstatus->pr_reg, env); - fill_note(&info->notes[0], "CORE", NT_PRSTATUS, - sizeof (*info->prstatus), info->prstatus); - fill_psinfo(info->psinfo, ts); - fill_note(&info->notes[1], "CORE", NT_PRPSINFO, - sizeof (*info->psinfo), info->psinfo); - fill_auxv_note(&info->notes[2], ts); - info->numnote = 3; - - info->notes_size = 0; - for (i = 0; i < info->numnote; i++) - info->notes_size += note_size(&info->notes[i]); - - /* read and fill status of all threads */ - WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) { - CPU_FOREACH(cpu) { - if (cpu == thread_cpu) { - continue; - } - fill_thread_info(info, cpu_env(cpu)); - } - } - - return (0); + css->count++; + css->size += vma_dump_size(start, end, flags); + return 0; } -static void free_note_info(struct elf_note_info *info) +typedef struct { + struct elf_phdr *phdr; + off_t offset; +} FillRegionPhdr; + +static int wmr_fill_region_phdr(void *opaque, target_ulong start, + target_ulong end, unsigned long flags) { - struct elf_thread_status *ets; + FillRegionPhdr *d = opaque; + struct elf_phdr *phdr = d->phdr; - while (!QTAILQ_EMPTY(&info->thread_list)) { - ets = QTAILQ_FIRST(&info->thread_list); - QTAILQ_REMOVE(&info->thread_list, ets, ets_link); - g_free(ets); - } + phdr->p_type = PT_LOAD; + phdr->p_vaddr = start; + phdr->p_paddr = 0; + phdr->p_filesz = vma_dump_size(start, end, flags); + phdr->p_offset = d->offset; + d->offset += phdr->p_filesz; + phdr->p_memsz = end - start; + phdr->p_flags = (flags & PAGE_READ ? PF_R : 0) + | (flags & PAGE_WRITE_ORG ? PF_W : 0) + | (flags & PAGE_EXEC ? PF_X : 0); + phdr->p_align = ELF_EXEC_PAGESIZE; - g_free(info->prstatus); - g_free(info->psinfo); - g_free(info->notes); + bswap_phdr(phdr, 1); + d->phdr = phdr + 1; + return 0; } -static int write_note_info(struct elf_note_info *info, int fd) +static int wmr_write_region(void *opaque, target_ulong start, + target_ulong end, unsigned long flags) { - struct elf_thread_status *ets; - int i, error = 0; - - /* write prstatus, psinfo and auxv for current thread */ - for (i = 0; i < info->numnote; i++) - if ((error = write_note(&info->notes[i], fd)) != 0) - return (error); + int fd = *(int *)opaque; + size_t size = vma_dump_size(start, end, flags); - /* write prstatus for each thread */ - QTAILQ_FOREACH(ets, &info->thread_list, ets_link) { - if ((error = write_note(&ets->notes[0], fd)) != 0) - return (error); + if (!size) { + return 0; } - - return (0); + return dump_write(fd, g2h_untagged(start), size); } /* @@ -4652,147 +4404,128 @@ static int write_note_info(struct elf_note_info *info, int fd) static int elf_core_dump(int signr, const CPUArchState *env) { const CPUState *cpu = env_cpu((CPUArchState *)env); - const TaskState *ts = (const TaskState *)cpu->opaque; - struct vm_area_struct *vma = NULL; - g_autofree char *corefile = NULL; - struct elf_note_info info; - struct elfhdr elf; - struct elf_phdr phdr; + const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu); struct rlimit dumpsize; - struct mm_struct *mm = NULL; - off_t offset = 0, data_offset = 0; - int segs = 0; + CountAndSizeRegions css; + off_t offset, note_offset, data_offset; + size_t note_size; + int cpus, ret; int fd = -1; + CPUState *cpu_iter; - init_note_info(&info); + if (prctl(PR_GET_DUMPABLE) == 0) { + return 0; + } - errno = 0; - getrlimit(RLIMIT_CORE, &dumpsize); - if (dumpsize.rlim_cur == 0) + if (getrlimit(RLIMIT_CORE, &dumpsize) < 0 || dumpsize.rlim_cur == 0) { return 0; + } - corefile = 
core_dump_filename(ts); + cpu_list_lock(); + mmap_lock(); - if ((fd = open(corefile, O_WRONLY | O_CREAT, - S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0) - return (-errno); + /* By unprotecting, we merge vmas that might be split. */ + walk_memory_regions(NULL, wmr_page_unprotect_regions); /* * Walk through target process memory mappings and - * set up structure containing this information. After - * this point vma_xxx functions can be used. + * set up structure containing this information. */ - if ((mm = vma_init()) == NULL) - goto out; - - walk_memory_regions(mm, vma_walker); - segs = vma_get_mapping_count(mm); + memset(&css, 0, sizeof(css)); + walk_memory_regions(&css, wmr_count_and_size_regions); - /* - * Construct valid coredump ELF header. We also - * add one more segment for notes. - */ - fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0); - if (dump_write(fd, &elf, sizeof (elf)) != 0) - goto out; + cpus = 0; + CPU_FOREACH(cpu_iter) { + cpus++; + } - /* fill in the in-memory version of notes */ - if (fill_note_info(&info, signr, env) < 0) - goto out; + offset = sizeof(struct elfhdr); + offset += (css.count + 1) * sizeof(struct elf_phdr); + note_offset = offset; - offset += sizeof (elf); /* elf header */ - offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */ + offset += size_note("CORE", ts->info->auxv_len); + offset += size_note("CORE", sizeof(struct target_elf_prpsinfo)); + offset += size_note("CORE", sizeof(struct target_elf_prstatus)) * cpus; + note_size = offset - note_offset; + data_offset = ROUND_UP(offset, ELF_EXEC_PAGESIZE); - /* write out notes program header */ - fill_elf_note_phdr(&phdr, info.notes_size, offset); + /* Do not dump if the corefile size exceeds the limit. */ + if (dumpsize.rlim_cur != RLIM_INFINITY + && dumpsize.rlim_cur < data_offset + css.size) { + errno = 0; + goto out; + } - offset += info.notes_size; - if (dump_write(fd, &phdr, sizeof (phdr)) != 0) + { + g_autofree char *corefile = core_dump_filename(ts); + fd = open(corefile, O_WRONLY | O_CREAT | O_TRUNC, + S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); + } + if (fd < 0) { goto out; + } /* - * ELF specification wants data to start at page boundary so - * we align it here. + * There is a fair amount of alignment padding within the notes + * as well as preceeding the process memory. Allocate a zeroed + * block to hold it all. Write all of the headers directly into + * this buffer and then write it out as a block. */ - data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE); + { + g_autofree void *header = g_malloc0(data_offset); + FillRegionPhdr frp; + void *hptr, *dptr; + + /* Create elf file header. */ + hptr = header; + fill_elf_header(hptr, css.count + 1, ELF_MACHINE, 0); + hptr += sizeof(struct elfhdr); + + /* Create elf program headers. */ + fill_elf_note_phdr(hptr, note_size, note_offset); + hptr += sizeof(struct elf_phdr); + + frp.phdr = hptr; + frp.offset = data_offset; + walk_memory_regions(&frp, wmr_fill_region_phdr); + hptr = frp.phdr; + + /* Create the notes. */ + dptr = fill_note(&hptr, NT_AUXV, "CORE", ts->info->auxv_len); + fill_auxv_note(dptr, ts); + + dptr = fill_note(&hptr, NT_PRPSINFO, "CORE", + sizeof(struct target_elf_prpsinfo)); + fill_prpsinfo_note(dptr, ts); + + CPU_FOREACH(cpu_iter) { + dptr = fill_note(&hptr, NT_PRSTATUS, "CORE", + sizeof(struct target_elf_prstatus)); + fill_prstatus_note(dptr, ts, cpu_iter, + cpu_iter == cpu ? signr : 0); + } - /* - * Write program headers for memory regions mapped in - * the target process. 
- */ - for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) { - (void) memset(&phdr, 0, sizeof (phdr)); - - phdr.p_type = PT_LOAD; - phdr.p_offset = offset; - phdr.p_vaddr = vma->vma_start; - phdr.p_paddr = 0; - phdr.p_filesz = vma_dump_size(vma); - offset += phdr.p_filesz; - phdr.p_memsz = vma->vma_end - vma->vma_start; - phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0; - if (vma->vma_flags & PROT_WRITE) - phdr.p_flags |= PF_W; - if (vma->vma_flags & PROT_EXEC) - phdr.p_flags |= PF_X; - phdr.p_align = ELF_EXEC_PAGESIZE; - - bswap_phdr(&phdr, 1); - if (dump_write(fd, &phdr, sizeof(phdr)) != 0) { + if (dump_write(fd, header, data_offset) < 0) { goto out; } } /* - * Next we write notes just after program headers. No - * alignment needed here. + * Finally write process memory into the corefile as well. */ - if (write_note_info(&info, fd) < 0) - goto out; - - /* align data to page boundary */ - if (lseek(fd, data_offset, SEEK_SET) != data_offset) + if (walk_memory_regions(&fd, wmr_write_region) < 0) { goto out; - - /* - * Finally we can dump process memory into corefile as well. - */ - for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) { - abi_ulong addr; - abi_ulong end; - - end = vma->vma_start + vma_dump_size(vma); - - for (addr = vma->vma_start; addr < end; - addr += TARGET_PAGE_SIZE) { - char page[TARGET_PAGE_SIZE]; - int error; - - /* - * Read in page from target process memory and - * write it to coredump file. - */ - error = copy_from_user(page, addr, sizeof (page)); - if (error != 0) { - (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n", - addr); - errno = -error; - goto out; - } - if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0) - goto out; - } } + errno = 0; out: - free_note_info(&info); - if (mm != NULL) - vma_delete(mm); - (void) close(fd); - - if (errno != 0) - return (-errno); - return (0); + ret = -errno; + mmap_unlock(); + cpu_list_unlock(); + if (fd >= 0) { + close(fd); + } + return ret; } #endif /* USE_ELF_CORE_DUMP */ diff --git a/linux-user/exit.c b/linux-user/exit.c index 50266314e0a..1ff8fe4f072 100644 --- a/linux-user/exit.c +++ b/linux-user/exit.c @@ -17,7 +17,7 @@ * along with this program; if not, see . 
*/ #include "qemu/osdep.h" -#include "accel/tcg/perf.h" +#include "tcg/perf.h" #include "gdbstub/syscalls.h" #include "qemu.h" #include "user-internals.h" diff --git a/linux-user/hexagon/signal.c b/linux-user/hexagon/signal.c index 60fa7e1bcee..492b51f1550 100644 --- a/linux-user/hexagon/signal.c +++ b/linux-user/hexagon/signal.c @@ -162,7 +162,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, } setup_ucontext(&frame->uc, env, set); - tswap_siginfo(&frame->info, info); + frame->info = *info; /* * The on-stack signal trampoline is no longer executed; * however, the libgcc signal frame unwinding code checks diff --git a/linux-user/hppa/signal.c b/linux-user/hppa/signal.c index d08a97dae61..682ba25922e 100644 --- a/linux-user/hppa/signal.c +++ b/linux-user/hppa/signal.c @@ -112,7 +112,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, abi_ulong frame_addr, sp, haddr; struct target_rt_sigframe *frame; int i; - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); sp = get_sp_from_cpustate(env); if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { @@ -127,7 +127,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, goto give_sigsegv; } - tswap_siginfo(&frame->info, info); + frame->info = *info; frame->uc.tuc_flags = 0; frame->uc.tuc_link = 0; diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c index 42ecb4bf0a2..92beb6830cc 100644 --- a/linux-user/i386/cpu_loop.c +++ b/linux-user/i386/cpu_loop.c @@ -323,8 +323,8 @@ void cpu_loop(CPUX86State *env) static void target_cpu_free(void *obj) { - CPUArchState *env = cpu_env(obj); - target_munmap(env->gdt.base, sizeof(uint64_t) * TARGET_GDT_ENTRIES); + target_munmap(cpu_env(obj)->gdt.base, + sizeof(uint64_t) * TARGET_GDT_ENTRIES); g_free(obj); } diff --git a/linux-user/i386/signal.c b/linux-user/i386/signal.c index bc5d45302ea..cfe70fc5cff 100644 --- a/linux-user/i386/signal.c +++ b/linux-user/i386/signal.c @@ -430,7 +430,7 @@ void setup_frame(int sig, struct target_sigaction *ka, setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0], frame_addr + offsetof(struct sigframe, fpstate)); - for(i = 1; i < TARGET_NSIG_WORDS; i++) { + for (i = 1; i < TARGET_NSIG_WORDS; i++) { __put_user(set->sig[i], &frame->extramask[i - 1]); } @@ -490,7 +490,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, __put_user(addr, &frame->puc); #endif if (ka->sa_flags & TARGET_SA_SIGINFO) { - tswap_siginfo(&frame->info, info); + frame->info = *info; } /* Create the ucontext. 
*/ @@ -504,7 +504,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env, set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate)); - for(i = 0; i < TARGET_NSIG_WORDS; i++) { + for (i = 0; i < TARGET_NSIG_WORDS; i++) { __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); } diff --git a/linux-user/ioctls.h b/linux-user/ioctls.h index 071f7ca2537..1aec9d58368 100644 --- a/linux-user/ioctls.h +++ b/linux-user/ioctls.h @@ -134,6 +134,12 @@ IOCTL(FICLONE, IOC_W, TYPE_INT) IOCTL(FICLONERANGE, IOC_W, MK_PTR(MK_STRUCT(STRUCT_file_clone_range))) #endif +#ifdef FIFREEZE + IOCTL(FIFREEZE, IOC_W | IOC_R, TYPE_INT) +#endif +#ifdef FITHAW + IOCTL(FITHAW, IOC_W | IOC_R, TYPE_INT) +#endif IOCTL(FIGETBSZ, IOC_R, MK_PTR(TYPE_LONG)) #ifdef CONFIG_FIEMAP diff --git a/linux-user/linuxload.c b/linux-user/linuxload.c index 4a794f8cea1..37f132be4af 100644 --- a/linux-user/linuxload.c +++ b/linux-user/linuxload.c @@ -89,7 +89,7 @@ static int prepare_binprm(struct linux_binprm *bprm) abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, abi_ulong stringp, int push_ptr) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); int n = sizeof(abi_ulong); abi_ulong envp; abi_ulong argv; diff --git a/linux-user/loongarch64/signal.c b/linux-user/loongarch64/signal.c index 39ea82c8140..1a322f96973 100644 --- a/linux-user/loongarch64/signal.c +++ b/linux-user/loongarch64/signal.c @@ -376,7 +376,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr); } - tswap_siginfo(&frame->rs_info, info); + frame->rs_info = *info; __put_user(0, &frame->rs_uc.tuc_flags); __put_user(0, &frame->rs_uc.tuc_link); diff --git a/linux-user/loongarch64/target_syscall.h b/linux-user/loongarch64/target_syscall.h index 8b5de521243..39f229bb9c8 100644 --- a/linux-user/loongarch64/target_syscall.h +++ b/linux-user/loongarch64/target_syscall.h @@ -38,11 +38,4 @@ struct target_pt_regs { #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 -#define TARGET_FORCE_SHMLBA - -static inline abi_ulong target_shmlba(CPULoongArchState *env) -{ - return 64 * KiB; -} - #endif diff --git a/linux-user/m68k/cpu_loop.c b/linux-user/m68k/cpu_loop.c index caead1cb741..f79b8e4ab05 100644 --- a/linux-user/m68k/cpu_loop.c +++ b/linux-user/m68k/cpu_loop.c @@ -95,7 +95,7 @@ void cpu_loop(CPUM68KState *env) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) { CPUState *cpu = env_cpu(env); - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); struct image_info *info = ts->info; env->pc = regs->pc; diff --git a/linux-user/m68k/signal.c b/linux-user/m68k/signal.c index 5f35354487b..77555781aa8 100644 --- a/linux-user/m68k/signal.c +++ b/linux-user/m68k/signal.c @@ -295,7 +295,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); __put_user(uc_addr, &frame->puc); - tswap_siginfo(&frame->info, info); + frame->info = *info; /* Create the ucontext */ @@ -307,7 +307,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, if (err) goto give_sigsegv; - for(i = 0; i < TARGET_NSIG_WORDS; i++) { + for (i = 0; i < TARGET_NSIG_WORDS; i++) { __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); } diff --git a/linux-user/m68k/target_cpu.h b/linux-user/m68k/target_cpu.h index c3f288dfe83..4b40c09a8d6 100644 --- a/linux-user/m68k/target_cpu.h +++ 
b/linux-user/m68k/target_cpu.h @@ -37,7 +37,7 @@ static inline void cpu_clone_regs_parent(CPUM68KState *env, unsigned flags) static inline void cpu_set_tls(CPUM68KState *env, target_ulong newtls) { CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); ts->tp_value = newtls; } diff --git a/linux-user/main.c b/linux-user/main.c index ee767ff76f0..b05697ac831 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -54,7 +54,8 @@ #include "signal-common.h" #include "loader.h" #include "user-mmap.h" -#include "accel/tcg/perf.h" +#include "tcg/perf.h" +#include "exec/page-vary.h" #ifdef CONFIG_SEMIHOSTING #include "semihosting/semihost.h" @@ -147,10 +148,13 @@ void fork_start(void) mmap_fork_start(); cpu_list_lock(); qemu_plugin_user_prefork_lock(); + gdbserver_fork_start(); } -void fork_end(int child) +void fork_end(pid_t pid) { + bool child = pid == 0; + qemu_plugin_user_postfork(child); mmap_fork_end(child); if (child) { @@ -163,10 +167,11 @@ void fork_end(int child) } } qemu_init_cpu_list(); - gdbserver_fork(thread_cpu); + get_task_state(thread_cpu)->ts_tid = qemu_get_thread_id(); } else { cpu_list_unlock(); } + gdbserver_fork_end(thread_cpu, pid); /* * qemu_init_cpu_list() reinitialized the child exclusive state, but we * also need to keep current_cpu consistent, so call end_exclusive() for @@ -335,11 +340,11 @@ static void handle_arg_ld_prefix(const char *arg) static void handle_arg_pagesize(const char *arg) { - qemu_host_page_size = atoi(arg); - if (qemu_host_page_size == 0 || - (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) { - fprintf(stderr, "page size must be a power of two\n"); - exit(EXIT_FAILURE); + unsigned size, want = qemu_real_host_page_size(); + + if (qemu_strtoui(arg, NULL, 10, &size) || size != want) { + warn_report("Deprecated page size option cannot " + "change host page size (%u)", want); } } @@ -499,12 +504,10 @@ static const struct qemu_argument arg_table[] = { {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename, "logfile", "write logs to 'logfile' (default stderr)"}, {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize, - "pagesize", "set the host page size to 'pagesize'"}, + "pagesize", "deprecated change to host page size"}, {"one-insn-per-tb", "QEMU_ONE_INSN_PER_TB", false, handle_arg_one_insn_per_tb, "", "run with one guest instruction per emulated TB"}, - {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_one_insn_per_tb, - "", "deprecated synonym for -one-insn-per-tb"}, {"strace", "QEMU_STRACE", false, handle_arg_strace, "", "log system calls"}, {"seed", "QEMU_RAND_SEED", true, handle_arg_seed, @@ -685,6 +688,7 @@ int main(int argc, char **argv, char **envp) int i; int ret; int execfd; + int host_page_size; unsigned long max_reserved_va; bool preserve_argv0; @@ -789,7 +793,7 @@ int main(int argc, char **argv, char **envp) } cpu_type = parse_cpu_option(cpu_model); - /* init tcg before creating CPUs and to get qemu_host_page_size */ + /* init tcg before creating CPUs */ { AccelState *accel = current_accel(); AccelClass *ac = ACCEL_GET_CLASS(accel); @@ -799,6 +803,16 @@ int main(int argc, char **argv, char **envp) opt_one_insn_per_tb, &error_abort); ac->init_machine(NULL); } + + /* + * Finalize page size before creating CPUs. + * This will do nothing if !TARGET_PAGE_BITS_VARY. + * The most efficient setting is to match the host. 
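The comment here and the three calls that follow are the new startup step for targets built with TARGET_PAGE_BITS_VARY: the preferred target page size is taken from the host before any CPU is created. A standalone sketch of the arithmetic (names are illustrative, not QEMU's):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long host_page_size = sysconf(_SC_PAGESIZE);    /* e.g. 4096 or 16384 */
        int page_bits = __builtin_ctzl(host_page_size); /* 12 or 14 */

        printf("host page size %ld -> %d page bits\n", host_page_size, page_bits);
        return 0;
    }

Matching the host page size keeps the emulation on the fast path; the mmap.c rewrite later in this diff exists precisely to handle the cases where host and target page sizes differ.
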
+ */ + host_page_size = qemu_real_host_page_size(); + set_preferred_target_page_bits(ctz32(host_page_size)); + finalize_target_page_bits(); + cpu = cpu_create(cpu_type); env = cpu_env(cpu); cpu_reset(cpu); @@ -812,8 +826,8 @@ int main(int argc, char **argv, char **envp) */ max_reserved_va = MAX_RESERVED_VA(cpu); if (reserved_va != 0) { - if ((reserved_va + 1) % qemu_host_page_size) { - char *s = size_to_str(qemu_host_page_size); + if ((reserved_va + 1) % host_page_size) { + char *s = size_to_str(host_page_size); fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s); g_free(s); exit(EXIT_FAILURE); @@ -897,7 +911,7 @@ int main(int argc, char **argv, char **envp) if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) { unsigned long tmp; if (fscanf(fp, "%lu", &tmp) == 1 && tmp != 0) { - mmap_min_addr = tmp; + mmap_min_addr = MAX(tmp, host_page_size); qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr); } @@ -910,7 +924,7 @@ int main(int argc, char **argv, char **envp) * If we're in a chroot with no /proc, fall back to 1 page. */ if (mmap_min_addr == 0) { - mmap_min_addr = qemu_host_page_size; + mmap_min_addr = host_page_size; qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx (fallback)\n", mmap_min_addr); @@ -920,11 +934,7 @@ int main(int argc, char **argv, char **envp) * Prepare copy of argv vector for target. */ target_argc = argc - optind; - target_argv = calloc(target_argc + 1, sizeof (char *)); - if (target_argv == NULL) { - (void) fprintf(stderr, "Unable to allocate memory for target_argv\n"); - exit(EXIT_FAILURE); - } + target_argv = g_new0(char *, target_argc + 1); /* * If argv0 is specified (using '-0' switch) we replace @@ -1010,7 +1020,7 @@ int main(int argc, char **argv, char **envp) gdbstub); exit(EXIT_FAILURE); } - gdb_handlesig(cpu, 0); + gdb_handlesig(cpu, 0, NULL, NULL, 0); } #ifdef CONFIG_SEMIHOSTING diff --git a/linux-user/microblaze/signal.c b/linux-user/microblaze/signal.c index 5188d740252..f6d47d76ff6 100644 --- a/linux-user/microblaze/signal.c +++ b/linux-user/microblaze/signal.c @@ -147,7 +147,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, return; } - tswap_siginfo(&frame->info, info); + frame->info = *info; __put_user(0, &frame->uc.tuc_flags); __put_user(0, &frame->uc.tuc_link); diff --git a/linux-user/mips/cpu_loop.c b/linux-user/mips/cpu_loop.c index 990b03e727b..462387a0737 100644 --- a/linux-user/mips/cpu_loop.c +++ b/linux-user/mips/cpu_loop.c @@ -214,7 +214,7 @@ void cpu_loop(CPUMIPSState *env) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) { CPUState *cpu = env_cpu(env); - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); struct image_info *info = ts->info; int i; diff --git a/linux-user/mips/signal.c b/linux-user/mips/signal.c index 58a9d7a8a32..d69a5d73ddd 100644 --- a/linux-user/mips/signal.c +++ b/linux-user/mips/signal.c @@ -303,7 +303,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, goto give_sigsegv; } - tswap_siginfo(&frame->rs_info, info); + frame->rs_info = *info; __put_user(0, &frame->rs_uc.tuc_flags); __put_user(0, &frame->rs_uc.tuc_link); @@ -311,7 +311,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); - for(i = 0; i < TARGET_NSIG_WORDS; i++) { + for (i = 0; i < TARGET_NSIG_WORDS; i++) { __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); } diff --git a/linux-user/mmap.c b/linux-user/mmap.c index 96c9433e271..be3b9a68ebc 100644 --- a/linux-user/mmap.c +++ 
b/linux-user/mmap.c @@ -165,6 +165,7 @@ static int target_to_host_prot(int prot) /* NOTE: all the constants are the HOST ones, but addresses are target. */ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) { + int host_page_size = qemu_real_host_page_size(); abi_ulong starts[3]; abi_ulong lens[3]; int prots[3]; @@ -189,13 +190,13 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) } last = start + len - 1; - host_start = start & qemu_host_page_mask; - host_last = HOST_PAGE_ALIGN(last) - 1; + host_start = start & -host_page_size; + host_last = ROUND_UP(last, host_page_size) - 1; nranges = 0; mmap_lock(); - if (host_last - host_start < qemu_host_page_size) { + if (host_last - host_start < host_page_size) { /* Single host page contains all guest pages: sum the prot. */ prot1 = target_prot; for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) { @@ -205,7 +206,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) prot1 |= page_get_flags(a + 1); } starts[nranges] = host_start; - lens[nranges] = qemu_host_page_size; + lens[nranges] = host_page_size; prots[nranges] = prot1; nranges++; } else { @@ -218,10 +219,10 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) /* If the resulting sum differs, create a new range. */ if (prot1 != target_prot) { starts[nranges] = host_start; - lens[nranges] = qemu_host_page_size; + lens[nranges] = host_page_size; prots[nranges] = prot1; nranges++; - host_start += qemu_host_page_size; + host_start += host_page_size; } } @@ -233,9 +234,9 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) } /* If the resulting sum differs, create a new range. */ if (prot1 != target_prot) { - host_last -= qemu_host_page_size; + host_last -= host_page_size; starts[nranges] = host_last + 1; - lens[nranges] = qemu_host_page_size; + lens[nranges] = host_page_size; prots[nranges] = prot1; nranges++; } @@ -266,10 +267,35 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) return ret; } -/* map an incomplete host page */ +/* + * Perform munmap on behalf of the target, with host parameters. + * If reserved_va, we must replace the memory reservation. + */ +static int do_munmap(void *addr, size_t len) +{ + if (reserved_va) { + void *ptr = mmap(addr, len, PROT_NONE, + MAP_FIXED | MAP_ANONYMOUS + | MAP_PRIVATE | MAP_NORESERVE, -1, 0); + return ptr == addr ? 0 : -1; + } + return munmap(addr, len); +} + +/* + * Map an incomplete host page. + * + * Here be dragons. This case will not work if there is an existing + * overlapping host page, which is file mapped, and for which the mapping + * is beyond the end of the file. In that case, we will see SIGBUS when + * trying to write a portion of this page. + * + * FIXME: Work around this with a temporary signal handler and longjmp. + */ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last, int prot, int flags, int fd, off_t offset) { + int host_page_size = qemu_real_host_page_size(); abi_ulong real_last; void *host_start; int prot_old, prot_new; @@ -286,7 +312,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last, return false; } - real_last = real_start + qemu_host_page_size - 1; + real_last = real_start + host_page_size - 1; host_start = g2h_untagged(real_start); /* Get the protection of the target pages outside the mapping. */ @@ -304,12 +330,12 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last, * outside of the fragment we need to map. 
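The do_munmap() helper introduced above is worth calling out: with reserved_va the guest address space is pre-reserved in the host, so "unmapping" must re-install a PROT_NONE reservation rather than punch a hole that an unrelated host allocation could later occupy. A self-contained sketch of that reserve/re-reserve pattern, using plain POSIX calls rather than QEMU's wrappers:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        size_t len = 16 * page;

        /* Reserve a contiguous range up front (the reserved_va case). */
        char *base = mmap(NULL, len, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        if (base == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* Use a slice of it... */
        mprotect(base + 4 * page, 4 * page, PROT_READ | PROT_WRITE);
        memset(base + 4 * page, 0, 4 * page);

        /* ...and "unmap" it by restoring the reservation in place. */
        if (mmap(base + 4 * page, 4 * page, PROT_NONE,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                 -1, 0) == MAP_FAILED) {
            perror("re-reserve");
            return 1;
        }
        puts("reservation restored");
        return 0;
    }
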
Allocate a new host * page to cover, discarding whatever else may have been present. */ - void *p = mmap(host_start, qemu_host_page_size, + void *p = mmap(host_start, host_page_size, target_to_host_prot(prot), flags | MAP_ANONYMOUS, -1, 0); if (p != host_start) { if (p != MAP_FAILED) { - munmap(p, qemu_host_page_size); + do_munmap(p, host_page_size); errno = EEXIST; } return false; @@ -324,7 +350,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last, /* Adjust protection to be able to write. */ if (!(host_prot_old & PROT_WRITE)) { host_prot_old |= PROT_WRITE; - mprotect(host_start, qemu_host_page_size, host_prot_old); + mprotect(host_start, host_page_size, host_prot_old); } /* Read or zero the new guest pages. */ @@ -338,7 +364,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last, /* Put final protection */ if (host_prot_new != host_prot_old) { - mprotect(host_start, qemu_host_page_size, host_prot_new); + mprotect(host_start, host_page_size, host_prot_new); } return true; } @@ -373,21 +399,21 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size, */ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align) { + int host_page_size = qemu_real_host_page_size(); void *ptr, *prev; abi_ulong addr; int wrapped, repeat; - align = MAX(align, qemu_host_page_size); + align = MAX(align, host_page_size); /* If 'start' == 0, then a default start address is used. */ if (start == 0) { start = mmap_next_start; } else { - start &= qemu_host_page_mask; + start &= -host_page_size; } start = ROUND_UP(start, align); - - size = HOST_PAGE_ALIGN(size); + size = ROUND_UP(size, host_page_size); if (reserved_va) { return mmap_find_vma_reserved(start, size, align); @@ -488,302 +514,463 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align) } } -/* NOTE: all the constants are the HOST ones */ -abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, - int flags, int fd, off_t offset) +/* + * Record a successful mmap within the user-exec interval tree. + */ +static abi_long mmap_end(abi_ulong start, abi_ulong last, + abi_ulong passthrough_start, + abi_ulong passthrough_last, + int flags, int page_flags) { - abi_ulong ret, last, real_start, real_last, retaddr, host_len; - abi_ulong passthrough_start = -1, passthrough_last = 0; - int page_flags; - off_t host_offset; - - mmap_lock(); - trace_target_mmap(start, len, target_prot, flags, fd, offset); - - if (!len) { - errno = EINVAL; - goto fail; + if (flags & MAP_ANONYMOUS) { + page_flags |= PAGE_ANON; } - - page_flags = validate_prot_to_pageflags(target_prot); - if (!page_flags) { - errno = EINVAL; - goto fail; + page_flags |= PAGE_RESET; + if (passthrough_start > passthrough_last) { + page_set_flags(start, last, page_flags); + } else { + if (start < passthrough_start) { + page_set_flags(start, passthrough_start - 1, page_flags); + } + page_set_flags(passthrough_start, passthrough_last, + page_flags | PAGE_PASSTHROUGH); + if (passthrough_last < last) { + page_set_flags(passthrough_last + 1, last, page_flags); + } } - - /* Also check for overflows... 
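Throughout these mmap.c hunks the old qemu_host_page_mask / HOST_PAGE_ALIGN() helpers give way to "addr & -host_page_size" and ROUND_UP(addr, host_page_size), as in mmap_find_vma() above. For a power-of-two page size these are simply round-down and round-up; a small standalone demo of the arithmetic (the macros here are local to the example):

    #include <stdint.h>
    #include <stdio.h>

    #define RDOWN(x, a) ((x) & -(uintptr_t)(a))              /* a: power of two */
    #define RUP(x, a)   (((x) + (a) - 1) & -(uintptr_t)(a))

    int main(void)
    {
        uintptr_t page = 0x4000;      /* a 16K host page, say */
        uintptr_t addr = 0x12345;

        printf("down: 0x%lx\n", (unsigned long)RDOWN(addr, page));  /* 0x10000 */
        printf("up:   0x%lx\n", (unsigned long)RUP(addr, page));    /* 0x14000 */
        return 0;
    }
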
*/ - len = TARGET_PAGE_ALIGN(len); - if (!len) { - errno = ENOMEM; - goto fail; + shm_region_rm_complete(start, last); + trace_target_mmap_complete(start); + if (qemu_loglevel_mask(CPU_LOG_PAGE)) { + FILE *f = qemu_log_trylock(); + if (f) { + fprintf(f, "page layout changed following mmap\n"); + page_dump(f); + qemu_log_unlock(f); + } } + return start; +} - if (offset & ~TARGET_PAGE_MASK) { - errno = EINVAL; - goto fail; - } +/* + * Special case host page size == target page size, + * where there are no edge conditions. + */ +static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len, + int host_prot, int flags, int page_flags, + int fd, off_t offset) +{ + void *p, *want_p = g2h_untagged(start); + abi_ulong last; - /* - * If we're mapping shared memory, ensure we generate code for parallel - * execution and flush old translations. This will work up to the level - * supported by the host -- anything that requires EXCP_ATOMIC will not - * be atomic with respect to an external process. - */ - if (flags & MAP_SHARED) { - CPUState *cpu = thread_cpu; - if (!(cpu->tcg_cflags & CF_PARALLEL)) { - cpu->tcg_cflags |= CF_PARALLEL; - tb_flush(cpu); - } + p = mmap(want_p, len, host_prot, flags, fd, offset); + if (p == MAP_FAILED) { + return -1; + } + /* If the host kernel does not support MAP_FIXED_NOREPLACE, emulate. */ + if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) { + do_munmap(p, len); + errno = EEXIST; + return -1; } - real_start = start & qemu_host_page_mask; - host_offset = offset & qemu_host_page_mask; + start = h2g(p); + last = start + len - 1; + return mmap_end(start, last, start, last, flags, page_flags); +} - /* - * If the user is asking for the kernel to find a location, do that - * before we truncate the length for mapping files below. - */ - if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { - host_len = len + offset - host_offset; - host_len = HOST_PAGE_ALIGN(host_len); - start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE); - if (start == (abi_ulong)-1) { - errno = ENOMEM; - goto fail; - } - } +/* + * Special case host page size < target page size. + * + * The two special cases are increased guest alignment, and mapping + * past the end of a file. + * + * When mapping files into a memory area larger than the file, + * accesses to pages beyond the file size will cause a SIGBUS. + * + * For example, if mmaping a file of 100 bytes on a host with 4K + * pages emulating a target with 8K pages, the target expects to + * be able to access the first 8K. But the host will trap us on + * any access beyond 4K. + * + * When emulating a target with a larger page-size than the hosts, + * we may need to truncate file maps at EOF and add extra anonymous + * pages up to the targets page boundary. + * + * This workaround only works for files that do not change. + * If the file is later extended (e.g. ftruncate), the SIGBUS + * vanishes and the proper behaviour is that changes within the + * anon page should be reflected in the file. + * + * However, this case is rather common with executable images, + * so the workaround is important for even trivial tests, whereas + * the mmap of of a file being extended is less common. 
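mmap_h_eq_g() above also shows the fallback used when the host kernel predates MAP_FIXED_NOREPLACE: the flag is passed anyway, and if the kernel ignored it and placed the mapping elsewhere, the mapping is torn down and EEXIST returned. A standalone sketch of that probe-and-back-out check (hypothetical helper name, error handling trimmed):

    #include <errno.h>
    #include <stddef.h>
    #include <sys/mman.h>

    #ifndef MAP_FIXED_NOREPLACE
    #define MAP_FIXED_NOREPLACE 0x100000
    #endif

    static void *fixed_noreplace(void *want, size_t len)
    {
        void *p = mmap(want, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
        if (p == MAP_FAILED) {
            return NULL;
        }
        if (p != want) {
            /* Old kernel treated 'want' as a hint: emulate the new flag. */
            munmap(p, len);
            errno = EEXIST;
            return NULL;
        }
        return p;
    }
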
+ */ +static abi_long mmap_h_lt_g(abi_ulong start, abi_ulong len, int host_prot, + int mmap_flags, int page_flags, int fd, + off_t offset, int host_page_size) +{ + void *p, *want_p = g2h_untagged(start); + off_t fileend_adj = 0; + int flags = mmap_flags; + abi_ulong last, pass_last; - /* - * When mapping files into a memory area larger than the file, accesses - * to pages beyond the file size will cause a SIGBUS. - * - * For example, if mmaping a file of 100 bytes on a host with 4K pages - * emulating a target with 8K pages, the target expects to be able to - * access the first 8K. But the host will trap us on any access beyond - * 4K. - * - * When emulating a target with a larger page-size than the hosts, we - * may need to truncate file maps at EOF and add extra anonymous pages - * up to the targets page boundary. - */ - if ((qemu_real_host_page_size() < qemu_host_page_size) && - !(flags & MAP_ANONYMOUS)) { + if (!(flags & MAP_ANONYMOUS)) { struct stat sb; if (fstat(fd, &sb) == -1) { - goto fail; + return -1; } - - /* Are we trying to create a map beyond EOF?. */ - if (offset + len > sb.st_size) { + if (offset >= sb.st_size) { /* - * If so, truncate the file map at eof aligned with - * the hosts real pagesize. Additional anonymous maps - * will be created beyond EOF. + * The entire map is beyond the end of the file. + * Transform it to an anonymous mapping. */ - len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset); + flags |= MAP_ANONYMOUS; + fd = -1; + offset = 0; + } else if (offset + len > sb.st_size) { + /* + * A portion of the map is beyond the end of the file. + * Truncate the file portion of the allocation. + */ + fileend_adj = offset + len - sb.st_size; } } - if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { - uintptr_t host_start; - int host_prot; - void *p; + if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) { + if (fileend_adj) { + p = mmap(want_p, len, host_prot, flags | MAP_ANONYMOUS, -1, 0); + } else { + p = mmap(want_p, len, host_prot, flags, fd, offset); + } + if (p != want_p) { + if (p != MAP_FAILED) { + /* Host does not support MAP_FIXED_NOREPLACE: emulate. */ + do_munmap(p, len); + errno = EEXIST; + } + return -1; + } - host_len = len + offset - host_offset; - host_len = HOST_PAGE_ALIGN(host_len); - host_prot = target_to_host_prot(target_prot); + if (fileend_adj) { + void *t = mmap(p, len - fileend_adj, host_prot, + (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED, + fd, offset); + + if (t == MAP_FAILED) { + int save_errno = errno; + + /* + * We failed a map over the top of the successful anonymous + * mapping above. The only failure mode is running out of VMAs, + * and there's nothing that we can do to detect that earlier. + * If we have replaced an existing mapping with MAP_FIXED, + * then we cannot properly recover. It's a coin toss whether + * it would be better to exit or continue here. + */ + if (!(flags & MAP_FIXED_NOREPLACE) && + !page_check_range_empty(start, start + len - 1)) { + qemu_log("QEMU target_mmap late failure: %s", + strerror(save_errno)); + } + + do_munmap(want_p, len); + errno = save_errno; + return -1; + } + } + } else { + size_t host_len, part_len; /* - * Note: we prefer to control the mapping address. It is - * especially important if qemu_host_page_size > - * qemu_real_host_page_size. + * Take care to align the host memory. Perform a larger anonymous + * allocation and extract the aligned portion. Remap the file on + * top of that. 
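The non-fixed branch further down implements the alignment trick described in the comment just above: over-allocate anonymously by (target page size minus host page size), trim the misaligned head and any excess tail, then map the file on top. A self-contained sketch of the over-allocate-and-trim step (names are illustrative):

    #include <stdint.h>
    #include <sys/mman.h>

    /* Return 'len' bytes aligned to 'align' (a power of two >= the host page
     * size) by over-allocating and trimming the head and tail. */
    static void *alloc_aligned(size_t len, size_t align, size_t host_page)
    {
        size_t host_len = len + align - host_page;
        char *p = mmap(NULL, host_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return NULL;
        }

        size_t head = (uintptr_t)p & (align - 1);
        if (head) {
            head = align - head;
            munmap(p, head);                    /* trim misaligned head */
            p += head;
            host_len -= head;
        }
        if (len < host_len) {
            munmap(p + len, host_len - len);    /* trim excess tail */
        }
        return p;
    }
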
*/ - p = mmap(g2h_untagged(start), host_len, host_prot, - flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0); + host_len = len + TARGET_PAGE_SIZE - host_page_size; + p = mmap(want_p, host_len, host_prot, flags | MAP_ANONYMOUS, -1, 0); if (p == MAP_FAILED) { - goto fail; + return -1; + } + + part_len = (uintptr_t)p & (TARGET_PAGE_SIZE - 1); + if (part_len) { + part_len = TARGET_PAGE_SIZE - part_len; + do_munmap(p, part_len); + p += part_len; + host_len -= part_len; } - /* update start so that it points to the file position at 'offset' */ - host_start = (uintptr_t)p; + if (len < host_len) { + do_munmap(p + len, host_len - len); + } + if (!(flags & MAP_ANONYMOUS)) { - p = mmap(g2h_untagged(start), len, host_prot, - flags | MAP_FIXED, fd, host_offset); - if (p == MAP_FAILED) { - munmap(g2h_untagged(start), host_len); - goto fail; + void *t = mmap(p, len - fileend_adj, host_prot, + flags | MAP_FIXED, fd, offset); + + if (t == MAP_FAILED) { + int save_errno = errno; + do_munmap(p, len); + errno = save_errno; + return -1; } - host_start += offset - host_offset; } - start = h2g(host_start); - last = start + len - 1; - passthrough_start = start; - passthrough_last = last; + + start = h2g(p); + } + + last = start + len - 1; + if (fileend_adj) { + pass_last = ROUND_UP(last - fileend_adj, host_page_size) - 1; } else { - if (start & ~TARGET_PAGE_MASK) { - errno = EINVAL; - goto fail; + pass_last = last; + } + return mmap_end(start, last, start, pass_last, mmap_flags, page_flags); +} + +/* + * Special case host page size > target page size. + * + * The two special cases are address and file offsets that are valid + * for the guest that cannot be directly represented by the host. + */ +static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len, + int target_prot, int host_prot, + int flags, int page_flags, int fd, + off_t offset, int host_page_size) +{ + void *p, *want_p = g2h_untagged(start); + off_t host_offset = offset & -host_page_size; + abi_ulong last, real_start, real_last; + bool misaligned_offset = false; + size_t host_len; + + if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { + /* + * Adjust the offset to something representable on the host. + */ + host_len = len + offset - host_offset; + p = mmap(want_p, host_len, host_prot, flags, fd, host_offset); + if (p == MAP_FAILED) { + return -1; } + + /* Update start to the file position at offset. */ + p += offset - host_offset; + + start = h2g(p); last = start + len - 1; - real_last = HOST_PAGE_ALIGN(last) - 1; + return mmap_end(start, last, start, last, flags, page_flags); + } + + if (!(flags & MAP_ANONYMOUS)) { + misaligned_offset = (start ^ offset) & (host_page_size - 1); /* - * Test if requested memory area fits target address space - * It can fail only on 64-bit host with 32-bit target. - * On any other target/host host mmap() handles this error correctly. + * The fallback for misalignment is a private mapping + read. + * This carries none of semantics required of MAP_SHARED. */ - if (last < start || !guest_range_valid_untagged(start, len)) { - errno = ENOMEM; - goto fail; + if (misaligned_offset && (flags & MAP_TYPE) != MAP_PRIVATE) { + errno = EINVAL; + return -1; } + } - if (flags & MAP_FIXED_NOREPLACE) { - /* Validate that the chosen range is empty. 
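In mmap_h_gt_g() above, the non-fixed path handles file offsets that are target-page-aligned but not host-page-aligned by mapping from the rounded-down host offset and stepping the returned pointer forward by the difference. A minimal standalone illustration (a real caller would also track the extra head bytes when unmapping):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Map 'len' bytes of 'fd' starting at 'offset' when 'offset' is not
     * aligned to the (larger) host page size. */
    static void *map_unaligned_offset(int fd, off_t offset, size_t len)
    {
        long host_page = sysconf(_SC_PAGESIZE);
        off_t host_offset = offset & -host_page;        /* round down */
        size_t host_len = len + (offset - host_offset);

        char *p = mmap(NULL, host_len, PROT_READ, MAP_PRIVATE, fd, host_offset);
        if (p == MAP_FAILED) {
            return NULL;
        }
        return p + (offset - host_offset);   /* view starts at 'offset' */
    }
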
*/ - if (!page_check_range_empty(start, last)) { - errno = EEXIST; - goto fail; - } + last = start + len - 1; + real_start = start & -host_page_size; + real_last = ROUND_UP(last, host_page_size) - 1; - /* - * With reserved_va, the entire address space is mmaped in the - * host to ensure it isn't accidentally used for something else. - * We have just checked that the guest address is not mapped - * within the guest, but need to replace the host reservation. - * - * Without reserved_va, despite the guest address check above, - * keep MAP_FIXED_NOREPLACE so that the guest does not overwrite - * any host address mappings. - */ - if (reserved_va) { - flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED; + /* + * Handle the start and end of the mapping. + */ + if (real_start < start) { + abi_ulong real_page_last = real_start + host_page_size - 1; + if (last <= real_page_last) { + /* Entire allocation a subset of one host page. */ + if (!mmap_frag(real_start, start, last, target_prot, + flags, fd, offset)) { + return -1; } + return mmap_end(start, last, -1, 0, flags, page_flags); } - /* - * worst case: we cannot map the file because the offset is not - * aligned, so we read it - */ - if (!(flags & MAP_ANONYMOUS) && - (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) { - /* - * msync() won't work here, so we return an error if write is - * possible while it is a shared mapping - */ - if ((flags & MAP_TYPE) == MAP_SHARED - && (target_prot & PROT_WRITE)) { - errno = EINVAL; - goto fail; - } - retaddr = target_mmap(start, len, target_prot | PROT_WRITE, - (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) - | MAP_PRIVATE | MAP_ANONYMOUS, - -1, 0); - if (retaddr == -1) { - goto fail; - } - if (pread(fd, g2h_untagged(start), len, offset) == -1) { - goto fail; - } - if (!(target_prot & PROT_WRITE)) { - ret = target_mprotect(start, len, target_prot); - assert(ret == 0); - } - goto the_end; + if (!mmap_frag(real_start, start, real_page_last, target_prot, + flags, fd, offset)) { + return -1; } + real_start = real_page_last + 1; + } - /* handle the start of the mapping */ - if (start > real_start) { - if (real_last == real_start + qemu_host_page_size - 1) { - /* one single host page */ - if (!mmap_frag(real_start, start, last, - target_prot, flags, fd, offset)) { - goto fail; - } - goto the_end1; - } - if (!mmap_frag(real_start, start, - real_start + qemu_host_page_size - 1, - target_prot, flags, fd, offset)) { - goto fail; - } - real_start += qemu_host_page_size; + if (last < real_last) { + abi_ulong real_page_start = real_last - host_page_size + 1; + if (!mmap_frag(real_page_start, real_page_start, last, + target_prot, flags, fd, + offset + real_page_start - start)) { + return -1; } - /* handle the end of the mapping */ - if (last < real_last) { - abi_ulong real_page = real_last - qemu_host_page_size + 1; - if (!mmap_frag(real_page, real_page, last, - target_prot, flags, fd, - offset + real_page - start)) { - goto fail; - } - real_last -= qemu_host_page_size; + real_last = real_page_start - 1; + } + + if (real_start > real_last) { + return mmap_end(start, last, -1, 0, flags, page_flags); + } + + /* + * Handle the middle of the mapping. 
+ */ + + host_len = real_last - real_start + 1; + want_p += real_start - start; + + if (flags & MAP_ANONYMOUS) { + p = mmap(want_p, host_len, host_prot, flags, -1, 0); + } else if (!misaligned_offset) { + p = mmap(want_p, host_len, host_prot, flags, fd, + offset + real_start - start); + } else { + p = mmap(want_p, host_len, host_prot | PROT_WRITE, + flags | MAP_ANONYMOUS, -1, 0); + } + if (p != want_p) { + if (p != MAP_FAILED) { + do_munmap(p, host_len); + errno = EEXIST; } + return -1; + } - /* map the middle (easier) */ - if (real_start < real_last) { - void *p, *want_p; - off_t offset1; - size_t len1; + if (misaligned_offset) { + /* TODO: The read could be short. */ + if (pread(fd, p, host_len, offset + real_start - start) != host_len) { + do_munmap(p, host_len); + return -1; + } + if (!(host_prot & PROT_WRITE)) { + mprotect(p, host_len, host_prot); + } + } - if (flags & MAP_ANONYMOUS) { - offset1 = 0; - } else { - offset1 = offset + real_start - start; + return mmap_end(start, last, -1, 0, flags, page_flags); +} + +static abi_long target_mmap__locked(abi_ulong start, abi_ulong len, + int target_prot, int flags, int page_flags, + int fd, off_t offset) +{ + int host_page_size = qemu_real_host_page_size(); + int host_prot; + + /* + * For reserved_va, we are in full control of the allocation. + * Find a suitable hole and convert to MAP_FIXED. + */ + if (reserved_va) { + if (flags & MAP_FIXED_NOREPLACE) { + /* Validate that the chosen range is empty. */ + if (!page_check_range_empty(start, start + len - 1)) { + errno = EEXIST; + return -1; } - len1 = real_last - real_start + 1; - want_p = g2h_untagged(real_start); - - p = mmap(want_p, len1, target_to_host_prot(target_prot), - flags, fd, offset1); - if (p != want_p) { - if (p != MAP_FAILED) { - munmap(p, len1); - errno = EEXIST; - } - goto fail; + flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED; + } else if (!(flags & MAP_FIXED)) { + abi_ulong real_start = start & -host_page_size; + off_t host_offset = offset & -host_page_size; + size_t real_len = len + offset - host_offset; + abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE); + + start = mmap_find_vma(real_start, real_len, align); + if (start == (abi_ulong)-1) { + errno = ENOMEM; + return -1; } - passthrough_start = real_start; - passthrough_last = real_last; + start += offset - host_offset; + flags |= MAP_FIXED; } } - the_end1: - if (flags & MAP_ANONYMOUS) { - page_flags |= PAGE_ANON; - } - page_flags |= PAGE_RESET; - if (passthrough_start > passthrough_last) { - page_set_flags(start, last, page_flags); + + host_prot = target_to_host_prot(target_prot); + + if (host_page_size == TARGET_PAGE_SIZE) { + return mmap_h_eq_g(start, len, host_prot, flags, + page_flags, fd, offset); + } else if (host_page_size < TARGET_PAGE_SIZE) { + return mmap_h_lt_g(start, len, host_prot, flags, + page_flags, fd, offset, host_page_size); } else { - if (start < passthrough_start) { - page_set_flags(start, passthrough_start - 1, page_flags); + return mmap_h_gt_g(start, len, target_prot, host_prot, flags, + page_flags, fd, offset, host_page_size); + } +} + +/* NOTE: all the constants are the HOST ones */ +abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, + int flags, int fd, off_t offset) +{ + abi_long ret; + int page_flags; + + trace_target_mmap(start, len, target_prot, flags, fd, offset); + + if (!len) { + errno = EINVAL; + return -1; + } + + page_flags = validate_prot_to_pageflags(target_prot); + if (!page_flags) { + errno = EINVAL; + return -1; + } + + /* Also check for overflows... 
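The misaligned-offset branch just above is the read-instead-of-map fallback: the region is mapped anonymous and writable, filled with pread(), and the write permission dropped afterwards if the caller did not ask for it; as the earlier check in mmap_h_gt_g() notes, this only preserves MAP_PRIVATE semantics. A self-contained sketch of the pattern:

    #include <stddef.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *map_by_read(int fd, off_t offset, size_t len, int prot)
    {
        void *p = mmap(NULL, len, prot | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return NULL;
        }
        /* As the diff notes, the read could in principle be short. */
        if (pread(fd, p, len, offset) != (ssize_t)len) {
            munmap(p, len);
            return NULL;
        }
        if (!(prot & PROT_WRITE)) {
            mprotect(p, len, prot);   /* drop the temporary write access */
        }
        return p;
    }
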
*/ + len = TARGET_PAGE_ALIGN(len); + if (!len || len != (size_t)len) { + errno = ENOMEM; + return -1; + } + + if (offset & ~TARGET_PAGE_MASK) { + errno = EINVAL; + return -1; + } + if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) { + if (start & ~TARGET_PAGE_MASK) { + errno = EINVAL; + return -1; } - page_set_flags(passthrough_start, passthrough_last, - page_flags | PAGE_PASSTHROUGH); - if (passthrough_last < last) { - page_set_flags(passthrough_last + 1, last, page_flags); + if (!guest_range_valid_untagged(start, len)) { + errno = ENOMEM; + return -1; } } - shm_region_rm_complete(start, last); - the_end: - trace_target_mmap_complete(start); - if (qemu_loglevel_mask(CPU_LOG_PAGE)) { - FILE *f = qemu_log_trylock(); - if (f) { - fprintf(f, "page layout changed following mmap\n"); - page_dump(f); - qemu_log_unlock(f); + + mmap_lock(); + + ret = target_mmap__locked(start, len, target_prot, flags, + page_flags, fd, offset); + + mmap_unlock(); + + /* + * If we're mapping shared memory, ensure we generate code for parallel + * execution and flush old translations. This will work up to the level + * supported by the host -- anything that requires EXCP_ATOMIC will not + * be atomic with respect to an external process. + */ + if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) { + CPUState *cpu = thread_cpu; + if (!(cpu->tcg_cflags & CF_PARALLEL)) { + cpu->tcg_cflags |= CF_PARALLEL; + tb_flush(cpu); } } - mmap_unlock(); - return start; -fail: - mmap_unlock(); - return -1; + + return ret; } static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len) { + int host_page_size = qemu_real_host_page_size(); abi_ulong real_start; abi_ulong real_last; abi_ulong real_len; @@ -793,8 +980,8 @@ static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len) int prot; last = start + len - 1; - real_start = start & qemu_host_page_mask; - real_last = HOST_PAGE_ALIGN(last) - 1; + real_start = start & -host_page_size; + real_last = ROUND_UP(last, host_page_size) - 1; /* * If guest pages remain on the first or last host pages, @@ -802,7 +989,7 @@ static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len) * The single page special case is required for the last page, * lest real_start overflow to zero. */ - if (real_last - real_start < qemu_host_page_size) { + if (real_last - real_start < host_page_size) { prot = 0; for (a = real_start; a < start; a += TARGET_PAGE_SIZE) { prot |= page_get_flags(a); @@ -818,14 +1005,14 @@ static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len) prot |= page_get_flags(a); } if (prot != 0) { - real_start += qemu_host_page_size; + real_start += host_page_size; } for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) { prot |= page_get_flags(a + 1); } if (prot != 0) { - real_last -= qemu_host_page_size; + real_last -= host_page_size; } if (real_last < real_start) { @@ -836,13 +1023,7 @@ static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len) real_len = real_last - real_start + 1; host_start = g2h_untagged(real_start); - if (reserved_va) { - void *ptr = mmap(host_start, real_len, PROT_NONE, - MAP_FIXED | MAP_ANONYMOUS - | MAP_PRIVATE | MAP_NORESERVE, -1, 0); - return ptr == host_start ? 
0 : -1; - } - return munmap(host_start, real_len); + return do_munmap(host_start, real_len); } int target_munmap(abi_ulong start, abi_ulong len) @@ -1055,69 +1236,161 @@ static inline abi_ulong target_shmlba(CPUArchState *cpu_env) } #endif +#if defined(__arm__) || defined(__mips__) || defined(__sparc__) +#define HOST_FORCE_SHMLBA 1 +#else +#define HOST_FORCE_SHMLBA 0 +#endif + abi_ulong target_shmat(CPUArchState *cpu_env, int shmid, abi_ulong shmaddr, int shmflg) { CPUState *cpu = env_cpu(cpu_env); - abi_ulong raddr; struct shmid_ds shm_info; int ret; - abi_ulong shmlba; + int h_pagesize; + int t_shmlba, h_shmlba, m_shmlba; + size_t t_len, h_len, m_len; /* shmat pointers are always untagged */ - /* find out the length of the shared memory segment */ + /* + * Because we can't use host shmat() unless the address is sufficiently + * aligned for the host, we'll need to check both. + * TODO: Could be fixed with softmmu. + */ + t_shmlba = target_shmlba(cpu_env); + h_pagesize = qemu_real_host_page_size(); + h_shmlba = (HOST_FORCE_SHMLBA ? SHMLBA : h_pagesize); + m_shmlba = MAX(t_shmlba, h_shmlba); + + if (shmaddr) { + if (shmaddr & (m_shmlba - 1)) { + if (shmflg & SHM_RND) { + /* + * The guest is allowing the kernel to round the address. + * Assume that the guest is ok with us rounding to the + * host required alignment too. Anyway if we don't, we'll + * get an error from the kernel. + */ + shmaddr &= ~(m_shmlba - 1); + if (shmaddr == 0 && (shmflg & SHM_REMAP)) { + return -TARGET_EINVAL; + } + } else { + int require = TARGET_PAGE_SIZE; +#ifdef TARGET_FORCE_SHMLBA + require = t_shmlba; +#endif + /* + * Include host required alignment, as otherwise we cannot + * use host shmat at all. + */ + require = MAX(require, h_shmlba); + if (shmaddr & (require - 1)) { + return -TARGET_EINVAL; + } + } + } + } else { + if (shmflg & SHM_REMAP) { + return -TARGET_EINVAL; + } + } + /* All rounding now manually concluded. */ + shmflg &= ~SHM_RND; + + /* Find out the length of the shared memory segment. */ ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); if (is_error(ret)) { /* can't get length, bail out */ return ret; } + t_len = TARGET_PAGE_ALIGN(shm_info.shm_segsz); + h_len = ROUND_UP(shm_info.shm_segsz, h_pagesize); + m_len = MAX(t_len, h_len); - shmlba = target_shmlba(cpu_env); - - if (shmaddr & (shmlba - 1)) { - if (shmflg & SHM_RND) { - shmaddr &= ~(shmlba - 1); - } else { - return -TARGET_EINVAL; - } - } - if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) { + if (!guest_range_valid_untagged(shmaddr, m_len)) { return -TARGET_EINVAL; } WITH_MMAP_LOCK_GUARD() { - void *host_raddr; + bool mapped = false; + void *want, *test; abi_ulong last; - if (shmaddr) { - host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg); + if (!shmaddr) { + shmaddr = mmap_find_vma(0, m_len, m_shmlba); + if (shmaddr == -1) { + return -TARGET_ENOMEM; + } + mapped = !reserved_va; + } else if (shmflg & SHM_REMAP) { + /* + * If host page size > target page size, the host shmat may map + * more memory than the guest expects. Reject a mapping that + * would replace memory in the unexpected gap. + * TODO: Could be fixed with softmmu. + */ + if (t_len < h_len && + !page_check_range_empty(shmaddr + t_len, + shmaddr + h_len - 1)) { + return -TARGET_EINVAL; + } } else { - abi_ulong mmap_start; + if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) { + return -TARGET_EINVAL; + } + } - /* In order to use the host shmat, we need to honor host SHMLBA. 
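The reworked target_shmat() above has to satisfy two alignment constraints at once: the guest's SHMLBA and whatever the host shmat() requires (SHMLBA where HOST_FORCE_SHMLBA applies, otherwise the host page size). It therefore checks the attach address against the larger of the two and rounds only when the guest passed SHM_RND. A small sketch of that combined check (simplified: the real code relaxes the non-SHM_RND requirement to the target page size unless TARGET_FORCE_SHMLBA is defined):

    #include <sys/shm.h>

    static int check_attach_addr(unsigned long *shmaddr, int shmflg,
                                 unsigned long guest_shmlba,
                                 unsigned long host_shmlba)
    {
        unsigned long m = guest_shmlba > host_shmlba ? guest_shmlba : host_shmlba;

        if (*shmaddr & (m - 1)) {
            if (!(shmflg & SHM_RND)) {
                return -1;               /* -TARGET_EINVAL in the real code */
            }
            *shmaddr &= ~(m - 1);        /* round down to the combined LBA */
        }
        return 0;
    }
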
*/ - mmap_start = mmap_find_vma(0, shm_info.shm_segsz, - MAX(SHMLBA, shmlba)); + /* All placement is now complete. */ + want = (void *)g2h_untagged(shmaddr); - if (mmap_start == -1) { - return -TARGET_ENOMEM; + /* + * Map anonymous pages across the entire range, then remap with + * the shared memory. This is required for a number of corner + * cases for which host and guest page sizes differ. + */ + if (h_len != t_len) { + int mmap_p = PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE); + int mmap_f = MAP_PRIVATE | MAP_ANONYMOUS + | (reserved_va || mapped || (shmflg & SHM_REMAP) + ? MAP_FIXED : MAP_FIXED_NOREPLACE); + + test = mmap(want, m_len, mmap_p, mmap_f, -1, 0); + if (unlikely(test != want)) { + /* shmat returns EINVAL not EEXIST like mmap. */ + ret = (test == MAP_FAILED && errno != EEXIST + ? get_errno(-1) : -TARGET_EINVAL); + if (mapped) { + do_munmap(want, m_len); + } + return ret; } - host_raddr = shmat(shmid, g2h_untagged(mmap_start), - shmflg | SHM_REMAP); + mapped = true; } - if (host_raddr == (void *)-1) { - return get_errno(-1); + if (reserved_va || mapped) { + shmflg |= SHM_REMAP; + } + test = shmat(shmid, want, shmflg); + if (test == MAP_FAILED) { + ret = get_errno(-1); + if (mapped) { + do_munmap(want, m_len); + } + return ret; } - raddr = h2g(host_raddr); - last = raddr + shm_info.shm_segsz - 1; + assert(test == want); - page_set_flags(raddr, last, + last = shmaddr + m_len - 1; + page_set_flags(shmaddr, last, PAGE_VALID | PAGE_RESET | PAGE_READ | - (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE)); + (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) | + (shmflg & SHM_EXEC ? PAGE_EXEC : 0)); - shm_region_rm_complete(raddr, last); - shm_region_add(raddr, last); + shm_region_rm_complete(shmaddr, last); + shm_region_add(shmaddr, last); } /* @@ -1131,7 +1404,15 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid, tb_flush(cpu); } - return raddr; + if (qemu_loglevel_mask(CPU_LOG_PAGE)) { + FILE *f = qemu_log_trylock(); + if (f) { + fprintf(f, "page layout changed following shmat\n"); + page_dump(f); + qemu_log_unlock(f); + } + } + return shmaddr; } abi_long target_shmdt(abi_ulong shmaddr) diff --git a/linux-user/nios2/cpu_loop.c b/linux-user/nios2/cpu_loop.c index da77ede76bd..7fe08c87501 100644 --- a/linux-user/nios2/cpu_loop.c +++ b/linux-user/nios2/cpu_loop.c @@ -32,6 +32,7 @@ void cpu_loop(CPUNios2State *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); switch (trapnr) { case EXCP_INTERRUPT: diff --git a/linux-user/nios2/signal.c b/linux-user/nios2/signal.c index 32b3dc99c6e..64c345f4099 100644 --- a/linux-user/nios2/signal.c +++ b/linux-user/nios2/signal.c @@ -157,7 +157,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, return; } - tswap_siginfo(&frame->info, info); + frame->info = *info; /* Create the ucontext. 
*/ __put_user(0, &frame->uc.tuc_flags); diff --git a/linux-user/openrisc/signal.c b/linux-user/openrisc/signal.c index be8b68784a2..cb74a9fe5e2 100644 --- a/linux-user/openrisc/signal.c +++ b/linux-user/openrisc/signal.c @@ -103,7 +103,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, } if (ka->sa_flags & SA_SIGINFO) { - tswap_siginfo(&frame->info, info); + frame->info = *info; } __put_user(0, &frame->uc.tuc_flags); diff --git a/linux-user/ppc/signal.c b/linux-user/ppc/signal.c index 7e7302823b0..652038a53ce 100644 --- a/linux-user/ppc/signal.c +++ b/linux-user/ppc/signal.c @@ -486,14 +486,14 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, int i, err = 0; #if defined(TARGET_PPC64) struct target_sigcontext *sc = 0; - struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; + struct image_info *image = get_task_state(thread_cpu)->info; #endif rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) goto sigsegv; - tswap_siginfo(&rt_sf->info, info); + rt_sf->info = *info; __put_user(0, &rt_sf->uc.tuc_flags); __put_user(0, &rt_sf->uc.tuc_link); @@ -502,7 +502,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, __put_user(h2g (&rt_sf->uc.tuc_mcontext), &rt_sf->uc.tuc_regs); #endif - for(i = 0; i < TARGET_NSIG_WORDS; i++) { + for (i = 0; i < TARGET_NSIG_WORDS; i++) { __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); } @@ -673,7 +673,7 @@ abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx, } if (uold_ctx) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) { return -TARGET_EFAULT; diff --git a/linux-user/qemu.h b/linux-user/qemu.h index 4de9ec783f6..32cd43d9eff 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -162,6 +162,11 @@ typedef struct TaskState { uint64_t start_boottime; } TaskState; +static inline TaskState *get_task_state(CPUState *cs) +{ + return cs->opaque; +} + abi_long do_brk(abi_ulong new_brk); int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode, bool safe); diff --git a/linux-user/riscv/cpu_loop.c b/linux-user/riscv/cpu_loop.c index bffca7db127..52c49c2e426 100644 --- a/linux-user/riscv/cpu_loop.c +++ b/linux-user/riscv/cpu_loop.c @@ -97,7 +97,7 @@ void cpu_loop(CPURISCVState *env) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) { CPUState *cpu = env_cpu(env); - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); struct image_info *info = ts->info; env->pc = regs->sepc; diff --git a/linux-user/riscv/signal.c b/linux-user/riscv/signal.c index 941eadce870..358fa1d82d2 100644 --- a/linux-user/riscv/signal.c +++ b/linux-user/riscv/signal.c @@ -125,7 +125,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, } setup_ucontext(&frame->uc, env, set); - tswap_siginfo(&frame->info, info); + frame->info = *info; env->pc = ka->_sa_handler; env->gpr[xSP] = frame_addr; diff --git a/linux-user/s390x/signal.c b/linux-user/s390x/signal.c index b40f738a708..df49c247080 100644 --- a/linux-user/s390x/signal.c +++ b/linux-user/s390x/signal.c @@ -267,7 +267,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, } /* Create siginfo on the signal stack. */ - tswap_siginfo(&frame->info, info); + frame->info = *info; /* Create ucontext on the signal stack. 
*/ uc_flags = 0; diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c index c16c2c2d57f..9ecc026fae7 100644 --- a/linux-user/sh4/signal.c +++ b/linux-user/sh4/signal.c @@ -233,7 +233,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, goto give_sigsegv; } - tswap_siginfo(&frame->info, info); + frame->info = *info; /* Create the ucontext. */ __put_user(0, &frame->uc.tuc_flags); diff --git a/linux-user/signal-common.h b/linux-user/signal-common.h index 3e2dc604c2f..f4cbe6185e1 100644 --- a/linux-user/signal-common.h +++ b/linux-user/signal-common.h @@ -43,8 +43,6 @@ void host_to_target_sigset_internal(target_sigset_t *d, const sigset_t *s); void target_to_host_sigset_internal(sigset_t *d, const target_sigset_t *s); -void tswap_siginfo(target_siginfo_t *tinfo, - const target_siginfo_t *info); void set_sigmask(const sigset_t *set); void force_sig(int sig); void force_sigsegv(int oldsig); @@ -113,7 +111,7 @@ int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset, static inline void finish_sigsuspend_mask(int ret) { if (ret != -QEMU_ERESTARTSYS) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); ts->in_sigsuspend = 1; } } diff --git a/linux-user/signal.c b/linux-user/signal.c index c9527adfa3a..05dc4afb524 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -34,6 +34,9 @@ #include "user/safe-syscall.h" #include "tcg/tcg.h" +/* target_siginfo_t must fit in gdbstub's siginfo save area. */ +QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH); + static struct target_sigaction sigact_table[TARGET_NSIG]; static void host_signal_handler(int host_signum, siginfo_t *info, @@ -172,7 +175,7 @@ void target_to_host_old_sigset(sigset_t *sigset, int block_signals(void) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); sigset_t set; /* It's OK to block everything including SIGSEGV, because we won't @@ -194,7 +197,7 @@ int block_signals(void) */ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); if (oldset) { *oldset = ts->signal_mask; @@ -237,7 +240,7 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) */ void set_sigmask(const sigset_t *set) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); ts->signal_mask = *set; } @@ -246,7 +249,7 @@ void set_sigmask(const sigset_t *set) int on_sig_stack(unsigned long sp) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); return (sp - ts->sigaltstack_used.ss_sp < ts->sigaltstack_used.ss_size); @@ -254,7 +257,7 @@ int on_sig_stack(unsigned long sp) int sas_ss_flags(unsigned long sp) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE : on_sig_stack(sp) ? SS_ONSTACK : 0); @@ -265,7 +268,7 @@ abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka) /* * This is the X/Open sanctioned signal stack switching. 
*/ - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size; @@ -275,7 +278,7 @@ abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka) void target_save_altstack(target_stack_t *uss, CPUArchState *env) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp); __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags); @@ -284,7 +287,7 @@ void target_save_altstack(target_stack_t *uss, CPUArchState *env) abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); size_t minstacksize = TARGET_MINSIGSTKSZ; target_stack_t ss; @@ -409,8 +412,8 @@ static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, tinfo->si_code = deposit32(si_code, 16, 16, si_type); } -void tswap_siginfo(target_siginfo_t *tinfo, - const target_siginfo_t *info) +static void tswap_siginfo(target_siginfo_t *tinfo, + const target_siginfo_t *info) { int si_type = extract32(info->si_code, 16, 16); int si_code = sextract32(info->si_code, 0, 16); @@ -571,7 +574,7 @@ static void signal_table_init(void) void signal_init(void) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); struct sigaction act, oact; /* initialize signal conversion tables */ @@ -623,7 +626,6 @@ void signal_init(void) void force_sig(int sig) { CPUState *cpu = thread_cpu; - CPUArchState *env = cpu_env(cpu); target_siginfo_t info = {}; info.si_signo = sig; @@ -631,7 +633,7 @@ void force_sig(int sig) info.si_code = TARGET_SI_KERNEL; info._sifields._kill._pid = 0; info._sifields._kill._uid = 0; - queue_signal(env, info.si_signo, QEMU_SI_KILL, &info); + queue_signal(cpu_env(cpu), info.si_signo, QEMU_SI_KILL, &info); } /* @@ -641,14 +643,13 @@ void force_sig(int sig) void force_sig_fault(int sig, int code, abi_ulong addr) { CPUState *cpu = thread_cpu; - CPUArchState *env = cpu_env(cpu); target_siginfo_t info = {}; info.si_signo = sig; info.si_errno = 0; info.si_code = code; info._sifields._sigfault._addr = addr; - queue_signal(env, sig, QEMU_SI_FAULT, &info); + queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info); } /* Force a SIGSEGV if we couldn't write to memory trying to set @@ -671,7 +672,7 @@ void force_sigsegv(int oldsig) void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, MMUAccessType access_type, bool maperr, uintptr_t ra) { - const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; if (tcg_ops->record_sigsegv) { tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra); @@ -687,7 +688,7 @@ void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, MMUAccessType access_type, uintptr_t ra) { - const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; if (tcg_ops->record_sigbus) { tcg_ops->record_sigbus(cpu, addr, access_type, ra); @@ -730,7 +731,7 @@ static G_NORETURN void dump_core_and_abort(CPUArchState *env, int target_sig) { CPUState *cpu = env_cpu(env); - TaskState *ts = (TaskState *)cpu->opaque; + TaskState *ts = get_task_state(cpu); int host_sig, core_dumped = 0; /* On exit, undo the remapping of SIGABRT. 
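The QEMU_BUILD_BUG_ON() added at the top of linux-user/signal.c above is a compile-time guard that target_siginfo_t still fits in the gdbstub's siginfo save area; the same effect can be had with C11's static_assert, e.g. (sizes here are made up for illustration):

    #include <assert.h>

    typedef struct {
        int si_signo;
        int si_errno;
        int si_code;
        unsigned char payload[116];
    } example_siginfo;

    /* Refuse to build if the structure outgrows the buffer meant to hold it. */
    static_assert(sizeof(example_siginfo) <= 128,
                  "siginfo must fit in the save area");
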
*/ @@ -769,7 +770,7 @@ void queue_signal(CPUArchState *env, int sig, int si_type, target_siginfo_t *info) { CPUState *cpu = env_cpu(env); - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); trace_user_queue_signal(env, sig); @@ -954,7 +955,7 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc) { CPUState *cpu = thread_cpu; CPUArchState *env = cpu_env(cpu); - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); target_siginfo_t tinfo; host_sigcontext *uc = puc; struct emulated_sigtable *k; @@ -1172,15 +1173,27 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig, CPUState *cpu = env_cpu(cpu_env); abi_ulong handler; sigset_t set; + target_siginfo_t unswapped; target_sigset_t target_old_set; struct target_sigaction *sa; - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); trace_user_handle_signal(cpu_env, sig); /* dequeue signal */ k->pending = 0; - sig = gdb_handlesig(cpu, sig); + /* + * Writes out siginfo values byteswapped, accordingly to the target. + * It also cleans the si_type from si_code making it correct for + * the target. We must hold on to the original unswapped copy for + * strace below, because si_type is still required there. + */ + if (unlikely(qemu_loglevel_mask(LOG_STRACE))) { + unswapped = k->info; + } + tswap_siginfo(&k->info, &k->info); + + sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info)); if (!sig) { sa = NULL; handler = TARGET_SIG_IGN; @@ -1190,7 +1203,7 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig, } if (unlikely(qemu_loglevel_mask(LOG_STRACE))) { - print_taken_signal(sig, &k->info); + print_taken_signal(sig, &unswapped); } if (handler == TARGET_SIG_DFL) { @@ -1256,7 +1269,7 @@ void process_pending_signals(CPUArchState *cpu_env) { CPUState *cpu = env_cpu(cpu_env); int sig; - TaskState *ts = cpu->opaque; + TaskState *ts = get_task_state(cpu); sigset_t set; sigset_t *blocked_set; @@ -1316,7 +1329,7 @@ void process_pending_signals(CPUArchState *cpu_env) int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset, target_ulong sigsize) { - TaskState *ts = (TaskState *)thread_cpu->opaque; + TaskState *ts = get_task_state(thread_cpu); sigset_t *host_set = &ts->sigsuspend_mask; target_sigset_t *target_sigset; diff --git a/linux-user/sparc/cpu_loop.c b/linux-user/sparc/cpu_loop.c index 3c1bde00dda..50424a54df5 100644 --- a/linux-user/sparc/cpu_loop.c +++ b/linux-user/sparc/cpu_loop.c @@ -293,7 +293,7 @@ void cpu_loop (CPUSPARCState *env) case TT_FP_EXCP: { int code = TARGET_FPE_FLTUNK; - target_ulong fsr = env->fsr; + target_ulong fsr = cpu_get_fsr(env); if ((fsr & FSR_FTT_MASK) == FSR_FTT_IEEE_EXCP) { if (fsr & FSR_NVC) { diff --git a/linux-user/sparc/signal.c b/linux-user/sparc/signal.c index dfcae707e03..f164b74032f 100644 --- a/linux-user/sparc/signal.c +++ b/linux-user/sparc/signal.c @@ -199,20 +199,21 @@ static void save_fpu(struct target_siginfo_fpu *fpu, CPUSPARCState *env) for (i = 0; i < 32; ++i) { __put_user(env->fpr[i].ll, &fpu->si_double_regs[i]); } - __put_user(env->fsr, &fpu->si_fsr); + __put_user(cpu_get_fsr(env), &fpu->si_fsr); __put_user(env->gsr, &fpu->si_gsr); __put_user(env->fprs, &fpu->si_fprs); #else for (i = 0; i < 16; ++i) { __put_user(env->fpr[i].ll, &fpu->si_double_regs[i]); } - __put_user(env->fsr, &fpu->si_fsr); + __put_user(cpu_get_fsr(env), &fpu->si_fsr); __put_user(0, &fpu->si_fpqdepth); #endif } static void restore_fpu(struct target_siginfo_fpu *fpu, CPUSPARCState *env) { + target_ulong fsr; int i; #ifdef 
TARGET_SPARC64 @@ -230,15 +231,16 @@ static void restore_fpu(struct target_siginfo_fpu *fpu, CPUSPARCState *env) __get_user(env->fpr[i].ll, &fpu->si_double_regs[i]); } } - __get_user(env->fsr, &fpu->si_fsr); __get_user(env->gsr, &fpu->si_gsr); env->fprs |= fprs; #else for (i = 0; i < 16; ++i) { __get_user(env->fpr[i].ll, &fpu->si_double_regs[i]); } - __get_user(env->fsr, &fpu->si_fsr); #endif + + __get_user(fsr, &fpu->si_fsr); + cpu_put_fsr(env, fsr); } #ifdef TARGET_ARCH_HAS_SETUP_FRAME @@ -331,7 +333,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, __put_user(0, &sf->rwin_save); /* TODO: save_rwin_state */ - tswap_siginfo(&sf->info, info); + sf->info = *info; tswap_sigset(&sf->mask, set); target_save_altstack(&sf->stack, env); @@ -662,6 +664,7 @@ void sparc64_set_context(CPUSPARCState *env) __get_user(fenab, &(fpup->mcfpu_enab)); if (fenab) { abi_ulong fprs; + abi_ulong fsr; /* * We use the FPRS from the guest only in deciding whether @@ -690,7 +693,8 @@ void sparc64_set_context(CPUSPARCState *env) __get_user(env->fpr[i].ll, &(fpup->mcfpu_fregs.dregs[i])); } } - __get_user(env->fsr, &(fpup->mcfpu_fsr)); + __get_user(fsr, &(fpup->mcfpu_fsr)); + cpu_put_fsr(env, fsr); __get_user(env->gsr, &(fpup->mcfpu_gsr)); } unlock_user_struct(ucp, ucp_addr, 0); diff --git a/linux-user/strace.c b/linux-user/strace.c index cf26e552643..b4d1098170e 100644 --- a/linux-user/strace.c +++ b/linux-user/strace.c @@ -657,7 +657,6 @@ print_newselect(CPUArchState *cpu_env, const struct syscallname *name, } #endif -#ifdef TARGET_NR_semctl static void print_semctl(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg1, abi_long arg2, abi_long arg3, @@ -668,7 +667,26 @@ print_semctl(CPUArchState *cpu_env, const struct syscallname *name, print_ipc_cmd(arg3); qemu_log(",0x" TARGET_ABI_FMT_lx ")", arg4); } -#endif + +static void +print_shmat(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + static const struct flags shmat_flags[] = { + FLAG_GENERIC(SHM_RND), + FLAG_GENERIC(SHM_REMAP), + FLAG_GENERIC(SHM_RDONLY), + FLAG_GENERIC(SHM_EXEC), + FLAG_END + }; + + print_syscall_prologue(name); + print_raw_param(TARGET_ABI_FMT_ld, arg0, 0); + print_pointer(arg1, 0); + print_flags(shmat_flags, arg2, 1); + print_syscall_epilogue(name); +} #ifdef TARGET_NR_ipc static void @@ -678,10 +696,12 @@ print_ipc(CPUArchState *cpu_env, const struct syscallname *name, { switch(arg1) { case IPCOP_semctl: - qemu_log("semctl(" TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld ",", - arg1, arg2); - print_ipc_cmd(arg3); - qemu_log(",0x" TARGET_ABI_FMT_lx ")", arg4); + print_semctl(cpu_env, &(const struct syscallname){ .name = "semctl" }, + arg2, arg3, arg4, arg5, 0, 0); + break; + case IPCOP_shmat: + print_shmat(cpu_env, &(const struct syscallname){ .name = "shmat" }, + arg2, arg5, arg3, 0, 0, 0); break; default: qemu_log(("%s(" diff --git a/linux-user/strace.list b/linux-user/strace.list index 6655d4f26d6..dfd4237d14e 100644 --- a/linux-user/strace.list +++ b/linux-user/strace.list @@ -1398,7 +1398,7 @@ { TARGET_NR_sgetmask, "sgetmask" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_shmat -{ TARGET_NR_shmat, "shmat" , NULL, NULL, print_syscall_ret_addr }, +{ TARGET_NR_shmat, "shmat" , NULL, print_shmat, print_syscall_ret_addr }, #endif #ifdef TARGET_NR_shmctl { TARGET_NR_shmctl, "shmctl" , NULL, NULL, NULL }, diff --git a/linux-user/syscall.c b/linux-user/syscall.c index d2436324054..75764f7a8fe 100644 --- 
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index d2436324054..75764f7a8fe 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -6457,16 +6457,28 @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
     case PR_SET_NO_NEW_PRIVS:
     case PR_GET_IO_FLUSHER:
     case PR_SET_IO_FLUSHER:
+    case PR_SET_CHILD_SUBREAPER:
+    case PR_GET_SPECULATION_CTRL:
+    case PR_SET_SPECULATION_CTRL:
         /* Some prctl options have no pointer arguments and we can pass on. */
         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
 
     case PR_GET_CHILD_SUBREAPER:
-    case PR_SET_CHILD_SUBREAPER:
-    case PR_GET_SPECULATION_CTRL:
-    case PR_SET_SPECULATION_CTRL:
+        {
+            int val;
+            ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
+                                  arg3, arg4, arg5));
+            if (!is_error(ret) && put_user_s32(val, arg2)) {
+                return -TARGET_EFAULT;
+            }
+            return ret;
+        }
+
     case PR_GET_TID_ADDRESS:
-        /* TODO */
-        return -TARGET_EINVAL;
+        {
+            TaskState *ts = env_cpu(env)->opaque;
+            return put_user_ual(ts->child_tidptr, arg2);
+        }
 
     case PR_GET_FPEXC:
     case PR_SET_FPEXC:
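The split above matters because PR_GET_CHILD_SUBREAPER returns its result through an (int *) passed in arg2, so the host value has to be read locally and copied back into guest memory rather than passed through untranslated. A native sketch of the kernel calling convention being emulated:

/* Native illustration of the PR_{SET,GET}_CHILD_SUBREAPER calling convention. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
    int reaper = -1;

    /* The SET variant takes the value directly in arg2... */
    if (prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0) != 0) {
        perror("PR_SET_CHILD_SUBREAPER");
        return 1;
    }
    /* ...while the GET variant writes the result through an int pointer. */
    if (prctl(PR_GET_CHILD_SUBREAPER, &reaper, 0, 0, 0) != 0) {
        perror("PR_GET_CHILD_SUBREAPER");
        return 1;
    }
    printf("child_subreaper = %d\n", reaper);
    return 0;
}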
@@ -6522,7 +6534,7 @@ static void *clone_func(void *arg)
     env = info->env;
     cpu = env_cpu(env);
     thread_cpu = cpu;
-    ts = (TaskState *)cpu->opaque;
+    ts = get_task_state(cpu);
     info->tid = sys_gettid();
     task_settid(ts);
     if (info->child_tidptr)
@@ -6564,7 +6576,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
     flags &= ~(CLONE_VFORK | CLONE_VM);
 
     if (flags & CLONE_VM) {
-        TaskState *parent_ts = (TaskState *)cpu->opaque;
+        TaskState *parent_ts = get_task_state(cpu);
         new_thread_info info;
         pthread_attr_t attr;
@@ -6676,7 +6688,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
         if (ret == 0) {
             /* Child Process. */
             cpu_clone_regs_child(env, newsp, flags);
-            fork_end(1);
+            fork_end(ret);
             /* There is a race condition here. The parent process could
                theoretically read the TID in the child process before the child
                tid is set. This would require using either ptrace
@@ -6687,7 +6699,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                 put_user_u32(sys_gettid(), child_tidptr);
             if (flags & CLONE_PARENT_SETTID)
                 put_user_u32(sys_gettid(), parent_tidptr);
-            ts = (TaskState *)cpu->opaque;
+            ts = get_task_state(cpu);
             if (flags & CLONE_SETTLS)
                 cpu_set_tls (env, newtls);
             if (flags & CLONE_CHILD_CLEARTID)
@@ -6707,8 +6719,8 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
             }
 #endif
                 put_user_u32(pid_fd, parent_tidptr);
-            }
-        fork_end(0);
+        }
+        fork_end(ret);
     }
     g_assert(!cpu_in_exclusive_context(cpu));
 }
@@ -7953,7 +7965,7 @@ int host_to_target_waitstatus(int status)
 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
 {
     CPUState *cpu = env_cpu(cpu_env);
-    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
+    struct linux_binprm *bprm = get_task_state(cpu)->bprm;
     int i;
 
     for (i = 0; i < bprm->argc; i++) {
@@ -8001,6 +8013,10 @@ static void open_self_maps_4(const struct open_self_maps_data *d,
         path = "[heap]";
     } else if (start == info->vdso) {
         path = "[vdso]";
+#ifdef TARGET_X86_64
+    } else if (start == TARGET_VSYSCALL_PAGE) {
+        path = "[vsyscall]";
+#endif
     }
 
     /* Except null device (MAP_ANON), adjust offset for this fragment. */
@@ -8089,6 +8105,18 @@ static int open_self_maps_2(void *opaque, target_ulong guest_start,
     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
 
+#ifdef TARGET_X86_64
+    /*
+     * Because of the extremely high position of the page within the guest
+     * virtual address space, this is not backed by host memory at all.
+     * Therefore the loop below would fail. This is the only instance
+     * of not having host backing memory.
+     */
+    if (guest_start == TARGET_VSYSCALL_PAGE) {
+        return open_self_maps_3(opaque, guest_start, guest_end, flags);
+    }
+#endif
+
     while (1) {
         IntervalTreeNode *n =
             interval_tree_iter_first(d->host_maps, host_start, host_start);
@@ -8137,7 +8165,7 @@ static int open_self_smaps(CPUArchState *cpu_env, int fd)
 static int open_self_stat(CPUArchState *cpu_env, int fd)
 {
     CPUState *cpu = env_cpu(cpu_env);
-    TaskState *ts = cpu->opaque;
+    TaskState *ts = get_task_state(cpu);
     g_autoptr(GString) buf = g_string_new(NULL);
     int i;
@@ -8178,7 +8206,7 @@ static int open_self_stat(CPUArchState *cpu_env, int fd)
 static int open_self_auxv(CPUArchState *cpu_env, int fd)
 {
     CPUState *cpu = env_cpu(cpu_env);
-    TaskState *ts = cpu->opaque;
+    TaskState *ts = get_task_state(cpu);
     abi_ulong auxv = ts->info->saved_auxv;
     abi_ulong len = ts->info->auxv_len;
     char *ptr;
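With the open_self_maps_* changes above, an x86_64 guest reading the emulated /proc/self/maps should now see a synthetic [vsyscall] entry even though that page has no host backing. A small guest-side check (the address shown in the comment is illustrative; on real kernels the line is typically ffffffffff600000-ffffffffff601000 --xp ... [vsyscall]):

/* Guest-side check that the emulated /proc/self/maps now lists [vsyscall]. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[512];
    FILE *f = fopen("/proc/self/maps", "r");

    if (!f) {
        perror("/proc/self/maps");
        return 1;
    }
    while (fgets(line, sizeof(line), f)) {
        if (strstr(line, "[vsyscall]")) {
            fputs(line, stdout);  /* e.g. ffffffffff600000-ffffffffff601000 --xp ... [vsyscall] */
        }
    }
    fclose(f);
    return 0;
}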
@@ -8802,13 +8830,43 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
 #define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
 
-#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
-#define RISCV_HWPROBE_IMA_FD (1 << 0)
-#define RISCV_HWPROBE_IMA_C (1 << 1)
-#define RISCV_HWPROBE_IMA_V (1 << 2)
-#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
-#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
-#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
+#define RISCV_HWPROBE_KEY_IMA_EXT_0      4
+#define RISCV_HWPROBE_IMA_FD             (1 << 0)
+#define RISCV_HWPROBE_IMA_C              (1 << 1)
+#define RISCV_HWPROBE_IMA_V              (1 << 2)
+#define RISCV_HWPROBE_EXT_ZBA            (1 << 3)
+#define RISCV_HWPROBE_EXT_ZBB            (1 << 4)
+#define RISCV_HWPROBE_EXT_ZBS            (1 << 5)
+#define RISCV_HWPROBE_EXT_ZICBOZ         (1 << 6)
+#define RISCV_HWPROBE_EXT_ZBC            (1 << 7)
+#define RISCV_HWPROBE_EXT_ZBKB           (1 << 8)
+#define RISCV_HWPROBE_EXT_ZBKC           (1 << 9)
+#define RISCV_HWPROBE_EXT_ZBKX           (1 << 10)
+#define RISCV_HWPROBE_EXT_ZKND           (1 << 11)
+#define RISCV_HWPROBE_EXT_ZKNE           (1 << 12)
+#define RISCV_HWPROBE_EXT_ZKNH           (1 << 13)
+#define RISCV_HWPROBE_EXT_ZKSED          (1 << 14)
+#define RISCV_HWPROBE_EXT_ZKSH           (1 << 15)
+#define RISCV_HWPROBE_EXT_ZKT            (1 << 16)
+#define RISCV_HWPROBE_EXT_ZVBB           (1 << 17)
+#define RISCV_HWPROBE_EXT_ZVBC           (1 << 18)
+#define RISCV_HWPROBE_EXT_ZVKB           (1 << 19)
+#define RISCV_HWPROBE_EXT_ZVKG           (1 << 20)
+#define RISCV_HWPROBE_EXT_ZVKNED         (1 << 21)
+#define RISCV_HWPROBE_EXT_ZVKNHA         (1 << 22)
+#define RISCV_HWPROBE_EXT_ZVKNHB         (1 << 23)
+#define RISCV_HWPROBE_EXT_ZVKSED         (1 << 24)
+#define RISCV_HWPROBE_EXT_ZVKSH          (1 << 25)
+#define RISCV_HWPROBE_EXT_ZVKT           (1 << 26)
+#define RISCV_HWPROBE_EXT_ZFH            (1 << 27)
+#define RISCV_HWPROBE_EXT_ZFHMIN         (1 << 28)
+#define RISCV_HWPROBE_EXT_ZIHINTNTL      (1 << 29)
+#define RISCV_HWPROBE_EXT_ZVFH           (1 << 30)
+#define RISCV_HWPROBE_EXT_ZVFHMIN        (1 << 31)
+#define RISCV_HWPROBE_EXT_ZFA            (1ULL << 32)
+#define RISCV_HWPROBE_EXT_ZTSO           (1ULL << 33)
+#define RISCV_HWPROBE_EXT_ZACAS          (1ULL << 34)
+#define RISCV_HWPROBE_EXT_ZICOND         (1ULL << 35)
 
 #define RISCV_HWPROBE_KEY_CPUPERF_0 5
 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
@@ -8867,6 +8925,66 @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env,
                      RISCV_HWPROBE_EXT_ZBB : 0;
             value |= cfg->ext_zbs ?
                      RISCV_HWPROBE_EXT_ZBS : 0;
+            value |= cfg->ext_zicboz ?
+                     RISCV_HWPROBE_EXT_ZICBOZ : 0;
+            value |= cfg->ext_zbc ?
+                     RISCV_HWPROBE_EXT_ZBC : 0;
+            value |= cfg->ext_zbkb ?
+                     RISCV_HWPROBE_EXT_ZBKB : 0;
+            value |= cfg->ext_zbkc ?
+                     RISCV_HWPROBE_EXT_ZBKC : 0;
+            value |= cfg->ext_zbkx ?
+                     RISCV_HWPROBE_EXT_ZBKX : 0;
+            value |= cfg->ext_zknd ?
+                     RISCV_HWPROBE_EXT_ZKND : 0;
+            value |= cfg->ext_zkne ?
+                     RISCV_HWPROBE_EXT_ZKNE : 0;
+            value |= cfg->ext_zknh ?
+                     RISCV_HWPROBE_EXT_ZKNH : 0;
+            value |= cfg->ext_zksed ?
+                     RISCV_HWPROBE_EXT_ZKSED : 0;
+            value |= cfg->ext_zksh ?
+                     RISCV_HWPROBE_EXT_ZKSH : 0;
+            value |= cfg->ext_zkt ?
+                     RISCV_HWPROBE_EXT_ZKT : 0;
+            value |= cfg->ext_zvbb ?
+                     RISCV_HWPROBE_EXT_ZVBB : 0;
+            value |= cfg->ext_zvbc ?
+                     RISCV_HWPROBE_EXT_ZVBC : 0;
+            value |= cfg->ext_zvkb ?
+                     RISCV_HWPROBE_EXT_ZVKB : 0;
+            value |= cfg->ext_zvkg ?
+                     RISCV_HWPROBE_EXT_ZVKG : 0;
+            value |= cfg->ext_zvkned ?
+                     RISCV_HWPROBE_EXT_ZVKNED : 0;
+            value |= cfg->ext_zvknha ?
+                     RISCV_HWPROBE_EXT_ZVKNHA : 0;
+            value |= cfg->ext_zvknhb ?
+                     RISCV_HWPROBE_EXT_ZVKNHB : 0;
+            value |= cfg->ext_zvksed ?
+                     RISCV_HWPROBE_EXT_ZVKSED : 0;
+            value |= cfg->ext_zvksh ?
+                     RISCV_HWPROBE_EXT_ZVKSH : 0;
+            value |= cfg->ext_zvkt ?
+                     RISCV_HWPROBE_EXT_ZVKT : 0;
+            value |= cfg->ext_zfh ?
+                     RISCV_HWPROBE_EXT_ZFH : 0;
+            value |= cfg->ext_zfhmin ?
+                     RISCV_HWPROBE_EXT_ZFHMIN : 0;
+            value |= cfg->ext_zihintntl ?
+                     RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
+            value |= cfg->ext_zvfh ?
+                     RISCV_HWPROBE_EXT_ZVFH : 0;
+            value |= cfg->ext_zvfhmin ?
+                     RISCV_HWPROBE_EXT_ZVFHMIN : 0;
+            value |= cfg->ext_zfa ?
+                     RISCV_HWPROBE_EXT_ZFA : 0;
+            value |= cfg->ext_ztso ?
+                     RISCV_HWPROBE_EXT_ZTSO : 0;
+            value |= cfg->ext_zacas ?
+                     RISCV_HWPROBE_EXT_ZACAS : 0;
+            value |= cfg->ext_zicond ?
+                     RISCV_HWPROBE_EXT_ZICOND : 0;
             __put_user(value, &pair->value);
             break;
         case RISCV_HWPROBE_KEY_CPUPERF_0:
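These bits mirror the kernel's riscv_hwprobe(2) interface, so a guest can now discover the additional extensions reported by the emulated CPU. A hedged sketch of the guest-side query (the struct layout and five-argument syscall form follow the kernel documentation; __NR_riscv_hwprobe is only defined for RISC-V builds):

/* Sketch: query RISCV_HWPROBE_KEY_IMA_EXT_0 from a RISC-V guest. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

struct riscv_hwprobe {
    int64_t  key;
    uint64_t value;
};

#define RISCV_HWPROBE_KEY_IMA_EXT_0  4
#define RISCV_HWPROBE_EXT_ZBA        (1 << 3)
#define RISCV_HWPROBE_EXT_ZICOND     (1ULL << 35)

int main(void)
{
#ifdef __NR_riscv_hwprobe
    struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

    /* arguments: pairs, pair_count, cpusetsize, cpus, flags */
    if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0) {
        printf("IMA_EXT_0 = 0x%llx (Zba:%d Zicond:%d)\n",
               (unsigned long long)pair.value,
               !!(pair.value & RISCV_HWPROBE_EXT_ZBA),
               !!(pair.value & RISCV_HWPROBE_EXT_ZICOND));
    }
#endif
    return 0;
}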
@@ -9006,7 +9124,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
         pthread_mutex_lock(&clone_lock);
 
         if (CPU_NEXT(first_cpu)) {
-            TaskState *ts = cpu->opaque;
+            TaskState *ts = get_task_state(cpu);
 
             if (ts->child_tidptr) {
                 put_user_u32(0, ts->child_tidptr);
@@ -9164,14 +9282,24 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
 #ifdef TARGET_NR_waitid
     case TARGET_NR_waitid:
         {
+            struct rusage ru;
             siginfo_t info;
-            info.si_pid = 0;
-            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
-            if (!is_error(ret) && arg3 && info.si_pid != 0) {
-                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
+
+            ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
+                                        arg4, (arg5 ? &ru : NULL)));
+            if (!is_error(ret)) {
+                if (arg3) {
+                    p = lock_user(VERIFY_WRITE, arg3,
+                                  sizeof(target_siginfo_t), 0);
+                    if (!p) {
+                        return -TARGET_EFAULT;
+                    }
+                    host_to_target_siginfo(p, &info);
+                    unlock_user(p, arg3, sizeof(target_siginfo_t));
+                }
+                if (arg5 && host_to_target_rusage(arg5, &ru)) {
                     return -TARGET_EFAULT;
-                host_to_target_siginfo(p, &info);
-                unlock_user(p, arg3, sizeof(target_siginfo_t));
+                }
             }
         }
         return ret;
@@ -9433,7 +9561,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
 #ifdef TARGET_NR_pause /* not on alpha */
     case TARGET_NR_pause:
         if (!block_signals()) {
-            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
+            sigsuspend(&get_task_state(cpu)->signal_mask);
         }
         return -TARGET_EINTR;
 #endif
@@ -9999,7 +10127,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
             sigset_t *set;
 
 #if defined(TARGET_ALPHA)
-            TaskState *ts = cpu->opaque;
+            TaskState *ts = get_task_state(cpu);
             /* target_to_host_old_sigset will bswap back */
             abi_ulong mask = tswapal(arg1);
             set = &ts->sigsuspend_mask;
@@ -10400,7 +10528,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
     case TARGET_NR_mprotect:
         arg1 = cpu_untagged_addr(cpu, arg1);
         {
-            TaskState *ts = cpu->opaque;
+            TaskState *ts = get_task_state(cpu);
             /* Special hack to detect libc making the stack executable. */
             if ((arg3 & PROT_GROWSDOWN)
                 && arg1 >= ts->info->stack_limit
@@ -12531,7 +12659,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
         return do_set_thread_area(cpu_env, arg1);
 #elif defined(TARGET_M68K)
         {
-            TaskState *ts = cpu->opaque;
+            TaskState *ts = get_task_state(cpu);
             ts->tp_value = arg1;
             return 0;
         }
@@ -12545,7 +12673,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
         return do_get_thread_area(cpu_env, arg1);
 #elif defined(TARGET_M68K)
         {
-            TaskState *ts = cpu->opaque;
+            TaskState *ts = get_task_state(cpu);
             return ts->tp_value;
         }
 #else
@@ -12670,7 +12798,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
 #if defined(TARGET_NR_set_tid_address)
     case TARGET_NR_set_tid_address:
     {
-        TaskState *ts = cpu->opaque;
+        TaskState *ts = get_task_state(cpu);
         ts->child_tidptr = arg1;
         /* do not call host set_tid_address() syscall, instead return tid() */
         return get_errno(sys_gettid());
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 77ba343c850..744fda599e4 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -943,6 +943,9 @@ struct target_rtc_pll_info {
 #define TARGET_FICLONE    TARGET_IOW(0x94, 9, abi_int)
 #define TARGET_FICLONERANGE TARGET_IOW(0x94, 13, struct file_clone_range)
 
+#define TARGET_FIFREEZE   TARGET_IOWR('X', 119, abi_int)
+#define TARGET_FITHAW     TARGET_IOWR('X', 120, abi_int)
+
 /*
  * Note that the ioctl numbers for FS_IOC_
  * claim type "long" but the actual type used by the kernel is "int".
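TARGET_FIFREEZE and TARGET_FITHAW correspond to the kernel's filesystem freeze ioctls from <linux/fs.h>, which take an int argument that the kernel ignores. A native sketch of the interface now translated for the guest (needs root privileges and a mounted filesystem path; illustrative only):

/* Sketch of the FIFREEZE/FITHAW ioctl pair that the new TARGET_ numbers map to. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>      /* FIFREEZE, FITHAW */

int main(int argc, char **argv)
{
    const char *mnt = argc > 1 ? argv[1] : "/mnt";
    int fd = open(mnt, O_RDONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, FIFREEZE, 0) != 0) {        /* freeze the filesystem */
        perror("FIFREEZE");
    } else if (ioctl(fd, FITHAW, 0) != 0) {   /* thaw it again */
        perror("FITHAW");
    }
    close(fd);
    return 0;
}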
diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h index c63ef45fc78..ce11d9e21c1 100644 --- a/linux-user/user-internals.h +++ b/linux-user/user-internals.h @@ -71,7 +71,7 @@ const char *target_strerror(int err); int get_osversion(void); void init_qemu_uname_release(void); void fork_start(void); -void fork_end(int child); +void fork_end(pid_t pid); /** * probe_guest_base: diff --git a/linux-user/vm86.c b/linux-user/vm86.c index c2facf3fc2d..9f512a2242b 100644 --- a/linux-user/vm86.c +++ b/linux-user/vm86.c @@ -74,7 +74,7 @@ static inline unsigned int vm_getl(CPUX86State *env, void save_v86_state(CPUX86State *env) { CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); struct target_vm86plus_struct * target_v86; if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0)) @@ -134,7 +134,7 @@ static inline void return_to_32bit(CPUX86State *env, int retval) static inline int set_IF(CPUX86State *env) { CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); ts->v86flags |= VIF_MASK; if (ts->v86flags & VIP_MASK) { @@ -147,7 +147,7 @@ static inline int set_IF(CPUX86State *env) static inline void clear_IF(CPUX86State *env) { CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); ts->v86flags &= ~VIF_MASK; } @@ -165,7 +165,7 @@ static inline void clear_AC(CPUX86State *env) static inline int set_vflags_long(unsigned long eflags, CPUX86State *env) { CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); set_flags(ts->v86flags, eflags, ts->v86mask); set_flags(env->eflags, eflags, SAFE_MASK); @@ -179,7 +179,7 @@ static inline int set_vflags_long(unsigned long eflags, CPUX86State *env) static inline int set_vflags_short(unsigned short flags, CPUX86State *env) { CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); set_flags(ts->v86flags, flags, ts->v86mask & 0xffff); set_flags(env->eflags, flags, SAFE_MASK); @@ -193,7 +193,7 @@ static inline int set_vflags_short(unsigned short flags, CPUX86State *env) static inline unsigned int get_vflags(CPUX86State *env) { CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); unsigned int flags; flags = env->eflags & RETURN_MASK; @@ -210,7 +210,7 @@ static inline unsigned int get_vflags(CPUX86State *env) static void do_int(CPUX86State *env, int intno) { CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); uint32_t int_addr, segoffs, ssp; unsigned int sp; @@ -269,7 +269,7 @@ void handle_vm86_trap(CPUX86State *env, int trapno) void handle_vm86_fault(CPUX86State *env) { CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); uint32_t csp, ssp; unsigned int ip, sp, newflags, newip, newcs, opcode, intno; int data32, pref_done; @@ -394,7 +394,7 @@ void handle_vm86_fault(CPUX86State *env) int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr) { CPUState *cs = env_cpu(env); - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); struct target_vm86plus_struct * target_v86; int ret; diff --git a/linux-user/xtensa/signal.c b/linux-user/xtensa/signal.c index 32dcfa52291..6514b8dd57f 100644 --- a/linux-user/xtensa/signal.c +++ b/linux-user/xtensa/signal.c @@ -157,7 +157,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, { abi_ulong frame_addr; struct target_rt_sigframe *frame; - int 
is_fdpic = info_is_fdpic(((TaskState *)thread_cpu->opaque)->info); + int is_fdpic = info_is_fdpic(get_task_state(thread_cpu)->info); abi_ulong handler = 0; abi_ulong handler_fdpic_GOT = 0; uint32_t ra; @@ -184,7 +184,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, } if (ka->sa_flags & SA_SIGINFO) { - tswap_siginfo(&frame->info, info); + frame->info = *info; } __put_user(0, &frame->uc.tuc_flags); diff --git a/meson.build b/meson.build index 0f8fd794ec2..18d446f09a7 100644 --- a/meson.build +++ b/meson.build @@ -9,28 +9,19 @@ add_test_setup('thorough', env: ['G_TEST_SLOW=1', 'SPEED=thorough']) meson.add_postconf_script(find_program('scripts/symlink-install-tree.py')) +#################### +# Global variables # +#################### + not_found = dependency('', required: false) keyval = import('keyval') ss = import('sourceset') fs = import('fs') cmake = import('cmake') -targetos = host_machine.system() -sh = find_program('sh') +host_os = host_machine.system() config_host = keyval.load(meson.current_build_dir() / 'config-host.mak') -cc = meson.get_compiler('c') -all_languages = ['c'] -if targetos == 'windows' and add_languages('cpp', required: false, native: false) - all_languages += ['cpp'] - cxx = meson.get_compiler('cpp') -endif -if targetos == 'darwin' and \ - add_languages('objc', required: get_option('cocoa'), native: false) - all_languages += ['objc'] - objc = meson.get_compiler('objc') -endif - # Temporary directory used for files created while # configure runs. Since it is in the build directory # we can safely blow away any previous version of it @@ -50,7 +41,6 @@ qemu_moddir = get_option('libdir') / get_option('qemu_suffix') qemu_desktopdir = get_option('datadir') / 'applications' qemu_icondir = get_option('datadir') / 'icons' -config_host_data = configuration_data() genh = [] qapi_trace_events = [] @@ -62,6 +52,127 @@ supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv32', 'riscv64', 'x86', 'x86_64' cpu = host_machine.cpu_family() target_dirs = config_host['TARGET_DIRS'].split() + +############ +# Programs # +############ + +sh = find_program('sh') +python = import('python').find_installation() + +cc = meson.get_compiler('c') +all_languages = ['c'] +if host_os == 'windows' and add_languages('cpp', required: false, native: false) + all_languages += ['cpp'] + cxx = meson.get_compiler('cpp') +endif +if host_os == 'darwin' and \ + add_languages('objc', required: true, native: false) + all_languages += ['objc'] + objc = meson.get_compiler('objc') +endif + +dtrace = not_found +stap = not_found +if 'dtrace' in get_option('trace_backends') + dtrace = find_program('dtrace', required: true) + stap = find_program('stap', required: false) + if stap.found() + # Workaround to avoid dtrace(1) producing a file with 'hidden' symbol + # visibility. Define STAP_SDT_V2 to produce 'default' symbol visibility + # instead. QEMU --enable-modules depends on this because the SystemTap + # semaphores are linked into the main binary and not the module's shared + # object. 
+ add_global_arguments('-DSTAP_SDT_V2', + native: false, language: all_languages) + endif +endif + +if get_option('iasl') == '' + iasl = find_program('iasl', required: false) +else + iasl = find_program(get_option('iasl'), required: true) +endif + +edk2_targets = [ 'arm-softmmu', 'aarch64-softmmu', 'i386-softmmu', 'x86_64-softmmu' ] +unpack_edk2_blobs = false +foreach target : edk2_targets + if target in target_dirs + bzip2 = find_program('bzip2', required: get_option('install_blobs')) + unpack_edk2_blobs = bzip2.found() + break + endif +endforeach + +##################### +# Option validation # +##################### + +# Fuzzing +if get_option('fuzzing') and get_option('fuzzing_engine') == '' and \ + not cc.links(''' + #include + #include + int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); + int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { return 0; } + ''', + args: ['-Werror', '-fsanitize=fuzzer']) + error('Your compiler does not support -fsanitize=fuzzer') +endif + +# Tracing backends +if 'ftrace' in get_option('trace_backends') and host_os != 'linux' + error('ftrace is supported only on Linux') +endif +if 'syslog' in get_option('trace_backends') and not cc.compiles(''' + #include + int main(void) { + openlog("qemu", LOG_PID, LOG_DAEMON); + syslog(LOG_INFO, "configure"); + return 0; + }''') + error('syslog is not supported on this system') +endif + +# Miscellaneous Linux-only features +get_option('mpath') \ + .require(host_os == 'linux', error_message: 'Multipath is supported only on Linux') + +multiprocess_allowed = get_option('multiprocess') \ + .require(host_os == 'linux', error_message: 'Multiprocess QEMU is supported only on Linux') \ + .allowed() + +vfio_user_server_allowed = get_option('vfio_user_server') \ + .require(host_os == 'linux', error_message: 'vfio-user server is supported only on Linux') \ + .allowed() + +have_tpm = get_option('tpm') \ + .require(host_os != 'windows', error_message: 'TPM emulation only available on POSIX systems') \ + .allowed() + +# vhost +have_vhost_user = get_option('vhost_user') \ + .disable_auto_if(host_os != 'linux') \ + .require(host_os != 'windows', + error_message: 'vhost-user is not available on Windows').allowed() +have_vhost_vdpa = get_option('vhost_vdpa') \ + .require(host_os == 'linux', + error_message: 'vhost-vdpa is only available on Linux').allowed() +have_vhost_kernel = get_option('vhost_kernel') \ + .require(host_os == 'linux', + error_message: 'vhost-kernel is only available on Linux').allowed() +have_vhost_user_crypto = get_option('vhost_crypto') \ + .require(have_vhost_user, + error_message: 'vhost-crypto requires vhost-user to be enabled').allowed() + +have_vhost = have_vhost_user or have_vhost_vdpa or have_vhost_kernel + +have_vhost_net_user = have_vhost_user and get_option('vhost_net').allowed() +have_vhost_net_vdpa = have_vhost_vdpa and get_option('vhost_net').allowed() +have_vhost_net_kernel = have_vhost_kernel and get_option('vhost_net').allowed() +have_vhost_net = have_vhost_net_kernel or have_vhost_net_user or have_vhost_net_vdpa + +# type of binaries to build have_linux_user = false have_bsd_user = false have_system = false @@ -71,23 +182,27 @@ foreach target : target_dirs have_system = have_system or target.endswith('-softmmu') endforeach have_user = have_linux_user or have_bsd_user + have_tools = get_option('tools') \ .disable_auto_if(not have_system) \ .allowed() have_ga = get_option('guest_agent') \ .disable_auto_if(not have_system and not have_tools) \ - .require(targetos in ['sunos', 
'linux', 'windows', 'freebsd', 'netbsd', 'openbsd'], + .require(host_os in ['sunos', 'linux', 'windows', 'freebsd', 'netbsd', 'openbsd'], error_message: 'unsupported OS for QEMU guest agent') \ .allowed() +have_block = have_system or have_tools + enable_modules = get_option('modules') \ - .require(targetos != 'windows', + .require(host_os != 'windows', error_message: 'Modules are not available for Windows') \ .require(not get_option('prefer_static'), error_message: 'Modules are incompatible with static linking') \ .allowed() -have_block = have_system or have_tools -python = import('python').find_installation() +####################################### +# Variables for host and accelerators # +####################################### if cpu not in supported_cpus host_arch = 'unknown' @@ -115,15 +230,11 @@ elif cpu in ['riscv32'] kvm_targets = ['riscv32-softmmu'] elif cpu in ['riscv64'] kvm_targets = ['riscv64-softmmu'] +elif cpu in ['loongarch64'] + kvm_targets = ['loongarch64-softmmu'] else kvm_targets = [] endif - -kvm_targets_c = '""' -if get_option('kvm').allowed() and targetos == 'linux' - kvm_targets_c = '"' + '" ,"'.join(kvm_targets) + '"' -endif -config_host_data.set('CONFIG_KVM_TARGETS', kvm_targets_c) accelerator_targets = { 'CONFIG_KVM': kvm_targets } if cpu in ['x86', 'x86_64'] @@ -152,42 +263,10 @@ endif modular_tcg = [] # Darwin does not support references to thread-local variables in modules -if targetos != 'darwin' +if host_os != 'darwin' modular_tcg = ['i386-softmmu', 'x86_64-softmmu'] endif -edk2_targets = [ 'arm-softmmu', 'aarch64-softmmu', 'i386-softmmu', 'x86_64-softmmu' ] -unpack_edk2_blobs = false -foreach target : edk2_targets - if target in target_dirs - bzip2 = find_program('bzip2', required: get_option('install_blobs')) - unpack_edk2_blobs = bzip2.found() - break - endif -endforeach - -dtrace = not_found -stap = not_found -if 'dtrace' in get_option('trace_backends') - dtrace = find_program('dtrace', required: true) - stap = find_program('stap', required: false) - if stap.found() - # Workaround to avoid dtrace(1) producing a file with 'hidden' symbol - # visibility. Define STAP_SDT_V2 to produce 'default' symbol visibility - # instead. QEMU --enable-modules depends on this because the SystemTap - # semaphores are linked into the main binary and not the module's shared - # object. - add_global_arguments('-DSTAP_SDT_V2', - native: false, language: all_languages) - endif -endif - -if get_option('iasl') == '' - iasl = find_program('iasl', required: false) -else - iasl = find_program(get_option('iasl'), required: true) -endif - ################## # Compiler flags # ################## @@ -224,18 +303,18 @@ qemu_common_flags = [ qemu_cflags = [] qemu_ldflags = [] -if targetos == 'darwin' +if host_os == 'darwin' # Disable attempts to use ObjectiveC features in os/object.h since they # won't work when we're compiling with gcc as a C compiler. if compiler.get_id() == 'gcc' qemu_common_flags += '-DOS_OBJECT_USE_OBJC=0' endif -elif targetos == 'sunos' +elif host_os == 'sunos' # needed for CMSG_ macros in sys/socket.h qemu_common_flags += '-D_XOPEN_SOURCE=600' # needed for TIOCWIN* defines in termios.h qemu_common_flags += '-D__EXTENSIONS__' -elif targetos == 'haiku' +elif host_os == 'haiku' qemu_common_flags += ['-DB_USE_POSITIVE_POSIX_ERRORS', '-D_BSD_SOURCE', '-fPIC'] endif @@ -319,10 +398,10 @@ ucontext_probe = ''' # For POSIX prefer ucontext, but it's not always possible. The fallback # is sigcontext. 
supported_backends = [] -if targetos == 'windows' +if host_os == 'windows' supported_backends += ['windows'] else - if targetos != 'darwin' and cc.links(ucontext_probe) + if host_os != 'darwin' and cc.links(ucontext_probe) supported_backends += ['ucontext'] endif supported_backends += ['sigaltstack'] @@ -393,16 +472,11 @@ endif # The combination is known as "full relro", because .got.plt is read-only too. qemu_ldflags += cc.get_supported_link_arguments('-Wl,-z,relro', '-Wl,-z,now') -if targetos == 'windows' +if host_os == 'windows' qemu_ldflags += cc.get_supported_link_arguments('-Wl,--no-seh', '-Wl,--nxcompat') qemu_ldflags += cc.get_supported_link_arguments('-Wl,--dynamicbase', '-Wl,--high-entropy-va') endif -# Exclude --warn-common with TSan to suppress warnings from the TSan libraries. -if targetos != 'sunos' and not get_option('tsan') - qemu_ldflags += cc.get_supported_link_arguments('-Wl,--warn-common') -endif - if get_option('fuzzing') # Specify a filter to only instrument code that is directly related to # virtual-devices. @@ -434,42 +508,112 @@ if get_option('fuzzing') endif endif +if get_option('cfi') + cfi_flags=[] + # Check for dependency on LTO + if not get_option('b_lto') + error('Selected Control-Flow Integrity but LTO is disabled') + endif + if enable_modules + error('Selected Control-Flow Integrity is not compatible with modules') + endif + # Check for cfi flags. CFI requires LTO so we can't use + # get_supported_arguments, but need a more complex "compiles" which allows + # custom arguments + if cc.compiles('int main () { return 0; }', name: '-fsanitize=cfi-icall', + args: ['-flto', '-fsanitize=cfi-icall'] ) + cfi_flags += '-fsanitize=cfi-icall' + else + error('-fsanitize=cfi-icall is not supported by the compiler') + endif + if cc.compiles('int main () { return 0; }', + name: '-fsanitize-cfi-icall-generalize-pointers', + args: ['-flto', '-fsanitize=cfi-icall', + '-fsanitize-cfi-icall-generalize-pointers'] ) + cfi_flags += '-fsanitize-cfi-icall-generalize-pointers' + else + error('-fsanitize-cfi-icall-generalize-pointers is not supported by the compiler') + endif + if get_option('cfi_debug') + if cc.compiles('int main () { return 0; }', + name: '-fno-sanitize-trap=cfi-icall', + args: ['-flto', '-fsanitize=cfi-icall', + '-fno-sanitize-trap=cfi-icall'] ) + cfi_flags += '-fno-sanitize-trap=cfi-icall' + else + error('-fno-sanitize-trap=cfi-icall is not supported by the compiler') + endif + endif + add_global_arguments(cfi_flags, native: false, language: all_languages) + add_global_link_arguments(cfi_flags, native: false, language: all_languages) +endif + +# Check further flags that make QEMU more robust against malicious parties + +hardening_flags = [ + # Initialize all stack variables to zero. This makes + # it harder to take advantage of uninitialized stack + # data to drive exploits + '-ftrivial-auto-var-init=zero', +] + +# Zero out registers used during a function call +# upon its return. 
This makes it harder to assemble +# ROP gadgets into something usable +# +# NB: Clang 17 is broken and SEGVs +# https://github.com/llvm/llvm-project/issues/75168 +# +# NB2: This clashes with the "retguard" extension of OpenBSD's Clang +# https://gitlab.com/qemu-project/qemu/-/issues/2278 +if host_os != 'openbsd' and \ + cc.compiles('extern struct { void (*cb)(void); } s; void f(void) { s.cb(); }', + name: '-fzero-call-used-regs=used-gpr', + args: ['-O2', '-fzero-call-used-regs=used-gpr']) + hardening_flags += '-fzero-call-used-regs=used-gpr' +endif + +qemu_common_flags += cc.get_supported_arguments(hardening_flags) + add_global_arguments(qemu_common_flags, native: false, language: all_languages) add_global_link_arguments(qemu_ldflags, native: false, language: all_languages) -# Collect warnings that we want to enable - +# Collect warning flags we want to set, sorted alphabetically warn_flags = [ - '-Wundef', - '-Wwrite-strings', - '-Wmissing-prototypes', - '-Wstrict-prototypes', - '-Wredundant-decls', - '-Wold-style-declaration', - '-Wold-style-definition', - '-Wtype-limits', - '-Wformat-security', - '-Wformat-y2k', - '-Winit-self', - '-Wignored-qualifiers', + # First enable interesting warnings '-Wempty-body', - '-Wnested-externs', '-Wendif-labels', '-Wexpansion-to-defined', + '-Wformat-security', + '-Wformat-y2k', + '-Wignored-qualifiers', '-Wimplicit-fallthrough=2', + '-Winit-self', '-Wmissing-format-attribute', + '-Wmissing-prototypes', + '-Wnested-externs', + '-Wold-style-declaration', + '-Wold-style-definition', + '-Wredundant-decls', + '-Wshadow=local', + '-Wstrict-prototypes', + '-Wtype-limits', + '-Wundef', + '-Wvla', + '-Wwrite-strings', + + # Then disable some undesirable warnings + '-Wno-gnu-variable-sized-type-not-at-end', '-Wno-initializer-overrides', '-Wno-missing-include-dirs', + '-Wno-psabi', '-Wno-shift-negative-value', '-Wno-string-plus-int', - '-Wno-typedef-redefinition', '-Wno-tautological-type-limit-compare', - '-Wno-psabi', - '-Wno-gnu-variable-sized-type-not-at-end', - '-Wshadow=local', + '-Wno-typedef-redefinition', ] -if targetos != 'darwin' +if host_os != 'darwin' warn_flags += ['-Wthread-safety'] endif @@ -489,105 +633,40 @@ if 'objc' in all_languages # Note sanitizer flags are not applied to Objective-C sources! add_project_arguments(objc.get_supported_arguments(warn_flags), native: false, language: 'objc') endif -if targetos == 'linux' +if host_os == 'linux' add_project_arguments('-isystem', meson.current_source_dir() / 'linux-headers', '-isystem', 'linux-headers', language: all_languages) endif add_project_arguments('-iquote', '.', - '-iquote', meson.current_source_dir(), - '-iquote', meson.current_source_dir() / 'include', - language: all_languages) - -# If a host-specific include directory exists, list that first... -host_include = meson.current_source_dir() / 'host/include/' -if fs.is_dir(host_include / host_arch) - add_project_arguments('-iquote', host_include / host_arch, - language: all_languages) -endif -# ... followed by the generic fallback. 
-add_project_arguments('-iquote', host_include / 'generic', - language: all_languages) - -sparse = find_program('cgcc', required: get_option('sparse')) -if sparse.found() - run_target('sparse', - command: [find_program('scripts/check_sparse.py'), - 'compile_commands.json', sparse.full_path(), '-Wbitwise', - '-Wno-transparent-union', '-Wno-old-initializer', - '-Wno-non-pointer-null']) -endif - -########################################### -# Target-specific checks and dependencies # -########################################### - -# Fuzzing -if get_option('fuzzing') and get_option('fuzzing_engine') == '' and \ - not cc.links(''' - #include - #include - int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); - int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { return 0; } - ''', - args: ['-Werror', '-fsanitize=fuzzer']) - error('Your compiler does not support -fsanitize=fuzzer') -endif - -# Tracing backends -if 'ftrace' in get_option('trace_backends') and targetos != 'linux' - error('ftrace is supported only on Linux') -endif -if 'syslog' in get_option('trace_backends') and not cc.compiles(''' - #include - int main(void) { - openlog("qemu", LOG_PID, LOG_DAEMON); - syslog(LOG_INFO, "configure"); - return 0; - }''') - error('syslog is not supported on this system') -endif - -# Miscellaneous Linux-only features -get_option('mpath') \ - .require(targetos == 'linux', error_message: 'Multipath is supported only on Linux') - -multiprocess_allowed = get_option('multiprocess') \ - .require(targetos == 'linux', error_message: 'Multiprocess QEMU is supported only on Linux') \ - .allowed() - -vfio_user_server_allowed = get_option('vfio_user_server') \ - .require(targetos == 'linux', error_message: 'vfio-user server is supported only on Linux') \ - .allowed() - -have_tpm = get_option('tpm') \ - .require(targetos != 'windows', error_message: 'TPM emulation only available on POSIX systems') \ - .allowed() - -# vhost -have_vhost_user = get_option('vhost_user') \ - .disable_auto_if(targetos != 'linux') \ - .require(targetos != 'windows', - error_message: 'vhost-user is not available on Windows').allowed() -have_vhost_vdpa = get_option('vhost_vdpa') \ - .require(targetos == 'linux', - error_message: 'vhost-vdpa is only available on Linux').allowed() -have_vhost_kernel = get_option('vhost_kernel') \ - .require(targetos == 'linux', - error_message: 'vhost-kernel is only available on Linux').allowed() -have_vhost_user_crypto = get_option('vhost_crypto') \ - .require(have_vhost_user, - error_message: 'vhost-crypto requires vhost-user to be enabled').allowed() + '-iquote', meson.current_source_dir(), + '-iquote', meson.current_source_dir() / 'include', + language: all_languages) -have_vhost = have_vhost_user or have_vhost_vdpa or have_vhost_kernel +# If a host-specific include directory exists, list that first... +host_include = meson.current_source_dir() / 'host/include/' +if fs.is_dir(host_include / host_arch) + add_project_arguments('-iquote', host_include / host_arch, + language: all_languages) +endif +# ... followed by the generic fallback. 
+add_project_arguments('-iquote', host_include / 'generic', + language: all_languages) -have_vhost_net_user = have_vhost_user and get_option('vhost_net').allowed() -have_vhost_net_vdpa = have_vhost_vdpa and get_option('vhost_net').allowed() -have_vhost_net_kernel = have_vhost_kernel and get_option('vhost_net').allowed() -have_vhost_net = have_vhost_net_kernel or have_vhost_net_user or have_vhost_net_vdpa +sparse = find_program('cgcc', required: get_option('sparse')) +if sparse.found() + run_target('sparse', + command: [find_program('scripts/check_sparse.py'), + 'compile_commands.json', sparse.full_path(), '-Wbitwise', + '-Wno-transparent-union', '-Wno-old-initializer', + '-Wno-non-pointer-null']) +endif + +##################################### +# Host-specific libraries and flags # +##################################### -# Target-specific libraries and flags libm = cc.find_library('m', required: false) threads = dependency('threads') util = cc.find_library('util', required: false) @@ -597,13 +676,11 @@ version_res = [] coref = [] iokit = [] emulator_link_args = [] -nvmm =not_found -hvf = not_found midl = not_found widl = not_found pathcch = not_found host_dsosuf = '.so' -if targetos == 'windows' +if host_os == 'windows' midl = find_program('midl', required: false) widl = find_program('widl', required: false) pathcch = cc.find_library('pathcch') @@ -615,31 +692,34 @@ if targetos == 'windows' depend_files: files('pc-bios/qemu-nsis.ico'), include_directories: include_directories('.')) host_dsosuf = '.dll' -elif targetos == 'darwin' +elif host_os == 'darwin' coref = dependency('appleframeworks', modules: 'CoreFoundation') iokit = dependency('appleframeworks', modules: 'IOKit', required: false) host_dsosuf = '.dylib' -elif targetos == 'sunos' +elif host_os == 'sunos' socket = [cc.find_library('socket'), cc.find_library('nsl'), cc.find_library('resolv')] -elif targetos == 'haiku' +elif host_os == 'haiku' socket = [cc.find_library('posix_error_mapper'), cc.find_library('network'), cc.find_library('bsd')] -elif targetos == 'openbsd' +elif host_os == 'openbsd' if get_option('tcg').allowed() and target_dirs.length() > 0 # Disable OpenBSD W^X if available emulator_link_args = cc.get_supported_link_arguments('-Wl,-z,wxneeded') endif endif -# Target-specific configuration of accelerators +############################################### +# Host-specific configuration of accelerators # +############################################### + accelerators = [] -if get_option('kvm').allowed() and targetos == 'linux' +if get_option('kvm').allowed() and host_os == 'linux' accelerators += 'CONFIG_KVM' endif -if get_option('whpx').allowed() and targetos == 'windows' +if get_option('whpx').allowed() and host_os == 'windows' if get_option('whpx').enabled() and host_machine.cpu() != 'x86_64' error('WHPX requires 64-bit host') elif cc.has_header('winhvplatform.h', required: get_option('whpx')) and \ @@ -647,6 +727,8 @@ if get_option('whpx').allowed() and targetos == 'windows' accelerators += 'CONFIG_WHPX' endif endif + +hvf = not_found if get_option('hvf').allowed() hvf = dependency('appleframeworks', modules: 'Hypervisor', required: get_option('hvf')) @@ -654,7 +736,9 @@ if get_option('hvf').allowed() accelerators += 'CONFIG_HVF' endif endif -if targetos == 'netbsd' + +nvmm = not_found +if host_os == 'netbsd' nvmm = cc.find_library('nvmm', required: get_option('nvmm')) if nvmm.found() accelerators += 'CONFIG_NVMM' @@ -701,6 +785,85 @@ if 'CONFIG_WHPX' not in accelerators and get_option('whpx').enabled() error('WHPX not 
available on this platform') endif +xen = not_found +if get_option('xen').enabled() or (get_option('xen').auto() and have_system) + xencontrol = dependency('xencontrol', required: false, + method: 'pkg-config') + if xencontrol.found() + xen_pc = declare_dependency(version: xencontrol.version(), + dependencies: [ + xencontrol, + # disabler: true makes xen_pc.found() return false if any is not found + dependency('xenstore', required: false, + method: 'pkg-config', + disabler: true), + dependency('xenforeignmemory', required: false, + method: 'pkg-config', + disabler: true), + dependency('xengnttab', required: false, + method: 'pkg-config', + disabler: true), + dependency('xenevtchn', required: false, + method: 'pkg-config', + disabler: true), + dependency('xendevicemodel', required: false, + method: 'pkg-config', + disabler: true), + # optional, no "disabler: true" + dependency('xentoolcore', required: false, + method: 'pkg-config')]) + if xen_pc.found() + xen = xen_pc + endif + endif + if not xen.found() + xen_tests = [ '4.11.0', '4.10.0', '4.9.0', '4.8.0', '4.7.1' ] + xen_libs = { + '4.11.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ], + '4.10.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ], + '4.9.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], + '4.8.0': [ 'xenstore', 'xenctrl', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], + '4.7.1': [ 'xenstore', 'xenctrl', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], + } + xen_deps = {} + foreach ver: xen_tests + # cache the various library tests to avoid polluting the logs + xen_test_deps = [] + foreach l: xen_libs[ver] + if l not in xen_deps + xen_deps += { l: cc.find_library(l, required: false) } + endif + xen_test_deps += xen_deps[l] + endforeach + + # Use -D to pick just one of the test programs in scripts/xen-detect.c + xen_version = ver.split('.') + xen_ctrl_version = xen_version[0] + \ + ('0' + xen_version[1]).substring(-2) + \ + ('0' + xen_version[2]).substring(-2) + if cc.links(files('scripts/xen-detect.c'), + args: '-DCONFIG_XEN_CTRL_INTERFACE_VERSION=' + xen_ctrl_version, + dependencies: xen_test_deps) + xen = declare_dependency(version: ver, dependencies: xen_test_deps) + break + endif + endforeach + endif + if xen.found() + accelerators += 'CONFIG_XEN' + elif get_option('xen').enabled() + error('could not compile and link Xen test program') + endif +endif +have_xen_pci_passthrough = get_option('xen_pci_passthrough') \ + .require(xen.found(), + error_message: 'Xen PCI passthrough requested but Xen not enabled') \ + .require(host_os == 'linux', + error_message: 'Xen PCI passthrough not available on this platform') \ + .require(cpu == 'x86' or cpu == 'x86_64', + error_message: 'Xen PCI passthrough not available on this platform') \ + .allowed() + ################ # Dependencies # ################ @@ -723,7 +886,7 @@ endif # This workaround is required due to a bug in pkg-config file for glib as it # doesn't define GLIB_STATIC_COMPILATION for pkg-config --static -if targetos == 'windows' and get_option('prefer_static') +if host_os == 'windows' and get_option('prefer_static') glib_cflags += ['-DGLIB_STATIC_COMPILATION'] endif @@ -984,12 +1147,12 @@ if vde.found() and not cc.links(''' endif pulse = not_found -if not get_option('pa').auto() or (targetos == 'linux' and have_system) +if not get_option('pa').auto() or (host_os == 'linux' and have_system) pulse = 
dependency('libpulse', required: get_option('pa'), method: 'pkg-config') endif alsa = not_found -if not get_option('alsa').auto() or (targetos == 'linux' and have_system) +if not get_option('alsa').auto() or (host_os == 'linux' and have_system) alsa = dependency('alsa', required: get_option('alsa'), method: 'pkg-config') endif @@ -999,7 +1162,7 @@ if not get_option('jack').auto() or have_system method: 'pkg-config') endif pipewire = not_found -if not get_option('pipewire').auto() or (targetos == 'linux' and have_system) +if not get_option('pipewire').auto() or (host_os == 'linux' and have_system) pipewire = dependency('libpipewire-0.3', version: '>=0.3.60', required: get_option('pipewire'), method: 'pkg-config') @@ -1044,17 +1207,11 @@ if not get_option('zstd').auto() or have_block endif virgl = not_found -have_vhost_user_gpu = have_tools and targetos == 'linux' and pixman.found() +have_vhost_user_gpu = have_tools and host_os == 'linux' and pixman.found() if not get_option('virglrenderer').auto() or have_system or have_vhost_user_gpu virgl = dependency('virglrenderer', method: 'pkg-config', required: get_option('virglrenderer')) - if virgl.found() - config_host_data.set('HAVE_VIRGL_D3D_INFO_EXT', - cc.has_member('struct virgl_renderer_resource_info_ext', 'd3d_tex2d', - prefix: '#include ', - dependencies: virgl)) - endif endif rutabaga = not_found if not get_option('rutabaga_gfx').auto() or have_system or have_vhost_user_gpu @@ -1075,7 +1232,7 @@ if not get_option('curl').auto() or have_block required: get_option('curl')) endif libudev = not_found -if targetos == 'linux' and (have_system or have_tools) +if host_os == 'linux' and (have_system or have_tools) libudev = dependency('libudev', method: 'pkg-config', required: get_option('libudev')) @@ -1106,7 +1263,7 @@ endif mpathlibs = [libudev] mpathpersist = not_found -if targetos == 'linux' and have_tools and get_option('mpath').allowed() +if host_os == 'linux' and have_tools and get_option('mpath').allowed() mpath_test_source = ''' #include #include @@ -1177,7 +1334,7 @@ if have_system and get_option('curses').allowed() return 0; }''' - curses_dep_list = targetos == 'windows' ? ['ncurses', 'ncursesw'] : ['ncursesw'] + curses_dep_list = host_os == 'windows' ? ['ncurses', 'ncursesw'] : ['ncursesw'] curses = dependency(curses_dep_list, required: false, method: 'pkg-config') @@ -1194,13 +1351,13 @@ if have_system and get_option('curses').allowed() endif if not curses.found() has_curses_h = cc.has_header('curses.h', args: curses_compile_args) - if targetos != 'windows' and not has_curses_h + if host_os != 'windows' and not has_curses_h message('Trying with /usr/include/ncursesw') curses_compile_args += ['-I/usr/include/ncursesw'] has_curses_h = cc.has_header('curses.h', args: curses_compile_args) endif if has_curses_h - curses_libname_list = (targetos == 'windows' ? ['pdcurses'] : ['ncursesw', 'cursesw']) + curses_libname_list = (host_os == 'windows' ? 
['pdcurses'] : ['ncursesw', 'cursesw']) foreach curses_libname : curses_libname_list libcurses = cc.find_library(curses_libname, required: false) @@ -1423,7 +1580,7 @@ oss = not_found if get_option('oss').allowed() and have_system if not cc.has_header('sys/soundcard.h') # not found - elif targetos == 'netbsd' + elif host_os == 'netbsd' oss = cc.find_library('ossaudio', required: get_option('oss')) else oss = declare_dependency() @@ -1436,7 +1593,7 @@ if get_option('oss').allowed() and have_system endif endif dsound = not_found -if not get_option('dsound').auto() or (targetos == 'windows' and have_system) +if not get_option('dsound').auto() or (host_os == 'windows' and have_system) if cc.has_header('dsound.h') dsound = declare_dependency(link_args: ['-lole32', '-ldxguid']) endif @@ -1449,7 +1606,7 @@ if not get_option('dsound').auto() or (targetos == 'windows' and have_system) endif coreaudio = not_found -if not get_option('coreaudio').auto() or (targetos == 'darwin' and have_system) +if not get_option('coreaudio').auto() or (host_os == 'darwin' and have_system) coreaudio = dependency('appleframeworks', modules: 'CoreAudio', required: get_option('coreaudio')) endif @@ -1507,6 +1664,7 @@ endif gcrypt = not_found nettle = not_found hogweed = not_found +crypto_sm4 = not_found xts = 'none' if get_option('nettle').enabled() and get_option('gcrypt').enabled() @@ -1532,6 +1690,17 @@ if not gnutls_crypto.found() cc.find_library('gpg-error', required: true)], version: gcrypt.version()) endif + crypto_sm4 = gcrypt + # SM4 ALG is available in libgcrypt >= 1.9 + if gcrypt.found() and not cc.links(''' + #include + int main(void) { + gcry_cipher_hd_t handler; + gcry_cipher_open(&handler, GCRY_CIPHER_SM4, GCRY_CIPHER_MODE_ECB, 0); + return 0; + }''', dependencies: gcrypt) + crypto_sm4 = not_found + endif endif if (not get_option('nettle').auto() or have_system) and not gcrypt.found() nettle = dependency('nettle', version: '>=3.4', @@ -1540,6 +1709,37 @@ if not gnutls_crypto.found() if nettle.found() and not cc.has_header('nettle/xts.h', dependencies: nettle) xts = 'private' endif + crypto_sm4 = nettle + # SM4 ALG is available in nettle >= 3.9 + if nettle.found() and not cc.links(''' + #include + int main(void) { + struct sm4_ctx ctx; + unsigned char key[16] = {0}; + sm4_set_encrypt_key(&ctx, key); + return 0; + }''', dependencies: nettle) + crypto_sm4 = not_found + endif + endif +endif + +capstone = not_found +if not get_option('capstone').auto() or have_system or have_user + capstone = dependency('capstone', version: '>=3.0.5', + method: 'pkg-config', + required: get_option('capstone')) + + # Some versions of capstone have broken pkg-config file + # that reports a wrong -I path, causing the #include to + # fail later. If the system has such a broken version + # do not use it. 
+ if capstone.found() and not cc.compiles('#include ', + dependencies: [capstone]) + capstone = not_found + if get_option('capstone').enabled() + error('capstone requested, but it does not appear to work') + endif endif endif @@ -1639,143 +1839,63 @@ if not get_option('snappy').auto() or have_system required: get_option('snappy')) endif if snappy.found() and not cc.links(''' - #include - int main(void) { snappy_max_compressed_length(4096); return 0; }''', dependencies: snappy) - snappy = not_found - if get_option('snappy').enabled() - error('could not link libsnappy') - else - warning('could not link libsnappy, disabling') - endif -endif - -lzo = not_found -if not get_option('lzo').auto() or have_system - lzo = cc.find_library('lzo2', has_headers: ['lzo/lzo1x.h'], - required: get_option('lzo')) -endif -if lzo.found() and not cc.links(''' - #include - int main(void) { lzo_version(); return 0; }''', dependencies: lzo) - lzo = not_found - if get_option('lzo').enabled() - error('could not link liblzo2') - else - warning('could not link liblzo2, disabling') - endif -endif - -numa = not_found -if not get_option('numa').auto() or have_system or have_tools - numa = cc.find_library('numa', has_headers: ['numa.h'], - required: get_option('numa')) -endif -if numa.found() and not cc.links(''' - #include - int main(void) { return numa_available(); } - ''', dependencies: numa) - numa = not_found - if get_option('numa').enabled() - error('could not link numa') - else - warning('could not link numa, disabling') - endif -endif - -rdma = not_found -if not get_option('rdma').auto() or have_system - libumad = cc.find_library('ibumad', required: get_option('rdma')) - rdma_libs = [cc.find_library('rdmacm', has_headers: ['rdma/rdma_cma.h'], - required: get_option('rdma')), - cc.find_library('ibverbs', required: get_option('rdma')), - libumad] - rdma = declare_dependency(dependencies: rdma_libs) - foreach lib: rdma_libs - if not lib.found() - rdma = not_found - endif - endforeach -endif - -xen = not_found -if get_option('xen').enabled() or (get_option('xen').auto() and have_system) - xencontrol = dependency('xencontrol', required: false, - method: 'pkg-config') - if xencontrol.found() - xen_pc = declare_dependency(version: xencontrol.version(), - dependencies: [ - xencontrol, - # disabler: true makes xen_pc.found() return false if any is not found - dependency('xenstore', required: false, - method: 'pkg-config', - disabler: true), - dependency('xenforeignmemory', required: false, - method: 'pkg-config', - disabler: true), - dependency('xengnttab', required: false, - method: 'pkg-config', - disabler: true), - dependency('xenevtchn', required: false, - method: 'pkg-config', - disabler: true), - dependency('xendevicemodel', required: false, - method: 'pkg-config', - disabler: true), - # optional, no "disabler: true" - dependency('xentoolcore', required: false, - method: 'pkg-config')]) - if xen_pc.found() - xen = xen_pc - endif - endif - if not xen.found() - xen_tests = [ '4.11.0', '4.10.0', '4.9.0', '4.8.0', '4.7.1' ] - xen_libs = { - '4.11.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ], - '4.10.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ], - '4.9.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], - '4.8.0': [ 'xenstore', 'xenctrl', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], - '4.7.1': [ 'xenstore', 'xenctrl', 'xenforeignmemory', 'xengnttab', 
'xenevtchn' ], - } - xen_deps = {} - foreach ver: xen_tests - # cache the various library tests to avoid polluting the logs - xen_test_deps = [] - foreach l: xen_libs[ver] - if l not in xen_deps - xen_deps += { l: cc.find_library(l, required: false) } - endif - xen_test_deps += xen_deps[l] - endforeach - - # Use -D to pick just one of the test programs in scripts/xen-detect.c - xen_version = ver.split('.') - xen_ctrl_version = xen_version[0] + \ - ('0' + xen_version[1]).substring(-2) + \ - ('0' + xen_version[2]).substring(-2) - if cc.links(files('scripts/xen-detect.c'), - args: '-DCONFIG_XEN_CTRL_INTERFACE_VERSION=' + xen_ctrl_version, - dependencies: xen_test_deps) - xen = declare_dependency(version: ver, dependencies: xen_test_deps) - break - endif - endforeach + #include + int main(void) { snappy_max_compressed_length(4096); return 0; }''', dependencies: snappy) + snappy = not_found + if get_option('snappy').enabled() + error('could not link libsnappy') + else + warning('could not link libsnappy, disabling') endif - if xen.found() - accelerators += 'CONFIG_XEN' - elif get_option('xen').enabled() - error('could not compile and link Xen test program') +endif + +lzo = not_found +if not get_option('lzo').auto() or have_system + lzo = cc.find_library('lzo2', has_headers: ['lzo/lzo1x.h'], + required: get_option('lzo')) +endif +if lzo.found() and not cc.links(''' + #include + int main(void) { lzo_version(); return 0; }''', dependencies: lzo) + lzo = not_found + if get_option('lzo').enabled() + error('could not link liblzo2') + else + warning('could not link liblzo2, disabling') + endif +endif + +numa = not_found +if not get_option('numa').auto() or have_system or have_tools + numa = cc.find_library('numa', has_headers: ['numa.h'], + required: get_option('numa')) +endif +if numa.found() and not cc.links(''' + #include + int main(void) { return numa_available(); } + ''', dependencies: numa) + numa = not_found + if get_option('numa').enabled() + error('could not link numa') + else + warning('could not link numa, disabling') endif endif -have_xen_pci_passthrough = get_option('xen_pci_passthrough') \ - .require(xen.found(), - error_message: 'Xen PCI passthrough requested but Xen not enabled') \ - .require(targetos == 'linux', - error_message: 'Xen PCI passthrough not available on this platform') \ - .require(cpu == 'x86' or cpu == 'x86_64', - error_message: 'Xen PCI passthrough not available on this platform') \ - .allowed() +rdma = not_found +if not get_option('rdma').auto() or have_system + libumad = cc.find_library('ibumad', required: get_option('rdma')) + rdma_libs = [cc.find_library('rdmacm', has_headers: ['rdma/rdma_cma.h'], + required: get_option('rdma')), + cc.find_library('ibverbs', required: get_option('rdma')), + libumad] + rdma = declare_dependency(dependencies: rdma_libs) + foreach lib: rdma_libs + if not lib.found() + rdma = not_found + endif + endforeach +endif cacard = not_found if not get_option('smartcard').auto() or have_system @@ -1783,12 +1903,12 @@ if not get_option('smartcard').auto() or have_system version: '>=2.5.1', method: 'pkg-config') endif u2f = not_found -if have_system +if not get_option('u2f').auto() or have_system u2f = dependency('u2f-emu', required: get_option('u2f'), method: 'pkg-config') endif canokey = not_found -if have_system +if not get_option('canokey').auto() or have_system canokey = dependency('canokey-qemu', required: get_option('canokey'), method: 'pkg-config') endif @@ -1865,7 +1985,7 @@ has_statx = cc.has_header_symbol('sys/stat.h', 
'STATX_BASIC_STATS', prefix: gnu_ has_statx_mnt_id = cc.has_header_symbol('sys/stat.h', 'STATX_MNT_ID', prefix: gnu_source_prefix) have_vhost_user_blk_server = get_option('vhost_user_blk_server') \ - .require(targetos == 'linux', + .require(host_os == 'linux', error_message: 'vhost_user_blk_server requires linux') \ .require(have_vhost_user, error_message: 'vhost_user_blk_server requires vhost-user support') \ @@ -1893,18 +2013,18 @@ if get_option('fuse_lseek').allowed() endif endif -have_libvduse = (targetos == 'linux') +have_libvduse = (host_os == 'linux') if get_option('libvduse').enabled() - if targetos != 'linux' + if host_os != 'linux' error('libvduse requires linux') endif elif get_option('libvduse').disabled() have_libvduse = false endif -have_vduse_blk_export = (have_libvduse and targetos == 'linux') +have_vduse_blk_export = (have_libvduse and host_os == 'linux') if get_option('vduse_blk_export').enabled() - if targetos != 'linux' + if host_os != 'linux' error('vduse_blk_export requires linux') elif not have_libvduse error('vduse_blk_export requires libvduse support') @@ -1914,19 +2034,23 @@ elif get_option('vduse_blk_export').disabled() endif # libbpf -libbpf = dependency('libbpf', required: get_option('bpf'), method: 'pkg-config') +bpf_version = '1.1.0' +libbpf = dependency('libbpf', version: '>=' + bpf_version, required: get_option('bpf'), method: 'pkg-config') if libbpf.found() and not cc.links(''' #include + #include int main(void) { + // check flag availability + int flag = BPF_F_MMAPABLE; bpf_object__destroy_skeleton(NULL); return 0; }''', dependencies: libbpf) libbpf = not_found if get_option('bpf').enabled() - error('libbpf skeleton test failed') + error('libbpf skeleton/mmaping test failed') else - warning('libbpf skeleton test failed, disabling') + warning('libbpf skeleton/mmaping test failed, disabling') endif endif @@ -1950,6 +2074,8 @@ endif # config-host.h # ################# +config_host_data = configuration_data() + audio_drivers_selected = [] if have_system audio_drivers_available = { @@ -1970,7 +2096,7 @@ if have_system # Default to native drivers first, OSS second, SDL third audio_drivers_priority = \ [ 'pa', 'coreaudio', 'dsound', 'sndio', 'oss' ] + \ - (targetos == 'linux' ? [] : [ 'sdl' ]) + (host_os == 'linux' ? [] : [ 'sdl' ]) audio_drivers_default = [] foreach k: audio_drivers_priority if audio_drivers_available[k] @@ -1991,47 +2117,7 @@ endif config_host_data.set('CONFIG_AUDIO_DRIVERS', '"' + '", "'.join(audio_drivers_selected) + '", ') -if get_option('cfi') - cfi_flags=[] - # Check for dependency on LTO - if not get_option('b_lto') - error('Selected Control-Flow Integrity but LTO is disabled') - endif - if enable_modules - error('Selected Control-Flow Integrity is not compatible with modules') - endif - # Check for cfi flags. 
CFI requires LTO so we can't use - # get_supported_arguments, but need a more complex "compiles" which allows - # custom arguments - if cc.compiles('int main () { return 0; }', name: '-fsanitize=cfi-icall', - args: ['-flto', '-fsanitize=cfi-icall'] ) - cfi_flags += '-fsanitize=cfi-icall' - else - error('-fsanitize=cfi-icall is not supported by the compiler') - endif - if cc.compiles('int main () { return 0; }', - name: '-fsanitize-cfi-icall-generalize-pointers', - args: ['-flto', '-fsanitize=cfi-icall', - '-fsanitize-cfi-icall-generalize-pointers'] ) - cfi_flags += '-fsanitize-cfi-icall-generalize-pointers' - else - error('-fsanitize-cfi-icall-generalize-pointers is not supported by the compiler') - endif - if get_option('cfi_debug') - if cc.compiles('int main () { return 0; }', - name: '-fno-sanitize-trap=cfi-icall', - args: ['-flto', '-fsanitize=cfi-icall', - '-fno-sanitize-trap=cfi-icall'] ) - cfi_flags += '-fno-sanitize-trap=cfi-icall' - else - error('-fno-sanitize-trap=cfi-icall is not supported by the compiler') - endif - endif - add_global_arguments(cfi_flags, native: false, language: all_languages) - add_global_link_arguments(cfi_flags, native: false, language: all_languages) -endif - -have_host_block_device = (targetos != 'darwin' or +have_host_block_device = (host_os != 'darwin' or cc.has_header('IOKit/storage/IOMedia.h')) dbus_display = get_option('dbus_display') \ @@ -2042,17 +2128,17 @@ dbus_display = get_option('dbus_display') \ .allowed() have_virtfs = get_option('virtfs') \ - .require(targetos == 'linux' or targetos == 'darwin', + .require(host_os == 'linux' or host_os == 'darwin', error_message: 'virtio-9p (virtfs) requires Linux or macOS') \ - .require(targetos == 'linux' or cc.has_function('pthread_fchdir_np'), + .require(host_os == 'linux' or cc.has_function('pthread_fchdir_np'), error_message: 'virtio-9p (virtfs) on macOS requires the presence of pthread_fchdir_np') \ - .require(targetos == 'darwin' or libattr.found(), + .require(host_os == 'darwin' or libattr.found(), error_message: 'virtio-9p (virtfs) on Linux requires libattr-devel') \ .disable_auto_if(not have_tools and not have_system) \ .allowed() have_virtfs_proxy_helper = get_option('virtfs_proxy_helper') \ - .require(targetos != 'darwin', error_message: 'the virtfs proxy helper is incompatible with macOS') \ + .require(host_os != 'darwin', error_message: 'the virtfs proxy helper is incompatible with macOS') \ .require(have_virtfs, error_message: 'the virtfs proxy helper requires that virtfs is enabled') \ .disable_auto_if(not have_tools) \ .require(libcap_ng.found(), error_message: 'the virtfs proxy helper requires libcap-ng') \ @@ -2107,18 +2193,24 @@ if enable_modules endif have_slirp_smbd = get_option('slirp_smbd') \ - .require(targetos != 'windows', error_message: 'Host smbd not supported on this platform.') \ + .require(host_os != 'windows', error_message: 'Host smbd not supported on this platform.') \ .allowed() if have_slirp_smbd smbd_path = get_option('smbd') if smbd_path == '' - smbd_path = (targetos == 'sunos' ? '/usr/sfw/sbin/smbd' : '/usr/sbin/smbd') + smbd_path = (host_os == 'sunos' ? 
'/usr/sfw/sbin/smbd' : '/usr/sbin/smbd') endif config_host_data.set_quoted('CONFIG_SMBD_COMMAND', smbd_path) endif config_host_data.set('HOST_' + host_arch.to_upper(), 1) +kvm_targets_c = '""' +if get_option('kvm').allowed() and host_os == 'linux' + kvm_targets_c = '"' + '" ,"'.join(kvm_targets) + '"' +endif +config_host_data.set('CONFIG_KVM_TARGETS', kvm_targets_c) + if get_option('module_upgrades') and not enable_modules error('Cannot enable module-upgrades as modules are not enabled') endif @@ -2127,15 +2219,16 @@ config_host_data.set('CONFIG_MODULE_UPGRADES', get_option('module_upgrades')) config_host_data.set('CONFIG_ATTR', libattr.found()) config_host_data.set('CONFIG_BDRV_WHITELIST_TOOLS', get_option('block_drv_whitelist_in_tools')) config_host_data.set('CONFIG_BRLAPI', brlapi.found()) -config_host_data.set('CONFIG_BSD', targetos in bsd_oses) +config_host_data.set('CONFIG_BSD', host_os in bsd_oses) +config_host_data.set('CONFIG_CAPSTONE', capstone.found()) config_host_data.set('CONFIG_COCOA', cocoa.found()) -config_host_data.set('CONFIG_DARWIN', targetos == 'darwin') +config_host_data.set('CONFIG_DARWIN', host_os == 'darwin') config_host_data.set('CONFIG_FUZZ', get_option('fuzzing')) config_host_data.set('CONFIG_GCOV', get_option('b_coverage')) config_host_data.set('CONFIG_LIBUDEV', libudev.found()) -config_host_data.set('CONFIG_LINUX', targetos == 'linux') -config_host_data.set('CONFIG_POSIX', targetos != 'windows') -config_host_data.set('CONFIG_WIN32', targetos == 'windows') +config_host_data.set('CONFIG_LINUX', host_os == 'linux') +config_host_data.set('CONFIG_POSIX', host_os != 'windows') +config_host_data.set('CONFIG_WIN32', host_os == 'windows') config_host_data.set('CONFIG_LZO', lzo.found()) config_host_data.set('CONFIG_MPATH', mpathpersist.found()) config_host_data.set('CONFIG_BLKIO', blkio.found()) @@ -2191,8 +2284,9 @@ if seccomp.found() config_host_data.set('CONFIG_SECCOMP_SYSRAWRC', seccomp_has_sysrawrc) endif config_host_data.set('CONFIG_PIXMAN', pixman.found()) +config_host_data.set('CONFIG_SLIRP', slirp.found()) config_host_data.set('CONFIG_SNAPPY', snappy.found()) -config_host_data.set('CONFIG_SOLARIS', targetos == 'sunos') +config_host_data.set('CONFIG_SOLARIS', host_os == 'sunos') if get_option('tcg').allowed() config_host_data.set('CONFIG_TCG', 1) config_host_data.set('CONFIG_TCG_INTERPRETER', tcg_arch == 'tci') @@ -2216,6 +2310,12 @@ config_host_data.set('CONFIG_PNG', png.found()) config_host_data.set('CONFIG_VNC', vnc.found()) config_host_data.set('CONFIG_VNC_JPEG', jpeg.found()) config_host_data.set('CONFIG_VNC_SASL', sasl.found()) +if virgl.found() + config_host_data.set('HAVE_VIRGL_D3D_INFO_EXT', + cc.has_member('struct virgl_renderer_resource_info_ext', 'd3d_tex2d', + prefix: '#include ', + dependencies: virgl)) +endif config_host_data.set('CONFIG_VIRTFS', have_virtfs) config_host_data.set('CONFIG_VTE', vte.found()) config_host_data.set('CONFIG_XKBCOMMON', xkbcommon.found()) @@ -2226,6 +2326,7 @@ config_host_data.set('CONFIG_GNUTLS_CRYPTO', gnutls_crypto.found()) config_host_data.set('CONFIG_TASN1', tasn1.found()) config_host_data.set('CONFIG_GCRYPT', gcrypt.found()) config_host_data.set('CONFIG_NETTLE', nettle.found()) +config_host_data.set('CONFIG_CRYPTO_SM4', crypto_sm4.found()) config_host_data.set('CONFIG_HOGWEED', hogweed.found()) config_host_data.set('CONFIG_QEMU_PRIVATE_XTS', xts == 'private') config_host_data.set('CONFIG_MALLOC_TRIM', has_malloc_trim) @@ -2287,7 +2388,7 @@ config_host_data.set('HAVE_PTY_H', cc.has_header('pty.h')) 
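[Editor's note] On the CONFIG_KVM_TARGETS hunk above: kvm_targets_c is deliberately rendered as a quoted, comma-separated list because a macro of that shape can be dropped straight into a C array initializer once it lands in the generated config header. The sketch below is purely illustrative and not code from this patch or from QEMU; only the CONFIG_KVM_TARGETS name comes from the hunk, and the fallback value and loop are assumptions.

/*
 * Hypothetical illustration: a macro expanding to a comma-separated list of
 * string literals (the shape produced by the meson join above) can seed an
 * array initializer directly. The fallback below is an assumed sample value;
 * in the real build the definition would come from the generated config header.
 */
#include <stdio.h>

#ifndef CONFIG_KVM_TARGETS
#define CONFIG_KVM_TARGETS "x86_64-softmmu" ,"aarch64-softmmu"
#endif

static const char *const kvm_targets[] = { CONFIG_KVM_TARGETS };

int main(void)
{
    for (size_t i = 0; i < sizeof(kvm_targets) / sizeof(kvm_targets[0]); i++) {
        printf("KVM-capable target: %s\n", kvm_targets[i]);
    }
    return 0;
}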
config_host_data.set('HAVE_SYS_DISK_H', cc.has_header('sys/disk.h')) config_host_data.set('HAVE_SYS_IOCCOM_H', cc.has_header('sys/ioccom.h')) config_host_data.set('HAVE_SYS_KCOV_H', cc.has_header('sys/kcov.h')) -if targetos == 'windows' +if host_os == 'windows' config_host_data.set('HAVE_AFUNIX_H', cc.has_header('afunix.h')) endif @@ -2320,7 +2421,6 @@ config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', glib_has_gslice) config_host_data.set('HAVE_OPENPTY', cc.has_function('openpty', dependencies: util)) config_host_data.set('HAVE_STRCHRNUL', cc.has_function('strchrnul')) config_host_data.set('HAVE_SYSTEM_FUNCTION', cc.has_function('system', prefix: '#include ')) -config_host_data.set('HAVE_GETLOADAVG_FUNCTION', cc.has_function('getloadavg', prefix: '#include ')) if rbd.found() config_host_data.set('HAVE_RBD_NAMESPACE_EXISTS', cc.has_function('rbd_namespace_exists', @@ -2346,6 +2446,22 @@ else endif config_host_data.set('CONFIG_ASAN_IFACE_FIBER', have_asan_fiber) +have_inotify_init = cc.has_header_symbol('sys/inotify.h', 'inotify_init') +have_inotify_init1 = cc.has_header_symbol('sys/inotify.h', 'inotify_init1') +inotify = not_found +if (have_inotify_init or have_inotify_init1) and host_os == 'freebsd' + # libinotify-kqueue + inotify = cc.find_library('inotify') + if have_inotify_init + have_inotify_init = inotify.found() + endif + if have_inotify_init1 + have_inotify_init1 = inotify.found() + endif +endif +config_host_data.set('CONFIG_INOTIFY', have_inotify_init) +config_host_data.set('CONFIG_INOTIFY1', have_inotify_init1) + # has_header_symbol config_host_data.set('CONFIG_BLKZONED', cc.has_header_symbol('linux/blkzoned.h', 'BLKOPENZONE')) @@ -2362,10 +2478,6 @@ config_host_data.set('CONFIG_FIEMAP', config_host_data.set('CONFIG_GETRANDOM', cc.has_function('getrandom') and cc.has_header_symbol('sys/random.h', 'GRND_NONBLOCK')) -config_host_data.set('CONFIG_INOTIFY', - cc.has_header_symbol('sys/inotify.h', 'inotify_init')) -config_host_data.set('CONFIG_INOTIFY1', - cc.has_header_symbol('sys/inotify.h', 'inotify_init1')) config_host_data.set('CONFIG_PRCTL_PR_SET_TIMERSLACK', cc.has_header_symbol('sys/prctl.h', 'PR_SET_TIMERSLACK')) config_host_data.set('CONFIG_RTNETLINK', @@ -2669,7 +2781,7 @@ config_host_data.set('CONFIG_USBFS', have_linux_user and cc.compiles(''' int main(void) { return 0; }''')) have_keyring = get_option('keyring') \ - .require(targetos == 'linux', error_message: 'keyring is only available on Linux') \ + .require(host_os == 'linux', error_message: 'keyring is only available on Linux') \ .require(cc.compiles(''' #include #include @@ -2778,9 +2890,9 @@ endif if get_option('membarrier').disabled() have_membarrier = false -elif targetos == 'windows' +elif host_os == 'windows' have_membarrier = true -elif targetos == 'linux' +elif host_os == 'linux' have_membarrier = cc.compiles(''' #include #include @@ -2817,7 +2929,7 @@ config_host_data.set('CONFIG_AF_VSOCK', cc.has_header_symbol( have_vss = false have_vss_sdk = false # old xp/2003 SDK -if targetos == 'windows' and 'cpp' in all_languages +if host_os == 'windows' and 'cpp' in all_languages have_vss = cxx.compiles(''' #define __MIDL_user_allocate_free_DEFINED__ #include @@ -2828,7 +2940,7 @@ config_host_data.set('HAVE_VSS_SDK', have_vss_sdk) # Older versions of MinGW do not import _lock_file and _unlock_file properly. # This was fixed for v6.0.0 with commit b48e3ac8969d. 
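[Editor's note] The Windows-only check that follows probes whether the MinGW C runtime actually exports _lock_file()/_unlock_file() at link time. Written out as a standalone program it looks roughly like the sketch below; the header name (the #include target was stripped in this copy of the patch, presumably <stdio.h>, which declares FILE and stdout) and the exact calls are assumptions, since the hunk elides the body of the test. It only builds on a Windows/MinGW toolchain, which is the point of guarding it with host_os == 'windows'.

/*
 * Sketch of the kind of program the HAVE__LOCK_FILE link test compiles.
 * If _lock_file()/_unlock_file() are missing from the CRT import library,
 * this fails to link and the config symbol stays unset.
 */
#include <stdio.h>

int main(void)
{
    _lock_file(stdout);
    _unlock_file(stdout);
    return 0;
}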
-if targetos == 'windows' +if host_os == 'windows' config_host_data.set('HAVE__LOCK_FILE', cc.links(''' #include int main(void) { @@ -2838,7 +2950,7 @@ if targetos == 'windows' }''', name: '_lock_file and _unlock_file')) endif -if targetos == 'windows' +if host_os == 'windows' mingw_has_setjmp_longjmp = cc.links(''' #include int main(void) { @@ -2864,21 +2976,9 @@ endif ######################## minikconf = find_program('scripts/minikconf.py') -config_targetos = { - (targetos == 'windows' ? 'CONFIG_WIN32' : 'CONFIG_POSIX'): 'y' -} -if targetos == 'darwin' - config_targetos += {'CONFIG_DARWIN': 'y'} -elif targetos == 'linux' - config_targetos += {'CONFIG_LINUX': 'y'} -endif -if targetos in bsd_oses - config_targetos += {'CONFIG_BSD': 'y'} -endif -config_all = {} +config_all_accel = {} config_all_devices = {} -config_all_disas = {} config_devices_mak_list = [] config_devices_h = {} config_target_h = {} @@ -2920,7 +3020,7 @@ host_kconfig = \ (have_vhost_vdpa ? ['CONFIG_VHOST_VDPA=y'] : []) + \ (have_vhost_kernel ? ['CONFIG_VHOST_KERNEL=y'] : []) + \ (have_virtfs ? ['CONFIG_VIRTFS=y'] : []) + \ - (targetos == 'linux' ? ['CONFIG_LINUX=y'] : []) + \ + (host_os == 'linux' ? ['CONFIG_LINUX=y'] : []) + \ (have_pvrdma ? ['CONFIG_PVRDMA=y'] : []) + \ (multiprocess_allowed ? ['CONFIG_MULTIPROCESS_ALLOWED=y'] : []) + \ (vfio_user_server_allowed ? ['CONFIG_VFIO_USER_SERVER_ALLOWED=y'] : []) + \ @@ -2934,7 +3034,7 @@ fdt_required = [] foreach target : target_dirs config_target = { 'TARGET_NAME': target.split('-')[0] } if target.endswith('linux-user') - if targetos != 'linux' + if host_os != 'linux' if default_targets continue endif @@ -2942,7 +3042,7 @@ foreach target : target_dirs endif config_target += { 'CONFIG_LINUX_USER': 'y' } elif target.endswith('bsd-user') - if targetos not in bsd_oses + if host_os not in bsd_oses if default_targets continue endif @@ -2965,7 +3065,7 @@ foreach target : target_dirs foreach sym: accelerators if sym == 'CONFIG_TCG' or target in accelerator_targets.get(sym, []) config_target += { sym: 'y' } - config_all += { sym: 'y' } + config_all_accel += { sym: 'y' } if target in modular_tcg config_target += { 'CONFIG_TCG_MODULAR': 'y' } else @@ -3004,7 +3104,6 @@ foreach target : target_dirs if host_arch.startswith(k) or config_target['TARGET_BASE_ARCH'].startswith(k) foreach sym: v config_target += { sym: 'y' } - config_all_disas += { sym: 'y' } endforeach endif endforeach @@ -3063,25 +3162,6 @@ foreach target : target_dirs endforeach target_dirs = actual_target_dirs -# This configuration is used to build files that are shared by -# multiple binaries, and then extracted out of the "common" -# static_library target. -# -# We do not use all_sources()/all_dependencies(), because it would -# build literally all source files, including devices only used by -# targets that are not built for this compilation. The CONFIG_ALL -# pseudo symbol replaces it. 
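[Editor's note] Background for the config_all -> config_all_accel rename above: the accelerator symbols collected there (CONFIG_KVM, CONFIG_TCG, ...) eventually surface as preprocessor macros in the generated per-target config headers, so C code can be compiled conditionally on them. A minimal, hypothetical sketch follows; only the macro names are taken from the patch, the function and program are illustrative and compile standalone whether or not the macros are defined.

#include <stdio.h>

/* Returns a label for whichever accelerator this (hypothetical) target build
 * was configured with; with neither macro defined it falls back to "none". */
static const char *accel_name(void)
{
#if defined(CONFIG_KVM)
    return "kvm";
#elif defined(CONFIG_TCG)
    return "tcg";
#else
    return "none";
#endif
}

int main(void)
{
    printf("configured accelerator: %s\n", accel_name());
    return 0;
}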
- -config_all += config_all_devices -config_all += config_targetos -config_all += config_all_disas -config_all += { - 'CONFIG_XEN': xen.found(), - 'CONFIG_SYSTEM_ONLY': have_system, - 'CONFIG_USER_ONLY': have_user, - 'CONFIG_ALL': true, -} - target_configs_h = [] foreach target: target_dirs target_configs_h += config_target_h[target] @@ -3094,28 +3174,9 @@ genh += custom_target('config-poison.h', command: [find_program('scripts/make-config-poison.sh'), target_configs_h]) -############## -# Submodules # -############## - -capstone = not_found -if not get_option('capstone').auto() or have_system or have_user - capstone = dependency('capstone', version: '>=3.0.5', - method: 'pkg-config', - required: get_option('capstone')) - - # Some versions of capstone have broken pkg-config file - # that reports a wrong -I path, causing the #include to - # fail later. If the system has such a broken version - # do not use it. - if capstone.found() and not cc.compiles('#include ', - dependencies: [capstone]) - capstone = not_found - if get_option('capstone').enabled() - error('capstone requested, but it does not appear to work') - endif - endif -endif +############### +# Subprojects # +############### libvfio_user_dep = not_found if have_system and vfio_user_server_allowed @@ -3159,9 +3220,19 @@ else fdt_opt = 'disabled' endif -config_host_data.set('CONFIG_CAPSTONE', capstone.found()) config_host_data.set('CONFIG_FDT', fdt.found()) -config_host_data.set('CONFIG_SLIRP', slirp.found()) + +vhost_user = not_found +if host_os == 'linux' and have_vhost_user + libvhost_user = subproject('libvhost-user') + vhost_user = libvhost_user.get_variable('vhost_user_dep') +endif + +libvduse = not_found +if have_libvduse + libvduse_proj = subproject('libvduse') + libvduse = libvduse_proj.get_variable('libvduse_dep') +endif ##################### # Generated sources # @@ -3245,39 +3316,6 @@ foreach d : hx_headers endforeach genh += hxdep -################### -# Collect sources # -################### - -authz_ss = ss.source_set() -blockdev_ss = ss.source_set() -block_ss = ss.source_set() -chardev_ss = ss.source_set() -common_ss = ss.source_set() -crypto_ss = ss.source_set() -hwcore_ss = ss.source_set() -io_ss = ss.source_set() -qmp_ss = ss.source_set() -qom_ss = ss.source_set() -system_ss = ss.source_set() -specific_fuzz_ss = ss.source_set() -specific_ss = ss.source_set() -stub_ss = ss.source_set() -trace_ss = ss.source_set() -user_ss = ss.source_set() -util_ss = ss.source_set() - -# accel modules -qtest_module_ss = ss.source_set() -tcg_module_ss = ss.source_set() - -modules = {} -target_modules = {} -hw_arch = {} -target_arch = {} -target_system_arch = {} -target_user_arch = {} - ############### # Trace files # ############### @@ -3322,10 +3360,10 @@ if have_system 'hw/arm', 'hw/audio', 'hw/block', - 'hw/block/dataplane', 'hw/char', 'hw/display', 'hw/dma', + 'hw/fsi', 'hw/hyperv', 'hw/i2c', 'hw/i386', @@ -3382,6 +3420,7 @@ if have_system or have_user 'target/hppa', 'target/i386', 'target/i386/kvm', + 'target/loongarch', 'target/mips/tcg', 'target/nios2', 'target/ppc', @@ -3392,17 +3431,38 @@ if have_system or have_user ] endif -vhost_user = not_found -if targetos == 'linux' and have_vhost_user - libvhost_user = subproject('libvhost-user') - vhost_user = libvhost_user.get_variable('vhost_user_dep') -endif +################### +# Collect sources # +################### -libvduse = not_found -if have_libvduse - libvduse_proj = subproject('libvduse') - libvduse = libvduse_proj.get_variable('libvduse_dep') -endif +authz_ss = 
ss.source_set() +blockdev_ss = ss.source_set() +block_ss = ss.source_set() +chardev_ss = ss.source_set() +common_ss = ss.source_set() +crypto_ss = ss.source_set() +hwcore_ss = ss.source_set() +io_ss = ss.source_set() +qmp_ss = ss.source_set() +qom_ss = ss.source_set() +system_ss = ss.source_set() +specific_fuzz_ss = ss.source_set() +specific_ss = ss.source_set() +stub_ss = ss.source_set() +trace_ss = ss.source_set() +user_ss = ss.source_set() +util_ss = ss.source_set() + +# accel modules +qtest_module_ss = ss.source_set() +tcg_module_ss = ss.source_set() + +modules = {} +target_modules = {} +hw_arch = {} +target_arch = {} +target_system_arch = {} +target_user_arch = {} # NOTE: the trace/ subdirectory needs the qapi_trace_events variable # that is filled in by qapi/. @@ -3423,7 +3483,7 @@ if enable_modules modulecommon = declare_dependency(link_whole: libmodulecommon, compile_args: '-DBUILD_DSO') endif -qom_ss = qom_ss.apply(config_targetos, strict: false) +qom_ss = qom_ss.apply({}) libqom = static_library('qom', qom_ss.sources() + genh, dependencies: [qom_ss.dependencies()], name_suffix: 'fa', @@ -3438,10 +3498,10 @@ event_loop_base = static_library('event-loop-base', event_loop_base = declare_dependency(link_whole: event_loop_base, dependencies: [qom]) -stub_ss = stub_ss.apply(config_all, strict: false) +stub_ss = stub_ss.apply({}) util_ss.add_all(trace_ss) -util_ss = util_ss.apply(config_all, strict: false) +util_ss = util_ss.apply({}) libqemuutil = static_library('qemuutil', build_by_default: false, sources: util_ss.sources() + stub_ss.sources() + genh, @@ -3488,8 +3548,11 @@ if have_block # os-posix.c contains POSIX-specific functions used by qemu-storage-daemon, # os-win32.c does not - blockdev_ss.add(when: 'CONFIG_POSIX', if_true: files('os-posix.c')) - system_ss.add(when: 'CONFIG_WIN32', if_true: [files('os-win32.c')]) + if host_os == 'windows' + system_ss.add(files('os-win32.c')) + else + blockdev_ss.add(files('os-posix.c')) + endif endif common_ss.add(files('cpu-common.c')) @@ -3548,9 +3611,9 @@ specific_ss.add_all(when: 'CONFIG_TCG_BUILTIN', if_true: tcg_module_ss) target_modules += { 'accel' : { 'qtest': qtest_module_ss, 'tcg': tcg_real_module_ss }} -######################## -# Library dependencies # -######################## +############################################## +# Internal static_libraries and dependencies # +############################################## modinfo_collect = find_program('scripts/modinfo-collect.py') modinfo_generate = find_program('scripts/modinfo-generate.py') @@ -3565,7 +3628,7 @@ foreach d, list : modules foreach m, module_ss : list if enable_modules - module_ss = module_ss.apply(config_all, strict: false) + module_ss = module_ss.apply(config_all_devices, strict: false) sl = static_library(d + '-' + m, [genh, module_ss.sources()], dependencies: [modulecommon, module_ss.dependencies()], pic: true) if d == 'block' @@ -3601,7 +3664,6 @@ foreach d, list : target_modules foreach target : target_dirs if target.endswith('-softmmu') config_target = config_target_mak[target] - config_target += config_targetos target_inc = [include_directories('target' / config_target['TARGET_BASE_ARCH'])] c_args = ['-DNEED_CPU_H', '-DCONFIG_TARGET="@0@-config-target.h"'.format(target), @@ -3662,7 +3724,7 @@ qemu_syms = custom_target('qemu.syms', output: 'qemu.syms', capture: true, command: [undefsym, nm, '@INPUT@']) -authz_ss = authz_ss.apply(config_targetos, strict: false) +authz_ss = authz_ss.apply({}) libauthz = static_library('authz', authz_ss.sources() + genh, 
dependencies: [authz_ss.dependencies()], name_suffix: 'fa', @@ -3671,7 +3733,7 @@ libauthz = static_library('authz', authz_ss.sources() + genh, authz = declare_dependency(link_whole: libauthz, dependencies: qom) -crypto_ss = crypto_ss.apply(config_targetos, strict: false) +crypto_ss = crypto_ss.apply({}) libcrypto = static_library('crypto', crypto_ss.sources() + genh, dependencies: [crypto_ss.dependencies()], name_suffix: 'fa', @@ -3680,7 +3742,7 @@ libcrypto = static_library('crypto', crypto_ss.sources() + genh, crypto = declare_dependency(link_whole: libcrypto, dependencies: [authz, qom]) -io_ss = io_ss.apply(config_targetos, strict: false) +io_ss = io_ss.apply({}) libio = static_library('io', io_ss.sources() + genh, dependencies: [io_ss.dependencies()], link_with: libqemuutil, @@ -3696,7 +3758,7 @@ migration = declare_dependency(link_with: libmigration, dependencies: [zlib, qom, io]) system_ss.add(migration) -block_ss = block_ss.apply(config_targetos, strict: false) +block_ss = block_ss.apply({}) libblock = static_library('block', block_ss.sources() + genh, dependencies: block_ss.dependencies(), link_depends: block_syms, @@ -3707,7 +3769,7 @@ block = declare_dependency(link_whole: [libblock], link_args: '@block.syms', dependencies: [crypto, io]) -blockdev_ss = blockdev_ss.apply(config_targetos, strict: false) +blockdev_ss = blockdev_ss.apply({}) libblockdev = static_library('blockdev', blockdev_ss.sources() + genh, dependencies: blockdev_ss.dependencies(), name_suffix: 'fa', @@ -3716,7 +3778,7 @@ libblockdev = static_library('blockdev', blockdev_ss.sources() + genh, blockdev = declare_dependency(link_whole: [libblockdev], dependencies: [block, event_loop_base]) -qmp_ss = qmp_ss.apply(config_targetos, strict: false) +qmp_ss = qmp_ss.apply({}) libqmp = static_library('qmp', qmp_ss.sources() + genh, dependencies: qmp_ss.dependencies(), name_suffix: 'fa', @@ -3731,7 +3793,7 @@ libchardev = static_library('chardev', chardev_ss.sources() + genh, chardev = declare_dependency(link_whole: libchardev) -hwcore_ss = hwcore_ss.apply(config_targetos, strict: false) +hwcore_ss = hwcore_ss.apply({}) libhwcore = static_library('hwcore', sources: hwcore_ss.sources() + genh, name_suffix: 'fa', build_by_default: false) @@ -3764,21 +3826,24 @@ common_ss.add_all(when: 'CONFIG_USER_ONLY', if_true: user_ss) common_ss.add(symcc_rt) specific_ss.add(symcc_rt) -common_all = common_ss.apply(config_all, strict: false) +# Note that this library is never used directly (only through extract_objects) +# and is not built by default; therefore, source files not used by the build +# configuration will be in build.ninja, but are never built by default. 
common_all = static_library('common', build_by_default: false, - sources: common_all.sources() + genh, + sources: common_ss.all_sources() + genh, include_directories: common_user_inc, implicit_include_directories: false, - dependencies: common_all.dependencies(), + dependencies: common_ss.all_dependencies(), name_suffix: 'fa') feature_to_c = find_program('scripts/feature_to_c.py') -if targetos == 'darwin' +if host_os == 'darwin' entitlement = find_program('scripts/entitlement.sh') endif +traceable = [] emulators = {} foreach target : target_dirs config_target = config_target_mak[target] @@ -3791,9 +3856,8 @@ foreach target : target_dirs '-DCONFIG_DEVICES="@0@-config-devices.h"'.format(target)] link_args = emulator_link_args - config_target += config_targetos target_inc = [include_directories('target' / config_target['TARGET_BASE_ARCH'])] - if targetos == 'linux' + if host_os == 'linux' target_inc += include_directories('linux-headers', is_system: true) endif if target.endswith('-softmmu') @@ -3803,9 +3867,11 @@ foreach target : target_dirs arch_deps += t.dependencies() hw_dir = target_name == 'sparc64' ? 'sparc64' : target_base_arch - hw = hw_arch[hw_dir].apply(config_target, strict: false) - arch_srcs += hw.sources() - arch_deps += hw.dependencies() + if hw_arch.has_key(hw_dir) + hw = hw_arch[hw_dir].apply(config_target, strict: false) + arch_srcs += hw.sources() + arch_deps += hw.dependencies() + endif arch_srcs += config_devices_h[target] link_args += ['@block.syms', '@qemu.syms'] @@ -3823,7 +3889,7 @@ foreach target : target_dirs endif if 'CONFIG_BSD_USER' in config_target base_dir = 'bsd-user' - target_inc += include_directories('bsd-user/' / targetos) + target_inc += include_directories('bsd-user/' / host_os) target_inc += include_directories('bsd-user/host/' / host_arch) dir = base_dir / abi arch_srcs += files(dir / 'signal.c', dir / 'target_arch_cpu.c') @@ -3880,7 +3946,7 @@ foreach target : target_dirs 'sources': files('system/main.c'), 'dependencies': [] }] - if targetos == 'windows' and (sdl.found() or gtk.found()) + if host_os == 'windows' and (sdl.found() or gtk.found()) execs += [{ 'name': 'qemu-system-' + target_name + 'w', 'win_subsystem': 'windows', @@ -3907,7 +3973,7 @@ foreach target : target_dirs endif foreach exe: execs exe_name = exe['name'] - if targetos == 'darwin' + if host_os == 'darwin' exe_name += '-unsigned' endif @@ -3916,11 +3982,11 @@ foreach target : target_dirs c_args: c_args, dependencies: arch_deps + deps + exe['dependencies'], objects: lib.extract_all_objects(recursive: true), - link_depends: [block_syms, qemu_syms] + exe.get('link_depends', []), + link_depends: [block_syms, qemu_syms], link_args: link_args, win_subsystem: exe['win_subsystem']) - if targetos == 'darwin' + if host_os == 'darwin' icon = 'pc-bios/qemu.rsrc' build_input = [emulator, files(icon)] install_input = [ @@ -3946,29 +4012,11 @@ foreach target : target_dirs emulators += {exe['name']: emulator} endif - if stap.found() - foreach stp: [ - {'ext': '.stp-build', 'fmt': 'stap', 'bin': meson.current_build_dir() / exe['name'], 'install': false}, - {'ext': '.stp', 'fmt': 'stap', 'bin': get_option('prefix') / get_option('bindir') / exe['name'], 'install': true}, - {'ext': '-simpletrace.stp', 'fmt': 'simpletrace-stap', 'bin': '', 'install': true}, - {'ext': '-log.stp', 'fmt': 'log-stap', 'bin': '', 'install': true}, - ] - custom_target(exe['name'] + stp['ext'], - input: trace_events_all, - output: exe['name'] + stp['ext'], - install: stp['install'], - install_dir: get_option('datadir') / 
'systemtap/tapset', - command: [ - tracetool, '--group=all', '--format=' + stp['fmt'], - '--binary=' + stp['bin'], - '--target-name=' + target_name, - '--target-type=' + target_type, - '--probe-prefix=qemu.' + target_type + '.' + target_name, - '@INPUT@', '@OUTPUT@' - ], - depend_files: tracetool_depends) - endforeach - endif + traceable += [{ + 'exe': exe['name'], + 'probe-prefix': 'qemu.' + target_type + '.' + target_name, + }] + endforeach endforeach @@ -3976,7 +4024,7 @@ endforeach if get_option('plugins') install_headers('include/qemu/qemu-plugin.h') - if targetos == 'windows' + if host_os == 'windows' # On windows, we want to deliver the qemu_plugin_api.lib file in the qemu installer, # so that plugin authors can compile against it. install_data(win32_qemu_plugin_api_lib, install_dir: 'lib') @@ -4003,6 +4051,14 @@ if have_tools install: true) subdir('storage-daemon') + + foreach exe: [ 'qemu-img', 'qemu-io', 'qemu-nbd', 'qemu-storage-daemon'] + traceable += [{ + 'exe': exe, + 'probe-prefix': 'qemu.' + exe.substring(5).replace('-', '_') + }] + endforeach + subdir('contrib/rdmacm-mux') subdir('contrib/elf2dmp') @@ -4017,7 +4073,7 @@ if have_tools subdir('contrib/vhost-user-scsi') endif - if targetos == 'linux' + if host_os == 'linux' executable('qemu-bridge-helper', files('qemu-bridge-helper.c'), dependencies: [qemuutil, libcap_ng], install: true, @@ -4035,6 +4091,32 @@ if have_tools endif endif +if stap.found() + foreach t: traceable + foreach stp: [ + {'ext': '.stp-build', 'fmt': 'stap', 'bin': meson.current_build_dir() / t['exe'], 'install': false}, + {'ext': '.stp', 'fmt': 'stap', 'bin': get_option('prefix') / get_option('bindir') / t['exe'], 'install': true}, + {'ext': '-simpletrace.stp', 'fmt': 'simpletrace-stap', 'bin': '', 'install': true}, + {'ext': '-log.stp', 'fmt': 'log-stap', 'bin': '', 'install': true}, + ] + cmd = [ + tracetool, '--group=all', '--format=' + stp['fmt'], + '--binary=' + stp['bin'], + '--probe-prefix=' + t['probe-prefix'], + '@INPUT@', '@OUTPUT@' + ] + + custom_target(t['exe'] + stp['ext'], + input: trace_events_all, + output: t['exe'] + stp['ext'], + install: stp['install'], + install_dir: get_option('datadir') / 'systemtap/tapset', + command: cmd, + depend_files: tracetool_depends) + endforeach + endforeach +endif + subdir('scripts') subdir('tools') subdir('pc-bios') @@ -4084,7 +4166,7 @@ summary(summary_info, bool_yn: true, section: 'Build environment') # Directories summary_info += {'Install prefix': get_option('prefix')} summary_info += {'BIOS directory': qemu_datadir} -pathsep = targetos == 'windows' ? ';' : ':' +pathsep = host_os == 'windows' ? 
';' : ':' summary_info += {'firmware path': pathsep.join(get_option('qemu_firmwarepath'))} summary_info += {'binary directory': get_option('prefix') / get_option('bindir')} summary_info += {'library directory': get_option('prefix') / get_option('libdir')} @@ -4092,7 +4174,7 @@ summary_info += {'module directory': qemu_moddir} summary_info += {'libexec directory': get_option('prefix') / get_option('libexecdir')} summary_info += {'include directory': get_option('prefix') / get_option('includedir')} summary_info += {'config directory': get_option('prefix') / get_option('sysconfdir')} -if targetos != 'windows' +if host_os != 'windows' summary_info += {'local state directory': get_option('prefix') / get_option('localstatedir')} summary_info += {'Manual directory': get_option('prefix') / get_option('mandir')} else @@ -4115,7 +4197,7 @@ if config_host.has_key('GDB') endif summary_info += {'iasl': iasl} summary_info += {'genisoimage': config_host['GENISOIMAGE']} -if targetos == 'windows' and have_ga +if host_os == 'windows' and have_ga summary_info += {'wixl': wixl} endif if slirp.found() and have_system @@ -4213,7 +4295,7 @@ if get_option('cfi') endif summary_info += {'strip binaries': get_option('strip')} summary_info += {'sparse': sparse} -summary_info += {'mingw32 support': targetos == 'windows'} +summary_info += {'mingw32 support': host_os == 'windows'} summary(summary_info, bool_yn: true, section: 'Compilation') # snarf the cross-compilation information for tests @@ -4236,18 +4318,18 @@ endif # Targets and accelerators summary_info = {} if have_system - summary_info += {'KVM support': config_all.has_key('CONFIG_KVM')} - summary_info += {'HVF support': config_all.has_key('CONFIG_HVF')} - summary_info += {'WHPX support': config_all.has_key('CONFIG_WHPX')} - summary_info += {'NVMM support': config_all.has_key('CONFIG_NVMM')} + summary_info += {'KVM support': config_all_accel.has_key('CONFIG_KVM')} + summary_info += {'HVF support': config_all_accel.has_key('CONFIG_HVF')} + summary_info += {'WHPX support': config_all_accel.has_key('CONFIG_WHPX')} + summary_info += {'NVMM support': config_all_accel.has_key('CONFIG_NVMM')} summary_info += {'Xen support': xen.found()} if xen.found() summary_info += {'xen ctrl version': xen.version()} endif - summary_info += {'Xen emulation': config_all.has_key('CONFIG_XEN_EMU')} + summary_info += {'Xen emulation': config_all_devices.has_key('CONFIG_XEN_EMU')} endif -summary_info += {'TCG support': config_all.has_key('CONFIG_TCG')} -if config_all.has_key('CONFIG_TCG') +summary_info += {'TCG support': config_all_accel.has_key('CONFIG_TCG')} +if config_all_accel.has_key('CONFIG_TCG') if get_option('tcg_interpreter') summary_info += {'TCG backend': 'TCI (TCG with bytecode interpreter, slow)'} else @@ -4304,6 +4386,7 @@ summary_info += {'nettle': nettle} if nettle.found() summary_info += {' XTS': xts != 'private'} endif +summary_info += {'SM4 ALG support': crypto_sm4} summary_info += {'AF_ALG support': have_afalg} summary_info += {'rng-none': get_option('rng_none')} summary_info += {'Linux keyring': have_keyring} @@ -4312,7 +4395,7 @@ summary(summary_info, bool_yn: true, section: 'Crypto') # UI summary_info = {} -if targetos == 'darwin' +if host_os == 'darwin' summary_info += {'Cocoa support': cocoa} endif summary_info += {'SDL support': sdl} @@ -4334,17 +4417,23 @@ summary_info += {'curses support': curses} summary_info += {'brlapi support': brlapi} summary(summary_info, bool_yn: true, section: 'User interface') +# Graphics backends +summary_info = {} +summary_info += 
{'VirGL support': virgl} +summary_info += {'Rutabaga support': rutabaga} +summary(summary_info, bool_yn: true, section: 'Graphics backends') + # Audio backends summary_info = {} -if targetos not in ['darwin', 'haiku', 'windows'] +if host_os not in ['darwin', 'haiku', 'windows'] summary_info += {'OSS support': oss} summary_info += {'sndio support': sndio} -elif targetos == 'darwin' +elif host_os == 'darwin' summary_info += {'CoreAudio support': coreaudio} -elif targetos == 'windows' +elif host_os == 'windows' summary_info += {'DirectSound support': dsound} endif -if targetos == 'linux' +if host_os == 'linux' summary_info += {'ALSA support': alsa} summary_info += {'PulseAudio support': pulse} endif @@ -4354,7 +4443,7 @@ summary(summary_info, bool_yn: true, section: 'Audio backends') # Network backends summary_info = {} -if targetos == 'darwin' +if host_os == 'darwin' summary_info += {'vmnet.framework support': vmnet} endif summary_info += {'AF_XDP support': libxdp} @@ -4381,8 +4470,6 @@ summary_info = {} summary_info += {'libtasn1': tasn1} summary_info += {'PAM': pam} summary_info += {'iconv support': iconv} -summary_info += {'virgl support': virgl} -summary_info += {'rutabaga support': rutabaga} summary_info += {'blkio support': blkio} summary_info += {'curl support': curl} summary_info += {'Multipath support': mpathpersist} @@ -4403,7 +4490,7 @@ summary_info += {'OpenGL support (epoxy)': opengl} summary_info += {'GBM': gbm} summary_info += {'libiscsi support': libiscsi} summary_info += {'libnfs support': libnfs} -if targetos == 'windows' +if host_os == 'windows' if have_ga summary_info += {'QGA VSS support': have_qga_vss} endif @@ -4427,6 +4514,9 @@ summary_info += {'libudev': libudev} summary_info += {'FUSE lseek': fuse_lseek.found()} summary_info += {'selinux': selinux} summary_info += {'libdw': libdw} +if host_os == 'freebsd' + summary_info += {'libinotify-kqueue': inotify} +endif summary(summary_info, bool_yn: true, section: 'Dependencies') if host_arch == 'unknown' @@ -4445,20 +4535,20 @@ if host_arch == 'unknown' endif endif -if not supported_oses.contains(targetos) +if not supported_oses.contains(host_os) message() warning('UNSUPPORTED HOST OS') message() - message('Support for host OS ' + targetos + 'is not currently maintained.') + message('Support for host OS ' + host_os + 'is not currently maintained.') message('configure has succeeded and you can continue to build, but') message('the QEMU project does not guarantee that QEMU will compile or') message('work on this operating system. You can help by volunteering') message('to maintain it and providing a build host for our continuous') message('integration setup. 
This will ensure that future versions of QEMU') - message('will keep working on ' + targetos + '.') + message('will keep working on ' + host_os + '.') endif -if host_arch == 'unknown' or not supported_oses.contains(targetos) +if host_arch == 'unknown' or not supported_oses.contains(host_os) message() message('If you want to help supporting QEMU on this platform, please') message('contact the developers at qemu-devel@nongnu.org.') @@ -4472,8 +4562,8 @@ if get_option('relocatable') and \ warning('bindir not included within prefix, the installation will not be relocatable.') actually_reloc = false endif -if not actually_reloc and (targetos == 'windows' or get_option('relocatable')) - if targetos == 'windows' +if not actually_reloc and (host_os == 'windows' or get_option('relocatable')) + if host_os == 'windows' message() warning('Windows installs should usually be relocatable.') endif diff --git a/meson_options.txt b/meson_options.txt index 7a32ab5b304..1b84ca744c7 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -101,7 +101,7 @@ option('cfi_debug', type: 'boolean', value: false, description: 'Verbose errors in case of CFI violation') option('multiprocess', type: 'feature', value: 'auto', description: 'Out of process device emulation support') -option('relocatable', type : 'boolean', value : 'true', +option('relocatable', type : 'boolean', value : true, description: 'toggle relocatable install') option('vfio_user_server', type: 'feature', value: 'disabled', description: 'vfio-user server support') diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c index 24347ab0f75..2708abf3d76 100644 --- a/migration/block-dirty-bitmap.c +++ b/migration/block-dirty-bitmap.c @@ -464,7 +464,7 @@ static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s, g_free(buf); } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static void dirty_bitmap_do_save_cleanup(DBMSaveState *s) { SaveBitmapState *dbms; @@ -479,7 +479,7 @@ static void dirty_bitmap_do_save_cleanup(DBMSaveState *s) } } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs, const char *bs_name, GHashTable *alias_map) { @@ -598,7 +598,7 @@ static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs, return 0; } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static int init_dirty_bitmap_migration(DBMSaveState *s) { BlockDriverState *bs; @@ -607,7 +607,7 @@ static int init_dirty_bitmap_migration(DBMSaveState *s) BlockBackend *blk; GHashTable *alias_map = NULL; - /* Runs in the migration thread, but holds the iothread lock */ + /* Runs in the migration thread, but holds the BQL */ GLOBAL_STATE_CODE(); GRAPH_RDLOCK_GUARD_MAINLOOP(); @@ -742,7 +742,7 @@ static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque) return s->bulk_completed; } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. 
*/ static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque) { @@ -774,7 +774,7 @@ static void dirty_bitmap_state_pending(void *opaque, SaveBitmapState *dbms; uint64_t pending = 0; - qemu_mutex_lock_iothread(); + bql_lock(); QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) { uint64_t gran = bdrv_dirty_bitmap_granularity(dbms->bitmap); @@ -784,7 +784,7 @@ static void dirty_bitmap_state_pending(void *opaque, pending += DIV_ROUND_UP(sectors * BDRV_SECTOR_SIZE, gran); } - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_dirty_bitmap_state_pending(pending); diff --git a/migration/block.c b/migration/block.c index a15f9bddcb9..2b9054889ad 100644 --- a/migration/block.c +++ b/migration/block.c @@ -66,7 +66,7 @@ typedef struct BlkMigDevState { /* Protected by block migration lock. */ int64_t completed_sectors; - /* During migration this is protected by iothread lock / AioContext. + /* During migration this is protected by bdrv_dirty_bitmap_lock(). * Allocation and free happen during setup and cleanup respectively. */ BdrvDirtyBitmap *dirty_bitmap; @@ -101,7 +101,7 @@ typedef struct BlkMigState { int prev_progress; int bulk_completed; - /* Lock must be taken _inside_ the iothread lock and any AioContexts. */ + /* Lock must be taken _inside_ the BQL. */ QemuMutex lock; } BlkMigState; @@ -117,7 +117,7 @@ static void blk_mig_unlock(void) qemu_mutex_unlock(&block_mig_state.lock); } -/* Must run outside of the iothread lock during the bulk phase, +/* Must run outside of the BQL during the bulk phase, * or the VM will stall. */ @@ -269,8 +269,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds) int64_t count; if (bmds->shared_base) { - qemu_mutex_lock_iothread(); - aio_context_acquire(blk_get_aio_context(bb)); + bql_lock(); /* Skip unallocated sectors; intentionally treats failure or * partial sector as an allocated sector */ while (cur_sector < total_sectors && @@ -281,8 +280,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds) } cur_sector += count >> BDRV_SECTOR_BITS; } - aio_context_release(blk_get_aio_context(bb)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } if (cur_sector >= total_sectors) { @@ -313,28 +311,23 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds) block_mig_state.submitted++; blk_mig_unlock(); - /* We do not know if bs is under the main thread (and thus does - * not acquire the AioContext when doing AIO) or rather under - * dataplane. Thus acquire both the iothread mutex and the - * AioContext. - * - * This is ugly and will disappear when we make bdrv_* thread-safe, - * without the need to acquire the AioContext. + /* + * The migration thread does not have an AioContext. Lock the BQL so that + * I/O runs in the main loop AioContext (see + * qemu_get_current_aio_context()). */ - qemu_mutex_lock_iothread(); - aio_context_acquire(blk_get_aio_context(bmds->blk)); + bql_lock(); bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE, nr_sectors * BDRV_SECTOR_SIZE); blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov, 0, blk_mig_read_cb, blk); - aio_context_release(blk_get_aio_context(bmds->blk)); - qemu_mutex_unlock_iothread(); + bql_unlock(); bmds->cur_sector = cur_sector + nr_sectors; return (bmds->cur_sector >= total_sectors); } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static int set_dirty_tracking(void) { @@ -361,7 +354,7 @@ static int set_dirty_tracking(void) return ret; } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. 
*/ static void unset_dirty_tracking(void) { @@ -409,7 +402,10 @@ static int init_blk_migration(QEMUFile *f) } sectors = bdrv_nb_sectors(bs); - if (sectors <= 0) { + if (sectors == 0) { + continue; + } + if (sectors < 0) { ret = sectors; bdrv_next_cleanup(&it); goto out; @@ -512,7 +508,7 @@ static void blk_mig_reset_dirty_cursor(void) } } -/* Called with iothread lock and AioContext taken. */ +/* Called with the BQL taken. */ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds, int is_async) @@ -594,7 +590,7 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds, return ret; } -/* Called with iothread lock taken. +/* Called with the BQL taken. * * return value: * 0: too much data for max_downtime @@ -606,9 +602,7 @@ static int blk_mig_save_dirty_block(QEMUFile *f, int is_async) int ret = 1; QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { - aio_context_acquire(blk_get_aio_context(bmds->blk)); ret = mig_save_device_dirty(f, bmds, is_async); - aio_context_release(blk_get_aio_context(bmds->blk)); if (ret <= 0) { break; } @@ -658,7 +652,7 @@ static int flush_blks(QEMUFile *f) return ret; } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static int64_t get_remaining_dirty(void) { @@ -666,9 +660,9 @@ static int64_t get_remaining_dirty(void) int64_t dirty = 0; QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { - aio_context_acquire(blk_get_aio_context(bmds->blk)); + bdrv_dirty_bitmap_lock(bmds->dirty_bitmap); dirty += bdrv_get_dirty_count(bmds->dirty_bitmap); - aio_context_release(blk_get_aio_context(bmds->blk)); + bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap); } return dirty; @@ -676,12 +670,11 @@ static int64_t get_remaining_dirty(void) -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static void block_migration_cleanup_bmds(void) { BlkMigDevState *bmds; BlockDriverState *bs; - AioContext *ctx; unset_dirty_tracking(); @@ -693,20 +686,14 @@ static void block_migration_cleanup_bmds(void) bdrv_op_unblock_all(bs, bmds->blocker); } error_free(bmds->blocker); - - /* Save ctx, because bmds->blk can disappear during blk_unref. */ - ctx = blk_get_aio_context(bmds->blk); - aio_context_acquire(ctx); blk_unref(bmds->blk); - aio_context_release(ctx); - g_free(bmds->blk_name); g_free(bmds->aio_bitmap); g_free(bmds); } } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static void block_migration_cleanup(void *opaque) { BlkMigBlock *blk; @@ -783,12 +770,12 @@ static int block_save_iterate(QEMUFile *f, void *opaque) } ret = 0; } else { - /* Always called with iothread lock taken for + /* Always called with the BQL taken for * simplicity, block_save_complete also calls it. */ - qemu_mutex_lock_iothread(); + bql_lock(); ret = blk_mig_save_dirty_block(f, 1); - qemu_mutex_unlock_iothread(); + bql_unlock(); } if (ret < 0) { return ret; @@ -811,7 +798,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque) return (delta_bytes > 0); } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. 
*/ static int block_save_complete(QEMUFile *f, void *opaque) { @@ -860,9 +847,9 @@ static void block_state_pending(void *opaque, uint64_t *must_precopy, /* Estimate pending number of bytes to send */ uint64_t pending; - qemu_mutex_lock_iothread(); + bql_lock(); pending = get_remaining_dirty(); - qemu_mutex_unlock_iothread(); + bql_unlock(); blk_mig_lock(); pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE + diff --git a/migration/channel.c b/migration/channel.c index ca3319a3098..f9de064f3b1 100644 --- a/migration/channel.c +++ b/migration/channel.c @@ -117,9 +117,12 @@ int migration_channel_read_peek(QIOChannel *ioc, len = qio_channel_readv_full(ioc, &iov, 1, NULL, NULL, QIO_CHANNEL_READ_FLAG_MSG_PEEK, errp); - if (len <= 0 && len != QIO_CHANNEL_ERR_BLOCK) { - error_setg(errp, - "Failed to peek at channel"); + if (len < 0 && len != QIO_CHANNEL_ERR_BLOCK) { + return -1; + } + + if (len == 0) { + error_setg(errp, "Failed to peek at channel"); return -1; } diff --git a/migration/colo.c b/migration/colo.c index 4447e349149..84632a603ec 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -63,9 +63,9 @@ static bool colo_runstate_is_stopped(void) return runstate_check(RUN_STATE_COLO) || !runstate_is_running(); } -static void colo_checkpoint_notify(void *opaque) +static void colo_checkpoint_notify(void) { - MigrationState *s = opaque; + MigrationState *s = migrate_get_current(); int64_t next_notify_time; qemu_event_set(&s->colo_checkpoint_event); @@ -74,10 +74,15 @@ static void colo_checkpoint_notify(void *opaque) timer_mod(s->colo_delay_timer, next_notify_time); } +static void colo_checkpoint_notify_timer(void *opaque) +{ + colo_checkpoint_notify(); +} + void colo_checkpoint_delay_set(void) { if (migration_in_colo_state()) { - colo_checkpoint_notify(migrate_get_current()); + colo_checkpoint_notify(); } } @@ -162,7 +167,7 @@ static void primary_vm_do_failover(void) * kick COLO thread which might wait at * qemu_sem_wait(&s->colo_checkpoint_sem). 
*/ - colo_checkpoint_notify(s); + colo_checkpoint_notify(); /* * Wake up COLO thread which may blocked in recv() or send(), @@ -420,13 +425,13 @@ static int colo_do_checkpoint_transaction(MigrationState *s, qio_channel_io_seek(QIO_CHANNEL(bioc), 0, 0, NULL); bioc->usage = 0; - qemu_mutex_lock_iothread(); + bql_lock(); if (failover_get_state() != FAILOVER_STATUS_NONE) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } vm_stop_force_state(RUN_STATE_COLO); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("run", "stop"); /* * Failover request bh could be called after vm_stop_force_state(), @@ -435,23 +440,23 @@ static int colo_do_checkpoint_transaction(MigrationState *s, if (failover_get_state() != FAILOVER_STATUS_NONE) { goto out; } - qemu_mutex_lock_iothread(); + bql_lock(); replication_do_checkpoint_all(&local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } /* Note: device state is saved into buffer */ ret = qemu_save_device_state(fb); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (ret < 0) { goto out; } @@ -504,9 +509,9 @@ static int colo_do_checkpoint_transaction(MigrationState *s, ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); out: @@ -518,7 +523,7 @@ static int colo_do_checkpoint_transaction(MigrationState *s, static void colo_compare_notify_checkpoint(Notifier *notifier, void *data) { - colo_checkpoint_notify(data); + colo_checkpoint_notify(); } static void colo_process_checkpoint(MigrationState *s) @@ -557,15 +562,15 @@ static void colo_process_checkpoint(MigrationState *s) fb = qemu_file_new_output(QIO_CHANNEL(bioc)); object_unref(OBJECT(bioc)); - qemu_mutex_lock_iothread(); + bql_lock(); replication_start_all(REPLICATION_MODE_PRIMARY, &local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); timer_mod(s->colo_delay_timer, qemu_clock_get_ms(QEMU_CLOCK_HOST) + @@ -639,14 +644,14 @@ static void colo_process_checkpoint(MigrationState *s) void migrate_start_colo_process(MigrationState *s) { - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_event_init(&s->colo_checkpoint_event, false); s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST, - colo_checkpoint_notify, s); + colo_checkpoint_notify_timer, NULL); qemu_sem_init(&s->colo_exit_sem, 0); colo_process_checkpoint(s); - qemu_mutex_lock_iothread(); + bql_lock(); } static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, @@ -657,9 +662,9 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, Error *local_err = NULL; int ret; - qemu_mutex_lock_iothread(); + bql_lock(); vm_stop_force_state(RUN_STATE_COLO); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("run", "stop"); /* FIXME: This is unnecessary for periodic checkpoint mode */ @@ -677,10 +682,10 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, return; } - qemu_mutex_lock_iothread(); + bql_lock(); cpu_synchronize_all_states(); ret = qemu_loadvm_state_main(mis->from_src_file, mis); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (ret < 0) { error_setg(errp, "Load VM's live state (ram) error"); @@ -719,14 +724,14 @@ static void 
colo_incoming_process_checkpoint(MigrationIncomingState *mis, return; } - qemu_mutex_lock_iothread(); + bql_lock(); vmstate_loading = true; colo_flush_ram_cache(); ret = qemu_load_device_state(fb); if (ret < 0) { error_setg(errp, "COLO: load device state failed"); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } @@ -734,7 +739,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, if (local_err) { error_propagate(errp, local_err); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } @@ -743,7 +748,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, if (local_err) { error_propagate(errp, local_err); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } /* Notify all filters of all NIC to do checkpoint */ @@ -752,13 +757,13 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, if (local_err) { error_propagate(errp, local_err); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } vmstate_loading = false; vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) { @@ -851,14 +856,14 @@ static void *colo_process_incoming_thread(void *opaque) fb = qemu_file_new_input(QIO_CHANNEL(bioc)); object_unref(OBJECT(bioc)); - qemu_mutex_lock_iothread(); + bql_lock(); replication_start_all(REPLICATION_MODE_SECONDARY, &local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY, @@ -920,7 +925,7 @@ int coroutine_fn colo_incoming_co(void) Error *local_err = NULL; QemuThread th; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (!migration_incoming_colo_enabled()) { return 0; @@ -940,12 +945,12 @@ int coroutine_fn colo_incoming_co(void) qemu_coroutine_yield(); mis->colo_incoming_co = NULL; - qemu_mutex_unlock_iothread(); + bql_unlock(); /* Wait checkpoint incoming thread exit before free resource */ qemu_thread_join(&th); - qemu_mutex_lock_iothread(); + bql_lock(); - /* We hold the global iothread lock, so it is safe here */ + /* We hold the global BQL, so it is safe here */ colo_release_ram_cache(); return 0; diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c index 036ac017fc9..1d2e85746fb 100644 --- a/migration/dirtyrate.c +++ b/migration/dirtyrate.c @@ -90,13 +90,13 @@ static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages, void global_dirty_log_change(unsigned int flag, bool start) { - qemu_mutex_lock_iothread(); + bql_lock(); if (start) { memory_global_dirty_log_start(flag); } else { memory_global_dirty_log_stop(flag); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* @@ -106,12 +106,12 @@ void global_dirty_log_change(unsigned int flag, bool start) */ static void global_dirty_log_sync(unsigned int flag, bool one_shot) { - qemu_mutex_lock_iothread(); + bql_lock(); memory_global_dirty_log_sync(false); if (one_shot) { memory_global_dirty_log_stop(flag); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat) @@ -129,8 +129,7 @@ static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat) return g_new0(DirtyPageRecord, nvcpu); } -static void vcpu_dirty_stat_collect(VcpuStat *stat, - DirtyPageRecord *records, +static void 
vcpu_dirty_stat_collect(DirtyPageRecord *records, bool start) { CPUState *cpu; @@ -158,7 +157,7 @@ int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms, WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) { gen_id = cpu_list_generation_id_get(); records = vcpu_dirty_stat_alloc(stat); - vcpu_dirty_stat_collect(stat, records, true); + vcpu_dirty_stat_collect(records, true); } duration = dirty_stat_wait(calc_time_ms, init_time_ms); @@ -172,7 +171,7 @@ int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms, cpu_list_unlock(); goto retry; } - vcpu_dirty_stat_collect(stat, records, false); + vcpu_dirty_stat_collect(records, false); } for (i = 0; i < stat->nvcpu; i++) { @@ -610,7 +609,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config) int64_t start_time; DirtyPageRecord dirty_pages; - qemu_mutex_lock_iothread(); + bql_lock(); memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE); /* @@ -627,7 +626,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config) * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled. */ dirtyrate_manual_reset_protect(); - qemu_mutex_unlock_iothread(); + bql_unlock(); record_dirtypages_bitmap(&dirty_pages, true); diff --git a/migration/exec.c b/migration/exec.c index 47d2f3b8fb0..20e6cccf8c0 100644 --- a/migration/exec.c +++ b/migration/exec.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "qapi/type-helpers.h" #include "qemu/error-report.h" #include "channel.h" #include "exec.h" @@ -39,51 +40,16 @@ const char *exec_get_cmd_path(void) } #endif -/* provides the length of strList */ -static int -str_list_length(strList *list) -{ - int len = 0; - strList *elem; - - for (elem = list; elem != NULL; elem = elem->next) { - len++; - } - - return len; -} - -static void -init_exec_array(strList *command, char **argv, Error **errp) -{ - int i = 0; - strList *lst; - - for (lst = command; lst; lst = lst->next) { - argv[i++] = lst->value; - } - - argv[i] = NULL; - return; -} - void exec_start_outgoing_migration(MigrationState *s, strList *command, Error **errp) { - QIOChannel *ioc; - - int length = str_list_length(command); - g_auto(GStrv) argv = (char **) g_new0(const char *, length + 1); - - init_exec_array(command, argv, errp); + QIOChannel *ioc = NULL; + g_auto(GStrv) argv = strv_from_str_list(command); + const char * const *args = (const char * const *) argv; g_autofree char *new_command = g_strjoinv(" ", (char **)argv); trace_migration_exec_outgoing(new_command); - ioc = QIO_CHANNEL( - qio_channel_command_new_spawn( - (const char * const *) g_steal_pointer(&argv), - O_RDWR, - errp)); + ioc = QIO_CHANNEL(qio_channel_command_new_spawn(args, O_RDWR, errp)); if (!ioc) { return; } @@ -105,19 +71,12 @@ static gboolean exec_accept_incoming_migration(QIOChannel *ioc, void exec_start_incoming_migration(strList *command, Error **errp) { QIOChannel *ioc; - - int length = str_list_length(command); - g_auto(GStrv) argv = (char **) g_new0(const char *, length + 1); - - init_exec_array(command, argv, errp); + g_auto(GStrv) argv = strv_from_str_list(command); + const char * const *args = (const char * const *) argv; g_autofree char *new_command = g_strjoinv(" ", (char **)argv); trace_migration_exec_incoming(new_command); - ioc = QIO_CHANNEL( - qio_channel_command_new_spawn( - (const char * const *) g_steal_pointer(&argv), - O_RDWR, - errp)); + ioc = QIO_CHANNEL(qio_channel_command_new_spawn(args, O_RDWR, errp)); if (!ioc) { return; } diff --git a/migration/fd.c b/migration/fd.c index 0eb677dcae2..449adaa2dee 100644 --- a/migration/fd.c +++ 
b/migration/fd.c @@ -17,6 +17,7 @@ #include "qemu/osdep.h" #include "channel.h" #include "fd.h" +#include "file.h" #include "migration.h" #include "monitor/monitor.h" #include "io/channel-util.h" diff --git a/migration/file.c b/migration/file.c index 5d4975f43e1..ab18ba505a1 100644 --- a/migration/file.c +++ b/migration/file.c @@ -6,17 +6,25 @@ */ #include "qemu/osdep.h" +#include "exec/ramblock.h" #include "qemu/cutils.h" +#include "qemu/error-report.h" #include "qapi/error.h" #include "channel.h" #include "file.h" #include "migration.h" #include "io/channel-file.h" +#include "io/channel-socket.h" #include "io/channel-util.h" +#include "options.h" #include "trace.h" #define OFFSET_OPTION ",offset=" +static struct FileOutgoingArgs { + char *fname; +} outgoing_args; + /* Remove the offset option from @filespec and return it in @offsetp. */ int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp) @@ -36,6 +44,36 @@ int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp) return 0; } +void file_cleanup_outgoing_migration(void) +{ + g_free(outgoing_args.fname); + outgoing_args.fname = NULL; +} + +bool file_send_channel_create(gpointer opaque, Error **errp) +{ + QIOChannelFile *ioc; + int flags = O_WRONLY; + bool ret = true; + + ioc = qio_channel_file_new_path(outgoing_args.fname, flags, 0, errp); + if (!ioc) { + ret = false; + goto out; + } + + multifd_channel_connect(opaque, QIO_CHANNEL(ioc)); + +out: + /* + * File channel creation is synchronous. However posting this + * semaphore here is simpler than adding a special case. + */ + multifd_send_channel_created(); + + return ret; +} + void file_start_outgoing_migration(MigrationState *s, FileMigrationArgs *file_args, Error **errp) { @@ -52,6 +90,8 @@ void file_start_outgoing_migration(MigrationState *s, return; } + outgoing_args.fname = g_strdup(filename); + ioc = QIO_CHANNEL(fioc); if (offset && qio_channel_io_seek(ioc, offset, SEEK_SET, errp) < 0) { return; @@ -69,12 +109,46 @@ static gboolean file_accept_incoming_migration(QIOChannel *ioc, return G_SOURCE_REMOVE; } +void file_create_incoming_channels(QIOChannel *ioc, Error **errp) +{ + int i, fd, channels = 1; + g_autofree QIOChannel **iocs = NULL; + + if (migrate_multifd()) { + channels += migrate_multifd_channels(); + } + + iocs = g_new0(QIOChannel *, channels); + fd = QIO_CHANNEL_FILE(ioc)->fd; + iocs[0] = ioc; + + for (i = 1; i < channels; i++) { + QIOChannelFile *fioc = qio_channel_file_new_dupfd(fd, errp); + + if (!fioc) { + while (i) { + object_unref(iocs[--i]); + } + return; + } + + iocs[i] = QIO_CHANNEL(fioc); + } + + for (i = 0; i < channels; i++) { + qio_channel_set_name(iocs[i], "migration-file-incoming"); + qio_channel_add_watch_full(iocs[i], G_IO_IN, + file_accept_incoming_migration, + NULL, NULL, + g_main_context_get_thread_default()); + } +} + void file_start_incoming_migration(FileMigrationArgs *file_args, Error **errp) { g_autofree char *filename = g_strdup(file_args->filename); QIOChannelFile *fioc = NULL; uint64_t offset = file_args->offset; - QIOChannel *ioc; trace_migration_file_incoming(filename); @@ -83,13 +157,82 @@ void file_start_incoming_migration(FileMigrationArgs *file_args, Error **errp) return; } - ioc = QIO_CHANNEL(fioc); - if (offset && qio_channel_io_seek(ioc, offset, SEEK_SET, errp) < 0) { + if (offset && + qio_channel_io_seek(QIO_CHANNEL(fioc), offset, SEEK_SET, errp) < 0) { + object_unref(OBJECT(fioc)); return; } - qio_channel_set_name(QIO_CHANNEL(ioc), "migration-file-incoming"); - qio_channel_add_watch_full(ioc, G_IO_IN, - 
file_accept_incoming_migration, - NULL, NULL, - g_main_context_get_thread_default()); + + file_create_incoming_channels(QIO_CHANNEL(fioc), errp); +} + +int file_write_ramblock_iov(QIOChannel *ioc, const struct iovec *iov, + int niov, RAMBlock *block, Error **errp) +{ + ssize_t ret = 0; + int i, slice_idx, slice_num; + uintptr_t base, next, offset; + size_t len; + + slice_idx = 0; + slice_num = 1; + + /* + * If the iov array doesn't have contiguous elements, we need to + * split it in slices because we only have one file offset for the + * whole iov. Do this here so callers don't need to break the iov + * array themselves. + */ + for (i = 0; i < niov; i++, slice_num++) { + base = (uintptr_t) iov[i].iov_base; + + if (i != niov - 1) { + len = iov[i].iov_len; + next = (uintptr_t) iov[i + 1].iov_base; + + if (base + len == next) { + continue; + } + } + + /* + * Use the offset of the first element of the segment that + * we're sending. + */ + offset = (uintptr_t) iov[slice_idx].iov_base - (uintptr_t) block->host; + if (offset >= block->used_length) { + error_setg(errp, "offset %" PRIxPTR + "outside of ramblock %s range", offset, block->idstr); + ret = -1; + break; + } + + ret = qio_channel_pwritev(ioc, &iov[slice_idx], slice_num, + block->pages_offset + offset, errp); + if (ret < 0) { + break; + } + + slice_idx += slice_num; + slice_num = 0; + } + + return (ret < 0) ? ret : 0; +} + +int multifd_file_recv_data(MultiFDRecvParams *p, Error **errp) +{ + MultiFDRecvData *data = p->data; + size_t ret; + + ret = qio_channel_pread(p->c, (char *) data->opaque, + data->size, data->file_offset, errp); + if (ret != data->size) { + error_prepend(errp, + "multifd recv (%u): read 0x%zx, expected 0x%zx", + p->id, ret, data->size); + return -1; + } + + return 0; } diff --git a/migration/file.h b/migration/file.h index 37d6a08bfca..7699c04677e 100644 --- a/migration/file.h +++ b/migration/file.h @@ -9,10 +9,19 @@ #define QEMU_MIGRATION_FILE_H #include "qapi/qapi-types-migration.h" +#include "io/task.h" +#include "channel.h" +#include "multifd.h" void file_start_incoming_migration(FileMigrationArgs *file_args, Error **errp); void file_start_outgoing_migration(MigrationState *s, FileMigrationArgs *file_args, Error **errp); int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp); +void file_cleanup_outgoing_migration(void); +bool file_send_channel_create(gpointer opaque, Error **errp); +void file_create_incoming_channels(QIOChannel *ioc, Error **errp); +int file_write_ramblock_iov(QIOChannel *ioc, const struct iovec *iov, + int niov, RAMBlock *block, Error **errp); +int multifd_file_recv_data(MultiFDRecvParams *p, Error **errp); #endif diff --git a/migration/global_state.c b/migration/global_state.c index 4e2a9d8ec0a..3a9796cae28 100644 --- a/migration/global_state.c +++ b/migration/global_state.c @@ -22,7 +22,16 @@ typedef struct { uint32_t size; - uint8_t runstate[100]; + + /* + * runstate was 100 bytes, zero padded, but we trimmed it to add a + * few fields and maintain backwards compatibility. 
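+     * The split keeps the overall field size at 100 bytes (32 + 1 + 1 + 66),
+     * so the amount of data carried on the wire is unchanged.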
+     */
+    uint8_t runstate[32];
+    uint8_t has_vm_was_suspended;
+    uint8_t vm_was_suspended;
+    uint8_t unused[66];
+
     RunState state;
     bool received;
 } GlobalState;
 
@@ -35,6 +44,10 @@ static void global_state_do_store(RunState state)
     assert(strlen(state_str) < sizeof(global_state.runstate));
     strpadcpy((char *)global_state.runstate, sizeof(global_state.runstate),
               state_str, '\0');
+    global_state.has_vm_was_suspended = true;
+    global_state.vm_was_suspended = vm_get_suspended();
+
+    memset(global_state.unused, 0, sizeof(global_state.unused));
 }
 
 void global_state_store(void)
@@ -59,24 +72,7 @@ RunState global_state_get_runstate(void)
 
 static bool global_state_needed(void *opaque)
 {
-    GlobalState *s = opaque;
-    char *runstate = (char *)s->runstate;
-
-    /* If it is not optional, it is mandatory */
-
-    if (migrate_get_current()->store_global_state) {
-        return true;
-    }
-
-    /* If state is running or paused, it is not needed */
-
-    if (strcmp(runstate, "running") == 0 ||
-        strcmp(runstate, "paused") == 0) {
-        return false;
-    }
-
-    /* for any other state it is needed */
-    return true;
+    return migrate_get_current()->store_global_state;
 }
 
 static int global_state_post_load(void *opaque, int version_id)
@@ -93,7 +89,7 @@ static int global_state_post_load(void *opaque, int version_id)
                 sizeof(s->runstate)) == sizeof(s->runstate)) {
         /*
          * This condition should never happen during migration, because
-         * all runstate names are shorter than 100 bytes (the size of
+         * all runstate names are shorter than 32 bytes (the size of
          * s->runstate). However, a malicious stream could overflow
          * the qapi_enum_parse() call, so we force the last character
          * to a NUL byte.
@@ -110,6 +106,14 @@ static int global_state_post_load(void *opaque, int version_id)
     }
     s->state = r;
 
+    /*
+     * global_state is saved on the outgoing side before forcing a stopped
+     * state, so it may have saved state=suspended and vm_was_suspended=0.
+     * Now we are in a paused state, and when we later call vm_start, it must
+     * restore the suspended state, so we must set vm_was_suspended=1 here.
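+     * Streams from QEMU versions that never sent vm_was_suspended load the
+     * field as zero, so for them the runstate check alone decides.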
+ */ + vm_set_suspended(s->vm_was_suspended || r == RUN_STATE_SUSPENDED); + return 0; } @@ -131,9 +135,12 @@ static const VMStateDescription vmstate_globalstate = { .post_load = global_state_post_load, .pre_save = global_state_pre_save, .needed = global_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(size, GlobalState), VMSTATE_BUFFER(runstate, GlobalState), + VMSTATE_UINT8(has_vm_was_suspended, GlobalState), + VMSTATE_UINT8(vm_was_suspended, GlobalState), + VMSTATE_BUFFER(unused, GlobalState), VMSTATE_END_OF_LIST() }, }; diff --git a/migration/meson.build b/migration/meson.build index 92b1cc42978..1eeb915ff63 100644 --- a/migration/meson.build +++ b/migration/meson.build @@ -22,6 +22,7 @@ system_ss.add(files( 'migration.c', 'multifd.c', 'multifd-zlib.c', + 'multifd-zero-page.c', 'ram-compress.c', 'options.c', 'postcopy-ram.c', diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c index 2faa5cad46c..7e96ae6ffda 100644 --- a/migration/migration-hmp-cmds.c +++ b/migration/migration-hmp-cmds.c @@ -344,6 +344,11 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) monitor_printf(mon, "%s: %s\n", MigrationParameter_str(MIGRATION_PARAMETER_MULTIFD_COMPRESSION), MultiFDCompression_str(params->multifd_compression)); + assert(params->has_zero_page_detection); + monitor_printf(mon, "%s: %s\n", + MigrationParameter_str(MIGRATION_PARAMETER_ZERO_PAGE_DETECTION), + qapi_enum_lookup(&ZeroPageDetection_lookup, + params->zero_page_detection)); monitor_printf(mon, "%s: %" PRIu64 " bytes\n", MigrationParameter_str(MIGRATION_PARAMETER_XBZRLE_CACHE_SIZE), params->xbzrle_cache_size); @@ -399,15 +404,17 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) void hmp_loadvm(Monitor *mon, const QDict *qdict) { - int saved_vm_running = runstate_is_running(); + RunState saved_state = runstate_get(); + const char *name = qdict_get_str(qdict, "name"); Error *err = NULL; vm_stop(RUN_STATE_RESTORE_VM); - if (load_snapshot(name, NULL, false, NULL, &err) && saved_vm_running) { - vm_start(); + if (load_snapshot(name, NULL, false, NULL, &err)) { + load_snapshot_resume(saved_state); } + hmp_handle_error(mon, err); } @@ -632,6 +639,10 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) p->has_multifd_zstd_level = true; visit_type_uint8(v, param, &p->multifd_zstd_level, &err); break; + case MIGRATION_PARAMETER_ZERO_PAGE_DETECTION: + p->has_zero_page_detection = true; + visit_type_ZeroPageDetection(v, param, &p->zero_page_detection, &err); + break; case MIGRATION_PARAMETER_XBZRLE_CACHE_SIZE: p->has_xbzrle_cache_size = true; if (!visit_type_size(v, param, &cache_size, &err)) { @@ -850,14 +861,11 @@ static void vm_completion(ReadLineState *rs, const char *str) for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { SnapshotInfoList *snapshots, *snapshot; - AioContext *ctx = bdrv_get_aio_context(bs); bool ok = false; - aio_context_acquire(ctx); if (bdrv_can_snapshot(bs)) { ok = bdrv_query_snapshot_info_list(bs, &snapshots, NULL) == 0; } - aio_context_release(ctx); if (!ok) { continue; } diff --git a/migration/migration.c b/migration/migration.c index 982ab85f04b..86bf76e9258 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -67,9 +67,15 @@ #include "options.h" #include "sysemu/dirtylimit.h" #include "qemu/sockets.h" +#include "sysemu/kvm.h" -static NotifierList migration_state_notifiers = - NOTIFIER_LIST_INITIALIZER(migration_state_notifiers); +#define NOTIFIER_ELEM_INIT(array, elem) \ + [elem] = 
NOTIFIER_WITH_RETURN_LIST_INITIALIZER((array)[elem]) + +static NotifierWithReturnList migration_state_notifiers[] = { + NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_NORMAL), + NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_REBOOT), +}; /* Messages sent on the return path from destination to source */ enum mig_rp_message_type { @@ -101,6 +107,7 @@ static int migration_maybe_pause(MigrationState *s, int new_state); static void migrate_fd_cancel(MigrationState *s); static bool close_return_path_on_source(MigrationState *s); +static void migration_completion_end(MigrationState *s); static void migration_downtime_start(MigrationState *s) { @@ -133,9 +140,25 @@ static bool transport_supports_multi_channels(MigrationAddress *addr) if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { SocketAddress *saddr = &addr->u.socket; - return saddr->type == SOCKET_ADDRESS_TYPE_INET || - saddr->type == SOCKET_ADDRESS_TYPE_UNIX || - saddr->type == SOCKET_ADDRESS_TYPE_VSOCK; + return (saddr->type == SOCKET_ADDRESS_TYPE_INET || + saddr->type == SOCKET_ADDRESS_TYPE_UNIX || + saddr->type == SOCKET_ADDRESS_TYPE_VSOCK); + } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) { + return migrate_mapped_ram(); + } else { + return false; + } +} + +static bool migration_needs_seekable_channel(void) +{ + return migrate_mapped_ram(); +} + +static bool transport_supports_seeking(MigrationAddress *addr) +{ + if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) { + return true; } return false; @@ -145,6 +168,12 @@ static bool migration_channels_and_transport_compatible(MigrationAddress *addr, Error **errp) { + if (migration_needs_seekable_channel() && + !transport_supports_seeking(addr)) { + error_setg(errp, "Migration requires seekable transport (e.g. file)"); + return false; + } + if (migration_needs_multiple_sockets() && !transport_supports_multi_channels(addr)) { error_setg(errp, "Migration requires multi-channel URIs (e.g. 
tcp)"); @@ -161,11 +190,19 @@ static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) return (a > b) - (a < b); } -int migration_stop_vm(RunState state) +static int migration_stop_vm(MigrationState *s, RunState state) { - int ret = vm_stop_force_state(state); + int ret; + + migration_downtime_start(s); + + s->vm_old_state = runstate_get(); + global_state_store(); + + ret = vm_stop_force_state(state); trace_vmstate_downtime_checkpoint("src-vm-stopped"); + trace_migration_completion_vm_stop(ret); return ret; } @@ -204,6 +241,47 @@ void migration_object_init(void) dirty_bitmap_mig_init(); } +typedef struct { + QEMUBH *bh; + QEMUBHFunc *cb; + void *opaque; +} MigrationBH; + +static void migration_bh_dispatch_bh(void *opaque) +{ + MigrationState *s = migrate_get_current(); + MigrationBH *migbh = opaque; + + /* cleanup this BH */ + qemu_bh_delete(migbh->bh); + migbh->bh = NULL; + + /* dispatch the other one */ + migbh->cb(migbh->opaque); + object_unref(OBJECT(s)); + + g_free(migbh); +} + +void migration_bh_schedule(QEMUBHFunc *cb, void *opaque) +{ + MigrationState *s = migrate_get_current(); + MigrationBH *migbh = g_new0(MigrationBH, 1); + QEMUBH *bh = qemu_bh_new(migration_bh_dispatch_bh, migbh); + + /* Store these to dispatch when the BH runs */ + migbh->bh = bh; + migbh->cb = cb; + migbh->opaque = opaque; + + /* + * Ref the state for bh, because it may be called when + * there're already no other refs + */ + object_ref(OBJECT(s)); + qemu_bh_schedule(bh); +} + void migration_cancel(const Error *error) { if (error) { @@ -275,7 +353,7 @@ void migration_incoming_state_destroy(void) { struct MigrationIncomingState *mis = migration_incoming_get_current(); - multifd_load_cleanup(); + multifd_recv_cleanup(); compress_threads_load_cleanup(); if (mis->to_src_file) { @@ -528,28 +606,26 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels, /* * Having preliminary checks for uri and channel */ - if (uri && has_channels) { - error_setg(errp, "'uri' and 'channels' arguments are mutually " - "exclusive; exactly one of the two should be present in " - "'migrate-incoming' qmp command "); + if (!uri == !channels) { + error_setg(errp, "need either 'uri' or 'channels' argument"); return; - } else if (channels) { + } + + if (channels) { /* To verify that Migrate channel list has only item */ if (channels->next) { error_setg(errp, "Channel list has more than one entries"); return; } addr = channels->value->addr; - } else if (uri) { + } + + if (uri) { /* caller uses the old URI syntax */ if (!migrate_uri_parse(uri, &channel, errp)) { return; } addr = channel->addr; - } else { - error_setg(errp, "neither 'uri' or 'channels' argument are " - "specified in 'migrate-incoming' qmp command "); - return; } /* transport mechanism not suitable for migration? */ @@ -609,7 +685,7 @@ static void process_incoming_migration_bh(void *opaque) */ if (!migrate_late_block_activate() || (autostart && (!global_state_received() || - global_state_get_runstate() == RUN_STATE_RUNNING))) { + runstate_is_live(global_state_get_runstate())))) { /* Make sure all file formats throw away their mutable metadata. * If we get an error here, just don't restart the VM yet. 
*/ bdrv_activate_all(&local_err); @@ -628,12 +704,12 @@ static void process_incoming_migration_bh(void *opaque) trace_vmstate_downtime_checkpoint("dst-precopy-bh-announced"); - multifd_load_shutdown(); + multifd_recv_shutdown(); dirty_bitmap_mig_before_vm_start(); if (!global_state_received() || - global_state_get_runstate() == RUN_STATE_RUNNING) { + runstate_is_live(global_state_get_runstate())) { if (autostart) { vm_start(); } else { @@ -653,9 +729,7 @@ static void process_incoming_migration_bh(void *opaque) */ migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_COMPLETED); - qemu_bh_delete(mis->bh); migration_incoming_state_destroy(); - object_unref(OBJECT(migrate_get_current())); } static void coroutine_fn @@ -705,6 +779,13 @@ process_incoming_migration_co(void *opaque) } if (ret < 0) { + MigrationState *s = migrate_get_current(); + + if (migrate_has_error(s)) { + WITH_QEMU_LOCK_GUARD(&s->error_mutex) { + error_report_err(s->error); + } + } error_report("load of migration failed: %s", strerror(-ret)); goto fail; } @@ -713,16 +794,14 @@ process_incoming_migration_co(void *opaque) goto fail; } - mis->bh = qemu_bh_new(process_incoming_migration_bh, mis); - object_ref(OBJECT(migrate_get_current())); - qemu_bh_schedule(mis->bh); + migration_bh_schedule(process_incoming_migration_bh, mis); return; fail: migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_FAILED); qemu_fclose(mis->from_src_file); - multifd_load_cleanup(); + multifd_recv_cleanup(); compress_threads_load_cleanup(); exit(EXIT_FAILURE); @@ -731,11 +810,8 @@ process_incoming_migration_co(void *opaque) /** * migration_incoming_setup: Setup incoming migration * @f: file for main migration channel - * @errp: where to put errors - * - * Returns: %true on success, %false on error. */ -static bool migration_incoming_setup(QEMUFile *f, Error **errp) +static void migration_incoming_setup(QEMUFile *f) { MigrationIncomingState *mis = migration_incoming_get_current(); @@ -743,7 +819,6 @@ static bool migration_incoming_setup(QEMUFile *f, Error **errp) mis->from_src_file = f; } qemu_file_set_blocking(f, false); - return true; } void migration_incoming_process(void) @@ -785,11 +860,9 @@ static bool postcopy_try_recover(void) return false; } -void migration_fd_process_incoming(QEMUFile *f, Error **errp) +void migration_fd_process_incoming(QEMUFile *f) { - if (!migration_incoming_setup(f, errp)) { - return; - } + migration_incoming_setup(f); if (postcopy_try_recover()) { return; } @@ -830,7 +903,8 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) uint32_t channel_magic = 0; int ret = 0; - if (migrate_multifd() && !migrate_postcopy_ram() && + if (migrate_multifd() && !migrate_mapped_ram() && + !migrate_postcopy_ram() && qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) { /* * With multiple channels, it is possible that we receive channels @@ -843,10 +917,9 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) * issue is not possible. 
*/ ret = migration_channel_read_peek(ioc, (void *)&channel_magic, - sizeof(channel_magic), &local_err); + sizeof(channel_magic), errp); if (ret != 0) { - error_propagate(errp, local_err); return; } @@ -855,17 +928,13 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) default_channel = !mis->from_src_file; } - if (multifd_load_setup(errp) != 0) { - error_setg(errp, "Failed to setup multifd channels"); + if (multifd_recv_setup(errp) != 0) { return; } if (default_channel) { f = qemu_file_new_input(ioc); - - if (!migration_incoming_setup(f, errp)) { - return; - } + migration_incoming_setup(f); } else { /* Multiple connections */ assert(migration_needs_multiple_sockets()); @@ -999,9 +1068,11 @@ void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value) * Return true if we're already in the middle of a migration * (i.e. any of the active or setup states) */ -bool migration_is_setup_or_active(int state) +bool migration_is_setup_or_active(void) { - switch (state) { + MigrationState *s = current_migration; + + switch (s->state) { case MIGRATION_STATUS_ACTIVE: case MIGRATION_STATUS_POSTCOPY_ACTIVE: case MIGRATION_STATUS_POSTCOPY_PAUSED: @@ -1019,9 +1090,11 @@ bool migration_is_setup_or_active(int state) } } -bool migration_is_running(int state) +bool migration_is_running(void) { - switch (state) { + MigrationState *s = current_migration; + + switch (s->state) { case MIGRATION_STATUS_ACTIVE: case MIGRATION_STATUS_POSTCOPY_ACTIVE: case MIGRATION_STATUS_POSTCOPY_PAUSED: @@ -1287,8 +1360,7 @@ void migrate_set_state(int *state, int old_state, int new_state) static void migrate_fd_cleanup(MigrationState *s) { - qemu_bh_delete(s->cleanup_bh); - s->cleanup_bh = NULL; + MigrationEventType type; g_free(s->hostname); s->hostname = NULL; @@ -1297,18 +1369,20 @@ static void migrate_fd_cleanup(MigrationState *s) qemu_savevm_state_cleanup(); + close_return_path_on_source(s); + if (s->to_dst_file) { QEMUFile *tmp; trace_migrate_fd_cleanup(); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (s->migration_thread_running) { qemu_thread_join(&s->thread); s->migration_thread_running = false; } - qemu_mutex_lock_iothread(); + bql_lock(); - multifd_save_cleanup(); + multifd_send_shutdown(); qemu_mutex_lock(&s->qemu_file_lock); tmp = s->to_dst_file; s->to_dst_file = NULL; @@ -1321,13 +1395,7 @@ static void migrate_fd_cleanup(MigrationState *s) qemu_fclose(tmp); } - /* - * We already cleaned up to_dst_file, so errors from the return - * path might be due to that, ignore them. - */ - close_return_path_on_source(s); - - assert(!migration_is_active(s)); + assert(!migration_is_active()); if (s->state == MIGRATION_STATUS_CANCELLING) { migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING, @@ -1338,26 +1406,16 @@ static void migrate_fd_cleanup(MigrationState *s) /* It is used on info migrate. We can't free it */ error_report_err(error_copy(s->error)); } - migration_call_notifiers(s); + type = migration_has_failed(s) ? 
MIG_EVENT_PRECOPY_FAILED : + MIG_EVENT_PRECOPY_DONE; + migration_call_notifiers(s, type, NULL); block_cleanup_parameters(); yank_unregister_instance(MIGRATION_YANK_INSTANCE); } -static void migrate_fd_cleanup_schedule(MigrationState *s) -{ - /* - * Ref the state for bh, because it may be called when - * there're already no other refs - */ - object_ref(OBJECT(s)); - qemu_bh_schedule(s->cleanup_bh); -} - static void migrate_fd_cleanup_bh(void *opaque) { - MigrationState *s = opaque; - migrate_fd_cleanup(s); - object_unref(OBJECT(s)); + migrate_fd_cleanup(opaque); } void migrate_set_error(MigrationState *s, const Error *error) @@ -1408,7 +1466,7 @@ static void migrate_fd_cancel(MigrationState *s) do { old_state = s->state; - if (!migration_is_running(old_state)) { + if (!migration_is_running()) { break; } /* If the migration is paused, kick it out of the pause */ @@ -1442,34 +1500,39 @@ static void migrate_fd_cancel(MigrationState *s) } } -void migration_add_notifier(Notifier *notify, - void (*func)(Notifier *notifier, void *data)) +void migration_add_notifier_mode(NotifierWithReturn *notify, + MigrationNotifyFunc func, MigMode mode) { - notify->notify = func; - notifier_list_add(&migration_state_notifiers, notify); + notify->notify = (NotifierWithReturnFunc)func; + notifier_with_return_list_add(&migration_state_notifiers[mode], notify); } -void migration_remove_notifier(Notifier *notify) +void migration_add_notifier(NotifierWithReturn *notify, + MigrationNotifyFunc func) { - if (notify->notify) { - notifier_remove(notify); - notify->notify = NULL; - } + migration_add_notifier_mode(notify, func, MIG_MODE_NORMAL); } -void migration_call_notifiers(MigrationState *s) +void migration_remove_notifier(NotifierWithReturn *notify) { - notifier_list_notify(&migration_state_notifiers, s); + if (notify->notify) { + notifier_with_return_remove(notify); + notify->notify = NULL; + } } -bool migration_in_setup(MigrationState *s) +int migration_call_notifiers(MigrationState *s, MigrationEventType type, + Error **errp) { - return s->state == MIGRATION_STATUS_SETUP; -} + MigMode mode = s->parameters.mode; + MigrationEvent e; + int ret; -bool migration_has_finished(MigrationState *s) -{ - return s->state == MIGRATION_STATUS_COMPLETED; + e.type = type; + ret = notifier_with_return_list_notify(&migration_state_notifiers[mode], + &e, errp); + assert(!ret || type == MIG_EVENT_PRECOPY_SETUP); + return ret; } bool migration_has_failed(MigrationState *s) @@ -1503,11 +1566,6 @@ bool migration_postcopy_is_alive(int state) } } -bool migration_in_postcopy_after_devices(MigrationState *s) -{ - return migration_in_postcopy() && s->postcopy_after_devices; -} - bool migration_in_incoming_postcopy(void) { PostcopyState ps = postcopy_state_get(); @@ -1524,10 +1582,8 @@ bool migration_incoming_postcopy_advised(void) bool migration_in_bg_snapshot(void) { - MigrationState *s = migrate_get_current(); - return migrate_background_snapshot() && - migration_is_setup_or_active(s->state); + migration_is_setup_or_active(); } bool migration_is_idle(void) @@ -1560,12 +1616,33 @@ bool migration_is_idle(void) return false; } -bool migration_is_active(MigrationState *s) +bool migration_is_active(void) { + MigrationState *s = current_migration; + return (s->state == MIGRATION_STATUS_ACTIVE || s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); } +bool migration_is_device(void) +{ + MigrationState *s = current_migration; + + return s->state == MIGRATION_STATUS_DEVICE; +} + +bool migration_thread_is_self(void) +{ + MigrationState *s = 
current_migration; + + return qemu_thread_is_self(&s->thread); +} + +bool migrate_mode_is_cpr(MigrationState *s) +{ + return s->parameters.mode == MIG_MODE_CPR_REBOOT; +} + int migrate_init(MigrationState *s, Error **errp) { int ret; @@ -1580,8 +1657,6 @@ int migrate_init(MigrationState *s, Error **errp) * parameters/capabilities that the user set, and * locks. */ - s->cleanup_bh = 0; - s->vm_start_bh = 0; s->to_dst_file = NULL; s->state = MIGRATION_STATUS_NONE; s->rp_state.from_dst_file = NULL; @@ -1591,11 +1666,9 @@ int migrate_init(MigrationState *s, Error **errp) s->expected_downtime = 0; s->setup_time = 0; s->start_postcopy = false; - s->postcopy_after_devices = false; s->migration_thread_running = false; error_free(s->error); s->error = NULL; - s->hostname = NULL; s->vmdesc = NULL; migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); @@ -1843,8 +1916,6 @@ bool migration_is_blocked(Error **errp) static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, bool resume, Error **errp) { - Error *local_err = NULL; - if (blk_inc) { warn_report("parameter 'inc' is deprecated;" " use blockdev-mirror with NBD instead"); @@ -1884,7 +1955,7 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, return true; } - if (migration_is_running(s->state)) { + if (migration_is_running()) { error_setg(errp, QERR_MIGRATION_ACTIVE); return false; } @@ -1900,10 +1971,45 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, return false; } + if (kvm_hwpoisoned_mem()) { + error_setg(errp, "Can't migrate this vm with hardware poisoned memory, " + "please reboot the vm and try again"); + return false; + } + if (migration_is_blocked(errp)) { return false; } + if (migrate_mapped_ram()) { + if (migrate_tls()) { + error_setg(errp, "Cannot use TLS with mapped-ram"); + return false; + } + + if (migrate_multifd_compression()) { + error_setg(errp, "Cannot use compression with mapped-ram"); + return false; + } + } + + if (migrate_mode_is_cpr(s)) { + const char *conflict = NULL; + + if (migrate_postcopy()) { + conflict = "postcopy"; + } else if (migrate_background_snapshot()) { + conflict = "background snapshot"; + } else if (migrate_colo()) { + conflict = "COLO"; + } + + if (conflict) { + error_setg(errp, "Cannot use %s with CPR", conflict); + return false; + } + } + if (blk || blk_inc) { if (migrate_colo()) { error_setg(errp, "No disk migration is required in COLO mode"); @@ -1914,8 +2020,7 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, "current migration capabilities"); return false; } - if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, &local_err)) { - error_propagate(errp, local_err); + if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, errp)) { return false; } s->must_remove_block_options = true; @@ -1946,28 +2051,26 @@ void qmp_migrate(const char *uri, bool has_channels, /* * Having preliminary checks for uri and channel */ - if (uri && has_channels) { - error_setg(errp, "'uri' and 'channels' arguments are mutually " - "exclusive; exactly one of the two should be present in " - "'migrate' qmp command "); + if (!uri == !channels) { + error_setg(errp, "need either 'uri' or 'channels' argument"); return; - } else if (channels) { + } + + if (channels) { /* To verify that Migrate channel list has only item */ if (channels->next) { error_setg(errp, "Channel list has more than one entries"); return; } addr = channels->value->addr; - } else if (uri) { + } + + if (uri) { /* caller uses the old URI syntax */ if 
(!migrate_uri_parse(uri, &channel, errp)) { return; } addr = channel->addr; - } else { - error_setg(errp, "neither 'uri' or 'channels' argument are " - "specified in 'migrate' qmp command "); - return; } /* transport mechanism not suitable for migration? */ @@ -2189,7 +2292,7 @@ static void *source_return_path_thread(void *opaque) trace_source_return_path_thread_entry(); rcu_register_thread(); - while (migration_is_setup_or_active(ms->state)) { + while (migration_is_setup_or_active()) { trace_source_return_path_thread_loop_top(); header_type = qemu_get_be16(rp); @@ -2369,8 +2472,7 @@ static bool close_return_path_on_source(MigrationState *ms) * cause it to unblock if it's stuck waiting for the destination. */ WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { - if (ms->to_dst_file && ms->rp_state.from_dst_file && - qemu_file_get_error(ms->to_dst_file)) { + if (migrate_has_error(ms) && ms->rp_state.from_dst_file) { qemu_file_shutdown(ms->rp_state.from_dst_file); } } @@ -2408,6 +2510,8 @@ static int postcopy_start(MigrationState *ms, Error **errp) migration_wait_main_channel(ms); if (postcopy_preempt_establish_channel(ms)) { migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); + error_setg(errp, "%s: Failed to establish preempt channel", + __func__); return -1; } } @@ -2418,26 +2522,27 @@ static int postcopy_start(MigrationState *ms, Error **errp) } trace_postcopy_start(); - qemu_mutex_lock_iothread(); + bql_lock(); trace_postcopy_start_set_run(); - migration_downtime_start(ms); - - qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); - global_state_store(); - ret = migration_stop_vm(RUN_STATE_FINISH_MIGRATE); + ret = migration_stop_vm(ms, RUN_STATE_FINISH_MIGRATE); if (ret < 0) { + error_setg_errno(errp, -ret, "%s: Failed to stop the VM", __func__); goto fail; } ret = migration_maybe_pause(ms, &cur_state, MIGRATION_STATUS_POSTCOPY_ACTIVE); if (ret < 0) { + error_setg_errno(errp, -ret, "%s: Failed in migration_maybe_pause()", + __func__); goto fail; } ret = bdrv_inactivate_all(); if (ret < 0) { + error_setg_errno(errp, -ret, "%s: Failed in bdrv_inactivate_all()", + __func__); goto fail; } restart_block = true; @@ -2514,6 +2619,7 @@ static int postcopy_start(MigrationState *ms, Error **errp) /* Now send that blob */ if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) { + error_setg(errp, "%s: Failed to send packaged data", __func__); goto fail_closefb; } qemu_fclose(fb); @@ -2522,12 +2628,11 @@ static int postcopy_start(MigrationState *ms, Error **errp) * at the transition to postcopy and after the device state; in particular * spice needs to trigger a transition now */ - ms->postcopy_after_devices = true; - migration_call_notifiers(ms); + migration_call_notifiers(ms, MIG_EVENT_PRECOPY_DONE, NULL); migration_downtime_end(ms); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (migrate_postcopy_ram()) { /* @@ -2543,11 +2648,10 @@ static int postcopy_start(MigrationState *ms, Error **errp) ret = qemu_file_get_error(ms->to_dst_file); if (ret) { - error_setg(errp, "postcopy_start: Migration stream errored"); - migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, - MIGRATION_STATUS_FAILED); + error_setg_errno(errp, -ret, "postcopy_start: Migration stream error"); + bql_lock(); + goto fail; } - trace_postcopy_preempt_enabled(migrate_postcopy_preempt()); return ret; @@ -2568,13 +2672,14 @@ static int postcopy_start(MigrationState *ms, Error **errp) error_report_err(local_err); } } - qemu_mutex_unlock_iothread(); + migration_call_notifiers(ms, 
MIG_EVENT_PRECOPY_FAILED, NULL); + bql_unlock(); return -1; } /** * migration_maybe_pause: Pause if required to by - * migrate_pause_before_switchover called with the iothread locked + * migrate_pause_before_switchover called with the BQL locked * Returns: 0 on success */ static int migration_maybe_pause(MigrationState *s, @@ -2602,14 +2707,14 @@ static int migration_maybe_pause(MigrationState *s, * wait for the 'pause_sem' semaphore. */ if (s->state != MIGRATION_STATUS_CANCELLING) { - qemu_mutex_unlock_iothread(); + bql_unlock(); migrate_set_state(&s->state, *current_active_state, MIGRATION_STATUS_PRE_SWITCHOVER); qemu_sem_wait(&s->pause_sem); migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER, new_state); *current_active_state = new_state; - qemu_mutex_lock_iothread(); + bql_lock(); } return s->state == new_state ? 0 : -EINVAL; @@ -2620,17 +2725,13 @@ static int migration_completion_precopy(MigrationState *s, { int ret; - qemu_mutex_lock_iothread(); - migration_downtime_start(s); - qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); + bql_lock(); - s->vm_old_state = runstate_get(); - global_state_store(); - - ret = migration_stop_vm(RUN_STATE_FINISH_MIGRATE); - trace_migration_completion_vm_stop(ret); - if (ret < 0) { - goto out_unlock; + if (!migrate_mode_is_cpr(s)) { + ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE); + if (ret < 0) { + goto out_unlock; + } } ret = migration_maybe_pause(s, current_active_state, @@ -2648,7 +2749,7 @@ static int migration_completion_precopy(MigrationState *s, ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, s->block_inactive); out_unlock: - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -2656,9 +2757,9 @@ static void migration_completion_postcopy(MigrationState *s) { trace_migration_completion_postcopy_end(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_savevm_state_complete_postcopy(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); /* * Shutdown the postcopy fast path thread. 
This is only needed when dest @@ -2682,14 +2783,14 @@ static void migration_completion_failed(MigrationState *s, */ Error *local_err = NULL; - qemu_mutex_lock_iothread(); + bql_lock(); bdrv_activate_all(&local_err); if (local_err) { error_report_err(local_err); } else { s->block_inactive = false; } - qemu_mutex_unlock_iothread(); + bql_unlock(); } migrate_set_state(&s->state, current_active_state, @@ -2706,6 +2807,7 @@ static void migration_completion(MigrationState *s) { int ret = 0; int current_active_state = s->state; + Error *local_err = NULL; if (s->state == MIGRATION_STATUS_ACTIVE) { ret = migration_completion_precopy(s, ¤t_active_state); @@ -2733,13 +2835,21 @@ static void migration_completion(MigrationState *s) migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_COLO); } else { - migrate_set_state(&s->state, current_active_state, - MIGRATION_STATUS_COMPLETED); + migration_completion_end(s); } return; fail: + if (qemu_file_get_error_obj(s->to_dst_file, &local_err)) { + migrate_set_error(s, local_err); + error_free(local_err); + } else if (ret) { + error_setg_errno(&local_err, -ret, "Error in migration completion"); + migrate_set_error(s, local_err); + error_free(local_err); + } + migration_completion_failed(s, current_active_state); } @@ -2771,8 +2881,7 @@ static void bg_migration_completion(MigrationState *s) goto fail; } - migrate_set_state(&s->state, current_active_state, - MIGRATION_STATUS_COMPLETED); + migration_completion_end(s); return; fail: @@ -2861,6 +2970,13 @@ static MigThrError postcopy_pause(MigrationState *s) while (true) { QEMUFile *file; + /* + * We're already pausing, so ignore any errors on the return + * path and just wait for the thread to finish. It will be + * re-created when we resume. + */ + close_return_path_on_source(s); + /* * Current channel is possibly broken. Release it. Note that this is * guaranteed even without lock because to_dst_file should only be @@ -2880,13 +2996,6 @@ static MigThrError postcopy_pause(MigrationState *s) qemu_file_shutdown(file); qemu_fclose(file); - /* - * We're already pausing, so ignore any errors on the return - * path and just wait for the thread to finish. It will be - * re-created when we resume. - */ - close_return_path_on_source(s); - migrate_set_state(&s->state, s->state, MIGRATION_STATUS_POSTCOPY_PAUSED); @@ -2924,6 +3033,17 @@ static MigThrError postcopy_pause(MigrationState *s) } } +void migration_file_set_error(int err) +{ + MigrationState *s = current_migration; + + WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { + if (s->to_dst_file) { + qemu_file_set_error(s->to_dst_file, err); + } + } +} + static MigThrError migration_detect_error(MigrationState *s) { int ret; @@ -2974,18 +3094,28 @@ static MigThrError migration_detect_error(MigrationState *s) } } -static void migration_calculate_complete(MigrationState *s) +static void migration_completion_end(MigrationState *s) { uint64_t bytes = migration_transferred_bytes(); int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); int64_t transfer_time; + /* + * Take the BQL here so that query-migrate on the QMP thread sees: + * - atomic update of s->total_time and s->mbps; + * - correct ordering of s->mbps update vs. 
s->state; + */ + bql_lock(); migration_downtime_end(s); s->total_time = end_time - s->start_time; transfer_time = s->total_time - s->setup_time; if (transfer_time) { s->mbps = ((double) bytes * 8.0) / transfer_time / 1000; } + + migrate_set_state(&s->state, s->state, + MIGRATION_STATUS_COMPLETED); + bql_unlock(); } static void update_iteration_initial_status(MigrationState *s) @@ -3087,17 +3217,16 @@ typedef enum { */ static MigIterateState migration_iteration_run(MigrationState *s) { - uint64_t must_precopy, can_postcopy; + uint64_t must_precopy, can_postcopy, pending_size; Error *local_err = NULL; bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE; bool can_switchover = migration_can_switchover(s); qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy); - uint64_t pending_size = must_precopy + can_postcopy; - + pending_size = must_precopy + can_postcopy; trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy); - if (must_precopy <= s->threshold_size) { + if (pending_size < s->threshold_size) { qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy); pending_size = must_precopy + can_postcopy; trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy); @@ -3129,10 +3258,9 @@ static void migration_iteration_finish(MigrationState *s) /* If we enabled cpu throttling for auto-converge, turn it off. */ cpu_throttle_stop(); - qemu_mutex_lock_iothread(); + bql_lock(); switch (s->state) { case MIGRATION_STATUS_COMPLETED: - migration_calculate_complete(s); runstate_set(RUN_STATE_POSTMIGRATE); break; case MIGRATION_STATUS_COLO: @@ -3143,7 +3271,7 @@ static void migration_iteration_finish(MigrationState *s) case MIGRATION_STATUS_FAILED: case MIGRATION_STATUS_CANCELLED: case MIGRATION_STATUS_CANCELLING: - if (s->vm_old_state == RUN_STATE_RUNNING) { + if (runstate_is_live(s->vm_old_state)) { if (!runstate_check(RUN_STATE_SHUTDOWN)) { vm_start(); } @@ -3159,8 +3287,9 @@ static void migration_iteration_finish(MigrationState *s) error_report("%s: Unknown ending state %d", __func__, s->state); break; } - migrate_fd_cleanup_schedule(s); - qemu_mutex_unlock_iothread(); + + migration_bh_schedule(migrate_fd_cleanup_bh, s); + bql_unlock(); } static void bg_migration_iteration_finish(MigrationState *s) @@ -3172,12 +3301,9 @@ static void bg_migration_iteration_finish(MigrationState *s) */ ram_write_tracking_stop(); - qemu_mutex_lock_iothread(); + bql_lock(); switch (s->state) { case MIGRATION_STATUS_COMPLETED: - migration_calculate_complete(s); - break; - case MIGRATION_STATUS_ACTIVE: case MIGRATION_STATUS_FAILED: case MIGRATION_STATUS_CANCELLED: @@ -3190,8 +3316,8 @@ static void bg_migration_iteration_finish(MigrationState *s) break; } - migrate_fd_cleanup_schedule(s); - qemu_mutex_unlock_iothread(); + migration_bh_schedule(migrate_fd_cleanup_bh, s); + bql_unlock(); } /* @@ -3313,9 +3439,13 @@ static void *migration_thread(void *opaque) object_ref(OBJECT(s)); update_iteration_initial_status(s); - qemu_mutex_lock_iothread(); + if (!multifd_send_setup()) { + goto out; + } + + bql_lock(); qemu_savevm_state_header(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); /* * If we opened the return path, we need to make sure dst has it @@ -3343,9 +3473,9 @@ static void *migration_thread(void *opaque) qemu_savevm_send_colo_enable(s->to_dst_file); } - qemu_mutex_lock_iothread(); + bql_lock(); qemu_savevm_state_setup(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, 
MIGRATION_STATUS_ACTIVE); @@ -3354,7 +3484,7 @@ static void *migration_thread(void *opaque) trace_migration_thread_setup_complete(); - while (migration_is_active(s)) { + while (migration_is_active()) { if (urgent || !migration_rate_exceeded(s->to_dst_file)) { MigIterateState iter_state = migration_iteration_run(s); if (iter_state == MIG_ITERATE_SKIP) { @@ -3384,6 +3514,7 @@ static void *migration_thread(void *opaque) urgent = migration_rate_limit(); } +out: trace_migration_thread_after_loop(); migration_iteration_finish(s); object_unref(OBJECT(s)); @@ -3396,10 +3527,7 @@ static void bg_migration_vm_start_bh(void *opaque) { MigrationState *s = opaque; - qemu_bh_delete(s->vm_start_bh); - s->vm_start_bh = NULL; - - vm_start(); + vm_resume(s->vm_old_state); migration_downtime_end(s); } @@ -3456,10 +3584,10 @@ static void *bg_migration_thread(void *opaque) ram_write_tracking_prepare(); #endif - qemu_mutex_lock_iothread(); + bql_lock(); qemu_savevm_state_header(s->to_dst_file); qemu_savevm_state_setup(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE); @@ -3467,20 +3595,10 @@ static void *bg_migration_thread(void *opaque) s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; trace_migration_thread_setup_complete(); - migration_downtime_start(s); - - qemu_mutex_lock_iothread(); - /* - * If VM is currently in suspended state, then, to make a valid runstate - * transition in vm_stop_force_state() we need to wakeup it up. - */ - qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); - s->vm_old_state = runstate_get(); + bql_lock(); - global_state_store(); - /* Forcibly stop VM before saving state of vCPUs and devices */ - if (migration_stop_vm(RUN_STATE_PAUSED)) { + if (migration_stop_vm(s, RUN_STATE_PAUSED)) { goto fail; } /* @@ -3509,12 +3627,10 @@ static void *bg_migration_thread(void *opaque) * calling VM state change notifiers from vm_start() would initiate * writes to virtio VQs memory which is in write-protected region. */ - s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s); - qemu_bh_schedule(s->vm_start_bh); - - qemu_mutex_unlock_iothread(); + migration_bh_schedule(bg_migration_vm_start_bh, s); + bql_unlock(); - while (migration_is_active(s)) { + while (migration_is_active()) { MigIterateState iter_state = bg_migration_iteration_run(s); if (iter_state == MIG_ITERATE_SKIP) { continue; @@ -3541,7 +3657,7 @@ static void *bg_migration_thread(void *opaque) if (early_fail) { migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_FAILED); - qemu_mutex_unlock_iothread(); + bql_unlock(); } bg_migration_iteration_finish(s); @@ -3558,6 +3674,7 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) Error *local_err = NULL; uint64_t rate_limit; bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED; + int ret; /* * If there's a previous error, free it and prepare for another one. 
@@ -3567,12 +3684,6 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) migrate_error_free(s); s->expected_downtime = migrate_downtime_limit(); - if (resume) { - assert(s->cleanup_bh); - } else { - assert(!s->cleanup_bh); - s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s); - } if (error_in) { migrate_fd_error(s, error_in); if (resume) { @@ -3598,7 +3709,9 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) rate_limit = migrate_max_bandwidth(); /* Notify before starting migration thread */ - migration_call_notifiers(s); + if (migration_call_notifiers(s, MIG_EVENT_PRECOPY_SETUP, &local_err)) { + goto fail; + } } migration_rate_set(rate_limit); @@ -3612,11 +3725,7 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) if (migrate_postcopy_ram() || migrate_return_path()) { if (open_return_path_on_source(s)) { error_setg(&local_err, "Unable to open return-path for postcopy"); - migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); - migrate_set_error(s, local_err); - error_report_err(local_err); - migrate_fd_cleanup(s); - return; + goto fail; } } @@ -3637,13 +3746,12 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) return; } - if (multifd_save_setup(&local_err) != 0) { - migrate_set_error(s, local_err); - error_report_err(local_err); - migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, - MIGRATION_STATUS_FAILED); - migrate_fd_cleanup(s); - return; + if (migrate_mode_is_cpr(s)) { + ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE); + if (ret < 0) { + error_setg(&local_err, "migration_stop_vm failed, error %d", -ret); + goto fail; + } } if (migrate_background_snapshot()) { @@ -3654,6 +3762,13 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) migration_thread, s, QEMU_THREAD_JOINABLE); } s->migration_thread_running = true; + return; + +fail: + migrate_set_error(s, local_err); + migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); + error_report_err(local_err); + migrate_fd_cleanup(s); } static void migration_class_init(ObjectClass *klass, void *data) diff --git a/migration/migration.h b/migration/migration.h index cf2c9c88e01..8045e39c26f 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -26,6 +26,7 @@ #include "qom/object.h" #include "postcopy-ram.h" #include "sysemu/runstate.h" +#include "migration/misc.h" struct PostcopyBlocktimeContext; @@ -159,8 +160,6 @@ struct MigrationIncomingState { /* PostCopyFD's for external userfaultfds & handlers of shared memory */ GArray *postcopy_remote_fds; - QEMUBH *bh; - int state; /* @@ -255,8 +254,6 @@ struct MigrationState { /*< public >*/ QemuThread thread; - QEMUBH *vm_start_bh; - QEMUBH *cleanup_bh; /* Protected by qemu_file_lock */ QEMUFile *to_dst_file; /* Postcopy specific transfer channel */ @@ -296,7 +293,7 @@ struct MigrationState { * this threshold; it's calculated from the requested downtime and * measured bandwidth, or avail-switchover-bandwidth if specified. 
      */
-    int64_t threshold_size;
+    uint64_t threshold_size;
 
     /* params from 'migrate-set-parameters' */
     MigrationParameters parameters;
 
@@ -352,8 +349,6 @@ struct MigrationState {
     /* Flag set once the migration has been asked to enter postcopy */
     bool start_postcopy;
 
-    /* Flag set after postcopy has sent the device state */
-    bool postcopy_after_devices;
 
     /* Flag set once the migration thread is running (and needs joining) */
     bool migration_thread_running;
@@ -474,21 +469,19 @@ struct MigrationState {
 
 void migrate_set_state(int *state, int old_state, int new_state);
 
-void migration_fd_process_incoming(QEMUFile *f, Error **errp);
+void migration_fd_process_incoming(QEMUFile *f);
 void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
 void migration_incoming_process(void);
 
 bool migration_has_all_channels(void);
 
-uint64_t migrate_max_downtime(void);
-
 void migrate_set_error(MigrationState *s, const Error *error);
 bool migrate_has_error(MigrationState *s);
 
 void migrate_fd_connect(MigrationState *s, Error *error_in);
-bool migration_is_setup_or_active(int state);
-bool migration_is_running(int state);
+int migration_call_notifiers(MigrationState *s, MigrationEventType type,
+                             Error **errp);
 
 int migrate_init(MigrationState *s, Error **errp);
 bool migration_is_blocked(Error **errp);
 
@@ -496,6 +489,8 @@ bool migration_is_blocked(Error **errp);
 bool migration_in_postcopy(void);
 bool migration_postcopy_is_alive(int state);
 MigrationState *migrate_get_current(void);
+bool migration_has_failed(MigrationState *);
+bool migrate_mode_is_cpr(MigrationState *);
 
 uint64_t ram_get_total_transferred_pages(void);
 
@@ -530,6 +525,7 @@ int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);
 void migration_make_urgent_request(void);
 void migration_consume_urgent_request(void);
 bool migration_rate_limit(void);
+void migration_bh_schedule(QEMUBHFunc *cb, void *opaque);
 void migration_cancel(const Error *error);
 
 void migration_populate_vfio_info(MigrationInfo *info);
@@ -548,6 +544,4 @@ int migration_rp_wait(MigrationState *s);
  */
 void migration_rp_kick(MigrationState *s);
 
-int migration_stop_vm(RunState state);
-
 #endif
diff --git a/migration/multifd-zero-page.c b/migration/multifd-zero-page.c
new file mode 100644
index 00000000000..1ba38be6361
--- /dev/null
+++ b/migration/multifd-zero-page.c
@@ -0,0 +1,87 @@
+/*
+ * Multifd zero page detection implementation.
+ *
+ * Copyright (c) 2024 Bytedance Inc
+ *
+ * Authors:
+ *  Hao Xiang
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "exec/ramblock.h"
+#include "migration.h"
+#include "multifd.h"
+#include "options.h"
+#include "ram.h"
+
+static bool multifd_zero_page_enabled(void)
+{
+    return migrate_zero_page_detection() == ZERO_PAGE_DETECTION_MULTIFD;
+}
+
+static void swap_page_offset(ram_addr_t *pages_offset, int a, int b)
+{
+    ram_addr_t temp;
+
+    if (a == b) {
+        return;
+    }
+
+    temp = pages_offset[a];
+    pages_offset[a] = pages_offset[b];
+    pages_offset[b] = temp;
+}
+
+/**
+ * multifd_send_zero_page_detect: Perform zero page detection on all pages.
+ *
+ * Sorts normal pages before zero pages in p->pages->offset and updates
+ * p->pages->normal_num.
+ *
+ * @param p A pointer to the send params.
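+ *
+ * After detection the offset array is partitioned as:
+ *
+ *   offset[0 .. normal_num - 1]   normal (non-zero) pages
+ *   offset[normal_num .. num - 1] zero pages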
+ */
+void multifd_send_zero_page_detect(MultiFDSendParams *p)
+{
+    MultiFDPages_t *pages = p->pages;
+    RAMBlock *rb = pages->block;
+    int i = 0;
+    int j = pages->num - 1;
+
+    if (!multifd_zero_page_enabled()) {
+        pages->normal_num = pages->num;
+        return;
+    }
+
+    /*
+     * Sort the page offset array by moving all normal pages to
+     * the left and all zero pages to the right of the array.
+     */
+    while (i <= j) {
+        uint64_t offset = pages->offset[i];
+
+        if (!buffer_is_zero(rb->host + offset, p->page_size)) {
+            i++;
+            continue;
+        }
+
+        swap_page_offset(pages->offset, i, j);
+        ram_release_page(rb->idstr, offset);
+        j--;
+    }
+
+    pages->normal_num = i;
+}
+
+void multifd_recv_zero_page_process(MultiFDRecvParams *p)
+{
+    for (int i = 0; i < p->zero_num; i++) {
+        void *page = p->host + p->zero[i];
+        if (!buffer_is_zero(page, p->page_size)) {
+            memset(page, 0, p->page_size);
+        }
+    }
+}
diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c
index 37ce48621e7..99821cd4d5e 100644
--- a/migration/multifd-zlib.c
+++ b/migration/multifd-zlib.c
@@ -69,13 +69,13 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
         err_msg = "out of memory for buf";
         goto err_free_zbuff;
     }
-    p->data = z;
+    p->compress_data = z;
     return 0;
 
 err_free_zbuff:
     g_free(z->zbuff);
 err_deflate_end:
-    deflateEnd(&z->zs);
+    deflateEnd(zs);
 err_free_z:
     g_free(z);
     error_setg(errp, "multifd %u: %s", p->id, err_msg);
@@ -92,15 +92,15 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
  */
 static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp)
 {
-    struct zlib_data *z = p->data;
+    struct zlib_data *z = p->compress_data;
 
     deflateEnd(&z->zs);
     g_free(z->zbuff);
     z->zbuff = NULL;
     g_free(z->buf);
     z->buf = NULL;
-    g_free(p->data);
-    p->data = NULL;
+    g_free(p->compress_data);
+    p->compress_data = NULL;
 }
 
 /**
@@ -116,17 +116,22 @@ static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp)
  */
 static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
 {
-    struct zlib_data *z = p->data;
+    MultiFDPages_t *pages = p->pages;
+    struct zlib_data *z = p->compress_data;
     z_stream *zs = &z->zs;
     uint32_t out_size = 0;
     int ret;
     uint32_t i;
 
-    for (i = 0; i < p->normal_num; i++) {
+    if (!multifd_send_prepare_common(p)) {
+        goto out;
+    }
+
+    for (i = 0; i < pages->normal_num; i++) {
         uint32_t available = z->zbuff_len - out_size;
         int flush = Z_NO_FLUSH;
 
-        if (i == p->normal_num - 1) {
+        if (i == pages->normal_num - 1) {
             flush = Z_SYNC_FLUSH;
         }
 
@@ -135,7 +140,7 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
          * with compression. zlib does not guarantee that this is safe,
         * therefore copy the page before calling deflate().
*/ - memcpy(z->buf, p->pages->block->host + p->normal[i], p->page_size); + memcpy(z->buf, p->pages->block->host + pages->offset[i], p->page_size); zs->avail_in = p->page_size; zs->next_in = z->buf; @@ -169,8 +174,10 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp) p->iov[p->iovs_num].iov_len = out_size; p->iovs_num++; p->next_packet_size = out_size; - p->flags |= MULTIFD_FLAG_ZLIB; +out: + p->flags |= MULTIFD_FLAG_ZLIB; + multifd_send_fill_packet(p); return 0; } @@ -189,7 +196,7 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp) struct zlib_data *z = g_new0(struct zlib_data, 1); z_stream *zs = &z->zs; - p->data = z; + p->compress_data = z; zs->zalloc = Z_NULL; zs->zfree = Z_NULL; zs->opaque = Z_NULL; @@ -219,17 +226,17 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp) */ static void zlib_recv_cleanup(MultiFDRecvParams *p) { - struct zlib_data *z = p->data; + struct zlib_data *z = p->compress_data; inflateEnd(&z->zs); g_free(z->zbuff); z->zbuff = NULL; - g_free(p->data); - p->data = NULL; + g_free(p->compress_data); + p->compress_data = NULL; } /** - * zlib_recv_pages: read the data from the channel into actual pages + * zlib_recv: read the data from the channel into actual pages * * Read the compressed buffer, and uncompress it into the actual * pages. @@ -239,9 +246,9 @@ static void zlib_recv_cleanup(MultiFDRecvParams *p) * @p: Params for the channel that we are using * @errp: pointer to an error */ -static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp) +static int zlib_recv(MultiFDRecvParams *p, Error **errp) { - struct zlib_data *z = p->data; + struct zlib_data *z = p->compress_data; z_stream *zs = &z->zs; uint32_t in_size = p->next_packet_size; /* we measure the change of total_out */ @@ -256,6 +263,14 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp) p->id, flags, MULTIFD_FLAG_ZLIB); return -1; } + + multifd_recv_zero_page_process(p); + + if (!p->normal_num) { + assert(in_size == 0); + return 0; + } + ret = qio_channel_read_all(p->c, (void *)z->zbuff, in_size, errp); if (ret != 0) { @@ -305,6 +320,7 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp) p->id, out_size, expected_size); return -1; } + return 0; } @@ -314,7 +330,7 @@ static MultiFDMethods multifd_zlib_ops = { .send_prepare = zlib_send_prepare, .recv_setup = zlib_recv_setup, .recv_cleanup = zlib_recv_cleanup, - .recv_pages = zlib_recv_pages + .recv = zlib_recv }; static void multifd_zlib_register(void) diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c index b471daadcd0..02112255adc 100644 --- a/migration/multifd-zstd.c +++ b/migration/multifd-zstd.c @@ -52,7 +52,7 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp) struct zstd_data *z = g_new0(struct zstd_data, 1); int res; - p->data = z; + p->compress_data = z; z->zcs = ZSTD_createCStream(); if (!z->zcs) { g_free(z); @@ -90,14 +90,14 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp) */ static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp) { - struct zstd_data *z = p->data; + struct zstd_data *z = p->compress_data; ZSTD_freeCStream(z->zcs); z->zcs = NULL; g_free(z->zbuff); z->zbuff = NULL; - g_free(p->data); - p->data = NULL; + g_free(p->compress_data); + p->compress_data = NULL; } /** @@ -113,21 +113,26 @@ static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp) */ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp) { - struct zstd_data *z = p->data; + MultiFDPages_t *pages = p->pages; + struct zstd_data *z 
= p->compress_data; int ret; uint32_t i; + if (!multifd_send_prepare_common(p)) { + goto out; + } + z->out.dst = z->zbuff; z->out.size = z->zbuff_len; z->out.pos = 0; - for (i = 0; i < p->normal_num; i++) { + for (i = 0; i < pages->normal_num; i++) { ZSTD_EndDirective flush = ZSTD_e_continue; - if (i == p->normal_num - 1) { + if (i == pages->normal_num - 1) { flush = ZSTD_e_flush; } - z->in.src = p->pages->block->host + p->normal[i]; + z->in.src = p->pages->block->host + pages->offset[i]; z->in.size = p->page_size; z->in.pos = 0; @@ -158,8 +163,10 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp) p->iov[p->iovs_num].iov_len = z->out.pos; p->iovs_num++; p->next_packet_size = z->out.pos; - p->flags |= MULTIFD_FLAG_ZSTD; +out: + p->flags |= MULTIFD_FLAG_ZSTD; + multifd_send_fill_packet(p); return 0; } @@ -178,7 +185,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp) struct zstd_data *z = g_new0(struct zstd_data, 1); int ret; - p->data = z; + p->compress_data = z; z->zds = ZSTD_createDStream(); if (!z->zds) { g_free(z); @@ -216,18 +223,18 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp) */ static void zstd_recv_cleanup(MultiFDRecvParams *p) { - struct zstd_data *z = p->data; + struct zstd_data *z = p->compress_data; ZSTD_freeDStream(z->zds); z->zds = NULL; g_free(z->zbuff); z->zbuff = NULL; - g_free(p->data); - p->data = NULL; + g_free(p->compress_data); + p->compress_data = NULL; } /** - * zstd_recv_pages: read the data from the channel into actual pages + * zstd_recv: read the data from the channel into actual pages * * Read the compressed buffer, and uncompress it into the actual * pages. @@ -237,13 +244,13 @@ static void zstd_recv_cleanup(MultiFDRecvParams *p) * @p: Params for the channel that we are using * @errp: pointer to an error */ -static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp) +static int zstd_recv(MultiFDRecvParams *p, Error **errp) { uint32_t in_size = p->next_packet_size; uint32_t out_size = 0; uint32_t expected_size = p->normal_num * p->page_size; uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; - struct zstd_data *z = p->data; + struct zstd_data *z = p->compress_data; int ret; int i; @@ -252,6 +259,14 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp) p->id, flags, MULTIFD_FLAG_ZSTD); return -1; } + + multifd_recv_zero_page_process(p); + + if (!p->normal_num) { + assert(in_size == 0); + return 0; + } + ret = qio_channel_read_all(p->c, (void *)z->zbuff, in_size, errp); if (ret != 0) { @@ -305,7 +320,7 @@ static MultiFDMethods multifd_zstd_ops = { .send_prepare = zstd_send_prepare, .recv_setup = zstd_recv_setup, .recv_cleanup = zstd_recv_cleanup, - .recv_pages = zstd_recv_pages + .recv = zstd_recv }; static void multifd_zstd_register(void) diff --git a/migration/multifd.c b/migration/multifd.c index 409460684f2..2802afe79d0 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -11,13 +11,14 @@ */ #include "qemu/osdep.h" +#include "qemu/cutils.h" #include "qemu/rcu.h" #include "exec/target_page.h" #include "sysemu/sysemu.h" #include "exec/ramblock.h" #include "qemu/error-report.h" #include "qapi/error.h" -#include "ram.h" +#include "file.h" #include "migration.h" #include "migration-stats.h" #include "socket.h" @@ -28,6 +29,7 @@ #include "threadinfo.h" #include "options.h" #include "qemu/yank.h" +#include "io/channel-file.h" #include "io/channel-socket.h" #include "yank_functions.h" @@ -45,20 +47,96 @@ typedef struct { uint64_t unused2[4]; /* Reserved for future use */ } 
__attribute__((packed)) MultiFDInit_t; +struct { + MultiFDSendParams *params; + /* array of pages to sent */ + MultiFDPages_t *pages; + /* + * Global number of generated multifd packets. + * + * Note that we used 'uintptr_t' because it'll naturally support atomic + * operations on both 32bit / 64 bits hosts. It means on 32bit systems + * multifd will overflow the packet_num easier, but that should be + * fine. + * + * Another option is to use QEMU's Stat64 then it'll be 64 bits on all + * hosts, however so far it does not support atomic fetch_add() yet. + * Make it easy for now. + */ + uintptr_t packet_num; + /* + * Synchronization point past which no more channels will be + * created. + */ + QemuSemaphore channels_created; + /* send channels ready */ + QemuSemaphore channels_ready; + /* + * Have we already run terminate threads. There is a race when it + * happens that we got one error while we are exiting. + * We will use atomic operations. Only valid values are 0 and 1. + */ + int exiting; + /* multifd ops */ + MultiFDMethods *ops; +} *multifd_send_state; + +struct { + MultiFDRecvParams *params; + MultiFDRecvData *data; + /* number of created threads */ + int count; + /* + * This is always posted by the recv threads, the migration thread + * uses it to wait for recv threads to finish assigned tasks. + */ + QemuSemaphore sem_sync; + /* global number of generated multifd packets */ + uint64_t packet_num; + int exiting; + /* multifd ops */ + MultiFDMethods *ops; +} *multifd_recv_state; + +static bool multifd_use_packets(void) +{ + return !migrate_mapped_ram(); +} + +void multifd_send_channel_created(void) +{ + qemu_sem_post(&multifd_send_state->channels_created); +} + +static void multifd_set_file_bitmap(MultiFDSendParams *p) +{ + MultiFDPages_t *pages = p->pages; + + assert(pages->block); + + for (int i = 0; i < p->pages->normal_num; i++) { + ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], true); + } + + for (int i = p->pages->normal_num; i < p->pages->num; i++) { + ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], false); + } +} + /* Multifd without compression */ /** * nocomp_send_setup: setup send side * - * For no compression this function does nothing. 
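The multifd_send_state introduced above keeps its global packet_num as a uintptr_t precisely so that a plain atomic fetch-and-increment works on both 32-bit and 64-bit hosts (the alternative, Stat64, has no fetch_add yet). A minimal sketch of that counter pattern, using standard C11 atomics rather than QEMU's qatomic helpers; all names here are illustrative stand-ins:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for multifd_send_state->packet_num. */
static _Atomic uintptr_t packet_num_counter;

/* Each sender thread reserves a unique packet number with one RMW op;
 * atomic_fetch_add returns the previous value, like qatomic_fetch_inc(). */
static uintptr_t next_packet_num(void)
{
    return atomic_fetch_add(&packet_num_counter, 1);
}

int main(void)
{
    for (int i = 0; i < 4; i++) {
        printf("packet %" PRIuPTR "\n", next_packet_num());
    }
    return 0;
}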
- * - * Returns 0 for success or -1 for error - * * @p: Params for the channel that we are using * @errp: pointer to an error */ static int nocomp_send_setup(MultiFDSendParams *p, Error **errp) { + if (migrate_zero_copy_send()) { + p->write_flags |= QIO_CHANNEL_WRITE_FLAG_ZERO_COPY; + } + return 0; } @@ -75,6 +153,19 @@ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp) return; } +static void multifd_send_prepare_iovs(MultiFDSendParams *p) +{ + MultiFDPages_t *pages = p->pages; + + for (int i = 0; i < pages->normal_num; i++) { + p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i]; + p->iov[p->iovs_num].iov_len = p->page_size; + p->iovs_num++; + } + + p->next_packet_size = pages->normal_num * p->page_size; +} + /** * nocomp_send_prepare: prepare date to be able to send * @@ -88,16 +179,40 @@ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp) */ static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp) { - MultiFDPages_t *pages = p->pages; + bool use_zero_copy_send = migrate_zero_copy_send(); + int ret; - for (int i = 0; i < p->normal_num; i++) { - p->iov[p->iovs_num].iov_base = pages->block->host + p->normal[i]; - p->iov[p->iovs_num].iov_len = p->page_size; - p->iovs_num++; + multifd_send_zero_page_detect(p); + + if (!multifd_use_packets()) { + multifd_send_prepare_iovs(p); + multifd_set_file_bitmap(p); + + return 0; + } + + if (!use_zero_copy_send) { + /* + * Only !zerocopy needs the header in IOV; zerocopy will + * send it separately. + */ + multifd_send_prepare_header(p); } - p->next_packet_size = p->normal_num * p->page_size; + multifd_send_prepare_iovs(p); p->flags |= MULTIFD_FLAG_NOCOMP; + + multifd_send_fill_packet(p); + + if (use_zero_copy_send) { + /* Send header first, without zerocopy */ + ret = qio_channel_write_all(p->c, (void *)p->packet, + p->packet_len, errp); + if (ret != 0) { + return -1; + } + } + return 0; } @@ -128,7 +243,7 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p) } /** - * nocomp_recv_pages: read the data from the channel into actual pages + * nocomp_recv: read the data from the channel * * For no compression we just need to read things into the correct place. 
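nocomp_send_prepare() above now begins with multifd_send_zero_page_detect(), whose job is to partition the queued offsets so that the first normal_num entries are non-zero pages and the rest are zero pages (the detector itself lives elsewhere in the series and is not shown in this hunk). A rough, self-contained sketch of one way such a partition can work, with a naive byte scan standing in for QEMU's buffer_is_zero() and simplified types standing in for MultiFDPages_t:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the relevant MultiFDPages_t fields. */
typedef struct {
    uint8_t *host;        /* base of the RAM block */
    size_t page_size;
    uint32_t num;         /* pages queued */
    uint32_t normal_num;  /* filled in by the detect step */
    size_t *offset;       /* page offsets into 'host' */
} PagesSketch;

static bool page_is_zero(const uint8_t *p, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (p[i]) {
            return false;
        }
    }
    return true;
}

/* Move zero pages to the tail so offset[0..normal_num) are normal pages. */
static void zero_page_detect(PagesSketch *pages)
{
    uint32_t i = 0, j = pages->num;

    while (i < j) {
        if (page_is_zero(pages->host + pages->offset[i], pages->page_size)) {
            size_t tmp = pages->offset[i];
            pages->offset[i] = pages->offset[--j];
            pages->offset[j] = tmp;
        } else {
            i++;
        }
    }
    pages->normal_num = i;
}

int main(void)
{
    enum { PAGE = 16, N = 4 };
    uint8_t *host = calloc(N, PAGE);
    size_t offs[N] = { 0, PAGE, 2 * PAGE, 3 * PAGE };
    PagesSketch pages = { host, PAGE, N, 0, offs };

    host[PAGE + 3] = 1;       /* make page 1 non-zero */
    host[3 * PAGE] = 7;       /* make page 3 non-zero */
    zero_page_detect(&pages);
    printf("%u of %u pages are normal\n", pages.normal_num, pages.num);
    free(host);
    return 0;
}

The swap-to-the-tail partition keeps the scan linear without extra allocation; the relative order of the zero offsets should not matter to the receiver, which only needs to know which pages to treat as zero.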
* @@ -137,15 +252,28 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p) * @p: Params for the channel that we are using * @errp: pointer to an error */ -static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp) +static int nocomp_recv(MultiFDRecvParams *p, Error **errp) { - uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; + uint32_t flags; + + if (!multifd_use_packets()) { + return multifd_file_recv_data(p, errp); + } + + flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; if (flags != MULTIFD_FLAG_NOCOMP) { error_setg(errp, "multifd %u: flags received %x flags expected %x", p->id, flags, MULTIFD_FLAG_NOCOMP); return -1; } + + multifd_recv_zero_page_process(p); + + if (!p->normal_num) { + return 0; + } + for (int i = 0; i < p->normal_num; i++) { p->iov[i].iov_base = p->host + p->normal[i]; p->iov[i].iov_len = p->page_size; @@ -159,7 +287,7 @@ static MultiFDMethods multifd_nocomp_ops = { .send_prepare = nocomp_send_prepare, .recv_setup = nocomp_recv_setup, .recv_cleanup = nocomp_recv_cleanup, - .recv_pages = nocomp_recv_pages + .recv = nocomp_recv }; static MultiFDMethods *multifd_ops[MULTIFD_COMPRESSION__MAX] = { @@ -172,6 +300,18 @@ void multifd_register_ops(int method, MultiFDMethods *ops) multifd_ops[method] = ops; } +/* Reset a MultiFDPages_t* object for the next use */ +static void multifd_pages_reset(MultiFDPages_t *pages) +{ + /* + * We don't need to touch offset[] array, because it will be + * overwritten later when reused. + */ + pages->num = 0; + pages->normal_num = 0; + pages->block = NULL; +} + static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp) { MultiFDInit_t msg = {}; @@ -228,56 +368,67 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp) } if (msg.id > migrate_multifd_channels()) { - error_setg(errp, "multifd: received channel version %u " - "expected %u", msg.version, MULTIFD_VERSION); + error_setg(errp, "multifd: received channel id %u is greater than " + "number of channels %u", msg.id, migrate_multifd_channels()); return -1; } return msg.id; } -static MultiFDPages_t *multifd_pages_init(size_t size) +static MultiFDPages_t *multifd_pages_init(uint32_t n) { MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1); - pages->allocated = size; - pages->offset = g_new0(ram_addr_t, size); + pages->allocated = n; + pages->offset = g_new0(ram_addr_t, n); return pages; } static void multifd_pages_clear(MultiFDPages_t *pages) { - pages->num = 0; + multifd_pages_reset(pages); pages->allocated = 0; - pages->packet_num = 0; - pages->block = NULL; g_free(pages->offset); pages->offset = NULL; g_free(pages); } -static void multifd_send_fill_packet(MultiFDSendParams *p) +void multifd_send_fill_packet(MultiFDSendParams *p) { MultiFDPacket_t *packet = p->packet; + MultiFDPages_t *pages = p->pages; + uint64_t packet_num; + uint32_t zero_num = pages->num - pages->normal_num; int i; packet->flags = cpu_to_be32(p->flags); packet->pages_alloc = cpu_to_be32(p->pages->allocated); - packet->normal_pages = cpu_to_be32(p->normal_num); + packet->normal_pages = cpu_to_be32(pages->normal_num); + packet->zero_pages = cpu_to_be32(zero_num); packet->next_packet_size = cpu_to_be32(p->next_packet_size); - packet->packet_num = cpu_to_be64(p->packet_num); - if (p->pages->block) { - strncpy(packet->ramblock, p->pages->block->idstr, 256); + packet_num = qatomic_fetch_inc(&multifd_send_state->packet_num); + packet->packet_num = cpu_to_be64(packet_num); + + if (pages->block) { + strncpy(packet->ramblock, pages->block->idstr, 256); } - for (i = 0; i < 
p->normal_num; i++) { + for (i = 0; i < pages->num; i++) { /* there are architectures where ram_addr_t is 32 bit */ - uint64_t temp = p->normal[i]; + uint64_t temp = pages->offset[i]; packet->offset[i] = cpu_to_be64(temp); } + + p->packets_sent++; + p->total_normal_pages += pages->normal_num; + p->total_zero_pages += zero_num; + + trace_multifd_send(p->id, packet_num, pages->normal_num, zero_num, + p->flags, p->next_packet_size); } static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) @@ -318,15 +469,29 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) p->normal_num = be32_to_cpu(packet->normal_pages); if (p->normal_num > packet->pages_alloc) { error_setg(errp, "multifd: received packet " - "with %u pages and expected maximum pages are %u", + "with %u normal pages and expected maximum pages are %u", p->normal_num, packet->pages_alloc) ; return -1; } + p->zero_num = be32_to_cpu(packet->zero_pages); + if (p->zero_num > packet->pages_alloc - p->normal_num) { + error_setg(errp, "multifd: received packet " + "with %u zero pages and expected maximum zero pages are %u", + p->zero_num, packet->pages_alloc - p->normal_num) ; + return -1; + } + p->next_packet_size = be32_to_cpu(packet->next_packet_size); p->packet_num = be64_to_cpu(packet->packet_num); + p->packets_recved++; + p->total_normal_pages += p->normal_num; + p->total_zero_pages += p->zero_num; + + trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->zero_num, + p->flags, p->next_packet_size); - if (p->normal_num == 0) { + if (p->normal_num == 0 && p->zero_num == 0) { return 0; } @@ -352,26 +517,42 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) p->normal[i] = offset; } + for (i = 0; i < p->zero_num; i++) { + uint64_t offset = be64_to_cpu(packet->offset[p->normal_num + i]); + + if (offset > (p->block->used_length - p->page_size)) { + error_setg(errp, "multifd: offset too long %" PRIu64 + " (max " RAM_ADDR_FMT ")", + offset, p->block->used_length); + return -1; + } + p->zero[i] = offset; + } + return 0; } -struct { - MultiFDSendParams *params; - /* array of pages to sent */ - MultiFDPages_t *pages; - /* global number of generated multifd packets */ - uint64_t packet_num; - /* send channels ready */ - QemuSemaphore channels_ready; - /* - * Have we already run terminate threads. There is a race when it - * happens that we got one error while we are exiting. - * We will use atomic operations. Only valid values are 0 and 1. - */ - int exiting; - /* multifd ops */ - MultiFDMethods *ops; -} *multifd_send_state; +static bool multifd_send_should_exit(void) +{ + return qatomic_read(&multifd_send_state->exiting); +} + +static bool multifd_recv_should_exit(void) +{ + return qatomic_read(&multifd_recv_state->exiting); +} + +/* + * The migration thread can wait on either of the two semaphores. This + * function can be used to kick the main thread out of waiting on either of + * them. Should mostly only be called when something wrong happened with + * the current multifd send thread. + */ +static void multifd_send_kick_main(MultiFDSendParams *p) +{ + qemu_sem_post(&p->sem_sync); + qemu_sem_post(&multifd_send_state->channels_ready); +} /* * How we use multifd_send_state->pages and channel->pages? @@ -389,20 +570,23 @@ struct { * thread is using the channel mutex when changing it, and the channel * have to had finish with its own, otherwise pending_job can't be * false. + * + * Returns true if succeed, false otherwise. 
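The ownership comment above is what lets the rewrite drop the per-channel mutex: the migration thread swaps its full pages array with the channel's empty one, then publishes the work with a release store to pending_job, and (in the hunks that follow) the sender thread pairs that with an acquire load before touching p->pages. A compact sketch of that handoff shape, using C11 atomics and a POSIX semaphore in place of qatomic_*() and QemuSemaphore; the data types are invented for illustration:

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { int num; int data[128]; } Batch;

typedef struct {
    atomic_bool pending_job;   /* set by the producer, cleared by the worker */
    Batch *batch;              /* owned by whichever side the flag says */
    sem_t sem;                 /* kicks the worker */
} Channel;

static void producer_hand_off(Channel *c, Batch **mine)
{
    /* Worker is idle here: swap buffers, then publish with a release store. */
    Batch *tmp = c->batch;
    c->batch = *mine;
    *mine = tmp;
    atomic_store_explicit(&c->pending_job, true, memory_order_release);
    sem_post(&c->sem);
}

static void *worker(void *opaque)
{
    Channel *c = opaque;

    sem_wait(&c->sem);
    /* Acquire pairs with the producer's release: c->batch is visible now. */
    if (atomic_load_explicit(&c->pending_job, memory_order_acquire)) {
        printf("sending %d pages\n", c->batch->num);
        c->batch->num = 0;
        /* Release the channel again once the batch has been consumed. */
        atomic_store_explicit(&c->pending_job, false, memory_order_release);
    }
    return NULL;
}

int main(void)
{
    Channel c = { .batch = &(Batch){0} };
    Batch staging = { .num = 3 };
    Batch *mine = &staging;
    pthread_t t;

    sem_init(&c.sem, 0, 0);
    pthread_create(&t, NULL, worker, &c);
    producer_hand_off(&c, &mine);
    pthread_join(t, NULL);
    sem_destroy(&c.sem);
    return 0;
}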
*/ - -static int multifd_send_pages(QEMUFile *f) +static bool multifd_send_pages(void) { int i; static int next_channel; MultiFDSendParams *p = NULL; /* make happy gcc */ MultiFDPages_t *pages = multifd_send_state->pages; - if (qatomic_read(&multifd_send_state->exiting)) { - return -1; + if (multifd_send_should_exit()) { + return false; } + /* We wait here, until at least one channel is ready */ qemu_sem_wait(&multifd_send_state->channels_ready); + /* * next_channel can remain from a previous migration that was * using more channels, so ensure it doesn't overflow if the @@ -410,69 +594,100 @@ static int multifd_send_pages(QEMUFile *f) */ next_channel %= migrate_multifd_channels(); for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) { - p = &multifd_send_state->params[i]; - - qemu_mutex_lock(&p->mutex); - if (p->quit) { - error_report("%s: channel %d has already quit!", __func__, i); - qemu_mutex_unlock(&p->mutex); - return -1; + if (multifd_send_should_exit()) { + return false; } - if (!p->pending_job) { - p->pending_job++; + p = &multifd_send_state->params[i]; + /* + * Lockless read to p->pending_job is safe, because only multifd + * sender thread can clear it. + */ + if (qatomic_read(&p->pending_job) == false) { next_channel = (i + 1) % migrate_multifd_channels(); break; } - qemu_mutex_unlock(&p->mutex); } - assert(!p->pages->num); - assert(!p->pages->block); - p->packet_num = multifd_send_state->packet_num++; + /* + * Make sure we read p->pending_job before all the rest. Pairs with + * qatomic_store_release() in multifd_send_thread(). + */ + smp_mb_acquire(); + assert(!p->pages->num); multifd_send_state->pages = p->pages; p->pages = pages; - qemu_mutex_unlock(&p->mutex); + /* + * Making sure p->pages is setup before marking pending_job=true. Pairs + * with the qatomic_load_acquire() in multifd_send_thread(). + */ + qatomic_store_release(&p->pending_job, true); qemu_sem_post(&p->sem); - return 1; + return true; } -int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset) +static inline bool multifd_queue_empty(MultiFDPages_t *pages) { - MultiFDPages_t *pages = multifd_send_state->pages; - bool changed = false; + return pages->num == 0; +} - if (!pages->block) { - pages->block = block; - } +static inline bool multifd_queue_full(MultiFDPages_t *pages) +{ + return pages->num == pages->allocated; +} - if (pages->block == block) { - pages->offset[pages->num] = offset; - pages->num++; +static inline void multifd_enqueue(MultiFDPages_t *pages, ram_addr_t offset) +{ + pages->offset[pages->num++] = offset; +} - if (pages->num < pages->allocated) { - return 1; - } - } else { - changed = true; - } +/* Returns true if enqueue successful, false otherwise */ +bool multifd_queue_page(RAMBlock *block, ram_addr_t offset) +{ + MultiFDPages_t *pages; - if (multifd_send_pages(f) < 0) { - return -1; +retry: + pages = multifd_send_state->pages; + + /* If the queue is empty, we can already enqueue now */ + if (multifd_queue_empty(pages)) { + pages->block = block; + multifd_enqueue(pages, offset); + return true; } - if (changed) { - return multifd_queue_page(f, block, offset); + /* + * Not empty, meanwhile we need a flush. It can because of either: + * + * (1) The page is not on the same ramblock of previous ones, or, + * (2) The queue is full. + * + * After flush, always retry. + */ + if (pages->block != block || multifd_queue_full(pages)) { + if (!multifd_send_pages()) { + return false; + } + goto retry; } - return 1; + /* Not empty, and we still have space, do it! 
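multifd_queue_page() above reduces to three cases: an empty queue always accepts the page, a queue that is full or holds pages of a different RAMBlock is flushed and then retried, and otherwise the page is simply appended. A small standalone sketch of that decision logic, with a stub flush standing in for multifd_send_pages():

#include <stdbool.h>
#include <stdio.h>

enum { QUEUE_CAP = 4 };

typedef struct {
    const char *block;          /* which RAM block the offsets belong to */
    unsigned long offset[QUEUE_CAP];
    unsigned num;
} Queue;

/* Stand-in for multifd_send_pages(): hand the batch to a channel. */
static bool flush_queue(Queue *q)
{
    printf("flush %u pages of block %s\n", q->num, q->block);
    q->num = 0;
    q->block = NULL;
    return true;                /* false would mean the send side is exiting */
}

static bool queue_page(Queue *q, const char *block, unsigned long offset)
{
retry:
    if (q->num == 0) {                      /* empty: start a new batch */
        q->block = block;
        q->offset[q->num++] = offset;
        return true;
    }
    if (q->block != block || q->num == QUEUE_CAP) {
        if (!flush_queue(q)) {              /* wrong block or full: flush */
            return false;
        }
        goto retry;                         /* then queue into the fresh batch */
    }
    q->offset[q->num++] = offset;           /* same block, space left */
    return true;
}

int main(void)
{
    Queue q = {0};
    for (unsigned long i = 0; i < 6; i++) {
        queue_page(&q, "ram0", i * 4096);
    }
    queue_page(&q, "ram1", 0);              /* block change forces a flush */
    flush_queue(&q);                        /* final flush */
    return 0;
}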
*/ + multifd_enqueue(pages, offset); + return true; } -static void multifd_send_terminate_threads(Error *err) +/* Multifd send side hit an error; remember it and prepare to quit */ +static void multifd_send_set_error(Error *err) { - int i; - - trace_multifd_send_terminate_threads(err != NULL); + /* + * We don't want to exit each threads twice. Depending on where + * we get the error, or if there are two independent errors in two + * threads at the same time, we can end calling this function + * twice. + */ + if (qatomic_xchg(&multifd_send_state->exiting, 1)) { + return; + } if (err) { MigrationState *s = migrate_get_current(); @@ -485,79 +700,100 @@ static void multifd_send_terminate_threads(Error *err) MIGRATION_STATUS_FAILED); } } +} + +static void multifd_send_terminate_threads(void) +{ + int i; + + trace_multifd_send_terminate_threads(); /* - * We don't want to exit each threads twice. Depending on where - * we get the error, or if there are two independent errors in two - * threads at the same time, we can end calling this function - * twice. + * Tell everyone we're quitting. No xchg() needed here; we simply + * always set it. */ - if (qatomic_xchg(&multifd_send_state->exiting, 1)) { - return; - } + qatomic_set(&multifd_send_state->exiting, 1); + /* + * Firstly, kick all threads out; no matter whether they are just idle, + * or blocked in an IO system call. + */ for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; - qemu_mutex_lock(&p->mutex); - p->quit = true; qemu_sem_post(&p->sem); if (p->c) { qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); } - qemu_mutex_unlock(&p->mutex); } -} - -static int multifd_send_channel_destroy(QIOChannel *send) -{ - return socket_send_channel_destroy(send); -} - -void multifd_save_cleanup(void) -{ - int i; - if (!migrate_multifd()) { - return; - } - multifd_send_terminate_threads(NULL); + /* + * Finally recycle all the threads. + */ for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; - if (p->running) { + if (p->tls_thread_created) { + qemu_thread_join(&p->tls_thread); + } + + if (p->thread_created) { qemu_thread_join(&p->thread); } } - for (i = 0; i < migrate_multifd_channels(); i++) { - MultiFDSendParams *p = &multifd_send_state->params[i]; - Error *local_err = NULL; +} - if (p->registered_yank) { - migration_ioc_unregister_yank(p->c); - } - multifd_send_channel_destroy(p->c); +static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp) +{ + if (p->c) { + migration_ioc_unregister_yank(p->c); + /* + * The object_unref() cannot guarantee the fd will always be + * released because finalize() of the iochannel is only + * triggered on the last reference and it's not guaranteed + * that we always hold the last refcount when reaching here. + * + * Closing the fd explicitly has the benefit that if there is any + * registered I/O handler callbacks on such fd, that will get a + * POLLNVAL event and will further trigger the cleanup to finally + * release the IOC. + * + * FIXME: It should logically be guaranteed that all multifd + * channels have no I/O handler callback registered when reaching + * here, because migration thread will wait for all multifd channel + * establishments to complete during setup. Since + * migrate_fd_cleanup() will be scheduled in main thread too, all + * previous callbacks should guarantee to be completed when + * reaching here. See multifd_send_state.channels_created and its + * usage. 
In the future, we could replace this with an assert + * making sure we're the last reference, or simply drop it if above + * is more clear to be justified. + */ + qio_channel_close(p->c, &error_abort); + object_unref(OBJECT(p->c)); p->c = NULL; - qemu_mutex_destroy(&p->mutex); - qemu_sem_destroy(&p->sem); - qemu_sem_destroy(&p->sem_sync); - g_free(p->name); - p->name = NULL; - multifd_pages_clear(p->pages); - p->pages = NULL; - p->packet_len = 0; - g_free(p->packet); - p->packet = NULL; - g_free(p->iov); - p->iov = NULL; - g_free(p->normal); - p->normal = NULL; - multifd_send_state->ops->send_cleanup(p, &local_err); - if (local_err) { - migrate_set_error(migrate_get_current(), local_err); - error_free(local_err); - } } + qemu_sem_destroy(&p->sem); + qemu_sem_destroy(&p->sem_sync); + g_free(p->name); + p->name = NULL; + multifd_pages_clear(p->pages); + p->pages = NULL; + p->packet_len = 0; + g_free(p->packet); + p->packet = NULL; + g_free(p->iov); + p->iov = NULL; + multifd_send_state->ops->send_cleanup(p, errp); + + return *errp == NULL; +} + +static void multifd_send_cleanup_state(void) +{ + file_cleanup_outgoing_migration(); + socket_cleanup_outgoing_migration(); + qemu_sem_destroy(&multifd_send_state->channels_created); qemu_sem_destroy(&multifd_send_state->channels_ready); g_free(multifd_send_state->params); multifd_send_state->params = NULL; @@ -567,14 +803,37 @@ void multifd_save_cleanup(void) multifd_send_state = NULL; } -static int multifd_zero_copy_flush(QIOChannel *c) +void multifd_send_shutdown(void) { - int ret; - Error *err = NULL; + int i; - ret = qio_channel_flush(c, &err); - if (ret < 0) { - error_report_err(err); + if (!migrate_multifd()) { + return; + } + + multifd_send_terminate_threads(); + + for (i = 0; i < migrate_multifd_channels(); i++) { + MultiFDSendParams *p = &multifd_send_state->params[i]; + Error *local_err = NULL; + + if (!multifd_send_cleanup_channel(p, &local_err)) { + migrate_set_error(migrate_get_current(), local_err); + error_free(local_err); + } + } + + multifd_send_cleanup_state(); +} + +static int multifd_zero_copy_flush(QIOChannel *c) +{ + int ret; + Error *err = NULL; + + ret = qio_channel_flush(c, &err); + if (ret < 0) { + error_report_err(err); return -1; } if (ret == 1) { @@ -584,7 +843,7 @@ static int multifd_zero_copy_flush(QIOChannel *c) return ret; } -int multifd_send_sync_main(QEMUFile *f) +int multifd_send_sync_main(void) { int i; bool flush_zero_copy; @@ -593,47 +852,38 @@ int multifd_send_sync_main(QEMUFile *f) return 0; } if (multifd_send_state->pages->num) { - if (multifd_send_pages(f) < 0) { + if (!multifd_send_pages()) { error_report("%s: multifd_send_pages fail", __func__); return -1; } } - /* - * When using zero-copy, it's necessary to flush the pages before any of - * the pages can be sent again, so we'll make sure the new version of the - * pages will always arrive _later_ than the old pages. - * - * Currently we achieve this by flushing the zero-page requested writes - * per ram iteration, but in the future we could potentially optimize it - * to be less frequent, e.g. only after we finished one whole scanning of - * all the dirty bitmaps. 
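The termination path above is deliberately two-phase: first every worker is kicked out of whatever it is blocked on (the exiting flag is set, each channel's semaphore is posted, and the I/O channel is shut down), and only then are the TLS and sender threads joined. A minimal pthread sketch of that kick-then-join shape, with the I/O shutdown step elided:

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum { NCHANNELS = 3 };

static atomic_bool exiting;
static sem_t sem[NCHANNELS];
static pthread_t thread[NCHANNELS];

static void *channel_thread(void *opaque)
{
    long id = (long)(intptr_t)opaque;

    for (;;) {
        sem_wait(&sem[id]);                 /* wait for work or for a kick */
        if (atomic_load(&exiting)) {
            break;
        }
        /* ... handle one job ... */
    }
    printf("channel %ld exiting\n", id);
    return NULL;
}

static void terminate_and_join(void)
{
    /* Phase 1: tell everyone to quit and wake them up. */
    atomic_store(&exiting, true);
    for (long i = 0; i < NCHANNELS; i++) {
        sem_post(&sem[i]);
        /* the real code also shuts down the I/O channel here */
    }
    /* Phase 2: recycle the threads. */
    for (long i = 0; i < NCHANNELS; i++) {
        pthread_join(thread[i], NULL);
    }
}

int main(void)
{
    for (long i = 0; i < NCHANNELS; i++) {
        sem_init(&sem[i], 0, 0);
        pthread_create(&thread[i], NULL, channel_thread, (void *)(intptr_t)i);
    }
    terminate_and_join();
    return 0;
}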
- */ - flush_zero_copy = migrate_zero_copy_send(); for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; - trace_multifd_send_sync_main_signal(p->id); - - qemu_mutex_lock(&p->mutex); - - if (p->quit) { - error_report("%s: channel %d has already quit", __func__, i); - qemu_mutex_unlock(&p->mutex); + if (multifd_send_should_exit()) { return -1; } - p->packet_num = multifd_send_state->packet_num++; - p->flags |= MULTIFD_FLAG_SYNC; - p->pending_job++; - qemu_mutex_unlock(&p->mutex); + trace_multifd_send_sync_main_signal(p->id); + + /* + * We should be the only user so far, so not possible to be set by + * others concurrently. + */ + assert(qatomic_read(&p->pending_sync) == false); + qatomic_set(&p->pending_sync, true); qemu_sem_post(&p->sem); } for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; + if (multifd_send_should_exit()) { + return -1; + } + qemu_sem_wait(&multifd_send_state->channels_ready); trace_multifd_send_sync_main_wait(p->id); qemu_sem_wait(&p->sem_sync); @@ -653,96 +903,93 @@ static void *multifd_send_thread(void *opaque) MigrationThread *thread = NULL; Error *local_err = NULL; int ret = 0; - bool use_zero_copy_send = migrate_zero_copy_send(); + bool use_packets = multifd_use_packets(); thread = migration_threads_add(p->name, qemu_get_thread_id()); trace_multifd_send_thread_start(p->id); rcu_register_thread(); - if (multifd_send_initial_packet(p, &local_err) < 0) { - ret = -1; - goto out; + if (use_packets) { + if (multifd_send_initial_packet(p, &local_err) < 0) { + ret = -1; + goto out; + } } - /* initial packet */ - p->num_packets = 1; while (true) { qemu_sem_post(&multifd_send_state->channels_ready); qemu_sem_wait(&p->sem); - if (qatomic_read(&multifd_send_state->exiting)) { + if (multifd_send_should_exit()) { break; } - qemu_mutex_lock(&p->mutex); - - if (p->pending_job) { - uint64_t packet_num = p->packet_num; - uint32_t flags; - p->normal_num = 0; - if (use_zero_copy_send) { - p->iovs_num = 0; - } else { - p->iovs_num = 1; - } + /* + * Read pending_job flag before p->pages. Pairs with the + * qatomic_store_release() in multifd_send_pages(). 
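multifd_send_sync_main() above no longer piggybacks on pending_job: it raises a separate pending_sync flag on each channel, posts the channel's semaphore, and then waits once on channels_ready and once on the channel's sem_sync; the sender thread (in the following hunk) answers by emitting a SYNC packet and posting sem_sync, and as that hunk notes the standalone flag needs no extra memory barriers. A reduced single-channel sketch of that request/acknowledge round trip, again with standard primitives standing in for the QEMU wrappers and the channels_ready wait omitted:

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool pending_sync;
static sem_t sem;          /* kicks the worker */
static sem_t sem_sync;     /* worker acknowledges the sync */

static void *sender_thread(void *opaque)
{
    (void)opaque;
    sem_wait(&sem);
    if (atomic_load(&pending_sync)) {
        /* the real thread emits a MULTIFD_FLAG_SYNC packet here */
        printf("sync packet sent\n");
        atomic_store(&pending_sync, false);
        sem_post(&sem_sync);
    }
    return NULL;
}

int main(void)
{
    pthread_t t;

    sem_init(&sem, 0, 0);
    sem_init(&sem_sync, 0, 0);
    pthread_create(&t, NULL, sender_thread, NULL);

    /* Request a sync and wait for the acknowledgement. */
    atomic_store(&pending_sync, true);
    sem_post(&sem);
    sem_wait(&sem_sync);
    printf("channel synced\n");

    pthread_join(t, NULL);
    return 0;
}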
+ */ + if (qatomic_load_acquire(&p->pending_job)) { + MultiFDPages_t *pages = p->pages; - for (int i = 0; i < p->pages->num; i++) { - p->normal[p->normal_num] = p->pages->offset[i]; - p->normal_num++; - } + p->iovs_num = 0; + assert(pages->num); - if (p->normal_num) { - ret = multifd_send_state->ops->send_prepare(p, &local_err); - if (ret != 0) { - qemu_mutex_unlock(&p->mutex); - break; - } + ret = multifd_send_state->ops->send_prepare(p, &local_err); + if (ret != 0) { + break; } - multifd_send_fill_packet(p); - flags = p->flags; - p->flags = 0; - p->num_packets++; - p->total_normal_pages += p->normal_num; - p->pages->num = 0; - p->pages->block = NULL; - qemu_mutex_unlock(&p->mutex); - trace_multifd_send(p->id, packet_num, p->normal_num, flags, - p->next_packet_size); - - if (use_zero_copy_send) { - /* Send header first, without zerocopy */ - ret = qio_channel_write_all(p->c, (void *)p->packet, - p->packet_len, &local_err); - if (ret != 0) { - break; - } + if (migrate_mapped_ram()) { + ret = file_write_ramblock_iov(p->c, p->iov, p->iovs_num, + p->pages->block, &local_err); } else { - /* Send header using the same writev call */ - p->iov[0].iov_len = p->packet_len; - p->iov[0].iov_base = p->packet; + ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, + NULL, 0, p->write_flags, + &local_err); } - ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL, - 0, p->write_flags, &local_err); if (ret != 0) { break; } stat64_add(&mig_stats.multifd_bytes, p->next_packet_size + p->packet_len); + stat64_add(&mig_stats.normal_pages, pages->normal_num); + stat64_add(&mig_stats.zero_pages, pages->num - pages->normal_num); + + multifd_pages_reset(p->pages); p->next_packet_size = 0; - qemu_mutex_lock(&p->mutex); - p->pending_job--; - qemu_mutex_unlock(&p->mutex); - if (flags & MULTIFD_FLAG_SYNC) { - qemu_sem_post(&p->sem_sync); - } + /* + * Making sure p->pages is published before saying "we're + * free". Pairs with the smp_mb_acquire() in + * multifd_send_pages(). + */ + qatomic_store_release(&p->pending_job, false); } else { - qemu_mutex_unlock(&p->mutex); - /* sometimes there are spurious wakeups */ + /* + * If not a normal job, must be a sync request. Note that + * pending_sync is a standalone flag (unlike pending_job), so + * it doesn't require explicit memory barriers. 
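On the data path above, the packet header and the guest pages are gathered into one iovec array and written with a single vectored call (zero-copy being the exception, where the header is written separately, as the comment in nocomp_send_prepare() notes). A self-contained sketch of that gather-write shape using plain writev(2), with made-up buffer sizes and /dev/null as the destination:

#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

enum { PAGE_SIZE = 4096, NPAGES = 3 };

int main(void)
{
    char header[64] = "hdr";                /* stands in for the packet header */
    static char pages[NPAGES][PAGE_SIZE];   /* stands in for guest pages */
    struct iovec iov[1 + NPAGES];
    int iovs_num = 0;
    int fd = open("/dev/null", O_WRONLY);

    if (fd < 0) {
        return 1;
    }

    /* Slot 0 carries the header (the non-zero-copy case); with zero-copy the
     * header would be written separately and the iov would hold pages only. */
    iov[iovs_num].iov_base = header;
    iov[iovs_num].iov_len = sizeof(header);
    iovs_num++;

    for (int i = 0; i < NPAGES; i++) {
        iov[iovs_num].iov_base = pages[i];
        iov[iovs_num].iov_len = PAGE_SIZE;
        iovs_num++;
    }

    /* One vectored write pushes header + all pages in a single syscall. */
    ssize_t n = writev(fd, iov, iovs_num);
    printf("wrote %zd bytes in %d iovs\n", n, iovs_num);
    close(fd);
    return 0;
}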
+ */ + assert(qatomic_read(&p->pending_sync)); + + if (use_packets) { + p->flags = MULTIFD_FLAG_SYNC; + multifd_send_fill_packet(p); + ret = qio_channel_write_all(p->c, (void *)p->packet, + p->packet_len, &local_err); + if (ret != 0) { + break; + } + /* p->next_packet_size will always be zero for a SYNC packet */ + stat64_add(&mig_stats.multifd_bytes, p->packet_len); + p->flags = 0; + } + + qatomic_set(&p->pending_sync, false); + qemu_sem_post(&p->sem_sync); } } @@ -750,62 +997,37 @@ static void *multifd_send_thread(void *opaque) if (ret) { assert(local_err); trace_multifd_send_error(p->id); - multifd_send_terminate_threads(local_err); - qemu_sem_post(&p->sem_sync); - qemu_sem_post(&multifd_send_state->channels_ready); + multifd_send_set_error(local_err); + multifd_send_kick_main(p); error_free(local_err); } - qemu_mutex_lock(&p->mutex); - p->running = false; - qemu_mutex_unlock(&p->mutex); - rcu_unregister_thread(); migration_threads_remove(thread); - trace_multifd_send_thread_end(p->id, p->num_packets, p->total_normal_pages); + trace_multifd_send_thread_end(p->id, p->packets_sent, p->total_normal_pages, + p->total_zero_pages); return NULL; } -static bool multifd_channel_connect(MultiFDSendParams *p, - QIOChannel *ioc, - Error **errp); - -static void multifd_tls_outgoing_handshake(QIOTask *task, - gpointer opaque) -{ - MultiFDSendParams *p = opaque; - QIOChannel *ioc = QIO_CHANNEL(qio_task_get_source(task)); - Error *err = NULL; - - if (!qio_task_propagate_error(task, &err)) { - trace_multifd_tls_outgoing_handshake_complete(ioc); - if (multifd_channel_connect(p, ioc, &err)) { - return; - } - } - - trace_multifd_tls_outgoing_handshake_error(ioc, error_get_pretty(err)); +static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque); - /* - * Error happen, mark multifd_send_thread status as 'quit' although it - * is not created, and then tell who pay attention to me. - */ - p->quit = true; - qemu_sem_post(&multifd_send_state->channels_ready); - qemu_sem_post(&p->sem_sync); -} +typedef struct { + MultiFDSendParams *p; + QIOChannelTLS *tioc; +} MultiFDTLSThreadArgs; static void *multifd_tls_handshake_thread(void *opaque) { - MultiFDSendParams *p = opaque; - QIOChannelTLS *tioc = QIO_CHANNEL_TLS(p->c); + MultiFDTLSThreadArgs *args = opaque; - qio_channel_tls_handshake(tioc, - multifd_tls_outgoing_handshake, - p, + qio_channel_tls_handshake(args->tioc, + multifd_new_send_channel_async, + args->p, NULL, NULL); + g_free(args); + return NULL; } @@ -815,6 +1037,7 @@ static bool multifd_tls_channel_connect(MultiFDSendParams *p, { MigrationState *s = migrate_get_current(); const char *hostname = s->hostname; + MultiFDTLSThreadArgs *args; QIOChannelTLS *tioc; tioc = migration_tls_client_create(ioc, hostname, errp); @@ -822,97 +1045,121 @@ static bool multifd_tls_channel_connect(MultiFDSendParams *p, return false; } + /* + * Ownership of the socket channel now transfers to the newly + * created TLS channel, which has already taken a reference. 
+ */ object_unref(OBJECT(ioc)); trace_multifd_tls_outgoing_handshake_start(ioc, tioc, hostname); qio_channel_set_name(QIO_CHANNEL(tioc), "multifd-tls-outgoing"); - p->c = QIO_CHANNEL(tioc); - qemu_thread_create(&p->thread, "multifd-tls-handshake-worker", - multifd_tls_handshake_thread, p, + + args = g_new0(MultiFDTLSThreadArgs, 1); + args->tioc = tioc; + args->p = p; + + p->tls_thread_created = true; + qemu_thread_create(&p->tls_thread, "multifd-tls-handshake-worker", + multifd_tls_handshake_thread, args, QEMU_THREAD_JOINABLE); return true; } -static bool multifd_channel_connect(MultiFDSendParams *p, - QIOChannel *ioc, - Error **errp) +void multifd_channel_connect(MultiFDSendParams *p, QIOChannel *ioc) { - trace_multifd_set_outgoing_channel( - ioc, object_get_typename(OBJECT(ioc)), - migrate_get_current()->hostname); - - if (migrate_channel_requires_tls_upgrade(ioc)) { - /* - * tls_channel_connect will call back to this - * function after the TLS handshake, - * so we mustn't call multifd_send_thread until then - */ - return multifd_tls_channel_connect(p, ioc, errp); + qio_channel_set_delay(ioc, false); - } else { - migration_ioc_register_yank(ioc); - p->registered_yank = true; - p->c = ioc; - qemu_thread_create(&p->thread, p->name, multifd_send_thread, p, - QEMU_THREAD_JOINABLE); - } - return true; -} + migration_ioc_register_yank(ioc); + /* Setup p->c only if the channel is completely setup */ + p->c = ioc; -static void multifd_new_send_channel_cleanup(MultiFDSendParams *p, - QIOChannel *ioc, Error *err) -{ - migrate_set_error(migrate_get_current(), err); - /* Error happen, we need to tell who pay attention to me */ - qemu_sem_post(&multifd_send_state->channels_ready); - qemu_sem_post(&p->sem_sync); - /* - * Although multifd_send_thread is not created, but main migration - * thread need to judge whether it is running, so we need to mark - * its status. - */ - p->quit = true; - object_unref(OBJECT(ioc)); - error_free(err); + p->thread_created = true; + qemu_thread_create(&p->thread, p->name, multifd_send_thread, p, + QEMU_THREAD_JOINABLE); } +/* + * When TLS is enabled this function is called once to establish the + * TLS connection and a second time after the TLS handshake to create + * the multifd channel. Without TLS it goes straight into the channel + * creation. + */ static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque) { MultiFDSendParams *p = opaque; QIOChannel *ioc = QIO_CHANNEL(qio_task_get_source(task)); Error *local_err = NULL; + bool ret; trace_multifd_new_send_channel_async(p->id); - if (!qio_task_propagate_error(task, &local_err)) { - qio_channel_set_delay(ioc, false); - p->running = true; - if (multifd_channel_connect(p, ioc, &local_err)) { + + if (qio_task_propagate_error(task, &local_err)) { + ret = false; + goto out; + } + + trace_multifd_set_outgoing_channel(ioc, object_get_typename(OBJECT(ioc)), + migrate_get_current()->hostname); + + if (migrate_channel_requires_tls_upgrade(ioc)) { + ret = multifd_tls_channel_connect(p, ioc, &local_err); + if (ret) { return; } + } else { + multifd_channel_connect(p, ioc); + ret = true; + } + +out: + /* + * Here we're not interested whether creation succeeded, only that + * it happened at all. 
+ */ + multifd_send_channel_created(); + + if (ret) { + return; } trace_multifd_new_send_channel_async_error(p->id, local_err); - multifd_new_send_channel_cleanup(p, ioc, local_err); + multifd_send_set_error(local_err); + /* + * For error cases (TLS or non-TLS), IO channel is always freed here + * rather than when cleanup multifd: since p->c is not set, multifd + * cleanup code doesn't even know its existence. + */ + object_unref(OBJECT(ioc)); + error_free(local_err); } -static void multifd_new_send_channel_create(gpointer opaque) +static bool multifd_new_send_channel_create(gpointer opaque, Error **errp) { + if (!multifd_use_packets()) { + return file_send_channel_create(opaque, errp); + } + socket_send_channel_create(multifd_new_send_channel_async, opaque); + return true; } -int multifd_save_setup(Error **errp) +bool multifd_send_setup(void) { - int thread_count; + MigrationState *s = migrate_get_current(); + Error *local_err = NULL; + int thread_count, ret = 0; uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size(); + bool use_packets = multifd_use_packets(); uint8_t i; if (!migrate_multifd()) { - return 0; + return true; } thread_count = migrate_multifd_channels(); multifd_send_state = g_malloc0(sizeof(*multifd_send_state)); multifd_send_state->params = g_new0(MultiFDSendParams, thread_count); multifd_send_state->pages = multifd_pages_init(page_count); + qemu_sem_init(&multifd_send_state->channels_created, 0); qemu_sem_init(&multifd_send_state->channels_ready, 0); qatomic_set(&multifd_send_state->exiting, 0); multifd_send_state->ops = multifd_ops[migrate_multifd_compression()]; @@ -920,59 +1167,112 @@ int multifd_save_setup(Error **errp) for (i = 0; i < thread_count; i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; - qemu_mutex_init(&p->mutex); qemu_sem_init(&p->sem, 0); qemu_sem_init(&p->sem_sync, 0); - p->quit = false; - p->pending_job = 0; p->id = i; p->pages = multifd_pages_init(page_count); - p->packet_len = sizeof(MultiFDPacket_t) - + sizeof(uint64_t) * page_count; - p->packet = g_malloc0(p->packet_len); - p->packet->magic = cpu_to_be32(MULTIFD_MAGIC); - p->packet->version = cpu_to_be32(MULTIFD_VERSION); + + if (use_packets) { + p->packet_len = sizeof(MultiFDPacket_t) + + sizeof(uint64_t) * page_count; + p->packet = g_malloc0(p->packet_len); + p->packet->magic = cpu_to_be32(MULTIFD_MAGIC); + p->packet->version = cpu_to_be32(MULTIFD_VERSION); + + /* We need one extra place for the packet header */ + p->iov = g_new0(struct iovec, page_count + 1); + } else { + p->iov = g_new0(struct iovec, page_count); + } p->name = g_strdup_printf("multifdsend_%d", i); - /* We need one extra place for the packet header */ - p->iov = g_new0(struct iovec, page_count + 1); - p->normal = g_new0(ram_addr_t, page_count); p->page_size = qemu_target_page_size(); p->page_count = page_count; + p->write_flags = 0; - if (migrate_zero_copy_send()) { - p->write_flags = QIO_CHANNEL_WRITE_FLAG_ZERO_COPY; - } else { - p->write_flags = 0; + if (!multifd_new_send_channel_create(p, &local_err)) { + return false; } + } - multifd_new_send_channel_create(p); + /* + * Wait until channel creation has started for all channels. The + * creation can still fail, but no more channels will be created + * past this point. 
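multifd_send_setup() above can only hand the channels to send_setup() once it knows no further channel-creation callbacks are in flight; every creation attempt, successful or not, posts channels_created exactly once, and the loop that follows waits for one post per channel. A minimal sketch of that counting-semaphore rendezvous, with threads standing in for the asynchronous creation callbacks:

#include <pthread.h>
#include <semaphore.h>
#include <stdint.h>
#include <stdio.h>

enum { NCHANNELS = 4 };

static sem_t channels_created;

/* Stands in for one asynchronous channel-creation attempt. */
static void *create_channel(void *opaque)
{
    long id = (long)(intptr_t)opaque;

    if (id == 2) {
        fprintf(stderr, "channel %ld failed to connect\n", id);
    }
    /* Post whether or not creation succeeded: setup only needs to know
     * that the attempt has happened. */
    sem_post(&channels_created);
    return NULL;
}

int main(void)
{
    pthread_t t[NCHANNELS];

    sem_init(&channels_created, 0, 0);
    for (long i = 0; i < NCHANNELS; i++) {
        pthread_create(&t[i], NULL, create_channel, (void *)(intptr_t)i);
    }

    /* Wait until every creation attempt has been accounted for. */
    for (int i = 0; i < NCHANNELS; i++) {
        sem_wait(&channels_created);
    }
    printf("no more channels will be created past this point\n");

    for (int i = 0; i < NCHANNELS; i++) {
        pthread_join(t[i], NULL);
    }
    return 0;
}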
+ */ + for (i = 0; i < thread_count; i++) { + qemu_sem_wait(&multifd_send_state->channels_created); } for (i = 0; i < thread_count; i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; - Error *local_err = NULL; - int ret; ret = multifd_send_state->ops->send_setup(p, &local_err); if (ret) { - error_propagate(errp, local_err); - return ret; + break; } } - return 0; + + if (ret) { + migrate_set_error(s, local_err); + error_report_err(local_err); + migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, + MIGRATION_STATUS_FAILED); + return false; + } + + return true; } -struct { - MultiFDRecvParams *params; - /* number of created threads */ - int count; - /* syncs main thread and channels */ - QemuSemaphore sem_sync; - /* global number of generated multifd packets */ - uint64_t packet_num; - /* multifd ops */ - MultiFDMethods *ops; -} *multifd_recv_state; +bool multifd_recv(void) +{ + int i; + static int next_recv_channel; + MultiFDRecvParams *p = NULL; + MultiFDRecvData *data = multifd_recv_state->data; + + /* + * next_channel can remain from a previous migration that was + * using more channels, so ensure it doesn't overflow if the + * limit is lower now. + */ + next_recv_channel %= migrate_multifd_channels(); + for (i = next_recv_channel;; i = (i + 1) % migrate_multifd_channels()) { + if (multifd_recv_should_exit()) { + return false; + } + + p = &multifd_recv_state->params[i]; + + if (qatomic_read(&p->pending_job) == false) { + next_recv_channel = (i + 1) % migrate_multifd_channels(); + break; + } + } + + /* + * Order pending_job read before manipulating p->data below. Pairs + * with qatomic_store_release() at multifd_recv_thread(). + */ + smp_mb_acquire(); + + assert(!p->data->size); + multifd_recv_state->data = p->data; + p->data = data; + + /* + * Order p->data update before setting pending_job. Pairs with + * qatomic_load_acquire() at multifd_recv_thread(). + */ + qatomic_store_release(&p->pending_job, true); + qemu_sem_post(&p->sem); + + return true; +} + +MultiFDRecvData *multifd_get_recv_data(void) +{ + return multifd_recv_state->data; +} static void multifd_recv_terminate_threads(Error *err) { @@ -980,6 +1280,10 @@ static void multifd_recv_terminate_threads(Error *err) trace_multifd_recv_terminate_threads(err != NULL); + if (qatomic_xchg(&multifd_recv_state->exiting, 1)) { + return; + } + if (err) { MigrationState *s = migrate_get_current(); migrate_set_error(s, err); @@ -993,8 +1297,29 @@ static void multifd_recv_terminate_threads(Error *err) for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDRecvParams *p = &multifd_recv_state->params[i]; - qemu_mutex_lock(&p->mutex); - p->quit = true; + /* + * The migration thread and channels interact differently + * depending on the presence of packets. + */ + if (multifd_use_packets()) { + /* + * The channel receives as long as there are packets. When + * packets end (i.e. MULTIFD_FLAG_SYNC is reached), the + * channel waits for the migration thread to sync. If the + * sync never happens, do it here. + */ + qemu_sem_post(&p->sem_sync); + } else { + /* + * The channel waits for the migration thread to give it + * work. When the migration thread runs out of work, it + * releases the channel and waits for any pending work to + * finish. If we reach here (e.g. due to error) before the + * work runs out, release the channel. + */ + qemu_sem_post(&p->sem); + } + /* * We could arrive here for two reasons: * - normal quit, i.e. 
everything went fine, just finished @@ -1004,18 +1329,50 @@ static void multifd_recv_terminate_threads(Error *err) if (p->c) { qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); } - qemu_mutex_unlock(&p->mutex); } } -void multifd_load_shutdown(void) +void multifd_recv_shutdown(void) { if (migrate_multifd()) { multifd_recv_terminate_threads(NULL); } } -void multifd_load_cleanup(void) +static void multifd_recv_cleanup_channel(MultiFDRecvParams *p) +{ + migration_ioc_unregister_yank(p->c); + object_unref(OBJECT(p->c)); + p->c = NULL; + qemu_mutex_destroy(&p->mutex); + qemu_sem_destroy(&p->sem_sync); + qemu_sem_destroy(&p->sem); + g_free(p->name); + p->name = NULL; + p->packet_len = 0; + g_free(p->packet); + p->packet = NULL; + g_free(p->iov); + p->iov = NULL; + g_free(p->normal); + p->normal = NULL; + g_free(p->zero); + p->zero = NULL; + multifd_recv_state->ops->recv_cleanup(p); +} + +static void multifd_recv_cleanup_state(void) +{ + qemu_sem_destroy(&multifd_recv_state->sem_sync); + g_free(multifd_recv_state->params); + multifd_recv_state->params = NULL; + g_free(multifd_recv_state->data); + multifd_recv_state->data = NULL; + g_free(multifd_recv_state); + multifd_recv_state = NULL; +} + +void multifd_recv_cleanup(void) { int i; @@ -1026,56 +1383,65 @@ void multifd_load_cleanup(void) for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDRecvParams *p = &multifd_recv_state->params[i]; - if (p->running) { - /* - * multifd_recv_thread may hung at MULTIFD_FLAG_SYNC handle code, - * however try to wakeup it without harm in cleanup phase. - */ - qemu_sem_post(&p->sem_sync); + if (p->thread_created) { + qemu_thread_join(&p->thread); } - - qemu_thread_join(&p->thread); } for (i = 0; i < migrate_multifd_channels(); i++) { - MultiFDRecvParams *p = &multifd_recv_state->params[i]; - - migration_ioc_unregister_yank(p->c); - object_unref(OBJECT(p->c)); - p->c = NULL; - qemu_mutex_destroy(&p->mutex); - qemu_sem_destroy(&p->sem_sync); - g_free(p->name); - p->name = NULL; - p->packet_len = 0; - g_free(p->packet); - p->packet = NULL; - g_free(p->iov); - p->iov = NULL; - g_free(p->normal); - p->normal = NULL; - multifd_recv_state->ops->recv_cleanup(p); + multifd_recv_cleanup_channel(&multifd_recv_state->params[i]); } - qemu_sem_destroy(&multifd_recv_state->sem_sync); - g_free(multifd_recv_state->params); - multifd_recv_state->params = NULL; - g_free(multifd_recv_state); - multifd_recv_state = NULL; + multifd_recv_cleanup_state(); } void multifd_recv_sync_main(void) { + int thread_count = migrate_multifd_channels(); + bool file_based = !multifd_use_packets(); int i; if (!migrate_multifd()) { return; } - for (i = 0; i < migrate_multifd_channels(); i++) { - MultiFDRecvParams *p = &multifd_recv_state->params[i]; - trace_multifd_recv_sync_main_wait(p->id); + /* + * File-based channels don't use packets and therefore need to + * wait for more work. Release them to start the sync. + */ + if (file_based) { + for (i = 0; i < thread_count; i++) { + MultiFDRecvParams *p = &multifd_recv_state->params[i]; + + trace_multifd_recv_sync_main_signal(p->id); + qemu_sem_post(&p->sem); + } + } + + /* + * Initiate the synchronization by waiting for all channels. + * + * For socket-based migration this means each channel has received + * the SYNC packet on the stream. + * + * For file-based migration this means each channel is done with + * the work (pending_job=false). 
+ */ + for (i = 0; i < thread_count; i++) { + trace_multifd_recv_sync_main_wait(i); qemu_sem_wait(&multifd_recv_state->sem_sync); } - for (i = 0; i < migrate_multifd_channels(); i++) { + + if (file_based) { + /* + * For file-based loading is done in one iteration. We're + * done. + */ + return; + } + + /* + * Sync done. Release the channels for the next iteration. + */ + for (i = 0; i < thread_count; i++) { MultiFDRecvParams *p = &multifd_recv_state->params[i]; WITH_QEMU_LOCK_GUARD(&p->mutex) { @@ -1093,50 +1459,87 @@ static void *multifd_recv_thread(void *opaque) { MultiFDRecvParams *p = opaque; Error *local_err = NULL; + bool use_packets = multifd_use_packets(); int ret; trace_multifd_recv_thread_start(p->id); rcu_register_thread(); while (true) { - uint32_t flags; + uint32_t flags = 0; + bool has_data = false; + p->normal_num = 0; - if (p->quit) { - break; - } + if (use_packets) { + if (multifd_recv_should_exit()) { + break; + } - ret = qio_channel_read_all_eof(p->c, (void *)p->packet, - p->packet_len, &local_err); - if (ret == 0 || ret == -1) { /* 0: EOF -1: Error */ - break; - } + ret = qio_channel_read_all_eof(p->c, (void *)p->packet, + p->packet_len, &local_err); + if (ret == 0 || ret == -1) { /* 0: EOF -1: Error */ + break; + } - qemu_mutex_lock(&p->mutex); - ret = multifd_recv_unfill_packet(p, &local_err); - if (ret) { + qemu_mutex_lock(&p->mutex); + ret = multifd_recv_unfill_packet(p, &local_err); + if (ret) { + qemu_mutex_unlock(&p->mutex); + break; + } + + flags = p->flags; + /* recv methods don't know how to handle the SYNC flag */ + p->flags &= ~MULTIFD_FLAG_SYNC; + has_data = p->normal_num || p->zero_num; qemu_mutex_unlock(&p->mutex); - break; + } else { + /* + * No packets, so we need to wait for the vmstate code to + * give us work. + */ + qemu_sem_wait(&p->sem); + + if (multifd_recv_should_exit()) { + break; + } + + /* pairs with qatomic_store_release() at multifd_recv() */ + if (!qatomic_load_acquire(&p->pending_job)) { + /* + * Migration thread did not send work, this is + * equivalent to pending_sync on the sending + * side. Post sem_sync to notify we reached this + * point. + */ + qemu_sem_post(&multifd_recv_state->sem_sync); + continue; + } + + has_data = !!p->data->size; } - flags = p->flags; - /* recv methods don't know how to handle the SYNC flag */ - p->flags &= ~MULTIFD_FLAG_SYNC; - trace_multifd_recv(p->id, p->packet_num, p->normal_num, flags, - p->next_packet_size); - p->num_packets++; - p->total_normal_pages += p->normal_num; - qemu_mutex_unlock(&p->mutex); - - if (p->normal_num) { - ret = multifd_recv_state->ops->recv_pages(p, &local_err); + if (has_data) { + ret = multifd_recv_state->ops->recv(p, &local_err); if (ret != 0) { break; } } - if (flags & MULTIFD_FLAG_SYNC) { - qemu_sem_post(&multifd_recv_state->sem_sync); - qemu_sem_wait(&p->sem_sync); + if (use_packets) { + if (flags & MULTIFD_FLAG_SYNC) { + qemu_sem_post(&multifd_recv_state->sem_sync); + qemu_sem_wait(&p->sem_sync); + } + } else { + p->total_normal_pages += p->data->size / qemu_target_page_size(); + p->data->size = 0; + /* + * Order data->size update before clearing + * pending_job. Pairs with smp_mb_acquire() at + * multifd_recv(). 
+ */ + qatomic_store_release(&p->pending_job, false); } } @@ -1144,20 +1547,20 @@ static void *multifd_recv_thread(void *opaque) multifd_recv_terminate_threads(local_err); error_free(local_err); } - qemu_mutex_lock(&p->mutex); - p->running = false; - qemu_mutex_unlock(&p->mutex); rcu_unregister_thread(); - trace_multifd_recv_thread_end(p->id, p->num_packets, p->total_normal_pages); + trace_multifd_recv_thread_end(p->id, p->packets_recved, + p->total_normal_pages, + p->total_zero_pages); return NULL; } -int multifd_load_setup(Error **errp) +int multifd_recv_setup(Error **errp) { int thread_count; uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size(); + bool use_packets = multifd_use_packets(); uint8_t i; /* @@ -1171,7 +1574,12 @@ int multifd_load_setup(Error **errp) thread_count = migrate_multifd_channels(); multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state)); multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count); + + multifd_recv_state->data = g_new0(MultiFDRecvData, 1); + multifd_recv_state->data->size = 0; + qatomic_set(&multifd_recv_state->count, 0); + qatomic_set(&multifd_recv_state->exiting, 0); qemu_sem_init(&multifd_recv_state->sem_sync, 0); multifd_recv_state->ops = multifd_ops[migrate_multifd_compression()]; @@ -1180,26 +1588,32 @@ int multifd_load_setup(Error **errp) qemu_mutex_init(&p->mutex); qemu_sem_init(&p->sem_sync, 0); - p->quit = false; + qemu_sem_init(&p->sem, 0); + p->pending_job = false; p->id = i; - p->packet_len = sizeof(MultiFDPacket_t) - + sizeof(uint64_t) * page_count; - p->packet = g_malloc0(p->packet_len); + + p->data = g_new0(MultiFDRecvData, 1); + p->data->size = 0; + + if (use_packets) { + p->packet_len = sizeof(MultiFDPacket_t) + + sizeof(uint64_t) * page_count; + p->packet = g_malloc0(p->packet_len); + } p->name = g_strdup_printf("multifdrecv_%d", i); p->iov = g_new0(struct iovec, page_count); p->normal = g_new0(ram_addr_t, page_count); + p->zero = g_new0(ram_addr_t, page_count); p->page_count = page_count; p->page_size = qemu_target_page_size(); } for (i = 0; i < thread_count; i++) { MultiFDRecvParams *p = &multifd_recv_state->params[i]; - Error *local_err = NULL; int ret; - ret = multifd_recv_state->ops->recv_setup(p, &local_err); + ret = multifd_recv_state->ops->recv_setup(p, errp); if (ret) { - error_propagate(errp, local_err); return ret; } } @@ -1230,18 +1644,23 @@ void multifd_recv_new_channel(QIOChannel *ioc, Error **errp) { MultiFDRecvParams *p; Error *local_err = NULL; + bool use_packets = multifd_use_packets(); int id; - id = multifd_recv_initial_packet(ioc, &local_err); - if (id < 0) { - multifd_recv_terminate_threads(local_err); - error_propagate_prepend(errp, local_err, - "failed to receive packet" - " via multifd channel %d: ", - qatomic_read(&multifd_recv_state->count)); - return; + if (use_packets) { + id = multifd_recv_initial_packet(ioc, &local_err); + if (id < 0) { + multifd_recv_terminate_threads(local_err); + error_propagate_prepend(errp, local_err, + "failed to receive packet" + " via multifd channel %d: ", + qatomic_read(&multifd_recv_state->count)); + return; + } + trace_multifd_recv_new_channel(id); + } else { + id = qatomic_read(&multifd_recv_state->count); } - trace_multifd_recv_new_channel(id); p = &multifd_recv_state->params[id]; if (p->c != NULL) { @@ -1253,11 +1672,23 @@ void multifd_recv_new_channel(QIOChannel *ioc, Error **errp) } p->c = ioc; object_ref(OBJECT(ioc)); - /* initial packet */ - p->num_packets = 1; - p->running = true; + p->thread_created = true; 
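Both setup paths size their per-channel buffers from the same two numbers: page_count is the multifd packet payload divided by the target page size, and packet_len is the fixed MultiFDPacket_t header plus one 64-bit offset per page. A quick arithmetic sketch, assuming a 512 KiB packet payload and 4 KiB pages and only approximating the header layout; the real sizes come from MULTIFD_PACKET_SIZE, qemu_target_page_size() and the actual struct definition:

#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration only. */
#define PACKET_PAYLOAD  (512 * 1024)
#define PAGE_SIZE       4096

/* Rough stand-in for the fixed part of MultiFDPacket_t. */
typedef struct {
    uint32_t magic, version, flags, pages_alloc;
    uint32_t normal_pages, zero_pages, next_packet_size, unused32;
    uint64_t packet_num, unused64[3];
    char ramblock[256];
    uint64_t offset[];          /* one 64-bit offset per queued page */
} PacketSketch;

int main(void)
{
    uint32_t page_count = PACKET_PAYLOAD / PAGE_SIZE;
    size_t packet_len = sizeof(PacketSketch) + sizeof(uint64_t) * page_count;

    printf("page_count = %u pages per packet\n", page_count);
    printf("packet_len = %zu bytes (fixed header %zu + %u offsets)\n",
           packet_len, sizeof(PacketSketch), page_count);
    return 0;
}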
qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p, QEMU_THREAD_JOINABLE); qatomic_inc(&multifd_recv_state->count); } + +bool multifd_send_prepare_common(MultiFDSendParams *p) +{ + multifd_send_zero_page_detect(p); + + if (!p->pages->normal_num) { + p->next_packet_size = 0; + return false; + } + + multifd_send_prepare_header(p); + + return true; +} diff --git a/migration/multifd.h b/migration/multifd.h index a835643b48c..c9d9b092395 100644 --- a/migration/multifd.h +++ b/migration/multifd.h @@ -13,16 +13,23 @@ #ifndef QEMU_MIGRATION_MULTIFD_H #define QEMU_MIGRATION_MULTIFD_H -int multifd_save_setup(Error **errp); -void multifd_save_cleanup(void); -int multifd_load_setup(Error **errp); -void multifd_load_cleanup(void); -void multifd_load_shutdown(void); +#include "ram.h" + +typedef struct MultiFDRecvData MultiFDRecvData; + +bool multifd_send_setup(void); +void multifd_send_shutdown(void); +void multifd_send_channel_created(void); +int multifd_recv_setup(Error **errp); +void multifd_recv_cleanup(void); +void multifd_recv_shutdown(void); bool multifd_recv_all_channels_created(void); void multifd_recv_new_channel(QIOChannel *ioc, Error **errp); void multifd_recv_sync_main(void); -int multifd_send_sync_main(QEMUFile *f); -int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset); +int multifd_send_sync_main(void); +bool multifd_queue_page(RAMBlock *block, ram_addr_t offset); +bool multifd_recv(void); +MultiFDRecvData *multifd_get_recv_data(void); /* Multifd Compression flags */ #define MULTIFD_FLAG_SYNC (1 << 0) @@ -48,23 +55,38 @@ typedef struct { /* size of the next packet that contains pages */ uint32_t next_packet_size; uint64_t packet_num; - uint64_t unused[4]; /* Reserved for future use */ + /* zero pages */ + uint32_t zero_pages; + uint32_t unused32[1]; /* Reserved for future use */ + uint64_t unused64[3]; /* Reserved for future use */ char ramblock[256]; + /* + * This array contains the pointers to: + * - normal pages (initial normal_pages entries) + * - zero pages (following zero_pages entries) + */ uint64_t offset[]; } __attribute__((packed)) MultiFDPacket_t; typedef struct { /* number of used pages */ uint32_t num; + /* number of normal pages */ + uint32_t normal_num; /* number of allocated pages */ uint32_t allocated; - /* global number of generated multifd packets */ - uint64_t packet_num; /* offset of each page */ ram_addr_t *offset; RAMBlock *block; } MultiFDPages_t; +struct MultiFDRecvData { + void *opaque; + size_t size; + /* for preadv */ + off_t file_offset; +}; + typedef struct { /* Fields are only written at creating/deletion time */ /* No lock required for them, they are read only */ @@ -75,10 +97,11 @@ typedef struct { char *name; /* channel thread id */ QemuThread thread; + bool thread_created; + QemuThread tls_thread; + bool tls_thread_created; /* communication channel */ QIOChannel *c; - /* is the yank function registered */ - bool registered_yank; /* packet allocated len */ uint32_t packet_len; /* guest page size */ @@ -93,18 +116,19 @@ typedef struct { /* syncs main thread and channels */ QemuSemaphore sem_sync; - /* this mutex protects the following parameters */ - QemuMutex mutex; - /* is this channel thread running */ - bool running; - /* should this thread finish */ - bool quit; /* multifd flags for each packet */ uint32_t flags; - /* global number of generated multifd packets */ - uint64_t packet_num; - /* thread has work to do */ - int pending_job; + /* + * The sender thread has work to do if either of below boolean is set. 
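As the comment in the header above spells out, offset[] on the wire carries the normal-page offsets first and the zero-page offsets immediately after, with normal_pages and zero_pages in the fixed part telling the receiver where the split is (the real code additionally converts every multi-byte field to big-endian with cpu_to_be32/64). A simplified fill/parse sketch of that layout, kept in host byte order for brevity:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified packet: the real MultiFDPacket_t also carries magic, version,
 * flags, sizes and the RAM block name. */
typedef struct {
    uint32_t normal_pages;
    uint32_t zero_pages;
    uint64_t offset[8];      /* normal offsets first, then zero offsets */
} PacketSketch;

static void fill(PacketSketch *pkt, const uint64_t *normal, uint32_t n_normal,
                 const uint64_t *zero, uint32_t n_zero)
{
    pkt->normal_pages = n_normal;
    pkt->zero_pages = n_zero;
    memcpy(pkt->offset, normal, n_normal * sizeof(uint64_t));
    memcpy(pkt->offset + n_normal, zero, n_zero * sizeof(uint64_t));
}

static void parse(const PacketSketch *pkt)
{
    for (uint32_t i = 0; i < pkt->normal_pages; i++) {
        printf("normal page at offset 0x%llx\n",
               (unsigned long long)pkt->offset[i]);
    }
    for (uint32_t i = 0; i < pkt->zero_pages; i++) {
        printf("zero page at offset 0x%llx\n",
               (unsigned long long)pkt->offset[pkt->normal_pages + i]);
    }
}

int main(void)
{
    uint64_t normal[] = { 0x1000, 0x3000 };
    uint64_t zero[]   = { 0x2000 };
    PacketSketch pkt;

    fill(&pkt, normal, 2, zero, 1);
    parse(&pkt);
    return 0;
}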
+ * + * @pending_job: a job is pending + * @pending_sync: a sync request is pending + * + * For both of these fields, they're only set by the requesters, and + * cleared by the multifd sender threads. + */ + bool pending_job; + bool pending_sync; /* array of pages to sent. * The owner of 'pages' depends of 'pending_job' value: * pending_job == 0 -> migration_thread can use it. @@ -119,19 +143,17 @@ typedef struct { /* size of the next packet that contains pages */ uint32_t next_packet_size; /* packets sent through this channel */ - uint64_t num_packets; + uint64_t packets_sent; /* non zero pages sent through this channel */ uint64_t total_normal_pages; + /* zero pages sent through this channel */ + uint64_t total_zero_pages; /* buffers to send */ struct iovec *iov; /* number of iovs used */ uint32_t iovs_num; - /* Pages that are not zero */ - ram_addr_t *normal; - /* num of non zero pages */ - uint32_t normal_num; /* used for compression methods */ - void *data; + void *compress_data; } MultiFDSendParams; typedef struct { @@ -144,6 +166,7 @@ typedef struct { char *name; /* channel thread id */ QemuThread thread; + bool thread_created; /* communication channel */ QIOChannel *c; /* packet allocated len */ @@ -155,17 +178,19 @@ typedef struct { /* syncs main thread and channels */ QemuSemaphore sem_sync; + /* sem where to wait for more work */ + QemuSemaphore sem; /* this mutex protects the following parameters */ QemuMutex mutex; - /* is this channel thread running */ - bool running; /* should this thread finish */ bool quit; /* multifd flags for each packet */ uint32_t flags; /* global number of generated multifd packets */ uint64_t packet_num; + int pending_job; + MultiFDRecvData *data; /* thread local variables. No locking required */ @@ -173,22 +198,28 @@ typedef struct { MultiFDPacket_t *packet; /* size of the next packet that contains pages */ uint32_t next_packet_size; - /* packets sent through this channel */ - uint64_t num_packets; + /* packets received through this channel */ + uint64_t packets_recved; /* ramblock */ RAMBlock *block; /* ramblock host address */ uint8_t *host; /* non zero pages recv through this channel */ uint64_t total_normal_pages; + /* zero pages recv through this channel */ + uint64_t total_zero_pages; /* buffers to recv */ struct iovec *iov; /* Pages that are not zero */ ram_addr_t *normal; /* num of non zero pages */ uint32_t normal_num; + /* Pages that are zero */ + ram_addr_t *zero; + /* num of zero pages */ + uint32_t zero_num; /* used for de-compression methods */ - void *data; + void *compress_data; } MultiFDRecvParams; typedef struct { @@ -202,11 +233,23 @@ typedef struct { int (*recv_setup)(MultiFDRecvParams *p, Error **errp); /* Cleanup for receiving side */ void (*recv_cleanup)(MultiFDRecvParams *p); - /* Read all pages */ - int (*recv_pages)(MultiFDRecvParams *p, Error **errp); + /* Read all data */ + int (*recv)(MultiFDRecvParams *p, Error **errp); } MultiFDMethods; void multifd_register_ops(int method, MultiFDMethods *ops); +void multifd_send_fill_packet(MultiFDSendParams *p); +bool multifd_send_prepare_common(MultiFDSendParams *p); +void multifd_send_zero_page_detect(MultiFDSendParams *p); +void multifd_recv_zero_page_process(MultiFDRecvParams *p); -#endif +static inline void multifd_send_prepare_header(MultiFDSendParams *p) +{ + p->iov[0].iov_len = p->packet_len; + p->iov[0].iov_base = p->packet; + p->iovs_num++; +} + +void multifd_channel_connect(MultiFDSendParams *p, QIOChannel *ioc); +#endif diff --git a/migration/options.c 
b/migration/options.c index 8d8ec73ad95..bfd7753b69a 100644 --- a/migration/options.c +++ b/migration/options.c @@ -179,6 +179,9 @@ Property migration_properties[] = { DEFINE_PROP_MIG_MODE("mode", MigrationState, parameters.mode, MIG_MODE_NORMAL), + DEFINE_PROP_ZERO_PAGE_DETECTION("zero-page-detection", MigrationState, + parameters.zero_page_detection, + ZERO_PAGE_DETECTION_MULTIFD), /* Migration capabilities */ DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), @@ -204,6 +207,7 @@ Property migration_properties[] = { DEFINE_PROP_MIG_CAP("x-switchover-ack", MIGRATION_CAPABILITY_SWITCHOVER_ACK), DEFINE_PROP_MIG_CAP("x-dirty-limit", MIGRATION_CAPABILITY_DIRTY_LIMIT), + DEFINE_PROP_MIG_CAP("mapped-ram", MIGRATION_CAPABILITY_MAPPED_RAM), DEFINE_PROP_END_OF_LIST(), }; @@ -263,6 +267,13 @@ bool migrate_events(void) return s->capabilities[MIGRATION_CAPABILITY_EVENTS]; } +bool migrate_mapped_ram(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_MAPPED_RAM]; +} + bool migrate_ignore_shared(void) { MigrationState *s = migrate_get_current(); @@ -470,9 +481,9 @@ static bool migrate_incoming_started(void) */ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp) { + ERRP_GUARD(); MigrationIncomingState *mis = migration_incoming_get_current(); - ERRP_GUARD(); #ifndef CONFIG_LIVE_BLOCK_MIGRATION if (new_caps[MIGRATION_CAPABILITY_BLOCK]) { error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) " @@ -645,6 +656,26 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp) } } + if (new_caps[MIGRATION_CAPABILITY_MAPPED_RAM]) { + if (new_caps[MIGRATION_CAPABILITY_XBZRLE]) { + error_setg(errp, + "Mapped-ram migration is incompatible with xbzrle"); + return false; + } + + if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) { + error_setg(errp, + "Mapped-ram migration is incompatible with compression"); + return false; + } + + if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { + error_setg(errp, + "Mapped-ram migration is incompatible with postcopy"); + return false; + } + } + return true; } @@ -653,7 +684,7 @@ bool migrate_cap_set(int cap, bool value, Error **errp) MigrationState *s = migrate_get_current(); bool new_caps[MIGRATION_CAPABILITY__MAX]; - if (migration_is_running(s->state)) { + if (migration_is_running()) { error_setg(errp, QERR_MIGRATION_ACTIVE); return false; } @@ -697,7 +728,7 @@ void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, MigrationCapabilityStatusList *cap; bool new_caps[MIGRATION_CAPABILITY__MAX]; - if (migration_is_running(s->state) || migration_in_colo_state()) { + if (migration_is_running() || migration_in_colo_state()) { error_setg(errp, QERR_MIGRATION_ACTIVE); return; } @@ -833,8 +864,10 @@ uint64_t migrate_max_postcopy_bandwidth(void) MigMode migrate_mode(void) { MigrationState *s = migrate_get_current(); + MigMode mode = s->parameters.mode; - return s->parameters.mode; + assert(mode >= 0 && mode < MIG_MODE__MAX); + return mode; } int migrate_multifd_channels(void) @@ -894,6 +927,13 @@ const char *migrate_tls_hostname(void) return s->parameters.tls_hostname; } +uint64_t migrate_vcpu_dirty_limit_period(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.x_vcpu_dirty_limit_period; +} + uint64_t migrate_xbzrle_cache_size(void) { MigrationState *s = migrate_get_current(); @@ -901,6 +941,13 @@ uint64_t migrate_xbzrle_cache_size(void) return s->parameters.xbzrle_cache_size; } +ZeroPageDetection migrate_zero_page_detection(void) +{ + 
MigrationState *s = migrate_get_current(); + + return s->parameters.zero_page_detection; +} + /* parameter setters */ void migrate_set_block_incremental(bool value) @@ -1011,6 +1058,8 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp) params->vcpu_dirty_limit = s->parameters.vcpu_dirty_limit; params->has_mode = true; params->mode = s->parameters.mode; + params->has_zero_page_detection = true; + params->zero_page_detection = s->parameters.zero_page_detection; return params; } @@ -1047,6 +1096,7 @@ void migrate_params_init(MigrationParameters *params) params->has_x_vcpu_dirty_limit_period = true; params->has_vcpu_dirty_limit = true; params->has_mode = true; + params->has_zero_page_detection = true; } /* @@ -1055,6 +1105,8 @@ void migrate_params_init(MigrationParameters *params) */ bool migrate_params_check(MigrationParameters *params, Error **errp) { + ERRP_GUARD(); + if (params->has_compress_level && (params->compress_level > 9)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level", @@ -1216,6 +1268,13 @@ bool migrate_params_check(MigrationParameters *params, Error **errp) } #endif + if (migrate_mapped_ram() && + (migrate_multifd_compression() || migrate_tls())) { + error_setg(errp, + "Mapped-ram only available for non-compressed non-TLS multifd migration"); + return false; + } + if (params->has_x_vcpu_dirty_limit_period && (params->x_vcpu_dirty_limit_period < 1 || params->x_vcpu_dirty_limit_period > 1000)) { @@ -1310,6 +1369,12 @@ static void migrate_params_test_apply(MigrateSetParameters *params, if (params->has_multifd_compression) { dest->multifd_compression = params->multifd_compression; } + if (params->has_multifd_zlib_level) { + dest->multifd_zlib_level = params->multifd_zlib_level; + } + if (params->has_multifd_zstd_level) { + dest->multifd_zstd_level = params->multifd_zstd_level; + } if (params->has_xbzrle_cache_size) { dest->xbzrle_cache_size = params->xbzrle_cache_size; } @@ -1348,6 +1413,10 @@ static void migrate_params_test_apply(MigrateSetParameters *params, if (params->has_mode) { dest->mode = params->mode; } + + if (params->has_zero_page_detection) { + dest->zero_page_detection = params->zero_page_detection; + } } static void migrate_params_apply(MigrateSetParameters *params, Error **errp) @@ -1445,6 +1514,12 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp) if (params->has_multifd_compression) { s->parameters.multifd_compression = params->multifd_compression; } + if (params->has_multifd_zlib_level) { + s->parameters.multifd_zlib_level = params->multifd_zlib_level; + } + if (params->has_multifd_zstd_level) { + s->parameters.multifd_zstd_level = params->multifd_zstd_level; + } if (params->has_xbzrle_cache_size) { s->parameters.xbzrle_cache_size = params->xbzrle_cache_size; xbzrle_cache_resize(params->xbzrle_cache_size, errp); @@ -1492,6 +1567,10 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp) if (params->has_mode) { s->parameters.mode = params->mode; } + + if (params->has_zero_page_detection) { + s->parameters.zero_page_detection = params->zero_page_detection; + } } void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp) diff --git a/migration/options.h b/migration/options.h index 246c160aeee..ab8199e2078 100644 --- a/migration/options.h +++ b/migration/options.h @@ -16,6 +16,7 @@ #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" +#include "migration/client-options.h" /* migration properties */ @@ -24,24 +25,21 @@ extern Property 
migration_properties[]; /* capabilities */ bool migrate_auto_converge(void); -bool migrate_background_snapshot(void); bool migrate_block(void); bool migrate_colo(void); bool migrate_compress(void); bool migrate_dirty_bitmaps(void); -bool migrate_dirty_limit(void); bool migrate_events(void); +bool migrate_mapped_ram(void); bool migrate_ignore_shared(void); bool migrate_late_block_activate(void); bool migrate_multifd(void); bool migrate_pause_before_switchover(void); bool migrate_postcopy_blocktime(void); bool migrate_postcopy_preempt(void); -bool migrate_postcopy_ram(void); bool migrate_rdma_pin_all(void); bool migrate_release_ram(void); bool migrate_return_path(void); -bool migrate_switchover_ack(void); bool migrate_validate_uuid(void); bool migrate_xbzrle(void); bool migrate_zero_blocks(void); @@ -83,7 +81,6 @@ uint8_t migrate_max_cpu_throttle(void); uint64_t migrate_max_bandwidth(void); uint64_t migrate_avail_switchover_bandwidth(void); uint64_t migrate_max_postcopy_bandwidth(void); -MigMode migrate_mode(void); int migrate_multifd_channels(void); MultiFDCompression migrate_multifd_compression(void); int migrate_multifd_zlib_level(void); @@ -93,6 +90,7 @@ const char *migrate_tls_authz(void); const char *migrate_tls_creds(void); const char *migrate_tls_hostname(void); uint64_t migrate_xbzrle_cache_size(void); +ZeroPageDetection migrate_zero_page_detection(void); /* parameters setters */ diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index 5408e028c63..eccff499cb2 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -77,10 +77,9 @@ int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp) { struct PostcopyNotifyData pnd; pnd.reason = reason; - pnd.errp = errp; return notifier_with_return_list_notify(&postcopy_notifier_list, - &pnd); + &pnd, errp); } /* @@ -102,11 +101,9 @@ void postcopy_thread_create(MigrationIncomingState *mis, * are target OS specific. 
*/ #if defined(__linux__) - #include #include #include -#include /* for __u64 */ #endif #if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD) @@ -272,8 +269,8 @@ static bool request_ufd_features(int ufd, uint64_t features) return false; } - ioctl_mask = (__u64)1 << _UFFDIO_REGISTER | - (__u64)1 << _UFFDIO_UNREGISTER; + ioctl_mask = 1ULL << _UFFDIO_REGISTER | + 1ULL << _UFFDIO_UNREGISTER; if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) { error_report("Missing userfault features: %" PRIx64, (uint64_t)(~api_struct.ioctls & ioctl_mask)); @@ -286,10 +283,10 @@ static bool request_ufd_features(int ufd, uint64_t features) static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis, Error **errp) { + ERRP_GUARD(); uint64_t asked_features = 0; static uint64_t supported_features; - ERRP_GUARD(); /* * it's not possible to * request UFFD_API twice per one fd @@ -374,6 +371,7 @@ static int test_ramblock_postcopiable(RAMBlock *rb, Error **errp) */ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis, Error **errp) { + ERRP_GUARD(); long pagesize = qemu_real_host_page_size(); int ufd = -1; bool ret = false; /* Error unless we change it */ @@ -383,7 +381,6 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis, Error **errp) uint64_t feature_mask; RAMBlock *block; - ERRP_GUARD(); if (qemu_target_page_size() > pagesize) { error_setg(errp, "Target page size bigger than host page size"); goto out; @@ -462,9 +459,9 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis, Error **errp) goto out; } - feature_mask = (__u64)1 << _UFFDIO_WAKE | - (__u64)1 << _UFFDIO_COPY | - (__u64)1 << _UFFDIO_ZEROPAGE; + feature_mask = 1ULL << _UFFDIO_WAKE | + 1ULL << _UFFDIO_COPY | + 1ULL << _UFFDIO_ZEROPAGE; if ((reg_struct.ioctls & feature_mask) != feature_mask) { error_setg(errp, "Missing userfault map features: %" PRIx64, (uint64_t)(~reg_struct.ioctls & feature_mask)); @@ -733,11 +730,11 @@ static int ram_block_enable_notify(RAMBlock *rb, void *opaque) error_report("%s userfault register: %s", __func__, strerror(errno)); return -1; } - if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) { + if (!(reg_struct.ioctls & (1ULL << _UFFDIO_COPY))) { error_report("%s userfault: Region doesn't support COPY", __func__); return -1; } - if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) { + if (reg_struct.ioctls & (1ULL << _UFFDIO_ZEROPAGE)) { qemu_ram_set_uf_zeroable(rb); } diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h index 442ab89752a..ecae9412118 100644 --- a/migration/postcopy-ram.h +++ b/migration/postcopy-ram.h @@ -128,7 +128,6 @@ enum PostcopyNotifyReason { struct PostcopyNotifyData { enum PostcopyNotifyReason reason; - Error **errp; }; void postcopy_add_notifier(NotifierWithReturn *nn); diff --git a/migration/qemu-file.c b/migration/qemu-file.c index 94231ff2955..a10882d47fc 100644 --- a/migration/qemu-file.c +++ b/migration/qemu-file.c @@ -33,6 +33,7 @@ #include "options.h" #include "qapi/error.h" #include "rdma.h" +#include "io/channel-file.h" #define IO_BUF_SIZE 32768 #define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64) @@ -62,6 +63,8 @@ struct QEMUFile { */ int qemu_file_shutdown(QEMUFile *f) { + Error *err = NULL; + /* * We must set qemufile error before the real shutdown(), otherwise * there can be a race window where we thought IO all went though @@ -90,7 +93,8 @@ int qemu_file_shutdown(QEMUFile *f) return -ENOSYS; } - if (qio_channel_shutdown(f->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL) < 0) { + if (qio_channel_shutdown(f->ioc, 
QIO_CHANNEL_SHUTDOWN_BOTH, &err) < 0) { + error_report_err(err); return -EIO; } @@ -255,6 +259,10 @@ static void qemu_iovec_release_ram(QEMUFile *f) memset(f->may_free, 0, sizeof(f->may_free)); } +bool qemu_file_is_seekable(QEMUFile *f) +{ + return qio_channel_has_feature(f->ioc, QIO_CHANNEL_FEATURE_SEEKABLE); +} /** * Flushes QEMUFile buffer @@ -447,6 +455,107 @@ void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size) } } +void qemu_put_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen, + off_t pos) +{ + Error *err = NULL; + size_t ret; + + if (f->last_error) { + return; + } + + qemu_fflush(f); + ret = qio_channel_pwrite(f->ioc, (char *)buf, buflen, pos, &err); + + if (err) { + qemu_file_set_error_obj(f, -EIO, err); + return; + } + + if ((ssize_t)ret == QIO_CHANNEL_ERR_BLOCK) { + qemu_file_set_error_obj(f, -EAGAIN, NULL); + return; + } + + if (ret != buflen) { + error_setg(&err, "Partial write of size %zu, expected %zu", ret, + buflen); + qemu_file_set_error_obj(f, -EIO, err); + return; + } + + stat64_add(&mig_stats.qemu_file_transferred, buflen); + + return; +} + + +size_t qemu_get_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen, + off_t pos) +{ + Error *err = NULL; + size_t ret; + + if (f->last_error) { + return 0; + } + + ret = qio_channel_pread(f->ioc, (char *)buf, buflen, pos, &err); + + if ((ssize_t)ret == -1 || err) { + qemu_file_set_error_obj(f, -EIO, err); + return 0; + } + + if ((ssize_t)ret == QIO_CHANNEL_ERR_BLOCK) { + qemu_file_set_error_obj(f, -EAGAIN, NULL); + return 0; + } + + if (ret != buflen) { + error_setg(&err, "Partial read of size %zu, expected %zu", ret, buflen); + qemu_file_set_error_obj(f, -EIO, err); + return 0; + } + + return ret; +} + +void qemu_set_offset(QEMUFile *f, off_t off, int whence) +{ + Error *err = NULL; + off_t ret; + + if (qemu_file_is_writable(f)) { + qemu_fflush(f); + } else { + /* Drop all cached buffers if existed; will trigger a re-fill later */ + f->buf_index = 0; + f->buf_size = 0; + } + + ret = qio_channel_io_seek(f->ioc, off, whence, &err); + if (ret == (off_t)-1) { + qemu_file_set_error_obj(f, -EIO, err); + } +} + +off_t qemu_get_offset(QEMUFile *f) +{ + Error *err = NULL; + off_t ret; + + qemu_fflush(f); + + ret = qio_channel_io_seek(f->ioc, 0, SEEK_CUR, &err); + if (ret == (off_t)-1) { + qemu_file_set_error_obj(f, -EIO, err); + } + return ret; +} + + void qemu_put_byte(QEMUFile *f, int v) { if (f->last_error) { diff --git a/migration/qemu-file.h b/migration/qemu-file.h index 8aec9fabf7f..32fd4a34fd1 100644 --- a/migration/qemu-file.h +++ b/migration/qemu-file.h @@ -75,6 +75,12 @@ QEMUFile *qemu_file_get_return_path(QEMUFile *f); int qemu_fflush(QEMUFile *f); void qemu_file_set_blocking(QEMUFile *f, bool block); int qemu_file_get_to_fd(QEMUFile *f, int fd, size_t size); +void qemu_set_offset(QEMUFile *f, off_t off, int whence); +off_t qemu_get_offset(QEMUFile *f); +void qemu_put_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen, + off_t pos); +size_t qemu_get_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen, + off_t pos); QIOChannel *qemu_file_get_ioc(QEMUFile *file); diff --git a/migration/ram.c b/migration/ram.c index 8c7886ab797..8deb84984f4 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -94,6 +94,24 @@ #define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200 /* We can't use any flag that is bigger than 0x200 */ +/* + * mapped-ram migration supports O_DIRECT, so we need to make sure the + * userspace buffer, the IO operation size and the file offset are + * aligned according to the underlying 
device's block size. The first + * two are already aligned to page size, but we need to add padding to + * the file to align the offset. We cannot read the block size + * dynamically because the migration file can be moved between + * different systems, so use 1M to cover most block sizes and to keep + * the file offset aligned at page size as well. + */ +#define MAPPED_RAM_FILE_OFFSET_ALIGNMENT 0x100000 + +/* + * When doing mapped-ram migration, this is the amount we read from + * the pages region in the migration file at a time. + */ +#define MAPPED_RAM_LOAD_BUF_SIZE 0x100000 + XBZRLECacheStats xbzrle_counters; /* used by the search for pages to send */ @@ -426,9 +444,8 @@ int precopy_notify(PrecopyNotifyReason reason, Error **errp) { PrecopyNotifyData pnd; pnd.reason = reason; - pnd.errp = errp; - return notifier_with_return_list_notify(&precopy_notifier_list, &pnd); + return notifier_with_return_list_notify(&precopy_notifier_list, &pnd, errp); } uint64_t ram_bytes_remaining(void) @@ -1123,16 +1140,26 @@ static int save_zero_page(RAMState *rs, PageSearchStatus *pss, QEMUFile *file = pss->pss_channel; int len = 0; + if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_NONE) { + return 0; + } + if (!buffer_is_zero(p, TARGET_PAGE_SIZE)) { return 0; } + stat64_add(&mig_stats.zero_pages, 1); + + if (migrate_mapped_ram()) { + /* zero pages are not transferred with mapped-ram */ + clear_bit_atomic(offset >> TARGET_PAGE_BITS, pss->block->file_bmap); + return 1; + } + len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO); qemu_put_byte(file, 0); len += 1; ram_release_page(pss->block->idstr, offset); - - stat64_add(&mig_stats.zero_pages, 1); ram_transferred_add(len); /* @@ -1190,14 +1217,20 @@ static int save_normal_page(PageSearchStatus *pss, RAMBlock *block, { QEMUFile *file = pss->pss_channel; - ram_transferred_add(save_page_header(pss, pss->pss_channel, block, - offset | RAM_SAVE_FLAG_PAGE)); - if (async) { - qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE, - migrate_release_ram() && - migration_in_postcopy()); + if (migrate_mapped_ram()) { + qemu_put_buffer_at(file, buf, TARGET_PAGE_SIZE, + block->pages_offset + offset); + set_bit(offset >> TARGET_PAGE_BITS, block->file_bmap); } else { - qemu_put_buffer(file, buf, TARGET_PAGE_SIZE); + ram_transferred_add(save_page_header(pss, pss->pss_channel, block, + offset | RAM_SAVE_FLAG_PAGE)); + if (async) { + qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE, + migrate_release_ram() && + migration_in_postcopy()); + } else { + qemu_put_buffer(file, buf, TARGET_PAGE_SIZE); + } } ram_transferred_add(TARGET_PAGE_SIZE); stat64_add(&mig_stats.normal_pages, 1); @@ -1250,13 +1283,11 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss) return pages; } -static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block, - ram_addr_t offset) +static int ram_save_multifd_page(RAMBlock *block, ram_addr_t offset) { - if (multifd_queue_page(file, block, offset) < 0) { + if (!multifd_queue_page(block, offset)) { return -1; } - stat64_add(&mig_stats.normal_pages, 1); return 1; } @@ -1334,14 +1365,18 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss) pss->block = QLIST_NEXT_RCU(pss->block, next); if (!pss->block) { if (migrate_multifd() && - !migrate_multifd_flush_after_each_section()) { + (!migrate_multifd_flush_after_each_section() || + migrate_mapped_ram())) { QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel; - int ret = multifd_send_sync_main(f); + int ret = multifd_send_sync_main(); if (ret < 0) { return 
ret; } - qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); - qemu_fflush(f); + + if (!migrate_mapped_ram()) { + qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); + qemu_fflush(f); + } } /* * If memory migration starts over, we will meet a dirtied page @@ -2044,7 +2079,6 @@ static bool save_compress_page(RAMState *rs, PageSearchStatus *pss, */ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss) { - RAMBlock *block = pss->block; ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; int res; @@ -2060,17 +2094,33 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss) return 1; } + return ram_save_page(rs, pss); +} + +/** + * ram_save_target_page_multifd: send one target page to multifd workers + * + * Returns 1 if the page was queued, -1 otherwise. + * + * @rs: current RAM state + * @pss: data about the page we want to send + */ +static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss) +{ + RAMBlock *block = pss->block; + ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; + /* - * Do not use multifd in postcopy as one whole host page should be - * placed. Meanwhile postcopy requires atomic update of pages, so even - * if host page size == guest page size the dest guest during run may - * still see partially copied pages which is data corruption. + * While using multifd live migration, we still need to handle zero + * page checking on the migration main thread. */ - if (migrate_multifd() && !migration_in_postcopy()) { - return ram_save_multifd_page(pss->pss_channel, block, offset); + if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) { + if (save_zero_page(rs, pss, offset)) { + return 1; + } } - return ram_save_page(rs, pss); + return ram_save_multifd_page(block, offset); } /* Should be called before sending a host page */ @@ -2395,7 +2445,7 @@ static void ram_save_cleanup(void *opaque) /* We don't use dirty log with background snapshots */ if (!migrate_background_snapshot()) { - /* caller have hold iothread lock or is in a bh, so there is + /* caller have hold BQL or is in a bh, so there is * no writing race against the migration bitmap */ if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) { @@ -2780,6 +2830,9 @@ static void ram_list_init_bitmaps(void) */ block->bmap = bitmap_new(pages); bitmap_set(block->bmap, 0, pages); + if (migrate_mapped_ram()) { + block->file_bmap = bitmap_new(pages); + } block->clear_bmap_shift = shift; block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift)); } @@ -2874,10 +2927,9 @@ void qemu_guest_free_page_hint(void *addr, size_t len) RAMBlock *block; ram_addr_t offset; size_t used_len, start, npages; - MigrationState *s = migrate_get_current(); /* This function is currently expected to be used during live migration */ - if (!migration_is_setup_or_active(s->state)) { + if (!migration_is_setup_or_active()) { return; } @@ -2917,6 +2969,89 @@ void qemu_guest_free_page_hint(void *addr, size_t len) } } +#define MAPPED_RAM_HDR_VERSION 1 +struct MappedRamHeader { + uint32_t version; + /* + * The target's page size, so we know how many pages are in the + * bitmap. + */ + uint64_t page_size; + /* + * The offset in the migration file where the pages bitmap is + * stored. + */ + uint64_t bitmap_offset; + /* + * The offset in the migration file where the actual pages (data) + * are stored. 
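Taken together, the header fields above describe a simple per-RAMBlock region in the migration file: header, then the dirty bitmap at bitmap_offset, then padding so that the page data at pages_offset is aligned for O_DIRECT. A rough sketch of the offset arithmetic, assuming the 1 MiB alignment constant used in this patch; ROUND_UP, the struct and the function names are illustrative only.

    #include <stdint.h>

    #define FILE_OFFSET_ALIGNMENT 0x100000ULL   /* assumed: 1 MiB, as in the patch */
    #define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

    struct block_layout {
        uint64_t bitmap_offset;   /* dirty bitmap immediately after the header */
        uint64_t pages_offset;    /* page data, aligned for O_DIRECT */
        uint64_t next_block;      /* first file offset past this RAMBlock */
    };

    static struct block_layout layout_for_block(uint64_t file_pos,
                                                uint64_t header_size,
                                                uint64_t bitmap_size,
                                                uint64_t used_length)
    {
        struct block_layout l;

        l.bitmap_offset = file_pos + header_size;
        l.pages_offset = ROUND_UP(l.bitmap_offset + bitmap_size,
                                  FILE_OFFSET_ALIGNMENT);
        l.next_block = l.pages_offset + used_length;
        return l;
    }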
+ */ + uint64_t pages_offset; +} QEMU_PACKED; +typedef struct MappedRamHeader MappedRamHeader; + +static void mapped_ram_setup_ramblock(QEMUFile *file, RAMBlock *block) +{ + g_autofree MappedRamHeader *header = NULL; + size_t header_size, bitmap_size; + long num_pages; + + header = g_new0(MappedRamHeader, 1); + header_size = sizeof(MappedRamHeader); + + num_pages = block->used_length >> TARGET_PAGE_BITS; + bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long); + + /* + * Save the file offsets of where the bitmap and the pages should + * go as they are written at the end of migration and during the + * iterative phase, respectively. + */ + block->bitmap_offset = qemu_get_offset(file) + header_size; + block->pages_offset = ROUND_UP(block->bitmap_offset + + bitmap_size, + MAPPED_RAM_FILE_OFFSET_ALIGNMENT); + + header->version = cpu_to_be32(MAPPED_RAM_HDR_VERSION); + header->page_size = cpu_to_be64(TARGET_PAGE_SIZE); + header->bitmap_offset = cpu_to_be64(block->bitmap_offset); + header->pages_offset = cpu_to_be64(block->pages_offset); + + qemu_put_buffer(file, (uint8_t *) header, header_size); + + /* prepare offset for next ramblock */ + qemu_set_offset(file, block->pages_offset + block->used_length, SEEK_SET); +} + +static bool mapped_ram_read_header(QEMUFile *file, MappedRamHeader *header, + Error **errp) +{ + size_t ret, header_size = sizeof(MappedRamHeader); + + ret = qemu_get_buffer(file, (uint8_t *)header, header_size); + if (ret != header_size) { + error_setg(errp, "Could not read whole mapped-ram migration header " + "(expected %zd, got %zd bytes)", header_size, ret); + return false; + } + + /* migration stream is big-endian */ + header->version = be32_to_cpu(header->version); + + if (header->version > MAPPED_RAM_HDR_VERSION) { + error_setg(errp, "Migration mapped-ram capability version not " + "supported (expected <= %d, got %d)", MAPPED_RAM_HDR_VERSION, + header->version); + return false; + } + + header->page_size = be64_to_cpu(header->page_size); + header->bitmap_offset = be64_to_cpu(header->bitmap_offset); + header->pages_offset = be64_to_cpu(header->pages_offset); + + return true; +} + /* * Each of ram_save_setup, ram_save_iterate and ram_save_complete has * long-running RCU critical section. When rcu-reclaims in the code @@ -2936,7 +3071,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque) { RAMState **rsp = opaque; RAMBlock *block; - int ret; + int ret, max_hg_page_size; if (compress_threads_save_setup()) { return -1; @@ -2951,6 +3086,12 @@ static int ram_save_setup(QEMUFile *f, void *opaque) } (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f; + /* + * ??? Mirrors the previous value of qemu_host_page_size, + * but is this really what was intended for the migration? 
+ */ + max_hg_page_size = MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE); + WITH_RCU_READ_LOCK_GUARD() { qemu_put_be64(f, ram_bytes_total_with_ignored() | RAM_SAVE_FLAG_MEM_SIZE); @@ -2959,13 +3100,17 @@ static int ram_save_setup(QEMUFile *f, void *opaque) qemu_put_byte(f, strlen(block->idstr)); qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); qemu_put_be64(f, block->used_length); - if (migrate_postcopy_ram() && block->page_size != - qemu_host_page_size) { + if (migrate_postcopy_ram() && + block->page_size != max_hg_page_size) { qemu_put_be64(f, block->page_size); } if (migrate_ignore_shared()) { qemu_put_be64(f, block->mr->addr); } + + if (migrate_mapped_ram()) { + mapped_ram_setup_ramblock(f, block); + } } } @@ -2982,16 +3127,22 @@ static int ram_save_setup(QEMUFile *f, void *opaque) } migration_ops = g_malloc0(sizeof(MigrationOps)); - migration_ops->ram_save_target_page = ram_save_target_page_legacy; - qemu_mutex_unlock_iothread(); - ret = multifd_send_sync_main(f); - qemu_mutex_lock_iothread(); + if (migrate_multifd()) { + migration_ops->ram_save_target_page = ram_save_target_page_multifd; + } else { + migration_ops->ram_save_target_page = ram_save_target_page_legacy; + } + + bql_unlock(); + ret = multifd_send_sync_main(); + bql_lock(); if (ret < 0) { return ret; } - if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) { + if (migrate_multifd() && !migrate_multifd_flush_after_each_section() + && !migrate_mapped_ram()) { qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); } @@ -2999,6 +3150,37 @@ static int ram_save_setup(QEMUFile *f, void *opaque) return qemu_fflush(f); } +static void ram_save_file_bmap(QEMUFile *f) +{ + RAMBlock *block; + + RAMBLOCK_FOREACH_MIGRATABLE(block) { + long num_pages = block->used_length >> TARGET_PAGE_BITS; + long bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long); + + qemu_put_buffer_at(f, (uint8_t *)block->file_bmap, bitmap_size, + block->bitmap_offset); + ram_transferred_add(bitmap_size); + + /* + * Free the bitmap here to catch any synchronization issues + * with multifd channels. No channels should be sending pages + * after we've written the bitmap to file. 
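The bitmap write above relies on positional I/O: qemu_put_buffer_at() is backed by qio_channel_pwrite(), so the data lands at the offset reserved during setup regardless of where the stream currently points. A plain-libc sketch of the same idea; the retry loop and file-descriptor handling are assumptions, not the patch's code.

    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Write 'len' bytes at absolute 'offset', retrying short writes. */
    static int write_at(int fd, const void *buf, size_t len, off_t offset)
    {
        const uint8_t *p = buf;

        while (len > 0) {
            ssize_t n = pwrite(fd, p, len, offset);

            if (n < 0) {
                if (errno == EINTR) {
                    continue;
                }
                return -errno;
            }
            p += n;
            len -= n;
            offset += n;
        }
        return 0;
    }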
+ */ + g_free(block->file_bmap); + block->file_bmap = NULL; + } +} + +void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset, bool set) +{ + if (set) { + set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); + } else { + clear_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); + } +} + /** * ram_save_iterate: iterative stage for migration * @@ -3107,9 +3289,10 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) out: if (ret >= 0 - && migration_is_setup_or_active(migrate_get_current()->state)) { - if (migrate_multifd() && migrate_multifd_flush_after_each_section()) { - ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); + && migration_is_setup_or_active()) { + if (migrate_multifd() && migrate_multifd_flush_after_each_section() && + !migrate_mapped_ram()) { + ret = multifd_send_sync_main(); if (ret < 0) { return ret; } @@ -3131,7 +3314,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) * * Returns zero to indicate success or negative on error * - * Called with iothread lock + * Called with the BQL * * @f: QEMUFile where to send the data * @opaque: RAMState pointer @@ -3183,12 +3366,25 @@ static int ram_save_complete(QEMUFile *f, void *opaque) } } - ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); + ret = multifd_send_sync_main(); if (ret < 0) { return ret; } - if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) { + if (migrate_mapped_ram()) { + ram_save_file_bmap(f); + + if (qemu_file_get_error(f)) { + Error *local_err = NULL; + int err = qemu_file_get_error_obj(f, &local_err); + + error_reportf_err(local_err, "Failed to write bitmap to file: "); + return -err; + } + } + + if (migrate_multifd() && !migrate_multifd_flush_after_each_section() && + !migrate_mapped_ram()) { qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); } qemu_put_be64(f, RAM_SAVE_FLAG_EOS); @@ -3214,21 +3410,20 @@ static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy, static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy, uint64_t *can_postcopy) { - MigrationState *s = migrate_get_current(); RAMState **temp = opaque; RAMState *rs = *temp; + uint64_t remaining_size; - uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; - - if (!migration_in_postcopy() && remaining_size < s->threshold_size) { - qemu_mutex_lock_iothread(); + if (!migration_in_postcopy()) { + bql_lock(); WITH_RCU_READ_LOCK_GUARD() { migration_bitmap_sync_precopy(rs, false); } - qemu_mutex_unlock_iothread(); - remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; + bql_unlock(); } + remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; + if (migrate_postcopy_ram()) { /* We can do postcopy, and all the data is postcopiable */ *can_postcopy += remaining_size; @@ -3453,7 +3648,7 @@ void colo_incoming_start_dirty_log(void) { RAMBlock *block = NULL; /* For memory_global_dirty_log_start below. 
*/ - qemu_mutex_lock_iothread(); + bql_lock(); qemu_mutex_lock_ramlist(); memory_global_dirty_log_sync(false); @@ -3467,7 +3662,7 @@ void colo_incoming_start_dirty_log(void) } ram_state->migration_dirty_pages = 0; qemu_mutex_unlock_ramlist(); - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* It is need to hold the global lock to call this helper */ @@ -3789,31 +3984,165 @@ void colo_flush_ram_cache(void) trace_colo_flush_ram_cache_end(); } +static size_t ram_load_multifd_pages(void *host_addr, size_t size, + uint64_t offset) +{ + MultiFDRecvData *data = multifd_get_recv_data(); + + data->opaque = host_addr; + data->file_offset = offset; + data->size = size; + + if (!multifd_recv()) { + return 0; + } + + return size; +} + +static bool read_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block, + long num_pages, unsigned long *bitmap, + Error **errp) +{ + ERRP_GUARD(); + unsigned long set_bit_idx, clear_bit_idx; + ram_addr_t offset; + void *host; + size_t read, unread, size; + + for (set_bit_idx = find_first_bit(bitmap, num_pages); + set_bit_idx < num_pages; + set_bit_idx = find_next_bit(bitmap, num_pages, clear_bit_idx + 1)) { + + clear_bit_idx = find_next_zero_bit(bitmap, num_pages, set_bit_idx + 1); + + unread = TARGET_PAGE_SIZE * (clear_bit_idx - set_bit_idx); + offset = set_bit_idx << TARGET_PAGE_BITS; + + while (unread > 0) { + host = host_from_ram_block_offset(block, offset); + if (!host) { + error_setg(errp, "page outside of ramblock %s range", + block->idstr); + return false; + } + + size = MIN(unread, MAPPED_RAM_LOAD_BUF_SIZE); + + if (migrate_multifd()) { + read = ram_load_multifd_pages(host, size, + block->pages_offset + offset); + } else { + read = qemu_get_buffer_at(f, host, size, + block->pages_offset + offset); + } + + if (!read) { + goto err; + } + offset += read; + unread -= read; + } + } + + return true; + +err: + qemu_file_get_error_obj(f, errp); + error_prepend(errp, "(%s) failed to read page " RAM_ADDR_FMT + "from file offset %" PRIx64 ": ", block->idstr, offset, + block->pages_offset + offset); + return false; +} + +static void parse_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block, + ram_addr_t length, Error **errp) +{ + g_autofree unsigned long *bitmap = NULL; + MappedRamHeader header; + size_t bitmap_size; + long num_pages; + + if (!mapped_ram_read_header(f, &header, errp)) { + return; + } + + block->pages_offset = header.pages_offset; + + /* + * Check the alignment of the file region that contains pages. We + * don't enforce MAPPED_RAM_FILE_OFFSET_ALIGNMENT to allow that + * value to change in the future. Do only a sanity check with page + * size alignment. 
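On load, read_ramblock_mapped_ram() above walks the received bitmap as runs of consecutive set bits and issues one bounded read per chunk of each run. A simplified, self-contained sketch of that run-walking shape over a plain unsigned long bitmap; the helper names are illustrative, not QEMU's find_*_bit API.

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative bit test over an unsigned long bitmap. */
    static bool test_bit_(const unsigned long *map, size_t i)
    {
        return map[i / (8 * sizeof(unsigned long))] >>
               (i % (8 * sizeof(unsigned long))) & 1;
    }

    /*
     * Walk the bitmap as runs of set bits and report each run as
     * (first_page, npages), the shape in which the mapped-ram loader
     * issues its reads.
     */
    static void for_each_dirty_run(const unsigned long *map, size_t num_pages,
                                   void (*cb)(size_t first, size_t npages,
                                              void *opaque),
                                   void *opaque)
    {
        size_t i = 0;

        while (i < num_pages) {
            if (!test_bit_(map, i)) {
                i++;
                continue;
            }
            size_t start = i;
            while (i < num_pages && test_bit_(map, i)) {
                i++;
            }
            cb(start, i - start, opaque);
        }
    }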
+ */ + if (!QEMU_IS_ALIGNED(block->pages_offset, TARGET_PAGE_SIZE)) { + error_setg(errp, + "Error reading ramblock %s pages, region has bad alignment", + block->idstr); + return; + } + + num_pages = length / header.page_size; + bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long); + + bitmap = g_malloc0(bitmap_size); + if (qemu_get_buffer_at(f, (uint8_t *)bitmap, bitmap_size, + header.bitmap_offset) != bitmap_size) { + error_setg(errp, "Error reading dirty bitmap"); + return; + } + + if (!read_ramblock_mapped_ram(f, block, num_pages, bitmap, errp)) { + return; + } + + /* Skip pages array */ + qemu_set_offset(f, block->pages_offset + length, SEEK_SET); + + return; +} + static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length) { int ret = 0; /* ADVISE is earlier, it shows the source has the postcopy capability on */ bool postcopy_advised = migration_incoming_postcopy_advised(); + int max_hg_page_size; + Error *local_err = NULL; assert(block); + if (migrate_mapped_ram()) { + parse_ramblock_mapped_ram(f, block, length, &local_err); + if (local_err) { + error_report_err(local_err); + return -EINVAL; + } + return 0; + } + if (!qemu_ram_is_migratable(block)) { error_report("block %s should not be migrated !", block->idstr); return -EINVAL; } if (length != block->used_length) { - Error *local_err = NULL; - ret = qemu_ram_resize(block, length, &local_err); if (local_err) { error_report_err(local_err); return ret; } } + + /* + * ??? Mirrors the previous value of qemu_host_page_size, + * but is this really what was intended for the migration? + */ + max_hg_page_size = MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE); + /* For postcopy we need to check hugepage sizes match */ if (postcopy_advised && migrate_postcopy_ram() && - block->page_size != qemu_host_page_size) { + block->page_size != max_hg_page_size) { uint64_t remote_page_size = qemu_get_be64(f); if (remote_page_size != block->page_size) { error_report("Mismatched RAM page size %s " @@ -3888,6 +4217,12 @@ static int ram_load_precopy(QEMUFile *f) invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE; } + if (migrate_mapped_ram()) { + invalid_flags |= (RAM_SAVE_FLAG_HOOK | RAM_SAVE_FLAG_MULTIFD_FLUSH | + RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_XBZRLE | + RAM_SAVE_FLAG_ZERO); + } + while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) { ram_addr_t addr; void *host = NULL, *host_bak = NULL; @@ -3905,10 +4240,18 @@ static int ram_load_precopy(QEMUFile *f) i++; addr = qemu_get_be64(f); + ret = qemu_file_get_error(f); + if (ret) { + error_report("Getting RAM address failed"); + break; + } + flags = addr & ~TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK; if (flags & invalid_flags) { + error_report("Unexpected RAM flags: %d", flags & invalid_flags); + if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) { error_report("Received an unexpected compressed page"); } @@ -3961,6 +4304,16 @@ static int ram_load_precopy(QEMUFile *f) switch (flags & ~RAM_SAVE_FLAG_CONTINUE) { case RAM_SAVE_FLAG_MEM_SIZE: ret = parse_ramblocks(f, addr); + /* + * For mapped-ram migration (to a file) using multifd, we sync + * once and for all here to make sure all tasks we queued to + * multifd threads are completed, so that all the ramblocks + * (including all the guest memory pages within) are fully + * loaded after this sync returns. 
+ */ + if (migrate_mapped_ram()) { + multifd_recv_sync_main(); + } break; case RAM_SAVE_FLAG_ZERO: @@ -4001,7 +4354,12 @@ static int ram_load_precopy(QEMUFile *f) case RAM_SAVE_FLAG_EOS: /* normal exit */ if (migrate_multifd() && - migrate_multifd_flush_after_each_section()) { + migrate_multifd_flush_after_each_section() && + /* + * Mapped-ram migration flushes once and for all after + * parsing ramblocks. Always ignore EOS for it. + */ + !migrate_mapped_ram()) { multifd_recv_sync_main(); } break; diff --git a/migration/ram.h b/migration/ram.h index 9b937a446b7..08feecaf516 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -75,6 +75,8 @@ bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb, Error **errp); bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start); void postcopy_preempt_shutdown_file(MigrationState *s); void *postcopy_preempt_thread(void *opaque); +void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset, + bool set); /* ram cache */ int colo_init_ram_cache(void); diff --git a/migration/rdma.c b/migration/rdma.c index 4141c0bd253..855753c6719 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -3357,7 +3357,7 @@ static int qemu_rdma_accept(RDMAContext *rdma) goto err_rdma_dest_wait; } - isock->host = rdma->host; + isock->host = g_strdup(rdma->host); isock->port = g_strdup_printf("%d", rdma->port); /* @@ -4039,7 +4039,6 @@ static void rdma_accept_incoming_migration(void *opaque) { RDMAContext *rdma = opaque; QEMUFile *f; - Error *local_err = NULL; trace_qemu_rdma_accept_incoming_migration(); if (qemu_rdma_accept(rdma) < 0) { @@ -4061,10 +4060,7 @@ static void rdma_accept_incoming_migration(void *opaque) } rdma->migration_started_on_destination = 1; - migration_fd_process_incoming(f, &local_err); - if (local_err) { - error_reportf_err(local_err, "RDMA ERROR:"); - } + migration_fd_process_incoming(f); } void rdma_start_incoming_migration(InetSocketAddress *host_port, diff --git a/migration/savevm.c b/migration/savevm.c index eec5503a422..e7c1215671d 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -245,6 +245,7 @@ static bool should_validate_capability(int capability) /* Validate only new capabilities to keep compatibility. 
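The capability check above only validates capabilities that affect the stream format, which is why MAPPED_RAM is added to the list. A minimal sketch of the idea, where local[], remote[] and must_validate[] are illustrative stand-ins for the real capability bookkeeping:

    #include <stdbool.h>
    #include <stddef.h>

    /*
     * Accept the incoming stream only if every capability that changes the
     * stream format is configured the same way on both sides.
     */
    static bool caps_compatible(const bool *local, const bool *remote,
                                const bool *must_validate, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            if (must_validate[i] && local[i] != remote[i]) {
                return false;
            }
        }
        return true;
    }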
*/ switch (capability) { case MIGRATION_CAPABILITY_X_IGNORE_SHARED: + case MIGRATION_CAPABILITY_MAPPED_RAM: return true; default: return false; @@ -438,7 +439,7 @@ static const VMStateDescription vmstate_target_page_bits = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_target_page_bits_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(target_page_bits, SaveState), VMSTATE_END_OF_LIST() } @@ -454,7 +455,7 @@ static const VMStateDescription vmstate_capabilites = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_capabilites_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_V(caps_count, SaveState, 1), VMSTATE_VARRAY_UINT32_ALLOC(capabilities, SaveState, caps_count, 1, vmstate_info_capability, @@ -499,7 +500,7 @@ static const VMStateDescription vmstate_uuid = { .minimum_version_id = 1, .needed = vmstate_uuid_needed, .post_load = vmstate_uuid_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY_V(uuid.data, SaveState, sizeof(QemuUUID), 1), VMSTATE_END_OF_LIST() } @@ -512,12 +513,12 @@ static const VMStateDescription vmstate_configuration = { .post_load = configuration_post_load, .pre_save = configuration_pre_save, .post_save = configuration_post_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(len, SaveState), VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, len), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription *[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_target_page_bits, &vmstate_capabilites, &vmstate_uuid, @@ -551,11 +552,11 @@ static void dump_vmstate_vmsf(FILE *out_file, const VMStateField *field, } static void dump_vmstate_vmss(FILE *out_file, - const VMStateDescription **subsection, + const VMStateDescription *subsection, int indent) { - if (*subsection != NULL) { - dump_vmstate_vmsd(out_file, *subsection, indent, true); + if (subsection != NULL) { + dump_vmstate_vmsd(out_file, subsection, indent, true); } } @@ -597,7 +598,7 @@ static void dump_vmstate_vmsd(FILE *out_file, fprintf(out_file, "\n%*s]", indent, ""); } if (vmsd->subsections != NULL) { - const VMStateDescription **subsection = vmsd->subsections; + const VMStateDescription * const *subsection = vmsd->subsections; bool first; fprintf(out_file, ",\n%*s\"Subsections\": [\n", indent, ""); @@ -606,7 +607,7 @@ static void dump_vmstate_vmsd(FILE *out_file, if (!first) { fprintf(out_file, ",\n"); } - dump_vmstate_vmss(out_file, subsection, indent + 2); + dump_vmstate_vmss(out_file, *subsection, indent + 2); subsection++; first = false; } @@ -831,7 +832,7 @@ void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque) static void vmstate_check(const VMStateDescription *vmsd) { const VMStateField *field = vmsd->fields; - const VMStateDescription **subsection = vmsd->subsections; + const VMStateDescription * const *subsection = vmsd->subsections; if (field) { while (field->name) { @@ -1316,7 +1317,7 @@ void qemu_savevm_state_setup(QEMUFile *f) MigrationState *ms = migrate_get_current(); SaveStateEntry *se; Error *local_err = NULL; - int ret; + int ret = 0; json_writer_int64(ms->vmdesc, "page_size", qemu_target_page_size()); json_writer_start_array(ms->vmdesc, "devices"); @@ -1350,6 +1351,10 @@ void qemu_savevm_state_setup(QEMUFile *f) } } + if (ret) { + return; + } + if (precopy_notify(PRECOPY_NOTIFY_SETUP, &local_err)) { error_report_err(local_err); } @@ -1389,7 +1394,8 @@ int 
qemu_savevm_state_resume_prepare(MigrationState *s) int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) { SaveStateEntry *se; - int ret = 1; + bool all_finished = true; + int ret; trace_savevm_state_iterate(); QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { @@ -1430,16 +1436,12 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) "%d(%s): %d", se->section_id, se->idstr, ret); qemu_file_set_error(f, ret); - } - if (ret <= 0) { - /* Do not proceed to the next vmstate before this one reported - completion of the current stage. This serializes the migration - and reduces the probability that a faster changing state is - synchronized over and over again. */ - break; + return ret; + } else if (!ret) { + all_finished = false; } } - return ret; + return all_finished; } static bool should_send_vmdesc(void) @@ -1704,7 +1706,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp) MigrationState *ms = migrate_get_current(); MigrationStatus status; - if (migration_is_running(ms->state)) { + if (migration_is_running()) { error_setg(errp, QERR_MIGRATION_ACTIVE); return -EINVAL; } @@ -2171,8 +2173,6 @@ static void loadvm_postcopy_handle_run_bh(void *opaque) runstate_set(RUN_STATE_PAUSED); } - qemu_bh_delete(mis->bh); - trace_vmstate_downtime_checkpoint("dst-postcopy-bh-vm-started"); } @@ -2188,8 +2188,7 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis) } postcopy_state_set(POSTCOPY_INCOMING_RUNNING); - mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, mis); - qemu_bh_schedule(mis->bh); + migration_bh_schedule(loadvm_postcopy_handle_run_bh, mis); /* We need to finish reading the stream from the package * and also stop reading anything more from the stream that loaded the @@ -2343,6 +2342,27 @@ static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis) QEMUFile *packf = qemu_file_new_input(QIO_CHANNEL(bioc)); + /* + * Before loading the guest states, ensure that the preempt channel has + * been ready to use, as some of the states (e.g. via virtio_load) might + * trigger page faults that will be handled through the preempt channel. + * So yield to the main thread in the case that the channel create event + * hasn't been dispatched. + * + * TODO: if we can move migration loadvm out of main thread, then we + * won't block main thread from polling the accept() fds. We can drop + * this as a whole when that is done. 
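The reworked qemu_savevm_state_iterate() above keeps iterating every handler and only reports completion once all of them are done, instead of stopping at the first handler that still has work. A small sketch of that aggregation; the handler struct and callback signature are assumptions for illustration.

    #include <stdbool.h>
    #include <stddef.h>

    struct save_handler {
        /* returns <0 on error, 0 if more work remains, >0 when finished */
        int (*iterate)(void *opaque);
        void *opaque;
    };

    /* Returns 1 only when every handler finished, 0 otherwise, <0 on error. */
    static int iterate_all(struct save_handler *h, size_t n)
    {
        bool all_finished = true;

        for (size_t i = 0; i < n; i++) {
            int r = h[i].iterate(h[i].opaque);

            if (r < 0) {
                return r;
            }
            if (r == 0) {
                all_finished = false;
            }
        }
        return all_finished;
    }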
+ */ + do { + if (!migrate_postcopy_preempt() || !qemu_in_coroutine() || + mis->postcopy_qemufile_dst) { + break; + } + + aio_co_schedule(qemu_get_current_aio_context(), qemu_coroutine_self()); + qemu_coroutine_yield(); + } while (1); + ret = qemu_loadvm_state_main(packf, mis); trace_loadvm_handle_cmd_packaged_main(ret); qemu_fclose(packf); @@ -3046,10 +3066,9 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate, QEMUSnapshotInfo sn1, *sn = &sn1; int ret = -1, ret2; QEMUFile *f; - int saved_vm_running; + RunState saved_state = runstate_get(); uint64_t vm_state_size; g_autoptr(GDateTime) now = g_date_time_new_now_local(); - AioContext *aio_context; GLOBAL_STATE_CODE(); @@ -3092,17 +3111,12 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate, if (bs == NULL) { return false; } - aio_context = bdrv_get_aio_context(bs); - - saved_vm_running = runstate_is_running(); global_state_store(); vm_stop(RUN_STATE_SAVE_VM); bdrv_drain_all_begin(); - aio_context_acquire(aio_context); - memset(sn, 0, sizeof(*sn)); /* fill auxiliary fields */ @@ -3139,14 +3153,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate, goto the_end; } - /* The bdrv_all_create_snapshot() call that follows acquires the AioContext - * for itself. BDRV_POLL_WHILE() does not support nested locking because - * it only releases the lock once. Therefore synchronous I/O will deadlock - * unless we release the AioContext before bdrv_all_create_snapshot(). - */ - aio_context_release(aio_context); - aio_context = NULL; - ret = bdrv_all_create_snapshot(sn, bs, vm_state_size, has_devices, devices, errp); if (ret < 0) { @@ -3157,15 +3163,9 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate, ret = 0; the_end: - if (aio_context) { - aio_context_release(aio_context); - } - bdrv_drain_all_end(); - if (saved_vm_running) { - vm_start(); - } + vm_resume(saved_state); return ret == 0; } @@ -3258,7 +3258,6 @@ bool load_snapshot(const char *name, const char *vmstate, QEMUSnapshotInfo sn; QEMUFile *f; int ret; - AioContext *aio_context; MigrationIncomingState *mis = migration_incoming_get_current(); if (!bdrv_all_can_snapshot(has_devices, devices, errp)) { @@ -3278,12 +3277,9 @@ bool load_snapshot(const char *name, const char *vmstate, if (!bs_vm_state) { return false; } - aio_context = bdrv_get_aio_context(bs_vm_state); /* Don't even try to load empty VM states */ - aio_context_acquire(aio_context); ret = bdrv_snapshot_find(bs_vm_state, &sn, name); - aio_context_release(aio_context); if (ret < 0) { return false; } else if (sn.vm_state_size == 0) { @@ -3320,10 +3316,8 @@ bool load_snapshot(const char *name, const char *vmstate, ret = -EINVAL; goto err_drain; } - aio_context_acquire(aio_context); ret = qemu_loadvm_state(f); migration_incoming_state_destroy(); - aio_context_release(aio_context); bdrv_drain_all_end(); @@ -3339,6 +3333,14 @@ bool load_snapshot(const char *name, const char *vmstate, return false; } +void load_snapshot_resume(RunState state) +{ + vm_resume(state); + if (state == RUN_STATE_RUNNING && runstate_get() == RUN_STATE_SUSPENDED) { + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, &error_abort); + } +} + bool delete_snapshot(const char *name, bool has_devices, strList *devices, Error **errp) { @@ -3403,16 +3405,15 @@ static void snapshot_load_job_bh(void *opaque) { Job *job = opaque; SnapshotJob *s = container_of(job, SnapshotJob, common); - int orig_vm_running; + RunState orig_state = runstate_get(); 
job_progress_set_remaining(&s->common, 1); - orig_vm_running = runstate_is_running(); vm_stop(RUN_STATE_RESTORE_VM); s->ret = load_snapshot(s->tag, s->vmstate, true, s->devices, s->errp); - if (s->ret && orig_vm_running) { - vm_start(); + if (s->ret) { + load_snapshot_resume(orig_state); } job_progress_update(&s->common, 1); diff --git a/migration/socket.c b/migration/socket.c index 98e3ea15147..9ab89b1e089 100644 --- a/migration/socket.c +++ b/migration/socket.c @@ -60,17 +60,6 @@ QIOChannel *socket_send_channel_create_sync(Error **errp) return QIO_CHANNEL(sioc); } -int socket_send_channel_destroy(QIOChannel *send) -{ - /* Remove channel */ - object_unref(OBJECT(send)); - if (outgoing_args.saddr) { - qapi_free_SocketAddress(outgoing_args.saddr); - outgoing_args.saddr = NULL; - } - return 0; -} - struct SocketConnectData { MigrationState *s; char *hostname; @@ -137,6 +126,14 @@ void socket_start_outgoing_migration(MigrationState *s, NULL); } +void socket_cleanup_outgoing_migration(void) +{ + if (outgoing_args.saddr) { + qapi_free_SocketAddress(outgoing_args.saddr); + outgoing_args.saddr = NULL; + } +} + static void socket_accept_incoming_migration(QIONetListener *listener, QIOChannelSocket *cioc, gpointer opaque) diff --git a/migration/socket.h b/migration/socket.h index 5e4c33b8ea5..46c233ecd29 100644 --- a/migration/socket.h +++ b/migration/socket.h @@ -23,10 +23,11 @@ void socket_send_channel_create(QIOTaskFunc f, void *data); QIOChannel *socket_send_channel_create_sync(Error **errp); -int socket_send_channel_destroy(QIOChannel *send); void socket_start_incoming_migration(SocketAddress *saddr, Error **errp); void socket_start_outgoing_migration(MigrationState *s, SocketAddress *saddr, Error **errp); +void socket_cleanup_outgoing_migration(void); + #endif diff --git a/migration/trace-events b/migration/trace-events index de4a743c8a7..f0e1cb80c75 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -128,21 +128,21 @@ postcopy_preempt_reset_channel(void) "" # multifd.c multifd_new_send_channel_async(uint8_t id) "channel %u" multifd_new_send_channel_async_error(uint8_t id, void *err) "channel=%u err=%p" -multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " pages %u flags 0x%x next packet size %u" +multifd_recv(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t zero, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u" multifd_recv_new_channel(uint8_t id) "channel %u" multifd_recv_sync_main(long packet_num) "packet num %ld" multifd_recv_sync_main_signal(uint8_t id) "channel %u" -multifd_recv_sync_main_wait(uint8_t id) "channel %u" +multifd_recv_sync_main_wait(uint8_t id) "iter %u" multifd_recv_terminate_threads(bool error) "error %d" -multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %u packets %" PRIu64 " pages %" PRIu64 +multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages, uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 " zero pages %" PRIu64 multifd_recv_thread_start(uint8_t id) "%u" -multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u flags 0x%x next packet size %u" +multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal_pages, uint32_t zero_pages, uint32_t flags, uint32_t next_packet_size) "channel %u 
packet_num %" PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u" multifd_send_error(uint8_t id) "channel %u" multifd_send_sync_main(long packet_num) "packet num %ld" multifd_send_sync_main_signal(uint8_t id) "channel %u" multifd_send_sync_main_wait(uint8_t id) "channel %u" -multifd_send_terminate_threads(bool error) "error %d" -multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 +multifd_send_terminate_threads(void) "" +multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages, uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 " zero pages %" PRIu64 multifd_send_thread_start(uint8_t id) "%u" multifd_tls_outgoing_handshake_start(void *ioc, void *tioc, const char *hostname) "ioc=%p tioc=%p hostname=%s" multifd_tls_outgoing_handshake_error(void *ioc, const char *err) "ioc=%p err=%s" diff --git a/migration/vmstate.c b/migration/vmstate.c index b7723a41871..ef26f26ccdc 100644 --- a/migration/vmstate.c +++ b/migration/vmstate.c @@ -452,13 +452,15 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, } static const VMStateDescription * -vmstate_get_subsection(const VMStateDescription **sub, char *idstr) +vmstate_get_subsection(const VMStateDescription * const *sub, + const char *idstr) { - while (sub && *sub) { - if (strcmp(idstr, (*sub)->name) == 0) { - return *sub; + if (sub) { + for (const VMStateDescription *s = *sub; s ; s = *++sub) { + if (strcmp(idstr, s->name) == 0) { + return s; + } } - sub++; } return NULL; } @@ -517,7 +519,7 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, JSONWriter *vmdesc) { - const VMStateDescription **sub = vmsd->subsections; + const VMStateDescription * const *sub = vmsd->subsections; bool vmdesc_has_subsections = false; int ret = 0; diff --git a/migration/yank_functions.c b/migration/yank_functions.c index d5a710a3f2f..979e60c7620 100644 --- a/migration/yank_functions.c +++ b/migration/yank_functions.c @@ -8,12 +8,9 @@ */ #include "qemu/osdep.h" -#include "qapi/error.h" #include "io/channel.h" #include "yank_functions.h" #include "qemu/yank.h" -#include "io/channel-socket.h" -#include "io/channel-tls.h" #include "qemu-file.h" void migration_yank_iochannel(void *opaque) @@ -26,8 +23,7 @@ void migration_yank_iochannel(void *opaque) /* Return whether yank is supported on this ioc */ static bool migration_ioc_yank_supported(QIOChannel *ioc) { - return object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_SOCKET) || - object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_TLS); + return qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN); } void migration_ioc_register_yank(QIOChannel *ioc) diff --git a/monitor/hmp-cmds-target.c b/monitor/hmp-cmds-target.c index d9fbcac08da..ff01cf9d8db 100644 --- a/monitor/hmp-cmds-target.c +++ b/monitor/hmp-cmds-target.c @@ -25,6 +25,7 @@ #include "qemu/osdep.h" #include "disas/disas.h" #include "exec/address-spaces.h" +#include "exec/memory.h" #include "monitor/hmp-target.h" #include "monitor/monitor-internal.h" #include "qapi/error.h" @@ -260,7 +261,7 @@ void *gpa2hva(MemoryRegion **p_mr, hwaddr addr, uint64_t size, Error **errp) } if (!memory_region_is_ram(mrs.mr) && !memory_region_is_romd(mrs.mr)) { - error_setg(errp, "Memory at address 0x%" HWADDR_PRIx "is not RAM", addr); + error_setg(errp, "Memory at address 0x%" HWADDR_PRIx " is not RAM", 
addr); memory_region_unref(mrs.mr); return NULL; } diff --git a/nbd/server.c b/nbd/server.c index 895cf0a7525..c3484cc1ebc 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -122,26 +122,28 @@ struct NBDMetaContexts { }; struct NBDClient { - int refcount; + int refcount; /* atomic */ void (*close_fn)(NBDClient *client, bool negotiated); + QemuMutex lock; + NBDExport *exp; QCryptoTLSCreds *tlscreds; char *tlsauthz; QIOChannelSocket *sioc; /* The underlying data channel */ QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */ - Coroutine *recv_coroutine; + Coroutine *recv_coroutine; /* protected by lock */ CoMutex send_lock; Coroutine *send_coroutine; - bool read_yielding; - bool quiescing; + bool read_yielding; /* protected by lock */ + bool quiescing; /* protected by lock */ QTAILQ_ENTRY(NBDClient) next; - int nb_requests; - bool closing; + int nb_requests; /* protected by lock */ + bool closing; /* protected by lock */ uint32_t check_align; /* If non-zero, check for aligned client requests */ @@ -1415,11 +1417,18 @@ nbd_read_eof(NBDClient *client, void *buffer, size_t size, Error **errp) len = qio_channel_readv(client->ioc, &iov, 1, errp); if (len == QIO_CHANNEL_ERR_BLOCK) { - client->read_yielding = true; + WITH_QEMU_LOCK_GUARD(&client->lock) { + client->read_yielding = true; + + /* Prompt main loop thread to re-run nbd_drained_poll() */ + aio_wait_kick(); + } qio_channel_yield(client->ioc, G_IO_IN); - client->read_yielding = false; - if (client->quiescing) { - return -EAGAIN; + WITH_QEMU_LOCK_GUARD(&client->lock) { + client->read_yielding = false; + if (client->quiescing) { + return -EAGAIN; + } } continue; } else if (len < 0) { @@ -1501,14 +1510,17 @@ static int coroutine_fn nbd_receive_request(NBDClient *client, NBDRequest *reque #define MAX_NBD_REQUESTS 16 +/* Runs in export AioContext and main loop thread */ void nbd_client_get(NBDClient *client) { - client->refcount++; + qatomic_inc(&client->refcount); } void nbd_client_put(NBDClient *client) { - if (--client->refcount == 0) { + assert(qemu_in_main_thread()); + + if (qatomic_fetch_dec(&client->refcount) == 1) { /* The last reference should be dropped by client->close, * which is called by client_close. */ @@ -1525,17 +1537,47 @@ void nbd_client_put(NBDClient *client) blk_exp_unref(&client->exp->common); } g_free(client->contexts.bitmaps); + qemu_mutex_destroy(&client->lock); g_free(client); } } +/* + * Tries to release the reference to @client, but only if other references + * remain. This is an optimization for the common case where we want to avoid + * the expense of scheduling nbd_client_put() in the main loop thread. + * + * Returns true upon success or false if the reference was not released because + * it is the last reference. + */ +static bool nbd_client_put_nonzero(NBDClient *client) +{ + int old = qatomic_read(&client->refcount); + int expected; + + do { + if (old == 1) { + return false; + } + + expected = old; + old = qatomic_cmpxchg(&client->refcount, expected, expected - 1); + } while (old != expected); + + return true; +} + static void client_close(NBDClient *client, bool negotiated) { - if (client->closing) { - return; - } + assert(qemu_in_main_thread()); - client->closing = true; + WITH_QEMU_LOCK_GUARD(&client->lock) { + if (client->closing) { + return; + } + + client->closing = true; + } /* Force requests to finish. They will drop their own references, * then we'll close the socket and free the NBDClient. 
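nbd_client_put_nonzero() above is the classic "drop a reference only if it is not the last one" pattern, which lets the fast path stay in the export AioContext and leaves the final teardown to the main loop. A self-contained C11 sketch of the same compare-and-swap loop; the function name is illustrative.

    #include <stdatomic.h>
    #include <stdbool.h>

    /*
     * Drop one reference unless it is the last. Returns false when the
     * caller holds the final reference and must take the slow release
     * path instead (e.g. scheduling it in the main loop thread).
     */
    static bool ref_put_nonzero(atomic_int *refcount)
    {
        int old = atomic_load(refcount);

        do {
            if (old == 1) {
                return false;
            }
            /* on failure, 'old' is reloaded with the current value */
        } while (!atomic_compare_exchange_weak(refcount, &old, old - 1));

        return true;
    }

Because only the final reference ever returns false, the expensive main-loop release runs at most once per object.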
@@ -1549,6 +1591,7 @@ static void client_close(NBDClient *client, bool negotiated) } } +/* Runs in export AioContext with client->lock held */ static NBDRequestData *nbd_request_get(NBDClient *client) { NBDRequestData *req; @@ -1557,11 +1600,11 @@ static NBDRequestData *nbd_request_get(NBDClient *client) client->nb_requests++; req = g_new0(NBDRequestData, 1); - nbd_client_get(client); req->client = client; return req; } +/* Runs in export AioContext with client->lock held */ static void nbd_request_put(NBDRequestData *req) { NBDClient *client = req->client; @@ -1578,8 +1621,6 @@ static void nbd_request_put(NBDRequestData *req) } nbd_client_receive_next_request(client); - - nbd_client_put(client); } static void blk_aio_attached(AioContext *ctx, void *opaque) @@ -1587,14 +1628,18 @@ static void blk_aio_attached(AioContext *ctx, void *opaque) NBDExport *exp = opaque; NBDClient *client; + assert(qemu_in_main_thread()); + trace_nbd_blk_aio_attached(exp->name, ctx); exp->common.ctx = ctx; QTAILQ_FOREACH(client, &exp->clients, next) { - assert(client->nb_requests == 0); - assert(client->recv_coroutine == NULL); - assert(client->send_coroutine == NULL); + WITH_QEMU_LOCK_GUARD(&client->lock) { + assert(client->nb_requests == 0); + assert(client->recv_coroutine == NULL); + assert(client->send_coroutine == NULL); + } } } @@ -1602,6 +1647,8 @@ static void blk_aio_detach(void *opaque) { NBDExport *exp = opaque; + assert(qemu_in_main_thread()); + trace_nbd_blk_aio_detach(exp->name, exp->common.ctx); exp->common.ctx = NULL; @@ -1612,8 +1659,12 @@ static void nbd_drained_begin(void *opaque) NBDExport *exp = opaque; NBDClient *client; + assert(qemu_in_main_thread()); + QTAILQ_FOREACH(client, &exp->clients, next) { - client->quiescing = true; + WITH_QEMU_LOCK_GUARD(&client->lock) { + client->quiescing = true; + } } } @@ -1622,28 +1673,48 @@ static void nbd_drained_end(void *opaque) NBDExport *exp = opaque; NBDClient *client; + assert(qemu_in_main_thread()); + QTAILQ_FOREACH(client, &exp->clients, next) { - client->quiescing = false; - nbd_client_receive_next_request(client); + WITH_QEMU_LOCK_GUARD(&client->lock) { + client->quiescing = false; + nbd_client_receive_next_request(client); + } } } +/* Runs in export AioContext */ +static void nbd_wake_read_bh(void *opaque) +{ + NBDClient *client = opaque; + qio_channel_wake_read(client->ioc); +} + static bool nbd_drained_poll(void *opaque) { NBDExport *exp = opaque; NBDClient *client; + assert(qemu_in_main_thread()); + QTAILQ_FOREACH(client, &exp->clients, next) { - if (client->nb_requests != 0) { - /* - * If there's a coroutine waiting for a request on nbd_read_eof() - * enter it here so we don't depend on the client to wake it up. - */ - if (client->recv_coroutine != NULL && client->read_yielding) { - qio_channel_wake_read(client->ioc); - } + WITH_QEMU_LOCK_GUARD(&client->lock) { + if (client->nb_requests != 0) { + /* + * If there's a coroutine waiting for a request on nbd_read_eof() + * enter it here so we don't depend on the client to wake it up. + * + * Schedule a BH in the export AioContext to avoid missing the + * wake up due to the race between qio_channel_wake_read() and + * qio_channel_yield(). 
+ */ + if (client->recv_coroutine != NULL && client->read_yielding) { + aio_bh_schedule_oneshot(nbd_export_aio_context(client->exp), + nbd_wake_read_bh, client); + } - return true; + return true; + } } } @@ -1654,6 +1725,8 @@ static void nbd_eject_notifier(Notifier *n, void *data) { NBDExport *exp = container_of(n, NBDExport, eject_notifier); + assert(qemu_in_main_thread()); + blk_exp_request_shutdown(&exp->common); } @@ -2539,7 +2612,6 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, int ret; g_assert(qemu_in_coroutine()); - assert(client->recv_coroutine == qemu_coroutine_self()); ret = nbd_receive_request(client, request, errp); if (ret < 0) { return ret; @@ -2935,16 +3007,24 @@ static coroutine_fn int nbd_handle_request(NBDClient *client, /* Owns a reference to the NBDClient passed as opaque. */ static coroutine_fn void nbd_trip(void *opaque) { - NBDClient *client = opaque; - NBDRequestData *req; + NBDRequestData *req = opaque; + NBDClient *client = req->client; NBDRequest request = { 0 }; /* GCC thinks it can be used uninitialized */ int ret; Error *local_err = NULL; + /* + * Note that nbd_client_put() and client_close() must be called from the + * main loop thread. Use aio_co_reschedule_self() to switch AioContext + * before calling these functions. + */ + trace_nbd_trip(); + + qemu_mutex_lock(&client->lock); + if (client->closing) { - nbd_client_put(client); - return; + goto done; } if (client->quiescing) { @@ -2952,14 +3032,25 @@ static coroutine_fn void nbd_trip(void *opaque) * We're switching between AIO contexts. Don't attempt to receive a new * request and kick the main context which may be waiting for us. */ - nbd_client_put(client); client->recv_coroutine = NULL; aio_wait_kick(); - return; + goto done; } - req = nbd_request_get(client); - ret = nbd_co_receive_request(req, &request, &local_err); + /* + * nbd_co_receive_request() returns -EAGAIN when nbd_drained_begin() has + * set client->quiescing but by the time we get back nbd_drained_end() may + * have already cleared client->quiescing. In that case we try again + * because nothing else will spawn an nbd_trip() coroutine until we set + * client->recv_coroutine = NULL further down. 
+ */ + do { + assert(client->recv_coroutine == qemu_coroutine_self()); + qemu_mutex_unlock(&client->lock); + ret = nbd_co_receive_request(req, &request, &local_err); + qemu_mutex_lock(&client->lock); + } while (ret == -EAGAIN && !client->quiescing); + client->recv_coroutine = NULL; if (client->closing) { @@ -2971,15 +3062,16 @@ static coroutine_fn void nbd_trip(void *opaque) } if (ret == -EAGAIN) { - assert(client->quiescing); goto done; } nbd_client_receive_next_request(client); + if (ret == -EIO) { goto disconnect; } + qemu_mutex_unlock(&client->lock); qio_channel_set_cork(client->ioc, true); if (ret < 0) { @@ -2999,6 +3091,10 @@ static coroutine_fn void nbd_trip(void *opaque) g_free(request.contexts->bitmaps); g_free(request.contexts); } + + qio_channel_set_cork(client->ioc, false); + qemu_mutex_lock(&client->lock); + if (ret < 0) { error_prepend(&local_err, "Failed to send reply: "); goto disconnect; @@ -3013,27 +3109,43 @@ static coroutine_fn void nbd_trip(void *opaque) goto disconnect; } - qio_channel_set_cork(client->ioc, false); done: nbd_request_put(req); - nbd_client_put(client); + + qemu_mutex_unlock(&client->lock); + + if (!nbd_client_put_nonzero(client)) { + aio_co_reschedule_self(qemu_get_aio_context()); + nbd_client_put(client); + } return; disconnect: if (local_err) { error_reportf_err(local_err, "Disconnect client, due to: "); } + nbd_request_put(req); + qemu_mutex_unlock(&client->lock); + + aio_co_reschedule_self(qemu_get_aio_context()); client_close(client, true); nbd_client_put(client); } +/* + * Runs in export AioContext and main loop thread. Caller must hold + * client->lock. + */ static void nbd_client_receive_next_request(NBDClient *client) { + NBDRequestData *req; + if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS && !client->quiescing) { nbd_client_get(client); - client->recv_coroutine = qemu_coroutine_create(nbd_trip, client); + req = nbd_request_get(client); + client->recv_coroutine = qemu_coroutine_create(nbd_trip, req); aio_co_schedule(client->exp->common.ctx, client->recv_coroutine); } } @@ -3053,7 +3165,9 @@ static coroutine_fn void nbd_co_client_start(void *opaque) return; } - nbd_client_receive_next_request(client); + WITH_QEMU_LOCK_GUARD(&client->lock) { + nbd_client_receive_next_request(client); + } } /* @@ -3070,6 +3184,7 @@ void nbd_client_new(QIOChannelSocket *sioc, Coroutine *co; client = g_new0(NBDClient, 1); + qemu_mutex_init(&client->lock); client->refcount = 1; client->tlscreds = tlscreds; if (tlscreds) { diff --git a/net/af-xdp.c b/net/af-xdp.c index 6c65028fb00..01c5fb914ec 100644 --- a/net/af-xdp.c +++ b/net/af-xdp.c @@ -13,7 +13,6 @@ #include "qemu/osdep.h" #include -#include #include #include #include @@ -447,7 +446,7 @@ int net_init_af_xdp(const Netdev *netdev, NetClientState *nc, *nc0 = NULL; unsigned int ifindex; uint32_t prog_id = 0; - int *sock_fds = NULL; + g_autofree int *sock_fds = NULL; int64_t i, queues; Error *err = NULL; AFXDPState *s; @@ -517,7 +516,6 @@ int net_init_af_xdp(const Netdev *netdev, return 0; err: - g_free(sock_fds); if (nc0) { qemu_del_net_client(nc0); } diff --git a/net/can/meson.build b/net/can/meson.build index 45693c82c9d..af3b27921cd 100644 --- a/net/can/meson.build +++ b/net/can/meson.build @@ -1,5 +1,7 @@ can_ss = ss.source_set() can_ss.add(files('can_core.c', 'can_host.c')) -can_ss.add(when: 'CONFIG_LINUX', if_true: files('can_socketcan.c')) +if host_os == 'linux' + can_ss.add(files('can_socketcan.c')) +endif system_ss.add_all(when: 'CONFIG_CAN_BUS', if_true: can_ss) diff --git 
a/net/colo-compare.c b/net/colo-compare.c index 7f9e6f89ce0..c4ad0ab71fa 100644 --- a/net/colo-compare.c +++ b/net/colo-compare.c @@ -28,7 +28,6 @@ #include "sysemu/iothread.h" #include "net/colo-compare.h" #include "migration/colo.h" -#include "migration/migration.h" #include "util.h" #include "block/aio-wait.h" @@ -189,7 +188,7 @@ static void colo_compare_inconsistency_notify(CompareState *s) notify_remote_frame(s); } else { notifier_list_notify(&colo_compare_notifiers, - migrate_get_current()); + NULL); } } @@ -1439,12 +1438,10 @@ static void colo_compare_finalize(Object *obj) qemu_bh_delete(s->event_bh); AioContext *ctx = iothread_get_aio_context(s->iothread); - aio_context_acquire(ctx); AIO_WAIT_WHILE(ctx, !s->out_sendco.done); if (s->notify_dev) { AIO_WAIT_WHILE(ctx, !s->notify_sendco.done); } - aio_context_release(ctx); /* Release all unhandled packets after compare thead exited */ g_queue_foreach(&s->conn_list, colo_flush_packets, s); diff --git a/net/meson.build b/net/meson.build index ce99bd4447f..9432a588e4e 100644 --- a/net/meson.build +++ b/net/meson.build @@ -41,23 +41,21 @@ system_ss.add(when: libxdp, if_true: files('af-xdp.c')) if have_vhost_net_user system_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-user.c'), if_false: files('vhost-user-stub.c')) - system_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-user-stub.c')) endif -if targetos == 'windows' +if host_os == 'windows' system_ss.add(files('tap-win32.c')) -elif targetos == 'linux' +elif host_os == 'linux' system_ss.add(files('tap.c', 'tap-linux.c')) -elif targetos in bsd_oses +elif host_os in bsd_oses system_ss.add(files('tap.c', 'tap-bsd.c')) -elif targetos == 'sunos' +elif host_os == 'sunos' system_ss.add(files('tap.c', 'tap-solaris.c')) else system_ss.add(files('tap.c', 'tap-stub.c')) endif if have_vhost_net_vdpa system_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-vdpa.c'), if_false: files('vhost-vdpa-stub.c')) - system_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-vdpa-stub.c')) endif vmnet_files = files( diff --git a/net/net.c b/net/net.c index 0520bc1681a..a2f0c828bbf 100644 --- a/net/net.c +++ b/net/net.c @@ -75,6 +75,11 @@ typedef QSIMPLEQ_HEAD(, NetdevQueueEntry) NetdevQueue; static NetdevQueue nd_queue = QSIMPLEQ_HEAD_INITIALIZER(nd_queue); +static GHashTable *nic_model_help; + +static int nb_nics; +static NICInfo nd_table[MAX_NICS]; + /***********************************************************/ /* network device redirectors */ @@ -975,51 +980,6 @@ GPtrArray *qemu_get_nic_models(const char *device_type) return nic_models; } -int qemu_show_nic_models(const char *arg, const char *const *models) -{ - int i; - - if (!arg || !is_help_option(arg)) { - return 0; - } - - printf("Available NIC models:\n"); - for (i = 0 ; models[i]; i++) { - printf("%s\n", models[i]); - } - return 1; -} - -void qemu_check_nic_model(NICInfo *nd, const char *model) -{ - const char *models[2]; - - models[0] = model; - models[1] = NULL; - - if (qemu_show_nic_models(nd->model, models)) - exit(0); - if (qemu_find_nic_model(nd, models, model) < 0) - exit(1); -} - -int qemu_find_nic_model(NICInfo *nd, const char * const *models, - const char *default_model) -{ - int i; - - if (!nd->model) - nd->model = g_strdup(default_model); - - for (i = 0 ; models[i]; i++) { - if (strcmp(nd->model, models[i]) == 0) - return i; - } - - error_report("Unsupported NIC model: %s", nd->model); - return -1; -} - static int net_init_nic(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { @@ -1087,6 +1047,192 @@ 
static int net_init_nic(const Netdev *netdev, const char *name, return idx; } +static gboolean add_nic_result(gpointer key, gpointer value, gpointer user_data) +{ + GPtrArray *results = user_data; + GPtrArray *alias_list = value; + const char *model = key; + char *result; + + if (!alias_list) { + result = g_strdup(model); + } else { + GString *result_str = g_string_new(model); + int i; + + g_string_append(result_str, " (aka "); + for (i = 0; i < alias_list->len; i++) { + if (i) { + g_string_append(result_str, ", "); + } + g_string_append(result_str, alias_list->pdata[i]); + } + g_string_append(result_str, ")"); + result = result_str->str; + g_string_free(result_str, false); + g_ptr_array_unref(alias_list); + } + g_ptr_array_add(results, result); + return true; +} + +static int model_cmp(char **a, char **b) +{ + return strcmp(*a, *b); +} + +static void show_nic_models(void) +{ + GPtrArray *results = g_ptr_array_new(); + int i; + + g_hash_table_foreach_remove(nic_model_help, add_nic_result, results); + g_ptr_array_sort(results, (GCompareFunc)model_cmp); + + printf("Available NIC models for this configuration:\n"); + for (i = 0 ; i < results->len; i++) { + printf("%s\n", (char *)results->pdata[i]); + } + g_hash_table_unref(nic_model_help); + nic_model_help = NULL; +} + +static void add_nic_model_help(const char *model, const char *alias) +{ + GPtrArray *alias_list = NULL; + + if (g_hash_table_lookup_extended(nic_model_help, model, NULL, + (gpointer *)&alias_list)) { + /* Already exists, no alias to add: return */ + if (!alias) { + return; + } + if (alias_list) { + /* Check if this alias is already in the list. Add if not. */ + if (!g_ptr_array_find_with_equal_func(alias_list, alias, + g_str_equal, NULL)) { + g_ptr_array_add(alias_list, g_strdup(alias)); + } + return; + } + } + /* Either this model wasn't in the list already, or a first alias added */ + if (alias) { + alias_list = g_ptr_array_new(); + g_ptr_array_set_free_func(alias_list, g_free); + g_ptr_array_add(alias_list, g_strdup(alias)); + } + g_hash_table_replace(nic_model_help, g_strdup(model), alias_list); +} + +NICInfo *qemu_find_nic_info(const char *typename, bool match_default, + const char *alias) +{ + NICInfo *nd; + int i; + + if (nic_model_help) { + add_nic_model_help(typename, alias); + } + + for (i = 0; i < nb_nics; i++) { + nd = &nd_table[i]; + + if (!nd->used || nd->instantiated) { + continue; + } + + if ((match_default && !nd->model) || !g_strcmp0(nd->model, typename) + || (alias && !g_strcmp0(nd->model, alias))) { + return nd; + } + } + return NULL; +} + + +/* "I have created a device. 
Please configure it if you can" */ +bool qemu_configure_nic_device(DeviceState *dev, bool match_default, + const char *alias) +{ + NICInfo *nd = qemu_find_nic_info(object_get_typename(OBJECT(dev)), + match_default, alias); + + if (nd) { + qdev_set_nic_properties(dev, nd); + return true; + } + return false; +} + +/* "Please create a device, if you have a configuration for it" */ +DeviceState *qemu_create_nic_device(const char *typename, bool match_default, + const char *alias) +{ + NICInfo *nd = qemu_find_nic_info(typename, match_default, alias); + DeviceState *dev; + + if (!nd) { + return NULL; + } + + dev = qdev_new(typename); + qdev_set_nic_properties(dev, nd); + return dev; +} + +void qemu_create_nic_bus_devices(BusState *bus, const char *parent_type, + const char *default_model, + const char *alias, const char *alias_target) +{ + GPtrArray *nic_models = qemu_get_nic_models(parent_type); + const char *model; + DeviceState *dev; + NICInfo *nd; + int i; + + if (nic_model_help) { + if (alias_target) { + add_nic_model_help(alias_target, alias); + } + for (i = 0; i < nic_models->len - 1; i++) { + add_nic_model_help(nic_models->pdata[i], NULL); + } + } + + /* Drop the NULL terminator which would make g_str_equal() unhappy */ + nic_models->len--; + + for (i = 0; i < nb_nics; i++) { + nd = &nd_table[i]; + + if (!nd->used || nd->instantiated) { + continue; + } + + model = nd->model ? nd->model : default_model; + if (!model) { + continue; + } + + /* Each bus type is allowed *one* substitution */ + if (g_str_equal(model, alias)) { + model = alias_target; + } + + if (!g_ptr_array_find_with_equal_func(nic_models, model, + g_str_equal, NULL)) { + /* This NIC does not live on this bus. */ + continue; + } + + dev = qdev_new(model); + qdev_set_nic_properties(dev, nd); + qdev_realize_and_unref(dev, bus, &error_fatal); + } + + g_ptr_array_free(nic_models, true); +} static int (* const net_client_init_fun[NET_CLIENT_DRIVER__MAX])( const Netdev *netdev, @@ -1555,6 +1701,10 @@ void net_check_clients(void) NetClientState *nc; int i; + if (nic_model_help) { + show_nic_models(); + exit(0); + } net_hub_check_clients(); QTAILQ_FOREACH(nc, &net_clients, next) { @@ -1612,9 +1762,14 @@ static int net_param_nic(void *dummy, QemuOpts *opts, Error **errp) } if (is_help_option(type)) { GPtrArray *nic_models = qemu_get_nic_models(TYPE_DEVICE); + int i; show_netdevs(); printf("\n"); - qemu_show_nic_models(type, (const char **)nic_models->pdata); + printf("Available NIC models " + "(use -nic model=help for a filtered list):\n"); + for (i = 0 ; nic_models->pdata[i]; i++) { + printf("%s\n", (char *)nic_models->pdata[i]); + } g_ptr_array_free(nic_models, true); exit(0); } @@ -1634,6 +1789,12 @@ static int net_param_nic(void *dummy, QemuOpts *opts, Error **errp) memset(ni, 0, sizeof(*ni)); ni->model = qemu_opt_get_del(opts, "model"); + if (!nic_model_help && !g_strcmp0(ni->model, "help")) { + nic_model_help = g_hash_table_new_full(g_str_hash, g_str_equal, + g_free, NULL); + return 0; + } + /* Create an ID if the user did not specify one */ nd_id = g_strdup(qemu_opts_id(opts)); if (!nd_id) { diff --git a/net/stream.c b/net/stream.c index 9204b4c96e4..97e6ec6679e 100644 --- a/net/stream.c +++ b/net/stream.c @@ -165,6 +165,7 @@ static gboolean net_stream_send(QIOChannel *ioc, s->ioc_write_tag = 0; } if (s->listener) { + qemu_set_info_str(&s->nc, "listening"); qio_net_listener_set_client_func(s->listener, net_stream_listen, s, NULL); } @@ -173,7 +174,6 @@ static gboolean net_stream_send(QIOChannel *ioc, net_socket_rs_init(&s->rs, 
net_stream_rs_finalize, false); s->nc.link_down = true; - qemu_set_info_str(&s->nc, "%s", ""); qapi_event_send_netdev_stream_disconnected(s->nc.name); net_stream_arm_reconnect(s); @@ -272,9 +272,11 @@ static void net_stream_server_listening(QIOTask *task, gpointer opaque) QIOChannelSocket *listen_sioc = QIO_CHANNEL_SOCKET(s->listen_ioc); SocketAddress *addr; int ret; + Error *err = NULL; - if (listen_sioc->fd < 0) { - qemu_set_info_str(&s->nc, "connection error"); + if (qio_task_propagate_error(task, &err)) { + qemu_set_info_str(&s->nc, "error: %s", error_get_pretty(err)); + error_free(err); return; } @@ -292,6 +294,7 @@ static void net_stream_server_listening(QIOTask *task, gpointer opaque) s->nc.link_down = true; s->listener = qio_net_listener_new(); + qemu_set_info_str(&s->nc, "listening"); net_socket_rs_init(&s->rs, net_stream_rs_finalize, false); qio_net_listener_set_client_func(s->listener, net_stream_listen, s, NULL); qio_net_listener_add(s->listener, listen_sioc); @@ -309,6 +312,7 @@ static int net_stream_server_init(NetClientState *peer, nc = qemu_new_net_client(&net_stream_info, peer, model, name); s = DO_UPCAST(NetStreamState, nc, nc); + qemu_set_info_str(&s->nc, "initializing"); s->listen_ioc = QIO_CHANNEL(listen_sioc); qio_channel_socket_listen_async(listen_sioc, addr, 0, @@ -325,9 +329,11 @@ static void net_stream_client_connected(QIOTask *task, gpointer opaque) SocketAddress *addr; gchar *uri; int ret; + Error *err = NULL; - if (sioc->fd < 0) { - qemu_set_info_str(&s->nc, "connection error"); + if (qio_task_propagate_error(task, &err)) { + qemu_set_info_str(&s->nc, "error: %s", error_get_pretty(err)); + error_free(err); goto error; } @@ -382,6 +388,7 @@ static gboolean net_stream_reconnect(gpointer data) static void net_stream_arm_reconnect(NetStreamState *s) { if (s->reconnect && s->timer_tag == 0) { + qemu_set_info_str(&s->nc, "connecting"); s->timer_tag = g_timeout_add_seconds(s->reconnect, net_stream_reconnect, s); } @@ -400,6 +407,7 @@ static int net_stream_client_init(NetClientState *peer, nc = qemu_new_net_client(&net_stream_info, peer, model, name); s = DO_UPCAST(NetStreamState, nc, nc); + qemu_set_info_str(&s->nc, "connecting"); s->ioc = QIO_CHANNEL(sioc); s->nc.link_down = true; diff --git a/net/tap-win32.c b/net/tap-win32.c index 7b8b4be02cf..7edbd716337 100644 --- a/net/tap-win32.c +++ b/net/tap-win32.c @@ -707,70 +707,16 @@ static void tap_win32_send(void *opaque) } } -static bool tap_has_ufo(NetClientState *nc) -{ - return false; -} - -static bool tap_has_vnet_hdr(NetClientState *nc) -{ - return false; -} - -int tap_probe_vnet_hdr_len(int fd, int len) -{ - return 0; -} - -void tap_fd_set_vnet_hdr_len(int fd, int len) -{ -} - -int tap_fd_set_vnet_le(int fd, int is_le) -{ - return -EINVAL; -} - -int tap_fd_set_vnet_be(int fd, int is_be) -{ - return -EINVAL; -} - -static void tap_using_vnet_hdr(NetClientState *nc, bool using_vnet_hdr) -{ -} - -static void tap_set_offload(NetClientState *nc, int csum, int tso4, - int tso6, int ecn, int ufo, int uso4, int uso6) -{ -} - struct vhost_net *tap_get_vhost_net(NetClientState *nc) { return NULL; } -static bool tap_has_vnet_hdr_len(NetClientState *nc, int len) -{ - return false; -} - -static void tap_set_vnet_hdr_len(NetClientState *nc, int len) -{ - abort(); -} - static NetClientInfo net_tap_win32_info = { .type = NET_CLIENT_DRIVER_TAP, .size = sizeof(TAPState), .receive = tap_receive, .cleanup = tap_cleanup, - .has_ufo = tap_has_ufo, - .has_vnet_hdr = tap_has_vnet_hdr, - .has_vnet_hdr_len = tap_has_vnet_hdr_len, - 
.using_vnet_hdr = tap_using_vnet_hdr, - .set_offload = tap_set_offload, - .set_vnet_hdr_len = tap_set_vnet_hdr_len, }; static int tap_win32_init(NetClientState *peer, const char *model, diff --git a/net/tap.c b/net/tap.c index c23d0323c2a..baaa2f7a9ac 100644 --- a/net/tap.c +++ b/net/tap.c @@ -219,7 +219,7 @@ static void tap_send(void *opaque) /* * When the host keeps receiving more packets while tap_send() is - * running we can hog the QEMU global mutex. Limit the number of + * running we can hog the BQL. Limit the number of * packets that are processed per tap_send() callback to prevent * stalling the guest. */ @@ -743,11 +743,7 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, if (vhostfdname) { vhostfd = monitor_fd_param(monitor_cur(), vhostfdname, &err); if (vhostfd == -1) { - if (tap->has_vhostforce && tap->vhostforce) { - error_propagate(errp, err); - } else { - warn_report_err(err); - } + error_propagate(errp, err); goto failed; } if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { @@ -758,13 +754,8 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, } else { vhostfd = open("/dev/vhost-net", O_RDWR); if (vhostfd < 0) { - if (tap->has_vhostforce && tap->vhostforce) { - error_setg_errno(errp, errno, - "tap: open vhost char device failed"); - } else { - warn_report("tap: open vhost char device failed: %s", - strerror(errno)); - } + error_setg_errno(errp, errno, + "tap: open vhost char device failed"); goto failed; } if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { @@ -777,11 +768,8 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, s->vhost_net = vhost_net_init(&options); if (!s->vhost_net) { - if (tap->has_vhostforce && tap->vhostforce) { - error_setg(errp, VHOST_NET_INIT_FAILED); - } else { - warn_report(VHOST_NET_INIT_FAILED); - } + error_setg(errp, + "vhost-net requested but could not be initialized"); goto failed; } } else if (vhostfdname) { diff --git a/net/trace-events b/net/trace-events index 823a071bdce..cda960f42bc 100644 --- a/net/trace-events +++ b/net/trace-events @@ -23,3 +23,9 @@ colo_compare_tcp_info(const char *pkt, uint32_t seq, uint32_t ack, int hdlen, in # filter-rewriter.c colo_filter_rewriter_pkt_info(const char *func, const char *src, const char *dst, uint32_t seq, uint32_t ack, uint32_t flag) "%s: src/dst: %s/%s p: seq/ack=%u/%u flags=0x%x" colo_filter_rewriter_conn_offset(uint32_t offset) ": offset=%u" + +# vhost-vdpa.c +vhost_vdpa_set_address_space_id(void *v, unsigned vq_group, unsigned asid_num) "vhost_vdpa: %p vq_group: %u asid: %u" +vhost_vdpa_net_load_cmd(void *s, uint8_t class, uint8_t cmd, int data_num, int data_size) "vdpa state: %p class: %u cmd: %u sg_num: %d size: %d" +vhost_vdpa_net_load_cmd_retval(void *s, uint8_t class, uint8_t cmd, int r) "vdpa state: %p class: %u cmd: %u retval: %d" +vhost_vdpa_net_load_mq(void *s, int ncurqps) "vdpa state: %p current_qpairs: %d" diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index d0614d79549..85e73dd6a73 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -26,15 +26,15 @@ #include #include "standard-headers/linux/virtio_net.h" #include "monitor/monitor.h" -#include "migration/migration.h" #include "migration/misc.h" #include "hw/virtio/vhost.h" +#include "trace.h" /* Todo:need to add the multiqueue support here */ typedef struct VhostVDPAState { NetClientState nc; struct vhost_vdpa vhost_vdpa; - Notifier migration_state; + NotifierWithReturn migration_state; VHostNetState *vhost_net; /* Control 
commands shadow buffers */ @@ -236,10 +236,11 @@ static void vhost_vdpa_cleanup(NetClientState *nc) g_free(s->vhost_net); s->vhost_net = NULL; } - if (s->vhost_vdpa.device_fd >= 0) { - qemu_close(s->vhost_vdpa.device_fd); - s->vhost_vdpa.device_fd = -1; + if (s->vhost_vdpa.index != 0) { + return; } + qemu_close(s->vhost_vdpa.shared->device_fd); + g_free(s->vhost_vdpa.shared); } /** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */ @@ -286,13 +287,19 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf, return size; } -/** From any vdpa net client, get the netclient of the first queue pair */ -static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s) + +/** From any vdpa net client, get the netclient of the i-th queue pair */ +static VhostVDPAState *vhost_vdpa_net_get_nc_vdpa(VhostVDPAState *s, int i) { NICState *nic = qemu_get_nic(s->nc.peer); - NetClientState *nc0 = qemu_get_peer(nic->ncs, 0); + NetClientState *nc_i = qemu_get_peer(nic->ncs, i); + + return DO_UPCAST(VhostVDPAState, nc, nc_i); +} - return DO_UPCAST(VhostVDPAState, nc, nc0); +static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s) +{ + return vhost_vdpa_net_get_nc_vdpa(s, 0); } static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable) @@ -316,6 +323,8 @@ static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable) data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ? n->max_ncs - n->max_queue_pairs : 0; + v->shared->svq_switching = enable ? + SVQ_TSTATE_ENABLING : SVQ_TSTATE_DISABLING; /* * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter * in the future and resume the device if read-only operations between @@ -328,19 +337,20 @@ static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable) if (unlikely(r < 0)) { error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r); } + v->shared->svq_switching = SVQ_TSTATE_DONE; } -static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data) +static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier, + MigrationEvent *e, Error **errp) { - MigrationState *migration = data; - VhostVDPAState *s = container_of(notifier, VhostVDPAState, - migration_state); + VhostVDPAState *s = container_of(notifier, VhostVDPAState, migration_state); - if (migration_in_setup(migration)) { + if (e->type == MIG_EVENT_PRECOPY_SETUP) { vhost_vdpa_net_log_global_enable(s, true); - } else if (migration_has_failed(migration)) { + } else if (e->type == MIG_EVENT_PRECOPY_FAILED) { vhost_vdpa_net_log_global_enable(s, false); } + return 0; } static void vhost_vdpa_net_data_start_first(VhostVDPAState *s) @@ -350,8 +360,8 @@ static void vhost_vdpa_net_data_start_first(VhostVDPAState *s) migration_add_notifier(&s->migration_state, vdpa_net_migration_state_notifier); if (v->shadow_vqs_enabled) { - v->iova_tree = vhost_iova_tree_new(v->iova_range.first, - v->iova_range.last); + v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first, + v->shared->iova_range.last); } } @@ -363,24 +373,18 @@ static int vhost_vdpa_net_data_start(NetClientState *nc) assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); if (s->always_svq || - migration_is_setup_or_active(migrate_get_current()->state)) { + migration_is_setup_or_active()) { v->shadow_vqs_enabled = true; - v->shadow_data = true; } else { v->shadow_vqs_enabled = false; - v->shadow_data = false; } if (v->index == 0) { + 
v->shared->shadow_data = v->shadow_vqs_enabled; vhost_vdpa_net_data_start_first(s); return 0; } - if (v->shadow_vqs_enabled) { - VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s); - v->iova_tree = s0->vhost_vdpa.iova_tree; - } - return 0; } @@ -413,9 +417,8 @@ static void vhost_vdpa_net_client_stop(NetClientState *nc) dev = s->vhost_vdpa.dev; if (dev->vq_index + dev->nvqs == dev->vq_index_end) { - g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); - } else { - s->vhost_vdpa.iova_tree = NULL; + g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, + vhost_iova_tree_delete); } } @@ -460,7 +463,9 @@ static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v, }; int r; - r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid); + trace_vhost_vdpa_set_address_space_id(v, vq_group, asid_num); + + r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid); if (unlikely(r < 0)) { error_report("Can't set vq group %u asid %u, errno=%d (%s)", asid.index, asid.num, errno, g_strerror(errno)); @@ -470,7 +475,7 @@ static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v, static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr) { - VhostIOVATree *tree = v->iova_tree; + VhostIOVATree *tree = v->shared->iova_tree; DMAMap needle = { /* * No need to specify size or to look for more translations since @@ -486,7 +491,8 @@ static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr) return; } - r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1); + r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova, + map->size + 1); if (unlikely(r != 0)) { error_report("Device cannot unmap: %s(%d)", g_strerror(r), r); } @@ -504,13 +510,13 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size, map.translated_addr = (hwaddr)(uintptr_t)buf; map.size = size - 1; map.perm = write ? IOMMU_RW : IOMMU_RO, - r = vhost_iova_tree_map_alloc(v->iova_tree, &map); + r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map); if (unlikely(r != IOVA_OK)) { error_report("Cannot map injected element"); return r; } - r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova, + r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf, !write); if (unlikely(r < 0)) { goto dma_map_err; @@ -519,7 +525,7 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size, return 0; dma_map_err: - vhost_iova_tree_remove(v->iova_tree, map); + vhost_iova_tree_remove(v->shared->iova_tree, map); return r; } @@ -537,11 +543,10 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc) v = &s->vhost_vdpa; s0 = vhost_vdpa_net_first_nc_vdpa(s); - v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled; v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled; s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID; - if (s->vhost_vdpa.shadow_data) { + if (v->shared->shadow_data) { /* SVQ is already configured for all virtqueues */ goto out; } @@ -558,7 +563,7 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc) return 0; } - cvq_group = vhost_vdpa_get_vring_group(v->device_fd, + cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd, v->dev->vq_index_end - 1, &err); if (unlikely(cvq_group < 0)) { @@ -579,24 +584,22 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc) return 0; } - if (s0->vhost_vdpa.iova_tree) { - /* - * SVQ is already configured for all virtqueues. 
Reuse IOVA tree for - * simplicity, whether CVQ shares ASID with guest or not, because: - * - Memory listener need access to guest's memory addresses allocated - * in the IOVA tree. - * - There should be plenty of IOVA address space for both ASID not to - * worry about collisions between them. Guest's translations are - * still validated with virtio virtqueue_pop so there is no risk for - * the guest to access memory that it shouldn't. - * - * To allocate a iova tree per ASID is doable but it complicates the - * code and it is not worth it for the moment. - */ - v->iova_tree = s0->vhost_vdpa.iova_tree; - } else { - v->iova_tree = vhost_iova_tree_new(v->iova_range.first, - v->iova_range.last); + /* + * If other vhost_vdpa already have an iova_tree, reuse it for simplicity, + * whether CVQ shares ASID with guest or not, because: + * - Memory listener need access to guest's memory addresses allocated in + * the IOVA tree. + * - There should be plenty of IOVA address space for both ASID not to + * worry about collisions between them. Guest's translations are still + * validated with virtio virtqueue_pop so there is no risk for the guest + * to access memory that it shouldn't. + * + * To allocate a iova tree per ASID is doable but it complicates the code + * and it is not worth it for the moment. + */ + if (!v->shared->iova_tree) { + v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first, + v->shared->iova_range.last); } r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer, @@ -714,6 +717,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl)); cmd_size = sizeof(ctrl) + data_size; + trace_vhost_vdpa_net_load_cmd(s, class, cmd, data_num, data_size); if (vhost_svq_available_slots(svq) < 2 || iov_size(out_cursor, 1) < cmd_size) { /* @@ -745,6 +749,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1); if (unlikely(r < 0)) { + trace_vhost_vdpa_net_load_cmd_retval(s, class, cmd, r); return r; } @@ -936,6 +941,8 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s, return 0; } + trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs); + mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs); const struct iovec data = { .iov_base = &mq, @@ -1575,14 +1582,13 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = { static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features, int cvq_index, Error **errp) { + ERRP_GUARD(); uint64_t backend_features; int64_t cvq_group; uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER; int r; - ERRP_GUARD(); - r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features); if (unlikely(r < 0)) { error_setg_errno(errp, errno, "Cannot get vdpa backend_features"); @@ -1661,6 +1667,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, bool svq, struct vhost_vdpa_iova_range iova_range, uint64_t features, + VhostVDPAShared *shared, Error **errp) { NetClientState *nc = NULL; @@ -1686,16 +1693,17 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, qemu_set_info_str(nc, TYPE_VHOST_VDPA); s = DO_UPCAST(VhostVDPAState, nc, nc); - s->vhost_vdpa.device_fd = vdpa_device_fd; s->vhost_vdpa.index = queue_pair_index; s->always_svq = svq; s->migration_state.notify = NULL; s->vhost_vdpa.shadow_vqs_enabled = svq; - s->vhost_vdpa.iova_range = iova_range; - s->vhost_vdpa.shadow_data = svq; if (queue_pair_index == 0) { 
vhost_vdpa_net_valid_svq_features(features, &s->vhost_vdpa.migration_blocker); + s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1); + s->vhost_vdpa.shared->device_fd = vdpa_device_fd; + s->vhost_vdpa.shared->iova_range = iova_range; + s->vhost_vdpa.shared->shadow_data = svq; } else if (!is_datapath) { s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), PROT_READ | PROT_WRITE, @@ -1708,11 +1716,16 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, s->vhost_vdpa.shadow_vq_ops_opaque = s; s->cvq_isolated = cvq_isolated; } + if (queue_pair_index != 0) { + s->vhost_vdpa.shared = shared; + } + ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); if (ret) { qemu_del_net_client(nc); return NULL; } + return nc; } @@ -1762,6 +1775,7 @@ static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features, int net_init_vhost_vdpa(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { + ERRP_GUARD(); const NetdevVhostVDPAOptions *opts; uint64_t features; int vdpa_device_fd; @@ -1824,17 +1838,26 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, ncs = g_malloc0(sizeof(*ncs) * queue_pairs); for (i = 0; i < queue_pairs; i++) { + VhostVDPAShared *shared = NULL; + + if (i) { + shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared; + } ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, vdpa_device_fd, i, 2, true, opts->x_svq, - iova_range, features, errp); + iova_range, features, shared, errp); if (!ncs[i]) goto err; } if (has_cvq) { + VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]); + VhostVDPAShared *shared = s0->vhost_vdpa.shared; + nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, vdpa_device_fd, i, 1, false, - opts->x_svq, iova_range, features, errp); + opts->x_svq, iova_range, features, shared, + errp); if (!nc) goto err; } diff --git a/os-posix.c b/os-posix.c index 52ef6990ff9..a4284e2c07b 100644 --- a/os-posix.c +++ b/os-posix.c @@ -24,6 +24,7 @@ */ #include "qemu/osdep.h" +#include #include #include #include @@ -256,6 +257,27 @@ void os_daemonize(void) } } +void os_setup_limits(void) +{ + struct rlimit nofile; + + if (getrlimit(RLIMIT_NOFILE, &nofile) < 0) { + warn_report("unable to query NOFILE limit: %s", strerror(errno)); + return; + } + + if (nofile.rlim_cur == nofile.rlim_max) { + return; + } + + nofile.rlim_cur = nofile.rlim_max; + + if (setrlimit(RLIMIT_NOFILE, &nofile) < 0) { + warn_report("unable to set NOFILE limit: %s", strerror(errno)); + return; + } +} + void os_setup_post(void) { int fd = 0; diff --git a/pc-bios/README b/pc-bios/README index 4189bb28cce..7ffb2f43a46 100644 --- a/pc-bios/README +++ b/pc-bios/README @@ -67,7 +67,7 @@ and enable the use of well-known bootloaders such as U-Boot. OpenSBI is distributed under the terms of the BSD 2-clause license ("Simplified BSD License" or "FreeBSD License", SPDX: BSD-2-Clause). OpenSBI - source code also contains code reused from other projects desribed here: + source code also contains code reused from other projects described here: https://github.com/riscv/opensbi/blob/master/ThirdPartyNotices.md. - npcm7xx_bootrom.bin is a simplified, free (Apache 2.0) boot ROM for Nuvoton @@ -75,3 +75,9 @@ initialize and run boot images stored in SPI flash, but may grow more features over time as needed. The source code is available at: https://github.com/google/vbootrom + +- hppa-firmware.img (32-bit) and hppa-firmware64.img (64-bit) are firmware + files for the HP-PARISC (hppa) architecture. 
+ They are built from the SeaBIOS-hppa sources, which is a fork of SeaBIOS + adapted for hppa. + SeaBIOS-hppa is available at https://github.com/hdeller/seabios-hppa diff --git a/pc-bios/edk2-aarch64-code.fd.bz2 b/pc-bios/edk2-aarch64-code.fd.bz2 index 5d0a57173d2..e763982db4d 100644 Binary files a/pc-bios/edk2-aarch64-code.fd.bz2 and b/pc-bios/edk2-aarch64-code.fd.bz2 differ diff --git a/pc-bios/edk2-arm-code.fd.bz2 b/pc-bios/edk2-arm-code.fd.bz2 index af49559f34b..329646dafa9 100644 Binary files a/pc-bios/edk2-arm-code.fd.bz2 and b/pc-bios/edk2-arm-code.fd.bz2 differ diff --git a/pc-bios/edk2-i386-code.fd.bz2 b/pc-bios/edk2-i386-code.fd.bz2 index ecd0c6b1ae8..271ce659b64 100644 Binary files a/pc-bios/edk2-i386-code.fd.bz2 and b/pc-bios/edk2-i386-code.fd.bz2 differ diff --git a/pc-bios/edk2-i386-secure-code.fd.bz2 b/pc-bios/edk2-i386-secure-code.fd.bz2 index 983e1778c45..00335cde4b1 100644 Binary files a/pc-bios/edk2-i386-secure-code.fd.bz2 and b/pc-bios/edk2-i386-secure-code.fd.bz2 differ diff --git a/pc-bios/edk2-riscv-code.fd.bz2 b/pc-bios/edk2-riscv-code.fd.bz2 index b6cd3c6f742..f3a98d6ed82 100644 Binary files a/pc-bios/edk2-riscv-code.fd.bz2 and b/pc-bios/edk2-riscv-code.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-code.fd.bz2 b/pc-bios/edk2-x86_64-code.fd.bz2 index 8707f695ef7..a1a8c05a1b6 100644 Binary files a/pc-bios/edk2-x86_64-code.fd.bz2 and b/pc-bios/edk2-x86_64-code.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-microvm.fd.bz2 b/pc-bios/edk2-x86_64-microvm.fd.bz2 index 334da49917d..6b7cd544a43 100644 Binary files a/pc-bios/edk2-x86_64-microvm.fd.bz2 and b/pc-bios/edk2-x86_64-microvm.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-secure-code.fd.bz2 b/pc-bios/edk2-x86_64-secure-code.fd.bz2 index abeb60bcf32..ef40a8bfd65 100644 Binary files a/pc-bios/edk2-x86_64-secure-code.fd.bz2 and b/pc-bios/edk2-x86_64-secure-code.fd.bz2 differ diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img old mode 100644 new mode 100755 index 1b3a8418250..e065e48ded2 Binary files a/pc-bios/hppa-firmware.img and b/pc-bios/hppa-firmware.img differ diff --git a/pc-bios/hppa-firmware64.img b/pc-bios/hppa-firmware64.img new file mode 100755 index 00000000000..7f6d837f0da Binary files /dev/null and b/pc-bios/hppa-firmware64.img differ diff --git a/pc-bios/meson.build b/pc-bios/meson.build index e67fa433a1b..0760612beac 100644 --- a/pc-bios/meson.build +++ b/pc-bios/meson.build @@ -73,6 +73,7 @@ blobs = [ 'qemu_vga.ndrv', 'edk2-licenses.txt', 'hppa-firmware.img', + 'hppa-firmware64.img', 'opensbi-riscv32-generic-fw_dynamic.bin', 'opensbi-riscv64-generic-fw_dynamic.bin', 'npcm7xx_bootrom.bin', diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin index 9a2ba3f2a4d..60ca1165c82 100644 Binary files a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin differ diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin index 5d4e812819e..bae158d4577 100644 Binary files a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin differ diff --git a/pc-bios/skiboot.lid b/pc-bios/skiboot.lid index 58ec5ec38ed..906bd512717 100644 Binary files a/pc-bios/skiboot.lid and b/pc-bios/skiboot.lid differ diff --git a/plugins/api.c b/plugins/api.c index 5521b0ad36c..8fa5a600ac3 100644 --- a/plugins/api.c +++ b/plugins/api.c @@ -8,6 +8,7 @@ * * qemu_plugin_tb * qemu_plugin_insn + * qemu_plugin_register * * Which can 
then be passed back into the API to do additional things. * As such all the public functions in here are exported in @@ -35,10 +36,12 @@ */ #include "qemu/osdep.h" +#include "qemu/main-loop.h" #include "qemu/plugin.h" #include "qemu/log.h" #include "tcg/tcg.h" #include "exec/exec-all.h" +#include "exec/gdbstub.h" #include "exec/ram_addr.h" #include "disas/disas.h" #include "plugin.h" @@ -89,17 +92,24 @@ void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb, void *udata) { if (!tb->mem_only) { - plugin_register_dyn_cb__udata(&tb->cbs[PLUGIN_CB_REGULAR], + int index = flags == QEMU_PLUGIN_CB_R_REGS || + flags == QEMU_PLUGIN_CB_RW_REGS ? + PLUGIN_CB_REGULAR_R : PLUGIN_CB_REGULAR; + + plugin_register_dyn_cb__udata(&tb->cbs[index], cb, flags, udata); } } -void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb, - enum qemu_plugin_op op, - void *ptr, uint64_t imm) +void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + struct qemu_plugin_tb *tb, + enum qemu_plugin_op op, + qemu_plugin_u64 entry, + uint64_t imm) { if (!tb->mem_only) { - plugin_register_inline_op(&tb->cbs[PLUGIN_CB_INLINE], 0, op, ptr, imm); + plugin_register_inline_op_on_entry( + &tb->cbs[PLUGIN_CB_INLINE], 0, op, entry, imm); } } @@ -109,18 +119,24 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn, void *udata) { if (!insn->mem_only) { - plugin_register_dyn_cb__udata(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], + int index = flags == QEMU_PLUGIN_CB_R_REGS || + flags == QEMU_PLUGIN_CB_RW_REGS ? + PLUGIN_CB_REGULAR_R : PLUGIN_CB_REGULAR; + + plugin_register_dyn_cb__udata(&insn->cbs[PLUGIN_CB_INSN][index], cb, flags, udata); } } -void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn, - enum qemu_plugin_op op, - void *ptr, uint64_t imm) +void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu( + struct qemu_plugin_insn *insn, + enum qemu_plugin_op op, + qemu_plugin_u64 entry, + uint64_t imm) { if (!insn->mem_only) { - plugin_register_inline_op(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE], - 0, op, ptr, imm); + plugin_register_inline_op_on_entry( + &insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE], 0, op, entry, imm); } } @@ -136,16 +152,18 @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn, void *udata) { plugin_register_vcpu_mem_cb(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], - cb, flags, rw, udata); + cb, flags, rw, udata); } -void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn, - enum qemu_plugin_mem_rw rw, - enum qemu_plugin_op op, void *ptr, - uint64_t imm) +void qemu_plugin_register_vcpu_mem_inline_per_vcpu( + struct qemu_plugin_insn *insn, + enum qemu_plugin_mem_rw rw, + enum qemu_plugin_op op, + qemu_plugin_u64 entry, + uint64_t imm) { - plugin_register_inline_op(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE], - rw, op, ptr, imm); + plugin_register_inline_op_on_entry( + &insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE], rw, op, entry, imm); } void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id, @@ -342,34 +360,9 @@ const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h) #endif } -/* - * Queries to the number and potential maximum number of vCPUs there - * will be. This helps the plugin dimension per-vcpu arrays. 
- */ - -#ifndef CONFIG_USER_ONLY -static MachineState * get_ms(void) +int qemu_plugin_num_vcpus(void) { - return MACHINE(qdev_get_machine()); -} -#endif - -int qemu_plugin_n_vcpus(void) -{ -#ifdef CONFIG_USER_ONLY - return -1; -#else - return get_ms()->smp.cpus; -#endif -} - -int qemu_plugin_n_max_vcpus(void) -{ -#ifdef CONFIG_USER_ONLY - return -1; -#else - return get_ms()->smp.max_cpus; -#endif + return plugin_num_vcpus(); } /* @@ -392,7 +385,7 @@ const char *qemu_plugin_path_to_binary(void) { char *path = NULL; #ifdef CONFIG_USER_ONLY - TaskState *ts = (TaskState *) current_cpu->opaque; + TaskState *ts = get_task_state(current_cpu); path = g_strdup(ts->bprm->filename); #endif return path; @@ -402,7 +395,7 @@ uint64_t qemu_plugin_start_code(void) { uint64_t start = 0; #ifdef CONFIG_USER_ONLY - TaskState *ts = (TaskState *) current_cpu->opaque; + TaskState *ts = get_task_state(current_cpu); start = ts->info->start_code; #endif return start; @@ -412,7 +405,7 @@ uint64_t qemu_plugin_end_code(void) { uint64_t end = 0; #ifdef CONFIG_USER_ONLY - TaskState *ts = (TaskState *) current_cpu->opaque; + TaskState *ts = get_task_state(current_cpu); end = ts->info->end_code; #endif return end; @@ -422,8 +415,113 @@ uint64_t qemu_plugin_entry_code(void) { uint64_t entry = 0; #ifdef CONFIG_USER_ONLY - TaskState *ts = (TaskState *) current_cpu->opaque; + TaskState *ts = get_task_state(current_cpu); entry = ts->info->entry; #endif return entry; } + +/* + * Create register handles. + * + * We need to create a handle for each register so the plugin + * infrastructure can call gdbstub to read a register. They are + * currently just a pointer encapsulation of the gdb_reg but in + * future may hold internal plugin state so it's important plugin + * authors are not tempted to treat them as numbers. + * + * We also construct a result array with those handles and some + * ancillary data the plugin might find useful. 
+ */ + +static GArray *create_register_handles(GArray *gdbstub_regs) +{ + GArray *find_data = g_array_new(true, true, + sizeof(qemu_plugin_reg_descriptor)); + + for (int i = 0; i < gdbstub_regs->len; i++) { + GDBRegDesc *grd = &g_array_index(gdbstub_regs, GDBRegDesc, i); + qemu_plugin_reg_descriptor desc; + + /* skip "un-named" regs */ + if (!grd->name) { + continue; + } + + /* Create a record for the plugin */ + desc.handle = GINT_TO_POINTER(grd->gdb_reg); + desc.name = g_intern_string(grd->name); + desc.feature = g_intern_string(grd->feature_name); + g_array_append_val(find_data, desc); + } + + return find_data; +} + +GArray *qemu_plugin_get_registers(void) +{ + g_assert(current_cpu); + + g_autoptr(GArray) regs = gdb_get_register_list(current_cpu); + return create_register_handles(regs); +} + +int qemu_plugin_read_register(struct qemu_plugin_register *reg, GByteArray *buf) +{ + g_assert(current_cpu); + + return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg)); +} + +struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size) +{ + return plugin_scoreboard_new(element_size); +} + +void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score) +{ + plugin_scoreboard_free(score); +} + +void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score, + unsigned int vcpu_index) +{ + g_assert(vcpu_index < qemu_plugin_num_vcpus()); + /* we can't use g_array_index since entry size is not statically known */ + char *base_ptr = score->data->data; + return base_ptr + vcpu_index * g_array_get_element_size(score->data); +} + +static uint64_t *plugin_u64_address(qemu_plugin_u64 entry, + unsigned int vcpu_index) +{ + char *ptr = qemu_plugin_scoreboard_find(entry.score, vcpu_index); + return (uint64_t *)(ptr + entry.offset); +} + +void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index, + uint64_t added) +{ + *plugin_u64_address(entry, vcpu_index) += added; +} + +uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry, + unsigned int vcpu_index) +{ + return *plugin_u64_address(entry, vcpu_index); +} + +void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index, + uint64_t val) +{ + *plugin_u64_address(entry, vcpu_index) = val; +} + +uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry) +{ + uint64_t total = 0; + for (int i = 0, n = qemu_plugin_num_vcpus(); i < n; ++i) { + total += qemu_plugin_u64_get(entry, i); + } + return total; +} diff --git a/plugins/core.c b/plugins/core.c index 49588285dd0..11ca20e6267 100644 --- a/plugins/core.c +++ b/plugins/core.c @@ -17,6 +17,8 @@ #include "qapi/error.h" #include "qemu/lockable.h" #include "qemu/option.h" +#include "qemu/plugin.h" +#include "qemu/queue.h" #include "qemu/rcu_queue.h" #include "qemu/xxhash.h" #include "qemu/rcu.h" @@ -27,7 +29,6 @@ #include "tcg/tcg.h" #include "tcg/tcg-op.h" #include "plugin.h" -#include "qemu/compiler.h" struct qemu_plugin_cb { struct qemu_plugin_ctx *ctx; @@ -54,7 +55,8 @@ struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id) static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data) { - bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX); + bitmap_copy(cpu->plugin_state->event_mask, + &data.host_ulong, QEMU_PLUGIN_EV_MAX); tcg_flush_jmp_cache(cpu); } @@ -209,15 +211,51 @@ plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev, do_plugin_register_cb(id, ev, func, udata); } +CPUPluginState *qemu_plugin_create_vcpu_state(void) +{ + return g_new0(CPUPluginState, 1); +} + +static void 
plugin_grow_scoreboards__locked(CPUState *cpu) +{ + if (cpu->cpu_index < plugin.scoreboard_alloc_size) { + return; + } + + bool need_realloc = FALSE; + while (cpu->cpu_index >= plugin.scoreboard_alloc_size) { + plugin.scoreboard_alloc_size *= 2; + need_realloc = TRUE; + } + + + if (!need_realloc || QLIST_EMPTY(&plugin.scoreboards)) { + /* nothing to do, we just updated sizes for future scoreboards */ + return; + } + + /* cpus must be stopped, as tb might still use an existing scoreboard. */ + start_exclusive(); + struct qemu_plugin_scoreboard *score; + QLIST_FOREACH(score, &plugin.scoreboards, entry) { + g_array_set_size(score->data, plugin.scoreboard_alloc_size); + } + /* force all tb to be flushed, as scoreboard pointers were changed. */ + tb_flush(cpu); + end_exclusive(); +} + void qemu_plugin_vcpu_init_hook(CPUState *cpu) { bool success; qemu_rec_mutex_lock(&plugin.lock); + plugin.num_vcpus = MAX(plugin.num_vcpus, cpu->cpu_index + 1); plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL); success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index, &cpu->cpu_index); g_assert(success); + plugin_grow_scoreboards__locked(cpu); qemu_rec_mutex_unlock(&plugin.lock); plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT); @@ -278,17 +316,19 @@ static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr) return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1); } -void plugin_register_inline_op(GArray **arr, - enum qemu_plugin_mem_rw rw, - enum qemu_plugin_op op, void *ptr, - uint64_t imm) +void plugin_register_inline_op_on_entry(GArray **arr, + enum qemu_plugin_mem_rw rw, + enum qemu_plugin_op op, + qemu_plugin_u64 entry, + uint64_t imm) { struct qemu_plugin_dyn_cb *dyn_cb; dyn_cb = plugin_get_dyn_cb(arr); - dyn_cb->userp = ptr; + dyn_cb->userp = NULL; dyn_cb->type = PLUGIN_CB_INLINE; dyn_cb->rw = rw; + dyn_cb->inline_insn.entry = entry; dyn_cb->inline_insn.op = op; dyn_cb->inline_insn.imm = imm; } @@ -356,7 +396,7 @@ qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2, struct qemu_plugin_cb *cb, *next; enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL; - if (!test_bit(ev, cpu->plugin_mask)) { + if (!test_bit(ev, cpu->plugin_state->event_mask)) { return; } @@ -378,7 +418,7 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret) struct qemu_plugin_cb *cb, *next; enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET; - if (!test_bit(ev, cpu->plugin_mask)) { + if (!test_bit(ev, cpu->plugin_state->event_mask)) { return; } @@ -391,12 +431,17 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret) void qemu_plugin_vcpu_idle_cb(CPUState *cpu) { - plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE); + /* idle and resume cb may be called before init, ignore in this case */ + if (cpu->cpu_index < plugin.num_vcpus) { + plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE); + } } void qemu_plugin_vcpu_resume_cb(CPUState *cpu) { - plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME); + if (cpu->cpu_index < plugin.num_vcpus) { + plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME); + } } void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id, @@ -431,9 +476,13 @@ void qemu_plugin_flush_cb(void) plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH); } -void exec_inline_op(struct qemu_plugin_dyn_cb *cb) +void exec_inline_op(struct qemu_plugin_dyn_cb *cb, int cpu_index) { - uint64_t *val = cb->userp; + char *ptr = cb->inline_insn.entry.score->data->data; + size_t elem_size = g_array_get_element_size( + 
cb->inline_insn.entry.score->data); + size_t offset = cb->inline_insn.entry.offset; + uint64_t *val = (uint64_t *)(ptr + offset + cpu_index * elem_size); switch (cb->inline_insn.op) { case QEMU_PLUGIN_INLINE_ADD_U64: @@ -466,7 +515,7 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, vaddr, cb->userp); break; case PLUGIN_CB_INLINE: - exec_inline_op(cb); + exec_inline_op(cb, cpu->cpu_index); break; default: g_assert_not_reached(); @@ -566,8 +615,39 @@ static void __attribute__((__constructor__)) plugin_init(void) qemu_rec_mutex_init(&plugin.lock); plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal); plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal); + QLIST_INIT(&plugin.scoreboards); + plugin.scoreboard_alloc_size = 16; /* avoid frequent reallocation */ QTAILQ_INIT(&plugin.ctxs); qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16, QHT_MODE_AUTO_RESIZE); atexit(qemu_plugin_atexit_cb); } + +int plugin_num_vcpus(void) +{ + return plugin.num_vcpus; +} + +struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size) +{ + struct qemu_plugin_scoreboard *score = + g_malloc0(sizeof(struct qemu_plugin_scoreboard)); + score->data = g_array_new(FALSE, TRUE, element_size); + g_array_set_size(score->data, plugin.scoreboard_alloc_size); + + qemu_rec_mutex_lock(&plugin.lock); + QLIST_INSERT_HEAD(&plugin.scoreboards, score, entry); + qemu_rec_mutex_unlock(&plugin.lock); + + return score; +} + +void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score) +{ + qemu_rec_mutex_lock(&plugin.lock); + QLIST_REMOVE(score, entry); + qemu_rec_mutex_unlock(&plugin.lock); + + g_array_free(score->data, TRUE); + g_free(score); +} diff --git a/plugins/loader.c b/plugins/loader.c index 734c11cae04..513a429c57d 100644 --- a/plugins/loader.c +++ b/plugins/loader.c @@ -33,7 +33,6 @@ #ifndef CONFIG_USER_ONLY #include "hw/boards.h" #endif -#include "qemu/compiler.h" #include "plugin.h" @@ -391,7 +390,7 @@ void plugin_reset_uninstall(qemu_plugin_id_t id, bool reset) { struct qemu_plugin_reset_data *data; - struct qemu_plugin_ctx *ctx; + struct qemu_plugin_ctx *ctx = NULL; WITH_QEMU_LOCK_GUARD(&plugin.lock) { ctx = plugin_id_to_ctx_locked(id); diff --git a/plugins/meson.build b/plugins/meson.build index 6b2d7a92926..51b4350c2a0 100644 --- a/plugins/meson.build +++ b/plugins/meson.build @@ -1,7 +1,7 @@ plugin_ldflags = [] # Modules need more symbols than just those in plugins/qemu-plugins.symbols if not enable_modules - if targetos == 'darwin' + if host_os == 'darwin' configure_file( input: files('qemu-plugins.symbols'), output: 'qemu-plugins-ld64.symbols', @@ -14,7 +14,7 @@ if not enable_modules endif if get_option('plugins') - if targetos == 'windows' + if host_os == 'windows' dlltool = find_program('dlltool', required: true) # Generate a .lib file for plugins to link against. diff --git a/plugins/plugin.h b/plugins/plugin.h index 5eb2fdbc85e..7c34f23cfcb 100644 --- a/plugins/plugin.h +++ b/plugins/plugin.h @@ -15,7 +15,7 @@ #include #include "qemu/qht.h" -#define QEMU_PLUGIN_MIN_VERSION 0 +#define QEMU_PLUGIN_MIN_VERSION 2 /* global state */ struct qemu_plugin_state { @@ -31,6 +31,8 @@ struct qemu_plugin_state { * but with the HT we avoid adding a field to CPUState. */ GHashTable *cpu_ht; + QLIST_HEAD(, qemu_plugin_scoreboard) scoreboards; + size_t scoreboard_alloc_size; DECLARE_BITMAP(mask, QEMU_PLUGIN_EV_MAX); /* * @lock protects the struct as well as ctx->uninstalling. @@ -44,6 +46,8 @@ struct qemu_plugin_state { * the code cache is flushed. 
diff --git a/plugins/plugin.h b/plugins/plugin.h
index 5eb2fdbc85e..7c34f23cfcb 100644
--- a/plugins/plugin.h
+++ b/plugins/plugin.h
@@ -15,7 +15,7 @@
 #include <gmodule.h>
 #include "qemu/qht.h"
 
-#define QEMU_PLUGIN_MIN_VERSION 0
+#define QEMU_PLUGIN_MIN_VERSION 2
 
 /* global state */
 struct qemu_plugin_state {
@@ -31,6 +31,8 @@ struct qemu_plugin_state {
      * but with the HT we avoid adding a field to CPUState.
      */
     GHashTable *cpu_ht;
+    QLIST_HEAD(, qemu_plugin_scoreboard) scoreboards;
+    size_t scoreboard_alloc_size;
     DECLARE_BITMAP(mask, QEMU_PLUGIN_EV_MAX);
     /*
      * @lock protects the struct as well as ctx->uninstalling.
@@ -44,6 +46,8 @@ struct qemu_plugin_state {
      * the code cache is flushed.
      */
     struct qht dyn_cb_arr_ht;
+    /* How many vcpus were started */
+    int num_vcpus;
 };
 
@@ -64,10 +68,11 @@ struct qemu_plugin_ctx {
 
 struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id);
 
-void plugin_register_inline_op(GArray **arr,
-                               enum qemu_plugin_mem_rw rw,
-                               enum qemu_plugin_op op, void *ptr,
-                               uint64_t imm);
+void plugin_register_inline_op_on_entry(GArray **arr,
+                                        enum qemu_plugin_mem_rw rw,
+                                        enum qemu_plugin_op op,
+                                        qemu_plugin_u64 entry,
+                                        uint64_t imm);
 
 void plugin_reset_uninstall(qemu_plugin_id_t id,
                             qemu_plugin_simple_cb_t cb,
@@ -95,6 +100,12 @@ void plugin_register_vcpu_mem_cb(GArray **arr,
                                  enum qemu_plugin_mem_rw rw,
                                  void *udata);
 
-void exec_inline_op(struct qemu_plugin_dyn_cb *cb);
+void exec_inline_op(struct qemu_plugin_dyn_cb *cb, int cpu_index);
+
+int plugin_num_vcpus(void);
+
+struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size);
+
+void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score);
 
 #endif /* PLUGIN_H */
diff --git a/plugins/qemu-plugins.symbols b/plugins/qemu-plugins.symbols
index 71f6c90549d..a9fac056c7f 100644
--- a/plugins/qemu-plugins.symbols
+++ b/plugins/qemu-plugins.symbols
@@ -3,6 +3,7 @@
   qemu_plugin_end_code;
   qemu_plugin_entry_code;
   qemu_plugin_get_hwaddr;
+  qemu_plugin_get_registers;
   qemu_plugin_hwaddr_device_name;
   qemu_plugin_hwaddr_is_io;
   qemu_plugin_hwaddr_phys_addr;
@@ -16,30 +17,37 @@
   qemu_plugin_mem_is_sign_extended;
   qemu_plugin_mem_is_store;
   qemu_plugin_mem_size_shift;
-  qemu_plugin_n_max_vcpus;
-  qemu_plugin_n_vcpus;
+  qemu_plugin_num_vcpus;
   qemu_plugin_outs;
   qemu_plugin_path_to_binary;
+  qemu_plugin_read_register;
   qemu_plugin_register_atexit_cb;
   qemu_plugin_register_flush_cb;
   qemu_plugin_register_vcpu_exit_cb;
   qemu_plugin_register_vcpu_idle_cb;
   qemu_plugin_register_vcpu_init_cb;
   qemu_plugin_register_vcpu_insn_exec_cb;
-  qemu_plugin_register_vcpu_insn_exec_inline;
+  qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu;
   qemu_plugin_register_vcpu_mem_cb;
-  qemu_plugin_register_vcpu_mem_inline;
+  qemu_plugin_register_vcpu_mem_inline_per_vcpu;
   qemu_plugin_register_vcpu_resume_cb;
   qemu_plugin_register_vcpu_syscall_cb;
   qemu_plugin_register_vcpu_syscall_ret_cb;
   qemu_plugin_register_vcpu_tb_exec_cb;
-  qemu_plugin_register_vcpu_tb_exec_inline;
+  qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu;
   qemu_plugin_register_vcpu_tb_trans_cb;
   qemu_plugin_reset;
+  qemu_plugin_scoreboard_free;
+  qemu_plugin_scoreboard_find;
+  qemu_plugin_scoreboard_new;
   qemu_plugin_start_code;
   qemu_plugin_tb_get_insn;
   qemu_plugin_tb_n_insns;
   qemu_plugin_tb_vaddr;
+  qemu_plugin_u64_add;
+  qemu_plugin_u64_get;
+  qemu_plugin_u64_set;
+  qemu_plugin_u64_sum;
   qemu_plugin_uninstall;
   qemu_plugin_vcpu_for_each;
 };
diff --git a/qapi/acpi.json b/qapi/acpi.json
index e0739bd6ae9..aa4dbe57943 100644
--- a/qapi/acpi.json
+++ b/qapi/acpi.json
@@ -113,12 +113,12 @@
 #
 # Example:
 #
-# -> { "execute": "query-acpi-ospm-status" }
-# <- { "return": [ { "device": "d1", "slot": "0", "slot-type": "DIMM", "source": 1, "status": 0},
-#      { "slot": "1", "slot-type": "DIMM", "source": 0, "status": 0},
-#      { "slot": "2", "slot-type": "DIMM", "source": 0, "status": 0},
-#      { "slot": "3", "slot-type": "DIMM", "source": 0, "status": 0}
-#    ]}
+# -> { "execute": "query-acpi-ospm-status" }
+# <- { "return": [ { "device": "d1", "slot": "0", "slot-type": "DIMM", "source": 1, "status": 0},
+#      { "slot": "1", "slot-type": "DIMM", "source": 0, "status": 0},
+#      { "slot": "2", "slot-type": "DIMM", "source": 0, "status": 0},
+#      { "slot": "3", "slot-type": "DIMM", "source": 0,
"status": 0} +# ]} ## { 'command': 'query-acpi-ospm-status', 'returns': ['ACPIOSTInfo'] } @@ -133,10 +133,10 @@ # # Example: # -# <- { "event": "ACPI_DEVICE_OST", -# "data": { "info": { "device": "d1", "slot": "0", -# "slot-type": "DIMM", "source": 1, "status": 0 } }, -# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } +# <- { "event": "ACPI_DEVICE_OST", +# "data": { "info": { "device": "d1", "slot": "0", +# "slot-type": "DIMM", "source": 1, "status": 0 } }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } ## { 'event': 'ACPI_DEVICE_OST', 'data': { 'info': 'ACPIOSTInfo' } } diff --git a/qapi/block-core.json b/qapi/block-core.json index ca390c57002..746d1694c25 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -196,6 +196,8 @@ ## # @ImageInfoSpecificQCow2Wrapper: # +# @data: image information specific to QCOW2 +# # Since: 1.7 ## { 'struct': 'ImageInfoSpecificQCow2Wrapper', @@ -204,6 +206,8 @@ ## # @ImageInfoSpecificVmdkWrapper: # +# @data: image information specific to VMDK +# # Since: 6.1 ## { 'struct': 'ImageInfoSpecificVmdkWrapper', @@ -212,6 +216,8 @@ ## # @ImageInfoSpecificLUKSWrapper: # +# @data: image information specific to LUKS +# # Since: 2.7 ## { 'struct': 'ImageInfoSpecificLUKSWrapper', @@ -223,6 +229,8 @@ ## # @ImageInfoSpecificRbdWrapper: # +# @data: image information specific to RBD +# # Since: 6.1 ## { 'struct': 'ImageInfoSpecificRbdWrapper', @@ -231,6 +239,8 @@ ## # @ImageInfoSpecificFileWrapper: # +# @data: image information specific to files +# # Since: 8.0 ## { 'struct': 'ImageInfoSpecificFileWrapper', @@ -242,6 +252,8 @@ # A discriminated record of image format specific information # structures. # +# @type: block driver name +# # Since: 1.7 ## { 'union': 'ImageInfoSpecific', @@ -656,9 +668,7 @@ # @bins: list of io request counts corresponding to histogram # intervals, one more element than @boundaries has. 
For the # example above, @bins may be something like [3, 1, 5, 2], and -# corresponding histogram looks like: -# -# :: +# corresponding histogram looks like:: # # 5| * # 4| * @@ -756,87 +766,87 @@ # # Example: # -# -> { "execute": "query-block" } -# <- { -# "return":[ -# { -# "io-status": "ok", -# "device":"ide0-hd0", -# "locked":false, -# "removable":false, -# "inserted":{ -# "ro":false, -# "drv":"qcow2", -# "encrypted":false, -# "file":"disks/test.qcow2", -# "backing_file_depth":1, -# "bps":1000000, -# "bps_rd":0, -# "bps_wr":0, -# "iops":1000000, -# "iops_rd":0, -# "iops_wr":0, -# "bps_max": 8000000, -# "bps_rd_max": 0, -# "bps_wr_max": 0, -# "iops_max": 0, -# "iops_rd_max": 0, -# "iops_wr_max": 0, -# "iops_size": 0, -# "detect_zeroes": "on", -# "write_threshold": 0, -# "image":{ -# "filename":"disks/test.qcow2", -# "format":"qcow2", -# "virtual-size":2048000, -# "backing_file":"base.qcow2", -# "full-backing-filename":"disks/base.qcow2", -# "backing-filename-format":"qcow2", -# "snapshots":[ -# { -# "id": "1", -# "name": "snapshot1", -# "vm-state-size": 0, -# "date-sec": 10000200, -# "date-nsec": 12, -# "vm-clock-sec": 206, -# "vm-clock-nsec": 30 -# } -# ], -# "backing-image":{ -# "filename":"disks/base.qcow2", +# -> { "execute": "query-block" } +# <- { +# "return":[ +# { +# "io-status": "ok", +# "device":"ide0-hd0", +# "locked":false, +# "removable":false, +# "inserted":{ +# "ro":false, +# "drv":"qcow2", +# "encrypted":false, +# "file":"disks/test.qcow2", +# "backing_file_depth":1, +# "bps":1000000, +# "bps_rd":0, +# "bps_wr":0, +# "iops":1000000, +# "iops_rd":0, +# "iops_wr":0, +# "bps_max": 8000000, +# "bps_rd_max": 0, +# "bps_wr_max": 0, +# "iops_max": 0, +# "iops_rd_max": 0, +# "iops_wr_max": 0, +# "iops_size": 0, +# "detect_zeroes": "on", +# "write_threshold": 0, +# "image":{ +# "filename":"disks/test.qcow2", # "format":"qcow2", -# "virtual-size":2048000 -# } -# } -# }, -# "qdev": "ide_disk", -# "type":"unknown" -# }, -# { -# "io-status": "ok", -# "device":"ide1-cd0", -# "locked":false, -# "removable":true, -# "qdev": "/machine/unattached/device[23]", -# "tray_open": false, -# "type":"unknown" -# }, -# { -# "device":"floppy0", -# "locked":false, -# "removable":true, -# "qdev": "/machine/unattached/device[20]", -# "type":"unknown" -# }, -# { -# "device":"sd0", -# "locked":false, -# "removable":true, -# "type":"unknown" -# } -# ] -# } +# "virtual-size":2048000, +# "backing_file":"base.qcow2", +# "full-backing-filename":"disks/base.qcow2", +# "backing-filename-format":"qcow2", +# "snapshots":[ +# { +# "id": "1", +# "name": "snapshot1", +# "vm-state-size": 0, +# "date-sec": 10000200, +# "date-nsec": 12, +# "vm-clock-sec": 206, +# "vm-clock-nsec": 30 +# } +# ], +# "backing-image":{ +# "filename":"disks/base.qcow2", +# "format":"qcow2", +# "virtual-size":2048000 +# } +# } +# }, +# "qdev": "ide_disk", +# "type":"unknown" +# }, +# { +# "io-status": "ok", +# "device":"ide1-cd0", +# "locked":false, +# "removable":true, +# "qdev": "/machine/unattached/device[23]", +# "tray_open": false, +# "type":"unknown" +# }, +# { +# "device":"floppy0", +# "locked":false, +# "removable":true, +# "qdev": "/machine/unattached/device[20]", +# "type":"unknown" +# }, +# { +# "device":"sd0", +# "locked":false, +# "removable":true, +# "type":"unknown" +# } +# ] +# } ## { 'command': 'query-block', 'returns': ['BlockInfo'], 'allow-preconfig': true } @@ -1094,6 +1104,8 @@ # # Block driver specific statistics # +# @driver: block driver name +# # Since: 4.2 ## { 'union': 'BlockStatsSpecific', @@ -1158,105 +1170,105 
@@ # # Example: # -# -> { "execute": "query-blockstats" } -# <- { -# "return":[ -# { -# "device":"ide0-hd0", -# "parent":{ -# "stats":{ -# "wr_highest_offset":3686448128, -# "wr_bytes":9786368, -# "wr_operations":751, -# "rd_bytes":122567168, -# "rd_operations":36772 -# "wr_total_times_ns":313253456 -# "rd_total_times_ns":3465673657 -# "flush_total_times_ns":49653 -# "flush_operations":61, -# "rd_merged":0, -# "wr_merged":0, -# "idle_time_ns":2953431879, -# "account_invalid":true, -# "account_failed":false -# } -# }, -# "stats":{ -# "wr_highest_offset":2821110784, -# "wr_bytes":9786368, -# "wr_operations":692, -# "rd_bytes":122739200, -# "rd_operations":36604 -# "flush_operations":51, -# "wr_total_times_ns":313253456 -# "rd_total_times_ns":3465673657 -# "flush_total_times_ns":49653, -# "rd_merged":0, -# "wr_merged":0, -# "idle_time_ns":2953431879, -# "account_invalid":true, -# "account_failed":false -# }, -# "qdev": "/machine/unattached/device[23]" -# }, -# { -# "device":"ide1-cd0", -# "stats":{ -# "wr_highest_offset":0, -# "wr_bytes":0, -# "wr_operations":0, -# "rd_bytes":0, -# "rd_operations":0 -# "flush_operations":0, -# "wr_total_times_ns":0 -# "rd_total_times_ns":0 -# "flush_total_times_ns":0, -# "rd_merged":0, -# "wr_merged":0, -# "account_invalid":false, -# "account_failed":false -# }, -# "qdev": "/machine/unattached/device[24]" -# }, -# { -# "device":"floppy0", -# "stats":{ -# "wr_highest_offset":0, -# "wr_bytes":0, -# "wr_operations":0, -# "rd_bytes":0, -# "rd_operations":0 -# "flush_operations":0, -# "wr_total_times_ns":0 -# "rd_total_times_ns":0 -# "flush_total_times_ns":0, -# "rd_merged":0, -# "wr_merged":0, -# "account_invalid":false, -# "account_failed":false -# }, -# "qdev": "/machine/unattached/device[16]" -# }, -# { -# "device":"sd0", -# "stats":{ -# "wr_highest_offset":0, -# "wr_bytes":0, -# "wr_operations":0, -# "rd_bytes":0, -# "rd_operations":0 -# "flush_operations":0, -# "wr_total_times_ns":0 -# "rd_total_times_ns":0 -# "flush_total_times_ns":0, -# "rd_merged":0, -# "wr_merged":0, -# "account_invalid":false, -# "account_failed":false -# } -# } -# ] -# } +# -> { "execute": "query-blockstats" } +# <- { +# "return":[ +# { +# "device":"ide0-hd0", +# "parent":{ +# "stats":{ +# "wr_highest_offset":3686448128, +# "wr_bytes":9786368, +# "wr_operations":751, +# "rd_bytes":122567168, +# "rd_operations":36772 +# "wr_total_times_ns":313253456 +# "rd_total_times_ns":3465673657 +# "flush_total_times_ns":49653 +# "flush_operations":61, +# "rd_merged":0, +# "wr_merged":0, +# "idle_time_ns":2953431879, +# "account_invalid":true, +# "account_failed":false +# } +# }, +# "stats":{ +# "wr_highest_offset":2821110784, +# "wr_bytes":9786368, +# "wr_operations":692, +# "rd_bytes":122739200, +# "rd_operations":36604 +# "flush_operations":51, +# "wr_total_times_ns":313253456 +# "rd_total_times_ns":3465673657 +# "flush_total_times_ns":49653, +# "rd_merged":0, +# "wr_merged":0, +# "idle_time_ns":2953431879, +# "account_invalid":true, +# "account_failed":false +# }, +# "qdev": "/machine/unattached/device[23]" +# }, +# { +# "device":"ide1-cd0", +# "stats":{ +# "wr_highest_offset":0, +# "wr_bytes":0, +# "wr_operations":0, +# "rd_bytes":0, +# "rd_operations":0 +# "flush_operations":0, +# "wr_total_times_ns":0 +# "rd_total_times_ns":0 +# "flush_total_times_ns":0, +# "rd_merged":0, +# "wr_merged":0, +# "account_invalid":false, +# "account_failed":false +# }, +# "qdev": "/machine/unattached/device[24]" +# }, +# { +# "device":"floppy0", +# "stats":{ +# "wr_highest_offset":0, +# "wr_bytes":0, +# 
"wr_operations":0, +# "rd_bytes":0, +# "rd_operations":0 +# "flush_operations":0, +# "wr_total_times_ns":0 +# "rd_total_times_ns":0 +# "flush_total_times_ns":0, +# "rd_merged":0, +# "wr_merged":0, +# "account_invalid":false, +# "account_failed":false +# }, +# "qdev": "/machine/unattached/device[16]" +# }, +# { +# "device":"sd0", +# "stats":{ +# "wr_highest_offset":0, +# "wr_bytes":0, +# "wr_operations":0, +# "rd_bytes":0, +# "rd_operations":0 +# "flush_operations":0, +# "wr_total_times_ns":0 +# "rd_total_times_ns":0 +# "flush_total_times_ns":0, +# "rd_merged":0, +# "wr_merged":0, +# "account_invalid":false, +# "account_failed":false +# } +# } +# ] +# } ## { 'command': 'query-blockstats', 'data': { '*query-nodes': 'bool' }, @@ -1361,7 +1373,7 @@ # target, i.e. same data and new writes are done synchronously to # both. # -# Since 8.2 +# Since: 8.2 ## { 'struct': 'BlockJobInfoMirror', 'data': { 'actively-synced': 'bool' } } @@ -1444,17 +1456,16 @@ # # @size: new image size in bytes # -# Returns: -# - nothing on success +# Errors: # - If @device is not a valid block device, DeviceNotFound # # Since: 0.14 # # Example: # -# -> { "execute": "block_resize", -# "arguments": { "device": "scratch", "size": 1073741824 } } -# <- { "return": {} } +# -> { "execute": "block_resize", +# "arguments": { "device": "scratch", "size": 1073741824 } } +# <- { "return": {} } ## { 'command': 'block_resize', 'data': { '*device': 'str', @@ -1662,20 +1673,19 @@ # # For the arguments, see the documentation of BlockdevSnapshotSync. # -# Returns: -# - nothing on success +# Errors: # - If @device is not a valid block device, DeviceNotFound # # Since: 0.14 # # Example: # -# -> { "execute": "blockdev-snapshot-sync", -# "arguments": { "device": "ide-hd0", -# "snapshot-file": -# "/some/place/my-image", -# "format": "qcow2" } } -# <- { "return": {} } +# -> { "execute": "blockdev-snapshot-sync", +# "arguments": { "device": "ide-hd0", +# "snapshot-file": +# "/some/place/my-image", +# "format": "qcow2" } } +# <- { "return": {} } ## { 'command': 'blockdev-snapshot-sync', 'data': 'BlockdevSnapshotSync', @@ -1703,19 +1713,19 @@ # # Example: # -# -> { "execute": "blockdev-add", -# "arguments": { "driver": "qcow2", -# "node-name": "node1534", -# "file": { "driver": "file", -# "filename": "hd1.qcow2" }, -# "backing": null } } +# -> { "execute": "blockdev-add", +# "arguments": { "driver": "qcow2", +# "node-name": "node1534", +# "file": { "driver": "file", +# "filename": "hd1.qcow2" }, +# "backing": null } } # -# <- { "return": {} } +# <- { "return": {} } # -# -> { "execute": "blockdev-snapshot", -# "arguments": { "node": "ide-hd0", -# "overlay": "node1534" } } -# <- { "return": {} } +# -> { "execute": "blockdev-snapshot", +# "arguments": { "node": "ide-hd0", +# "overlay": "node1534" } } +# <- { "return": {} } ## { 'command': 'blockdev-snapshot', 'data': 'BlockdevSnapshot', @@ -1742,8 +1752,7 @@ # is not validated, so care should be taken when specifying the # string or the image chain may not be able to be reopened again. # -# Returns: -# - Nothing on success +# Errors: # - If "device" does not exist or cannot be determined, # DeviceNotFound # @@ -1810,6 +1819,11 @@ # Care should be taken when specifying the string, to specify a # valid filename or protocol. (Since 2.1) # +# @backing-mask-protocol: If true, replace any protocol mentioned in +# the 'backing file format' with 'raw', rather than storing the +# protocol name as the backing format. Can be used even when no +# image header will be updated (default false; since 9.0). 
+# # @speed: the maximum speed, in bytes per second # # @on-error: the action to take on an error. 'ignore' means that the @@ -1837,8 +1851,7 @@ # @deprecated: Members @base and @top are deprecated. Use @base-node # and @top-node instead. # -# Returns: -# - Nothing on success +# Errors: # - If @device does not exist, DeviceNotFound # - Any other error returns a GenericError. # @@ -1846,17 +1859,18 @@ # # Example: # -# -> { "execute": "block-commit", -# "arguments": { "device": "virtio0", -# "top": "/tmp/snap1.qcow2" } } -# <- { "return": {} } +# -> { "execute": "block-commit", +# "arguments": { "device": "virtio0", +# "top": "/tmp/snap1.qcow2" } } +# <- { "return": {} } ## { 'command': 'block-commit', 'data': { '*job-id': 'str', 'device': 'str', '*base-node': 'str', '*base': { 'type': 'str', 'features': [ 'deprecated' ] }, '*top-node': 'str', '*top': { 'type': 'str', 'features': [ 'deprecated' ] }, - '*backing-file': 'str', '*speed': 'int', + '*backing-file': 'str', '*backing-mask-protocol': 'bool', + '*speed': 'int', '*on-error': 'BlockdevOnError', '*filter-node-name': 'str', '*auto-finalize': 'bool', '*auto-dismiss': 'bool' }, @@ -1876,19 +1890,18 @@ # @deprecated: This command is deprecated. Use @blockdev-backup # instead. # -# Returns: -# - nothing on success +# Errors: # - If @device is not a valid block device, GenericError # # Since: 1.6 # # Example: # -# -> { "execute": "drive-backup", -# "arguments": { "device": "drive0", -# "sync": "full", -# "target": "backup.img" } } -# <- { "return": {} } +# -> { "execute": "drive-backup", +# "arguments": { "device": "drive0", +# "sync": "full", +# "target": "backup.img" } } +# <- { "return": {} } ## { 'command': 'drive-backup', 'boxed': true, 'data': 'DriveBackup', 'features': ['deprecated'], @@ -1903,19 +1916,18 @@ # 'backup'. The operation can be stopped before it has completed using # the block-job-cancel command. 
# -# Returns: -# - nothing on success +# Errors: # - If @device is not a valid block device, DeviceNotFound # # Since: 2.3 # # Example: # -# -> { "execute": "blockdev-backup", -# "arguments": { "device": "src-id", -# "sync": "full", -# "target": "tgt-id" } } -# <- { "return": {} } +# -> { "execute": "blockdev-backup", +# "arguments": { "device": "src-id", +# "sync": "full", +# "target": "tgt-id" } } +# <- { "return": {} } ## { 'command': 'blockdev-backup', 'boxed': true, 'data': 'BlockdevBackup', @@ -1935,52 +1947,52 @@ # # Example: # -# -> { "execute": "query-named-block-nodes" } -# <- { "return": [ { "ro":false, -# "drv":"qcow2", -# "encrypted":false, -# "file":"disks/test.qcow2", -# "node-name": "my-node", -# "backing_file_depth":1, -# "detect_zeroes":"off", -# "bps":1000000, -# "bps_rd":0, -# "bps_wr":0, -# "iops":1000000, -# "iops_rd":0, -# "iops_wr":0, -# "bps_max": 8000000, -# "bps_rd_max": 0, -# "bps_wr_max": 0, -# "iops_max": 0, -# "iops_rd_max": 0, -# "iops_wr_max": 0, -# "iops_size": 0, -# "write_threshold": 0, -# "image":{ -# "filename":"disks/test.qcow2", -# "format":"qcow2", -# "virtual-size":2048000, -# "backing_file":"base.qcow2", -# "full-backing-filename":"disks/base.qcow2", -# "backing-filename-format":"qcow2", -# "snapshots":[ -# { -# "id": "1", -# "name": "snapshot1", -# "vm-state-size": 0, -# "date-sec": 10000200, -# "date-nsec": 12, -# "vm-clock-sec": 206, -# "vm-clock-nsec": 30 -# } -# ], -# "backing-image":{ -# "filename":"disks/base.qcow2", +# -> { "execute": "query-named-block-nodes" } +# <- { "return": [ { "ro":false, +# "drv":"qcow2", +# "encrypted":false, +# "file":"disks/test.qcow2", +# "node-name": "my-node", +# "backing_file_depth":1, +# "detect_zeroes":"off", +# "bps":1000000, +# "bps_rd":0, +# "bps_wr":0, +# "iops":1000000, +# "iops_rd":0, +# "iops_wr":0, +# "bps_max": 8000000, +# "bps_rd_max": 0, +# "bps_wr_max": 0, +# "iops_max": 0, +# "iops_rd_max": 0, +# "iops_wr_max": 0, +# "iops_size": 0, +# "write_threshold": 0, +# "image":{ +# "filename":"disks/test.qcow2", # "format":"qcow2", -# "virtual-size":2048000 -# } -# } } ] } +# "virtual-size":2048000, +# "backing_file":"base.qcow2", +# "full-backing-filename":"disks/base.qcow2", +# "backing-filename-format":"qcow2", +# "snapshots":[ +# { +# "id": "1", +# "name": "snapshot1", +# "vm-state-size": 0, +# "date-sec": 10000200, +# "date-nsec": 12, +# "vm-clock-sec": 206, +# "vm-clock-nsec": 30 +# } +# ], +# "backing-image":{ +# "filename":"disks/base.qcow2", +# "format":"qcow2", +# "virtual-size":2048000 +# } +# } } ] } ## { 'command': 'query-named-block-nodes', 'returns': [ 'BlockDeviceInfo' ], @@ -2105,24 +2117,23 @@ # Start mirroring a block device's writes to a new destination. # target specifies the target of the new image. If the file exists, # or if it is a device, it will be used as the new destination for -# writes. If it does not exist, a new file will be created. format +# writes. If it does not exist, a new file will be created. @format # specifies the format of the mirror image, default is to probe if # mode='existing', else the format of the source. 
# -# Returns: -# - nothing on success +# Errors: # - If @device is not a valid block device, GenericError # # Since: 1.3 # # Example: # -# -> { "execute": "drive-mirror", -# "arguments": { "device": "ide-hd0", -# "target": "/some/place/my-image", -# "sync": "full", -# "format": "qcow2" } } -# <- { "return": {} } +# -> { "execute": "drive-mirror", +# "arguments": { "device": "ide-hd0", +# "target": "/some/place/my-image", +# "sync": "full", +# "format": "qcow2" } } +# <- { "return": {} } ## { 'command': 'drive-mirror', 'boxed': true, 'data': 'DriveMirror', @@ -2286,8 +2297,7 @@ # Create a dirty bitmap with a name on the node, and start tracking # the writes. # -# Returns: -# - nothing on success +# Errors: # - If @node is not a valid block device or node, DeviceNotFound # - If @name is already taken, GenericError with an explanation # @@ -2295,9 +2305,9 @@ # # Example: # -# -> { "execute": "block-dirty-bitmap-add", -# "arguments": { "node": "drive0", "name": "bitmap0" } } -# <- { "return": {} } +# -> { "execute": "block-dirty-bitmap-add", +# "arguments": { "node": "drive0", "name": "bitmap0" } } +# <- { "return": {} } ## { 'command': 'block-dirty-bitmap-add', 'data': 'BlockDirtyBitmapAdd', @@ -2310,8 +2320,7 @@ # with block-dirty-bitmap-add. If the bitmap is persistent, remove it # from its storage too. # -# Returns: -# - nothing on success +# Errors: # - If @node is not a valid block device or node, DeviceNotFound # - If @name is not found, GenericError with an explanation # - if @name is frozen by an operation, GenericError @@ -2320,9 +2329,9 @@ # # Example: # -# -> { "execute": "block-dirty-bitmap-remove", -# "arguments": { "node": "drive0", "name": "bitmap0" } } -# <- { "return": {} } +# -> { "execute": "block-dirty-bitmap-remove", +# "arguments": { "node": "drive0", "name": "bitmap0" } } +# <- { "return": {} } ## { 'command': 'block-dirty-bitmap-remove', 'data': 'BlockDirtyBitmap', @@ -2335,8 +2344,7 @@ # backup from this point in time forward will only backup clusters # modified after this clear operation. # -# Returns: -# - nothing on success +# Errors: # - If @node is not a valid block device, DeviceNotFound # - If @name is not found, GenericError with an explanation # @@ -2344,9 +2352,9 @@ # # Example: # -# -> { "execute": "block-dirty-bitmap-clear", -# "arguments": { "node": "drive0", "name": "bitmap0" } } -# <- { "return": {} } +# -> { "execute": "block-dirty-bitmap-clear", +# "arguments": { "node": "drive0", "name": "bitmap0" } } +# <- { "return": {} } ## { 'command': 'block-dirty-bitmap-clear', 'data': 'BlockDirtyBitmap', @@ -2357,8 +2365,7 @@ # # Enables a dirty bitmap so that it will begin tracking disk changes. # -# Returns: -# - nothing on success +# Errors: # - If @node is not a valid block device, DeviceNotFound # - If @name is not found, GenericError with an explanation # @@ -2366,9 +2373,9 @@ # # Example: # -# -> { "execute": "block-dirty-bitmap-enable", -# "arguments": { "node": "drive0", "name": "bitmap0" } } -# <- { "return": {} } +# -> { "execute": "block-dirty-bitmap-enable", +# "arguments": { "node": "drive0", "name": "bitmap0" } } +# <- { "return": {} } ## { 'command': 'block-dirty-bitmap-enable', 'data': 'BlockDirtyBitmap', @@ -2379,8 +2386,7 @@ # # Disables a dirty bitmap so that it will stop tracking disk changes. 
# -# Returns: -# - nothing on success +# Errors: # - If @node is not a valid block device, DeviceNotFound # - If @name is not found, GenericError with an explanation # @@ -2388,9 +2394,9 @@ # # Example: # -# -> { "execute": "block-dirty-bitmap-disable", -# "arguments": { "node": "drive0", "name": "bitmap0" } } -# <- { "return": {} } +# -> { "execute": "block-dirty-bitmap-disable", +# "arguments": { "node": "drive0", "name": "bitmap0" } } +# <- { "return": {} } ## { 'command': 'block-dirty-bitmap-disable', 'data': 'BlockDirtyBitmap', @@ -2409,8 +2415,7 @@ # dirty in any of the source bitmaps. This can be used to achieve # backup checkpoints, or in simpler usages, to copy bitmaps. # -# Returns: -# - nothing on success +# Errors: # - If @node is not a valid block device, DeviceNotFound # - If any bitmap in @bitmaps or @target is not found, # GenericError @@ -2421,10 +2426,10 @@ # # Example: # -# -> { "execute": "block-dirty-bitmap-merge", -# "arguments": { "node": "drive0", "target": "bitmap0", -# "bitmaps": ["bitmap1"] } } -# <- { "return": {} } +# -> { "execute": "block-dirty-bitmap-merge", +# "arguments": { "node": "drive0", "target": "bitmap0", +# "bitmaps": ["bitmap1"] } } +# <- { "return": {} } ## { 'command': 'block-dirty-bitmap-merge', 'data': 'BlockDirtyBitmapMerge', @@ -2452,7 +2457,9 @@ # @unstable: This command is meant for debugging. # # Returns: -# - BlockDirtyBitmapSha256 on success +# BlockDirtyBitmapSha256 +# +# Errors: # - If @node is not a valid block device, DeviceNotFound # - If @name is not found or if hashing has failed, GenericError # with an explanation @@ -2524,17 +2531,15 @@ # disappear from the query list without user intervention. # Defaults to true. (Since 3.1) # -# Returns: nothing on success. -# # Since: 2.6 # # Example: # -# -> { "execute": "blockdev-mirror", -# "arguments": { "device": "ide-hd0", -# "target": "target0", -# "sync": "full" } } -# <- { "return": {} } +# -> { "execute": "blockdev-mirror", +# "arguments": { "device": "ide-hd0", +# "target": "target0", +# "sync": "full" } } +# <- { "return": {} } ## { 'command': 'blockdev-mirror', 'data': { '*job-id': 'str', 'device': 'str', 'target': 'str', @@ -2588,27 +2593,27 @@ # # @bps_max_length: maximum length of the @bps_max burst period, in # seconds. It must only be set if @bps_max is set as well. -# Defaults to 1. (Since 2.6) +# Defaults to 1. (Since 2.6) # # @bps_rd_max_length: maximum length of the @bps_rd_max burst period, # in seconds. It must only be set if @bps_rd_max is set as well. -# Defaults to 1. (Since 2.6) +# Defaults to 1. (Since 2.6) # # @bps_wr_max_length: maximum length of the @bps_wr_max burst period, # in seconds. It must only be set if @bps_wr_max is set as well. -# Defaults to 1. (Since 2.6) +# Defaults to 1. (Since 2.6) # # @iops_max_length: maximum length of the @iops burst period, in # seconds. It must only be set if @iops_max is set as well. -# Defaults to 1. (Since 2.6) +# Defaults to 1. (Since 2.6) # # @iops_rd_max_length: maximum length of the @iops_rd_max burst # period, in seconds. It must only be set if @iops_rd_max is set -# as well. Defaults to 1. (Since 2.6) +# as well. Defaults to 1. (Since 2.6) # # @iops_wr_max_length: maximum length of the @iops_wr_max burst # period, in seconds. It must only be set if @iops_wr_max is set -# as well. Defaults to 1. (Since 2.6) +# as well. Defaults to 1. (Since 2.6) # # @iops_size: an I/O size in bytes (Since 1.7) # @@ -2820,6 +2825,11 @@ # Care should be taken when specifying the string, to specify a # valid filename or protocol. 
(Since 2.1) # +# @backing-mask-protocol: If true, replace any protocol mentioned in +# the 'backing file format' with 'raw', rather than storing the +# protocol name as the backing format. Can be used even when no +# image header will be updated (default false; since 9.0). +# # @speed: the maximum speed, in bytes per second # # @on-error: the action to take on an error (default report). 'stop' @@ -2843,22 +2853,23 @@ # disappear from the query list without user intervention. # Defaults to true. (Since 3.1) # -# Returns: -# - Nothing on success. +# Errors: # - If @device does not exist, DeviceNotFound. # # Since: 1.1 # # Example: # -# -> { "execute": "block-stream", -# "arguments": { "device": "virtio0", -# "base": "/tmp/master.qcow2" } } -# <- { "return": {} } +# -> { "execute": "block-stream", +# "arguments": { "device": "virtio0", +# "base": "/tmp/master.qcow2" } } +# <- { "return": {} } ## { 'command': 'block-stream', 'data': { '*job-id': 'str', 'device': 'str', '*base': 'str', - '*base-node': 'str', '*backing-file': 'str', '*bottom': 'str', + '*base-node': 'str', '*backing-file': 'str', + '*backing-mask-protocol': 'bool', + '*bottom': 'str', '*speed': 'int', '*on-error': 'BlockdevOnError', '*filter-node-name': 'str', '*auto-finalize': 'bool', '*auto-dismiss': 'bool' }, @@ -2880,8 +2891,7 @@ # @speed: the maximum speed, in bytes per second, or 0 for unlimited. # Defaults to 0. # -# Returns: -# - Nothing on success +# Errors: # - If no background operation is active on this device, # DeviceNotActive # @@ -2925,8 +2935,7 @@ # paused) instead of waiting for the destination to complete its # final synchronization (since 1.3) # -# Returns: -# - Nothing on success +# Errors: # - If no background operation is active on this device, # DeviceNotActive # @@ -2952,8 +2961,7 @@ # the name of the parameter), but since QEMU 2.7 it can have other # values. # -# Returns: -# - Nothing on success +# Errors: # - If no background operation is active on this device, # DeviceNotActive # @@ -2977,8 +2985,7 @@ # the name of the parameter), but since QEMU 2.7 it can have other # values. # -# Returns: -# - Nothing on success +# Errors: # - If no background operation is active on this device, # DeviceNotActive # @@ -3009,8 +3016,7 @@ # the name of the parameter), but since QEMU 2.7 it can have other # values. # -# Returns: -# - Nothing on success +# Errors: # - If no background operation is active on this device, # DeviceNotActive # @@ -3034,8 +3040,6 @@ # # @id: The job identifier. # -# Returns: Nothing on success -# # Since: 2.12 ## { 'command': 'block-job-dismiss', 'data': { 'id': 'str' }, @@ -3053,8 +3057,6 @@ # # @id: The job identifier. # -# Returns: Nothing on success -# # Since: 2.12 ## { 'command': 'block-job-finalize', 'data': { 'id': 'str' }, @@ -3080,7 +3082,7 @@ # # @type: The job type # -# Since 8.2 +# Since: 8.2 ## { 'union': 'BlockJobChangeOptions', 'base': { 'id': 'str', 'type': 'JobType' }, @@ -3352,11 +3354,14 @@ # decryption key (since 2.6). Mandatory except when doing a # metadata-only probe of the image. # +# @header: block device holding a detached LUKS header. (since 9.0) +# # Since: 2.9 ## { 'struct': 'BlockdevOptionsLUKS', 'base': 'BlockdevOptionsGenericFormat', - 'data': { '*key-secret': 'str' } } + 'data': { '*key-secret': 'str', + '*header': 'BlockdevRef'} } ## # @BlockdevOptionsGenericCOWFormat: @@ -3398,14 +3403,31 @@ # @Qcow2OverlapCheckFlags: # # Structure of flags for each metadata structure. 
Setting a field to -# 'true' makes qemu guard that structure against unintended -# overwriting. The default value is chosen according to the template -# given. +# 'true' makes QEMU guard that Qcow2 format structure against +# unintended overwriting. See Qcow2 format specification for detailed +# information on these structures. The default value is chosen +# according to the template given. # # @template: Specifies a template mode which can be adjusted using the # other flags, defaults to 'cached' # -# @bitmap-directory: since 3.0 +# @main-header: Qcow2 format header +# +# @active-l1: Qcow2 active L1 table +# +# @active-l2: Qcow2 active L2 table +# +# @refcount-table: Qcow2 refcount table +# +# @refcount-block: Qcow2 refcount blocks +# +# @snapshot-table: Qcow2 snapshot table +# +# @inactive-l1: Qcow2 inactive L1 tables +# +# @inactive-l2: Qcow2 inactive L2 tables +# +# @bitmap-directory: Qcow2 bitmap directory (since 3.0) # # Since: 2.9 ## @@ -3451,6 +3473,8 @@ ## # @BlockdevQcowEncryption: # +# @format: encryption format +# # Since: 2.10 ## { 'union': 'BlockdevQcowEncryption', @@ -3485,6 +3509,8 @@ ## # @BlockdevQcow2Encryption: # +# @format: encryption format +# # Since: 2.10 ## { 'union': 'BlockdevQcow2Encryption', @@ -3538,10 +3564,10 @@ # re-allocating them later. Besides potential performance # degradation, such fragmentation can lead to increased allocation # of clusters past the end of the image file, resulting in image -# files whose file length can grow much larger than their guest disk -# size would suggest. If image file length is of concern (e.g. when -# storing qcow2 images directly on block devices), you should -# consider enabling this option. (since 8.1) +# files whose file length can grow much larger than their guest +# disk size would suggest. If image file length is of concern +# (e.g. when storing qcow2 images directly on block devices), you +# should consider enabling this option. (since 8.1) # # @overlap-check: which overlap checks to perform for writes to the # image, defaults to 'cached' (since 2.2) @@ -3635,6 +3661,8 @@ ## # @SshHostKeyCheck: # +# @mode: How to check the host key +# # Since: 2.12 ## { 'union': 'SshHostKeyCheck', @@ -4057,6 +4085,8 @@ ## # @BlockdevOptionsIscsi: # +# Driver specific block device options for iscsi +# # @transport: The iscsi transport type # # @portal: The address of the iscsi portal @@ -4081,8 +4111,6 @@ # @timeout: Timeout in seconds after which a request will timeout. 0 # means no timeout and is the default. # -# Driver specific block device options for iscsi -# # Since: 2.9 ## { 'struct': 'BlockdevOptionsIscsi', @@ -4204,6 +4232,8 @@ ## # @RbdEncryptionCreateOptions: # +# @format: Encryption format. +# # Since: 6.1 ## { 'union': 'RbdEncryptionCreateOptions', @@ -4606,7 +4636,7 @@ # seconds for copy-before-write operation. When a timeout occurs, # the respective copy-before-write operation will fail, and the # @on-cbw-error parameter will decide how this failure is handled. -# Default 0. (Since 7.1) +# Default 0. (Since 7.1) # # Since: 6.2 ## @@ -4652,8 +4682,6 @@ # @force-share: force share all permission on added nodes. Requires # read-only=true. (Since 2.10) # -# Remaining options are determined by the block driver. 
-# # Since: 2.9 ## { 'union': 'BlockdevOptions', @@ -4771,41 +4799,41 @@ # # Examples: # -# -> { "execute": "blockdev-add", -# "arguments": { -# "driver": "qcow2", -# "node-name": "test1", -# "file": { -# "driver": "file", -# "filename": "test.qcow2" -# } -# } -# } -# <- { "return": {} } -# -# -> { "execute": "blockdev-add", -# "arguments": { -# "driver": "qcow2", -# "node-name": "node0", -# "discard": "unmap", -# "cache": { -# "direct": true -# }, -# "file": { -# "driver": "file", -# "filename": "/tmp/test.qcow2" -# }, -# "backing": { -# "driver": "raw", +# -> { "execute": "blockdev-add", +# "arguments": { +# "driver": "qcow2", +# "node-name": "test1", # "file": { +# "driver": "file", +# "filename": "test.qcow2" +# } +# } +# } +# <- { "return": {} } +# +# -> { "execute": "blockdev-add", +# "arguments": { +# "driver": "qcow2", +# "node-name": "node0", +# "discard": "unmap", +# "cache": { +# "direct": true +# }, +# "file": { # "driver": "file", -# "filename": "/dev/fdset/4" +# "filename": "/tmp/test.qcow2" +# }, +# "backing": { +# "driver": "raw", +# "file": { +# "driver": "file", +# "filename": "/dev/fdset/4" +# } # } # } -# } -# } +# } # -# <- { "return": {} } +# <- { "return": {} } ## { 'command': 'blockdev-add', 'data': 'BlockdevOptions', 'boxed': true, 'allow-preconfig': true } @@ -4869,22 +4897,22 @@ # # Example: # -# -> { "execute": "blockdev-add", -# "arguments": { -# "driver": "qcow2", -# "node-name": "node0", -# "file": { -# "driver": "file", -# "filename": "test.qcow2" -# } -# } -# } -# <- { "return": {} } +# -> { "execute": "blockdev-add", +# "arguments": { +# "driver": "qcow2", +# "node-name": "node0", +# "file": { +# "driver": "file", +# "filename": "test.qcow2" +# } +# } +# } +# <- { "return": {} } # -# -> { "execute": "blockdev-del", -# "arguments": { "node-name": "node0" } -# } -# <- { "return": {} } +# -> { "execute": "blockdev-del", +# "arguments": { "node-name": "node0" } +# } +# <- { "return": {} } ## { 'command': 'blockdev-del', 'data': { 'node-name': 'str' }, 'allow-preconfig': true } @@ -4941,7 +4969,10 @@ # # Driver specific image creation options for LUKS. # -# @file: Node to create the image format on +# @file: Node to create the image format on, mandatory except when +# 'preallocation' is not requested +# +# @header: Block device holding a detached LUKS header. 
(since 9.0) # # @size: Size of the virtual disk in bytes # @@ -4952,7 +4983,8 @@ ## { 'struct': 'BlockdevCreateOptionsLUKS', 'base': 'QCryptoBlockCreateOptionsLUKS', - 'data': { 'file': 'BlockdevRef', + 'data': { '*file': 'BlockdevRef', + '*header': 'BlockdevRef', 'size': 'size', '*preallocation': 'PreallocMode' } } @@ -5514,10 +5546,10 @@ # # Example: # -# <- { "event": "BLOCK_IMAGE_CORRUPTED", -# "data": { "device": "", "node-name": "drive", "fatal": false, -# "msg": "L2 table offset 0x2a2a2a00 unaligned (L1 index: 0)" }, -# "timestamp": { "seconds": 1648243240, "microseconds": 906060 } } +# <- { "event": "BLOCK_IMAGE_CORRUPTED", +# "data": { "device": "", "node-name": "drive", "fatal": false, +# "msg": "L2 table offset 0x2a2a2a00 unaligned (L1 index: 0)" }, +# "timestamp": { "seconds": 1648243240, "microseconds": 906060 } } # # Since: 1.7 ## @@ -5563,13 +5595,13 @@ # # Example: # -# <- { "event": "BLOCK_IO_ERROR", -# "data": { "device": "ide0-hd1", -# "node-name": "#block212", -# "operation": "write", -# "action": "stop", -# "reason": "No space left on device" }, -# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } +# <- { "event": "BLOCK_IO_ERROR", +# "data": { "device": "ide0-hd1", +# "node-name": "#block212", +# "operation": "write", +# "action": "stop", +# "reason": "No space left on device" }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } ## { 'event': 'BLOCK_IO_ERROR', 'data': { 'device': 'str', '*node-name': 'str', @@ -5603,11 +5635,11 @@ # # Example: # -# <- { "event": "BLOCK_JOB_COMPLETED", -# "data": { "type": "stream", "device": "virtio-disk0", -# "len": 10737418240, "offset": 10737418240, -# "speed": 0 }, -# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } +# <- { "event": "BLOCK_JOB_COMPLETED", +# "data": { "type": "stream", "device": "virtio-disk0", +# "len": 10737418240, "offset": 10737418240, +# "speed": 0 }, +# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } ## { 'event': 'BLOCK_JOB_COMPLETED', 'data': { 'type' : 'JobType', @@ -5638,11 +5670,11 @@ # # Example: # -# <- { "event": "BLOCK_JOB_CANCELLED", -# "data": { "type": "stream", "device": "virtio-disk0", -# "len": 10737418240, "offset": 134217728, -# "speed": 0 }, -# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } +# <- { "event": "BLOCK_JOB_CANCELLED", +# "data": { "type": "stream", "device": "virtio-disk0", +# "len": 10737418240, "offset": 134217728, +# "speed": 0 }, +# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } ## { 'event': 'BLOCK_JOB_CANCELLED', 'data': { 'type' : 'JobType', @@ -5667,11 +5699,11 @@ # # Example: # -# <- { "event": "BLOCK_JOB_ERROR", -# "data": { "device": "ide0-hd1", -# "operation": "write", -# "action": "stop" }, -# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } +# <- { "event": "BLOCK_JOB_ERROR", +# "data": { "device": "ide0-hd1", +# "operation": "write", +# "action": "stop" }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } ## { 'event': 'BLOCK_JOB_ERROR', 'data': { 'device' : 'str', @@ -5702,10 +5734,10 @@ # # Example: # -# <- { "event": "BLOCK_JOB_READY", -# "data": { "device": "drive0", "type": "mirror", "speed": 0, -# "len": 2097152, "offset": 2097152 }, -# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } +# <- { "event": "BLOCK_JOB_READY", +# "data": { "device": "drive0", "type": "mirror", "speed": 0, +# "len": 2097152, "offset": 2097152 }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } ## { 'event': 
'BLOCK_JOB_READY', 'data': { 'type' : 'JobType', @@ -5730,9 +5762,9 @@ # # Example: # -# <- { "event": "BLOCK_JOB_PENDING", -# "data": { "type": "mirror", "id": "backup_1" }, -# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } +# <- { "event": "BLOCK_JOB_PENDING", +# "data": { "type": "mirror", "id": "backup_1" }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } ## { 'event': 'BLOCK_JOB_PENDING', 'data': { 'type' : 'JobType', @@ -5804,10 +5836,10 @@ # # Example: # -# -> { "execute": "block-set-write-threshold", -# "arguments": { "node-name": "mydev", -# "write-threshold": 17179869184 } } -# <- { "return": {} } +# -> { "execute": "block-set-write-threshold", +# "arguments": { "node-name": "mydev", +# "write-threshold": 17179869184 } } +# <- { "return": {} } ## { 'command': 'block-set-write-threshold', 'data': { 'node-name': 'str', 'write-threshold': 'uint64' }, @@ -5849,26 +5881,26 @@ # # Examples: # -# 1. Add a new node to a quorum +# 1. Add a new node to a quorum # -# -> { "execute": "blockdev-add", -# "arguments": { -# "driver": "raw", -# "node-name": "new_node", -# "file": { "driver": "file", -# "filename": "test.raw" } } } -# <- { "return": {} } -# -> { "execute": "x-blockdev-change", -# "arguments": { "parent": "disk1", -# "node": "new_node" } } -# <- { "return": {} } +# -> { "execute": "blockdev-add", +# "arguments": { +# "driver": "raw", +# "node-name": "new_node", +# "file": { "driver": "file", +# "filename": "test.raw" } } } +# <- { "return": {} } +# -> { "execute": "x-blockdev-change", +# "arguments": { "parent": "disk1", +# "node": "new_node" } } +# <- { "return": {} } # -# 2. Delete a quorum's node +# 2. Delete a quorum's node # -# -> { "execute": "x-blockdev-change", -# "arguments": { "parent": "disk1", -# "child": "children.1" } } -# <- { "return": {} } +# -> { "execute": "x-blockdev-change", +# "arguments": { "parent": "disk1", +# "child": "children.1" } } +# <- { "return": {} } ## { 'command': 'x-blockdev-change', 'data' : { 'parent': 'str', @@ -5901,19 +5933,19 @@ # # Examples: # -# 1. Move a node into an IOThread +# 1. Move a node into an IOThread # -# -> { "execute": "x-blockdev-set-iothread", -# "arguments": { "node-name": "disk1", -# "iothread": "iothread0" } } -# <- { "return": {} } +# -> { "execute": "x-blockdev-set-iothread", +# "arguments": { "node-name": "disk1", +# "iothread": "iothread0" } } +# <- { "return": {} } # -# 2. Move a node into the main loop +# 2. Move a node into the main loop # -# -> { "execute": "x-blockdev-set-iothread", -# "arguments": { "node-name": "disk1", -# "iothread": null } } -# <- { "return": {} } +# -> { "execute": "x-blockdev-set-iothread", +# "arguments": { "node-name": "disk1", +# "iothread": null } } +# <- { "return": {} } ## { 'command': 'x-blockdev-set-iothread', 'data' : { 'node-name': 'str', @@ -5955,9 +5987,9 @@ # # Example: # -# <- { "event": "QUORUM_FAILURE", -# "data": { "reference": "usr1", "sector-num": 345435, "sectors-count": 5 }, -# "timestamp": { "seconds": 1344522075, "microseconds": 745528 } } +# <- { "event": "QUORUM_FAILURE", +# "data": { "reference": "usr1", "sector-num": 345435, "sectors-count": 5 }, +# "timestamp": { "seconds": 1344522075, "microseconds": 745528 } } ## { 'event': 'QUORUM_FAILURE', 'data': { 'reference': 'str', 'sector-num': 'int', 'sectors-count': 'int' } } @@ -5986,19 +6018,19 @@ # # Examples: # -# 1. Read operation +# 1. 
Read operation # -# <- { "event": "QUORUM_REPORT_BAD", -# "data": { "node-name": "node0", "sector-num": 345435, "sectors-count": 5, -# "type": "read" }, -# "timestamp": { "seconds": 1344522075, "microseconds": 745528 } } +# <- { "event": "QUORUM_REPORT_BAD", +# "data": { "node-name": "node0", "sector-num": 345435, "sectors-count": 5, +# "type": "read" }, +# "timestamp": { "seconds": 1344522075, "microseconds": 745528 } } # -# 2. Flush operation +# 2. Flush operation # -# <- { "event": "QUORUM_REPORT_BAD", -# "data": { "node-name": "node0", "sector-num": 0, "sectors-count": 2097120, -# "type": "flush", "error": "Broken pipe" }, -# "timestamp": { "seconds": 1456406829, "microseconds": 291763 } } +# <- { "event": "QUORUM_REPORT_BAD", +# "data": { "node-name": "node0", "sector-num": 0, "sectors-count": 2097120, +# "type": "flush", "error": "Broken pipe" }, +# "timestamp": { "seconds": 1456406829, "microseconds": 291763 } } ## { 'event': 'QUORUM_REPORT_BAD', 'data': { 'type': 'QuorumOpType', '*error': 'str', 'node-name': 'str', @@ -6032,8 +6064,7 @@ # For the arguments, see the documentation of # BlockdevSnapshotInternal. # -# Returns: -# - nothing on success +# Errors: # - If @device is not a valid block device, GenericError # - If any snapshot matching @name exists, or @name is empty, # GenericError @@ -6044,11 +6075,11 @@ # # Example: # -# -> { "execute": "blockdev-snapshot-internal-sync", -# "arguments": { "device": "ide-hd0", -# "name": "snapshot0" } -# } -# <- { "return": {} } +# -> { "execute": "blockdev-snapshot-internal-sync", +# "arguments": { "device": "ide-hd0", +# "name": "snapshot0" } +# } +# <- { "return": {} } ## { 'command': 'blockdev-snapshot-internal-sync', 'data': 'BlockdevSnapshotInternal', @@ -6070,7 +6101,9 @@ # @name: optional the snapshot's name to be deleted # # Returns: -# - SnapshotInfo on success +# SnapshotInfo +# +# Errors: # - If @device is not a valid block device, GenericError # - If snapshot not found, GenericError # - If the format of the image used does not support it, @@ -6081,21 +6114,21 @@ # # Example: # -# -> { "execute": "blockdev-snapshot-delete-internal-sync", -# "arguments": { "device": "ide-hd0", -# "name": "snapshot0" } -# } -# <- { "return": { -# "id": "1", -# "name": "snapshot0", -# "vm-state-size": 0, -# "date-sec": 1000012, -# "date-nsec": 10, -# "vm-clock-sec": 100, -# "vm-clock-nsec": 20, -# "icount": 220414 -# } -# } +# -> { "execute": "blockdev-snapshot-delete-internal-sync", +# "arguments": { "device": "ide-hd0", +# "name": "snapshot0" } +# } +# <- { "return": { +# "id": "1", +# "name": "snapshot0", +# "vm-state-size": 0, +# "date-sec": 1000012, +# "date-nsec": 10, +# "vm-clock-sec": 100, +# "vm-clock-nsec": 20, +# "icount": 220414 +# } +# } ## { 'command': 'blockdev-snapshot-delete-internal-sync', 'data': { 'device': 'str', '*id': 'str', '*name': 'str'}, diff --git a/qapi/block-export.json b/qapi/block-export.json index 7874a49ba71..3919a2d5b9d 100644 --- a/qapi/block-export.json +++ b/qapi/block-export.json @@ -65,7 +65,8 @@ # server from advertising multiple client support (since 5.2; # default: 0). # -# Returns: error if the server is already running. +# Errors: +# - if the server is already running # # Since: 1.3 ## @@ -247,8 +248,9 @@ # @deprecated: This command is deprecated. Use @block-export-add # instead. # -# Returns: error if the server is not running, or export with the same -# name already exists. 
+# Errors: +# - if the server is not running +# - if an export with the same name already exists # # Since: 1.3 ## @@ -266,13 +268,14 @@ # # @hard: Drop all connections immediately and remove export. # -# Potential additional modes to be added in the future: +# TODO: Potential additional modes to be added in the future: # -# hide: Just hide export from new clients, leave existing connections -# as is. Remove export after all clients are disconnected. +# - hide: Just hide export from new clients, leave existing +# connections as is. Remove export after all clients are +# disconnected. # -# soft: Hide export from new clients, answer with ESHUTDOWN for all -# further requests from existing clients. +# - soft: Hide export from new clients, answer with ESHUTDOWN for +# all further requests from existing clients. # # Since: 2.12 ## @@ -293,11 +296,10 @@ # @deprecated: This command is deprecated. Use @block-export-del # instead. # -# Returns: error if -# -# - the server is not running -# - export is not found -# - mode is 'safe' and there are existing connections +# Errors: +# - if the server is not running +# - if export is not found +# - if mode is 'safe' and there are existing connections # # Since: 2.12 ## @@ -345,6 +347,8 @@ # Describes a block export, i.e. how single node should be exported on # an external interface. # +# @type: Block export type +# # @id: A unique identifier for the block export (across all export # types) # @@ -412,8 +416,10 @@ # @mode: Mode of command operation. See @BlockExportRemoveMode # description. Default is 'safe'. # -# Returns: Error if the export is not found or @mode is 'safe' and the -# export is still in use (e.g. by existing client connections) +# Errors: +# - if the export is not found +# - if @mode is 'safe' and the export is still in use (e.g. by +# existing client connections) # # Since: 5.2 ## diff --git a/qapi/block.json b/qapi/block.json index 998008cfa8f..5de99fe09d9 100644 --- a/qapi/block.json +++ b/qapi/block.json @@ -110,8 +110,7 @@ # # @deprecated: Member @device is deprecated. Use @id instead. 
# -# Returns: -# - Nothing on success +# Errors: # - If @device is not a valid block device, DeviceNotFound # # Notes: Ejecting a device with no media results in success @@ -120,8 +119,8 @@ # # Example: # -# -> { "execute": "eject", "arguments": { "id": "ide1-0-1" } } -# <- { "return": {} } +# -> { "execute": "eject", "arguments": { "id": "ide1-0-1" } } +# <- { "return": {} } ## { 'command': 'eject', 'data': { '*device': { 'type': 'str', 'features': [ 'deprecated' ] }, @@ -164,17 +163,17 @@ # # Example: # -# -> { "execute": "blockdev-open-tray", -# "arguments": { "id": "ide0-1-0" } } +# -> { "execute": "blockdev-open-tray", +# "arguments": { "id": "ide0-1-0" } } # -# <- { "timestamp": { "seconds": 1418751016, -# "microseconds": 716996 }, -# "event": "DEVICE_TRAY_MOVED", -# "data": { "device": "ide1-cd0", -# "id": "ide0-1-0", -# "tray-open": true } } +# <- { "timestamp": { "seconds": 1418751016, +# "microseconds": 716996 }, +# "event": "DEVICE_TRAY_MOVED", +# "data": { "device": "ide1-cd0", +# "id": "ide0-1-0", +# "tray-open": true } } # -# <- { "return": {} } +# <- { "return": {} } ## { 'command': 'blockdev-open-tray', 'data': { '*device': { 'type': 'str', 'features': [ 'deprecated' ] }, @@ -202,17 +201,17 @@ # # Example: # -# -> { "execute": "blockdev-close-tray", -# "arguments": { "id": "ide0-1-0" } } +# -> { "execute": "blockdev-close-tray", +# "arguments": { "id": "ide0-1-0" } } # -# <- { "timestamp": { "seconds": 1418751345, -# "microseconds": 272147 }, -# "event": "DEVICE_TRAY_MOVED", -# "data": { "device": "ide1-cd0", -# "id": "ide0-1-0", -# "tray-open": false } } +# <- { "timestamp": { "seconds": 1418751345, +# "microseconds": 272147 }, +# "event": "DEVICE_TRAY_MOVED", +# "data": { "device": "ide1-cd0", +# "id": "ide0-1-0", +# "tray-open": false } } # -# <- { "return": {} } +# <- { "return": {} } ## { 'command': 'blockdev-close-tray', 'data': { '*device': { 'type': 'str', 'features': [ 'deprecated' ] }, @@ -234,28 +233,28 @@ # # Example: # -# -> { "execute": "blockdev-remove-medium", -# "arguments": { "id": "ide0-1-0" } } +# -> { "execute": "blockdev-remove-medium", +# "arguments": { "id": "ide0-1-0" } } # -# <- { "error": { "class": "GenericError", -# "desc": "Tray of device 'ide0-1-0' is not open" } } +# <- { "error": { "class": "GenericError", +# "desc": "Tray of device 'ide0-1-0' is not open" } } # -# -> { "execute": "blockdev-open-tray", -# "arguments": { "id": "ide0-1-0" } } +# -> { "execute": "blockdev-open-tray", +# "arguments": { "id": "ide0-1-0" } } # -# <- { "timestamp": { "seconds": 1418751627, -# "microseconds": 549958 }, -# "event": "DEVICE_TRAY_MOVED", -# "data": { "device": "ide1-cd0", -# "id": "ide0-1-0", -# "tray-open": true } } +# <- { "timestamp": { "seconds": 1418751627, +# "microseconds": 549958 }, +# "event": "DEVICE_TRAY_MOVED", +# "data": { "device": "ide1-cd0", +# "id": "ide0-1-0", +# "tray-open": true } } # -# <- { "return": {} } +# <- { "return": {} } # -# -> { "execute": "blockdev-remove-medium", -# "arguments": { "id": "ide0-1-0" } } +# -> { "execute": "blockdev-remove-medium", +# "arguments": { "id": "ide0-1-0" } } # -# <- { "return": {} } +# <- { "return": {} } ## { 'command': 'blockdev-remove-medium', 'data': { 'id': 'str' } } @@ -275,19 +274,19 @@ # # Example: # -# -> { "execute": "blockdev-add", -# "arguments": { -# "node-name": "node0", -# "driver": "raw", -# "file": { "driver": "file", -# "filename": "fedora.iso" } } } -# <- { "return": {} } +# -> { "execute": "blockdev-add", +# "arguments": { +# "node-name": "node0", +# "driver": "raw", +# 
"file": { "driver": "file", +# "filename": "fedora.iso" } } } +# <- { "return": {} } # -# -> { "execute": "blockdev-insert-medium", -# "arguments": { "id": "ide0-1-0", -# "node-name": "node0" } } +# -> { "execute": "blockdev-insert-medium", +# "arguments": { "id": "ide0-1-0", +# "node-name": "node0" } } # -# <- { "return": {} } +# <- { "return": {} } ## { 'command': 'blockdev-insert-medium', 'data': { 'id': 'str', @@ -345,33 +344,33 @@ # # Examples: # -# 1. Change a removable medium +# 1. Change a removable medium # -# -> { "execute": "blockdev-change-medium", -# "arguments": { "id": "ide0-1-0", -# "filename": "/srv/images/Fedora-12-x86_64-DVD.iso", -# "format": "raw" } } -# <- { "return": {} } +# -> { "execute": "blockdev-change-medium", +# "arguments": { "id": "ide0-1-0", +# "filename": "/srv/images/Fedora-12-x86_64-DVD.iso", +# "format": "raw" } } +# <- { "return": {} } # -# 2. Load a read-only medium into a writable drive +# 2. Load a read-only medium into a writable drive # -# -> { "execute": "blockdev-change-medium", -# "arguments": { "id": "floppyA", -# "filename": "/srv/images/ro.img", -# "format": "raw", -# "read-only-mode": "retain" } } +# -> { "execute": "blockdev-change-medium", +# "arguments": { "id": "floppyA", +# "filename": "/srv/images/ro.img", +# "format": "raw", +# "read-only-mode": "retain" } } # -# <- { "error": -# { "class": "GenericError", -# "desc": "Could not open '/srv/images/ro.img': Permission denied" } } +# <- { "error": +# { "class": "GenericError", +# "desc": "Could not open '/srv/images/ro.img': Permission denied" } } # -# -> { "execute": "blockdev-change-medium", -# "arguments": { "id": "floppyA", -# "filename": "/srv/images/ro.img", -# "format": "raw", -# "read-only-mode": "read-only" } } +# -> { "execute": "blockdev-change-medium", +# "arguments": { "id": "floppyA", +# "filename": "/srv/images/ro.img", +# "format": "raw", +# "read-only-mode": "read-only" } } # -# <- { "return": {} } +# <- { "return": {} } ## { 'command': 'blockdev-change-medium', 'data': { '*device': { 'type': 'str', 'features': [ 'deprecated' ] }, @@ -400,12 +399,12 @@ # # Example: # -# <- { "event": "DEVICE_TRAY_MOVED", -# "data": { "device": "ide1-cd0", -# "id": "/machine/unattached/device[22]", -# "tray-open": true -# }, -# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } +# <- { "event": "DEVICE_TRAY_MOVED", +# "data": { "device": "ide1-cd0", +# "id": "/machine/unattached/device[22]", +# "tray-open": true +# }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } ## { 'event': 'DEVICE_TRAY_MOVED', 'data': { 'device': 'str', 'id': 'str', 'tray-open': 'bool' } } @@ -424,11 +423,11 @@ # # Example: # -# <- { "event": "PR_MANAGER_STATUS_CHANGED", -# "data": { "id": "pr-helper0", -# "connected": true -# }, -# "timestamp": { "seconds": 1519840375, "microseconds": 450486 } } +# <- { "event": "PR_MANAGER_STATUS_CHANGED", +# "data": { "id": "pr-helper0", +# "connected": true +# }, +# "timestamp": { "seconds": 1519840375, "microseconds": 450486 } } ## { 'event': 'PR_MANAGER_STATUS_CHANGED', 'data': { 'id': 'str', 'connected': 'bool' } } @@ -459,49 +458,48 @@ # the device will be removed from its group and the rest of its # members will not be affected. The 'group' parameter is ignored. 
# -# Returns: -# - Nothing on success +# Errors: # - If @device is not a valid block device, DeviceNotFound # # Since: 1.1 # # Examples: # -# -> { "execute": "block_set_io_throttle", -# "arguments": { "id": "virtio-blk-pci0/virtio-backend", -# "bps": 0, -# "bps_rd": 0, -# "bps_wr": 0, -# "iops": 512, -# "iops_rd": 0, -# "iops_wr": 0, -# "bps_max": 0, -# "bps_rd_max": 0, -# "bps_wr_max": 0, -# "iops_max": 0, -# "iops_rd_max": 0, -# "iops_wr_max": 0, -# "bps_max_length": 0, -# "iops_size": 0 } } -# <- { "return": {} } -# -# -> { "execute": "block_set_io_throttle", -# "arguments": { "id": "ide0-1-0", -# "bps": 1000000, -# "bps_rd": 0, -# "bps_wr": 0, -# "iops": 0, -# "iops_rd": 0, -# "iops_wr": 0, -# "bps_max": 8000000, -# "bps_rd_max": 0, -# "bps_wr_max": 0, -# "iops_max": 0, -# "iops_rd_max": 0, -# "iops_wr_max": 0, -# "bps_max_length": 60, -# "iops_size": 0 } } -# <- { "return": {} } +# -> { "execute": "block_set_io_throttle", +# "arguments": { "id": "virtio-blk-pci0/virtio-backend", +# "bps": 0, +# "bps_rd": 0, +# "bps_wr": 0, +# "iops": 512, +# "iops_rd": 0, +# "iops_wr": 0, +# "bps_max": 0, +# "bps_rd_max": 0, +# "bps_wr_max": 0, +# "iops_max": 0, +# "iops_rd_max": 0, +# "iops_wr_max": 0, +# "bps_max_length": 0, +# "iops_size": 0 } } +# <- { "return": {} } +# +# -> { "execute": "block_set_io_throttle", +# "arguments": { "id": "ide0-1-0", +# "bps": 1000000, +# "bps_rd": 0, +# "bps_wr": 0, +# "iops": 0, +# "iops_rd": 0, +# "iops_wr": 0, +# "bps_max": 8000000, +# "bps_rd_max": 0, +# "bps_wr_max": 0, +# "iops_max": 0, +# "iops_rd_max": 0, +# "iops_wr_max": 0, +# "bps_max_length": 60, +# "iops_size": 0 } } +# <- { "return": {} } ## { 'command': 'block_set_io_throttle', 'boxed': true, 'data': 'BlockIOThrottle', @@ -540,50 +538,50 @@ # @boundaries-flush: list of interval boundary values for flush # latency histogram. # -# Returns: error if device is not found or any boundary arrays are -# invalid. +# Errors: +# - if device is not found or any boundary arrays are invalid. 
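Aside (not part of the patch): the boundary lists used by block-latency-histogram-set and reported back via @boundaries/@bins are easy to misread, so the small standalone C sketch below shows how a single latency value maps onto a bin. It is purely illustrative; the function name is hypothetical and this is not how QEMU implements it internally.

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative only: map a latency (in the same units as the configured
 * boundaries) to a histogram bin index.  N boundaries define N + 1
 * intervals [0, b0), [b0, b1), ..., [b(N-1), +inf), which is why @bins
 * always has one more element than @boundaries.
 */
static size_t latency_to_bin(uint64_t latency,
                             const uint64_t *boundaries, size_t n)
{
    size_t i = 0;

    while (i < n && latency >= boundaries[i]) {
        i++;
    }
    return i; /* 0 .. n, i.e. one of the n + 1 bins */
}

For boundaries [10, 50, 100], a latency of 7 lands in bin 0 ([0, 10)) and a latency of 120 lands in bin 3 ([100, +inf)), matching the intervals described in the examples that follow.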
# # Since: 4.0 # # Example: # -# Set new histograms for all io types with intervals [0, 10), [10, -# 50), [50, 100), [100, +inf): +# Set new histograms for all io types with intervals +# [0, 10), [10, 50), [50, 100), [100, +inf): # -# -> { "execute": "block-latency-histogram-set", -# "arguments": { "id": "drive0", -# "boundaries": [10, 50, 100] } } -# <- { "return": {} } +# -> { "execute": "block-latency-histogram-set", +# "arguments": { "id": "drive0", +# "boundaries": [10, 50, 100] } } +# <- { "return": {} } # # Example: # -# Set new histogram only for write, other histograms will remain not -# changed (or not created): +# Set new histogram only for write, other histograms will remain +# not changed (or not created): # -# -> { "execute": "block-latency-histogram-set", -# "arguments": { "id": "drive0", -# "boundaries-write": [10, 50, 100] } } -# <- { "return": {} } +# -> { "execute": "block-latency-histogram-set", +# "arguments": { "id": "drive0", +# "boundaries-write": [10, 50, 100] } } +# <- { "return": {} } # # Example: # -# Set new histograms with the following intervals: read, flush: [0, -# 10), [10, 50), [50, 100), [100, +inf) write: [0, 1000), [1000, -# 5000), [5000, +inf) +# Set new histograms with the following intervals: +# read, flush: [0, 10), [10, 50), [50, 100), [100, +inf) +# write: [0, 1000), [1000, 5000), [5000, +inf) # -# -> { "execute": "block-latency-histogram-set", -# "arguments": { "id": "drive0", -# "boundaries": [10, 50, 100], -# "boundaries-write": [1000, 5000] } } -# <- { "return": {} } +# -> { "execute": "block-latency-histogram-set", +# "arguments": { "id": "drive0", +# "boundaries": [10, 50, 100], +# "boundaries-write": [1000, 5000] } } +# <- { "return": {} } # # Example: # -# Remove all latency histograms: +# Remove all latency histograms: # -# -> { "execute": "block-latency-histogram-set", -# "arguments": { "id": "drive0" } } -# <- { "return": {} } +# -> { "execute": "block-latency-histogram-set", +# "arguments": { "id": "drive0" } } +# <- { "return": {} } ## { 'command': 'block-latency-histogram-set', 'data': {'id': 'str', diff --git a/qapi/char.json b/qapi/char.json index c1bab7b8550..777dde55d97 100644 --- a/qapi/char.json +++ b/qapi/char.json @@ -42,26 +42,26 @@ # # Example: # -# -> { "execute": "query-chardev" } -# <- { -# "return": [ -# { -# "label": "charchannel0", -# "filename": "unix:/var/lib/libvirt/qemu/seabios.rhel6.agent,server=on", -# "frontend-open": false -# }, -# { -# "label": "charmonitor", -# "filename": "unix:/var/lib/libvirt/qemu/seabios.rhel6.monitor,server=on", -# "frontend-open": true -# }, -# { -# "label": "charserial0", -# "filename": "pty:/dev/pts/2", -# "frontend-open": true -# } -# ] -# } +# -> { "execute": "query-chardev" } +# <- { +# "return": [ +# { +# "label": "charchannel0", +# "filename": "unix:/var/lib/libvirt/qemu/seabios.rhel6.agent,server=on", +# "frontend-open": false +# }, +# { +# "label": "charmonitor", +# "filename": "unix:/var/lib/libvirt/qemu/seabios.rhel6.monitor,server=on", +# "frontend-open": true +# }, +# { +# "label": "charserial0", +# "filename": "pty:/dev/pts/2", +# "frontend-open": true +# } +# ] +# } ## { 'command': 'query-chardev', 'returns': ['ChardevInfo'], 'allow-preconfig': true } @@ -88,23 +88,23 @@ # # Example: # -# -> { "execute": "query-chardev-backends" } -# <- { -# "return":[ -# { -# "name":"udp" -# }, -# { -# "name":"tcp" -# }, -# { -# "name":"unix" -# }, -# { -# "name":"spiceport" -# } -# ] -# } +# -> { "execute": "query-chardev-backends" } +# <- { +# "return":[ +# { +# "name":"udp" +# }, +# 
{ +# "name":"tcp" +# }, +# { +# "name":"unix" +# }, +# { +# "name":"spiceport" +# } +# ] +# } ## { 'command': 'query-chardev-backends', 'returns': ['ChardevBackendInfo'] } @@ -139,17 +139,15 @@ # - data itself is always Unicode regardless of format, like any # other string. # -# Returns: Nothing on success -# # Since: 1.4 # # Example: # -# -> { "execute": "ringbuf-write", -# "arguments": { "device": "foo", -# "data": "abcdefgh", -# "format": "utf8" } } -# <- { "return": {} } +# -> { "execute": "ringbuf-write", +# "arguments": { "device": "foo", +# "data": "abcdefgh", +# "format": "utf8" } } +# <- { "return": {} } ## { 'command': 'ringbuf-write', 'data': { 'device': 'str', @@ -181,11 +179,11 @@ # # Example: # -# -> { "execute": "ringbuf-read", -# "arguments": { "device": "foo", -# "size": 1000, -# "format": "utf8" } } -# <- { "return": "abcdefgh" } +# -> { "execute": "ringbuf-read", +# "arguments": { "device": "foo", +# "size": 1000, +# "format": "utf8" } } +# <- { "return": "abcdefgh" } ## { 'command': 'ringbuf-read', 'data': {'device': 'str', 'size': 'int', '*format': 'DataFormat'}, @@ -391,8 +389,8 @@ # @rows: console height, in chars # # Note: the options are only effective when the VNC or SDL graphical -# display backend is active. They are ignored with the GTK, Spice, VNC -# and D-Bus display backends. +# display backend is active. They are ignored with the GTK, +# Spice, VNC and D-Bus display backends. # # Since: 1.5 ## @@ -468,12 +466,16 @@ # # @memory: Since 1.5 # +# Features: +# +# @deprecated: Member @memory is deprecated. Use @ringbuf instead. +# # Since: 1.4 ## { 'enum': 'ChardevBackendKind', 'data': [ 'file', - 'serial', - 'parallel', + { 'name': 'serial', 'if': 'HAVE_CHARDEV_SERIAL' }, + { 'name': 'parallel', 'if': 'HAVE_CHARDEV_PARALLEL' }, 'pipe', 'socket', 'udp', @@ -482,22 +484,23 @@ 'mux', 'msmouse', 'wctablet', - 'braille', + { 'name': 'braille', 'if': 'CONFIG_BRLAPI' }, 'testdev', 'stdio', - 'console', + { 'name': 'console', 'if': 'CONFIG_WIN32' }, { 'name': 'spicevmc', 'if': 'CONFIG_SPICE' }, { 'name': 'spiceport', 'if': 'CONFIG_SPICE' }, { 'name': 'qemu-vdagent', 'if': 'CONFIG_SPICE_PROTOCOL' }, { 'name': 'dbus', 'if': 'CONFIG_DBUS_DISPLAY' }, 'vc', 'ringbuf', - # next one is just for compatibility - 'memory' ] } + { 'name': 'memory', 'features': [ 'deprecated' ] } ] } ## # @ChardevFileWrapper: # +# @data: Configuration info for file chardevs +# # Since: 1.4 ## { 'struct': 'ChardevFileWrapper', @@ -506,6 +509,8 @@ ## # @ChardevHostdevWrapper: # +# @data: Configuration info for device and pipe chardevs +# # Since: 1.4 ## { 'struct': 'ChardevHostdevWrapper', @@ -514,6 +519,8 @@ ## # @ChardevSocketWrapper: # +# @data: Configuration info for (stream) socket chardevs +# # Since: 1.4 ## { 'struct': 'ChardevSocketWrapper', @@ -522,6 +529,8 @@ ## # @ChardevUdpWrapper: # +# @data: Configuration info for datagram socket chardevs +# # Since: 1.5 ## { 'struct': 'ChardevUdpWrapper', @@ -530,6 +539,8 @@ ## # @ChardevCommonWrapper: # +# @data: Configuration shared across all chardev backends +# # Since: 2.6 ## { 'struct': 'ChardevCommonWrapper', @@ -538,6 +549,8 @@ ## # @ChardevMuxWrapper: # +# @data: Configuration info for mux chardevs +# # Since: 1.5 ## { 'struct': 'ChardevMuxWrapper', @@ -546,6 +559,8 @@ ## # @ChardevStdioWrapper: # +# @data: Configuration info for stdio chardevs +# # Since: 1.5 ## { 'struct': 'ChardevStdioWrapper', @@ -554,6 +569,8 @@ ## # @ChardevSpiceChannelWrapper: # +# @data: Configuration info for spice vm channel chardevs +# # Since: 1.5 ## { 'struct': 
'ChardevSpiceChannelWrapper', @@ -563,6 +580,8 @@ ## # @ChardevSpicePortWrapper: # +# @data: Configuration info for spice port chardevs +# # Since: 1.5 ## { 'struct': 'ChardevSpicePortWrapper', @@ -572,6 +591,8 @@ ## # @ChardevQemuVDAgentWrapper: # +# @data: Configuration info for qemu vdagent implementation +# # Since: 6.1 ## { 'struct': 'ChardevQemuVDAgentWrapper', @@ -581,6 +602,8 @@ ## # @ChardevDBusWrapper: # +# @data: Configuration info for DBus chardevs +# # Since: 7.0 ## { 'struct': 'ChardevDBusWrapper', @@ -590,6 +613,8 @@ ## # @ChardevVCWrapper: # +# @data: Configuration info for virtual console chardevs +# # Since: 1.5 ## { 'struct': 'ChardevVCWrapper', @@ -598,6 +623,8 @@ ## # @ChardevRingbufWrapper: # +# @data: Configuration info for ring buffer chardevs +# # Since: 1.5 ## { 'struct': 'ChardevRingbufWrapper', @@ -608,14 +635,18 @@ # # Configuration info for the new chardev backend. # +# @type: backend type +# # Since: 1.4 ## { 'union': 'ChardevBackend', 'base': { 'type': 'ChardevBackendKind' }, 'discriminator': 'type', 'data': { 'file': 'ChardevFileWrapper', - 'serial': 'ChardevHostdevWrapper', - 'parallel': 'ChardevHostdevWrapper', + 'serial': { 'type': 'ChardevHostdevWrapper', + 'if': 'HAVE_CHARDEV_SERIAL' }, + 'parallel': { 'type': 'ChardevHostdevWrapper', + 'if': 'HAVE_CHARDEV_PARALLEL' }, 'pipe': 'ChardevHostdevWrapper', 'socket': 'ChardevSocketWrapper', 'udp': 'ChardevUdpWrapper', @@ -624,10 +655,12 @@ 'mux': 'ChardevMuxWrapper', 'msmouse': 'ChardevCommonWrapper', 'wctablet': 'ChardevCommonWrapper', - 'braille': 'ChardevCommonWrapper', + 'braille': { 'type': 'ChardevCommonWrapper', + 'if': 'CONFIG_BRLAPI' }, 'testdev': 'ChardevCommonWrapper', 'stdio': 'ChardevStdioWrapper', - 'console': 'ChardevCommonWrapper', + 'console': { 'type': 'ChardevCommonWrapper', + 'if': 'CONFIG_WIN32' }, 'spicevmc': { 'type': 'ChardevSpiceChannelWrapper', 'if': 'CONFIG_SPICE' }, 'spiceport': { 'type': 'ChardevSpicePortWrapper', @@ -638,7 +671,6 @@ 'if': 'CONFIG_DBUS_DISPLAY' }, 'vc': 'ChardevVCWrapper', 'ringbuf': 'ChardevRingbufWrapper', - # next one is just for compatibility 'memory': 'ChardevRingbufWrapper' } } ## @@ -669,21 +701,21 @@ # # Examples: # -# -> { "execute" : "chardev-add", -# "arguments" : { "id" : "foo", -# "backend" : { "type" : "null", "data" : {} } } } -# <- { "return": {} } +# -> { "execute" : "chardev-add", +# "arguments" : { "id" : "foo", +# "backend" : { "type" : "null", "data" : {} } } } +# <- { "return": {} } # -# -> { "execute" : "chardev-add", -# "arguments" : { "id" : "bar", -# "backend" : { "type" : "file", -# "data" : { "out" : "/tmp/bar.log" } } } } -# <- { "return": {} } +# -> { "execute" : "chardev-add", +# "arguments" : { "id" : "bar", +# "backend" : { "type" : "file", +# "data" : { "out" : "/tmp/bar.log" } } } } +# <- { "return": {} } # -# -> { "execute" : "chardev-add", -# "arguments" : { "id" : "baz", -# "backend" : { "type" : "pty", "data" : {} } } } -# <- { "return": { "pty" : "/dev/pty/42" } } +# -> { "execute" : "chardev-add", +# "arguments" : { "id" : "baz", +# "backend" : { "type" : "pty", "data" : {} } } } +# <- { "return": { "pty" : "/dev/pty/42" } } ## { 'command': 'chardev-add', 'data': { 'id': 'str', @@ -705,26 +737,26 @@ # # Examples: # -# -> { "execute" : "chardev-change", -# "arguments" : { "id" : "baz", -# "backend" : { "type" : "pty", "data" : {} } } } -# <- { "return": { "pty" : "/dev/pty/42" } } -# -# -> {"execute" : "chardev-change", -# "arguments" : { -# "id" : "charchannel2", -# "backend" : { -# "type" : "socket", -# "data" : { -# 
"addr" : { -# "type" : "unix" , -# "data" : { -# "path" : "/tmp/charchannel2.socket" -# } -# }, -# "server" : true, -# "wait" : false }}}} -# <- {"return": {}} +# -> { "execute" : "chardev-change", +# "arguments" : { "id" : "baz", +# "backend" : { "type" : "pty", "data" : {} } } } +# <- { "return": { "pty" : "/dev/pty/42" } } +# +# -> {"execute" : "chardev-change", +# "arguments" : { +# "id" : "charchannel2", +# "backend" : { +# "type" : "socket", +# "data" : { +# "addr" : { +# "type" : "unix" , +# "data" : { +# "path" : "/tmp/charchannel2.socket" +# } +# }, +# "server" : true, +# "wait" : false }}}} +# <- {"return": {}} ## { 'command': 'chardev-change', 'data': { 'id': 'str', @@ -738,14 +770,12 @@ # # @id: the chardev's ID, must exist and not be in use # -# Returns: Nothing on success -# # Since: 1.4 # # Example: # -# -> { "execute": "chardev-remove", "arguments": { "id" : "foo" } } -# <- { "return": {} } +# -> { "execute": "chardev-remove", "arguments": { "id" : "foo" } } +# <- { "return": {} } ## { 'command': 'chardev-remove', 'data': { 'id': 'str' } } @@ -757,14 +787,12 @@ # # @id: the chardev's ID, must exist # -# Returns: Nothing on success -# # Since: 2.10 # # Example: # -# -> { "execute": "chardev-send-break", "arguments": { "id" : "foo" } } -# <- { "return": {} } +# -> { "execute": "chardev-send-break", "arguments": { "id" : "foo" } } +# <- { "return": {} } ## { 'command': 'chardev-send-break', 'data': { 'id': 'str' } } @@ -784,9 +812,9 @@ # # Example: # -# <- { "event": "VSERPORT_CHANGE", -# "data": { "id": "channel0", "open": true }, -# "timestamp": { "seconds": 1401385907, "microseconds": 422329 } } +# <- { "event": "VSERPORT_CHANGE", +# "data": { "id": "channel0", "open": true }, +# "timestamp": { "seconds": 1401385907, "microseconds": 422329 } } ## { 'event': 'VSERPORT_CHANGE', 'data': { 'id': 'str', diff --git a/qapi/common.json b/qapi/common.json index 6fed9cde1a9..7558ce5430d 100644 --- a/qapi/common.json +++ b/qapi/common.json @@ -51,17 +51,6 @@ { 'enum': 'OnOffSplit', 'data': [ 'on', 'off', 'split' ] } -## -# @String: -# -# A fat type wrapping 'str', to be embedded in lists. -# -# Since: 1.2 -## -{ 'struct': 'String', - 'data': { - 'str': 'str' } } - ## # @StrOrNull: # @@ -118,10 +107,14 @@ # # @16: 16.0GT/s # +# @32: 32.0GT/s (since 9.0) +# +# @64: 64.0GT/s (since 9.0) +# # Since: 4.0 ## { 'enum': 'PCIELinkSpeed', - 'data': [ '2_5', '5', '8', '16' ] } + 'data': [ '2_5', '5', '8', '16', '32', '64' ] } ## # @PCIELinkWidth: diff --git a/qapi/control.json b/qapi/control.json index a91fa334079..6bdbf077c2e 100644 --- a/qapi/control.json +++ b/qapi/control.json @@ -11,8 +11,6 @@ # # Enable QMP capabilities. # -# Arguments: -# # @enable: An optional list of QMPCapability values to enable. The # client must not enable any capability that is not mentioned in # the QMP greeting message. 
If the field is not provided, it @@ -20,9 +18,9 @@ # # Example: # -# -> { "execute": "qmp_capabilities", -# "arguments": { "enable": [ "oob" ] } } -# <- { "return": {} } +# -> { "execute": "qmp_capabilities", +# "arguments": { "enable": [ "oob" ] } } +# <- { "return": {} } # # Notes: This command is valid exactly when first connecting: it must # be issued before any other command will be accepted, and will @@ -102,17 +100,17 @@ # # Example: # -# -> { "execute": "query-version" } -# <- { -# "return":{ -# "qemu":{ -# "major":0, -# "minor":11, -# "micro":5 -# }, -# "package":"" -# } -# } +# -> { "execute": "query-version" } +# <- { +# "return":{ +# "qemu":{ +# "major":0, +# "minor":11, +# "micro":5 +# }, +# "package":"" +# } +# } ## { 'command': 'query-version', 'returns': 'VersionInfo', 'allow-preconfig': true } @@ -139,17 +137,17 @@ # # Example: # -# -> { "execute": "query-commands" } -# <- { -# "return":[ -# { -# "name":"query-balloon" -# }, -# { -# "name":"system_powerdown" -# } -# ] -# } +# -> { "execute": "query-commands" } +# <- { +# "return":[ +# { +# "name":"query-balloon" +# }, +# { +# "name":"system_powerdown" +# } +# ] +# } # # Note: This example has been shortened as the real response is too # long. @@ -169,8 +167,8 @@ # # Example: # -# -> { "execute": "quit" } -# <- { "return": {} } +# -> { "execute": "quit" } +# <- { "return": {} } ## { 'command': 'quit', 'allow-preconfig': true } diff --git a/qapi/crypto.json b/qapi/crypto.json index fd3d46ebd12..e102be337bb 100644 --- a/qapi/crypto.json +++ b/qapi/crypto.json @@ -48,15 +48,15 @@ # # @sha1: SHA-1. Should not be used in any new code, legacy compat only # -# @sha224: SHA-224. (since 2.7) +# @sha224: SHA-224. (since 2.7) # # @sha256: SHA-256. Current recommended strong hash. # -# @sha384: SHA-384. (since 2.7) +# @sha384: SHA-384. (since 2.7) # -# @sha512: SHA-512. (since 2.7) +# @sha512: SHA-512. (since 2.7) # -# @ripemd160: RIPEMD-160. (since 2.7) +# @ripemd160: RIPEMD-160. (since 2.7) # # Since: 2.6 ## @@ -94,6 +94,8 @@ # # @twofish-256: Twofish with 256 bit / 32 byte keys # +# @sm4: SM4 with 128 bit / 16 byte keys (since 9.0) +# # Since: 2.6 ## { 'enum': 'QCryptoCipherAlgorithm', @@ -102,7 +104,8 @@ 'des', '3des', 'cast5-128', 'serpent-128', 'serpent-192', 'serpent-256', - 'twofish-128', 'twofish-192', 'twofish-256']} + 'twofish-128', 'twofish-192', 'twofish-256', + 'sm4']} ## # @QCryptoCipherMode: @@ -221,7 +224,9 @@ # 'sha256' # # @iter-time: number of milliseconds to spend in PBKDF passphrase -# processing. Currently defaults to 2000. (since 2.8) +# processing. Currently defaults to 2000. (since 2.8) +# +# @detached-header: create a detached LUKS header. 
(since 9.0) # # Since: 2.6 ## @@ -232,7 +237,8 @@ '*ivgen-alg': 'QCryptoIVGenAlgorithm', '*ivgen-hash-alg': 'QCryptoHashAlgorithm', '*hash-alg': 'QCryptoHashAlgorithm', - '*iter-time': 'int'}} + '*iter-time': 'int', + '*detached-header': 'bool'}} ## # @QCryptoBlockOpenOptions: @@ -311,6 +317,8 @@ # # @hash-alg: the master key hash algorithm # +# @detached-header: whether the LUKS header is detached (Since 9.0) +# # @payload-offset: offset to the payload data in bytes # # @master-key-iters: number of PBKDF2 iterations for key material @@ -327,6 +335,7 @@ 'ivgen-alg': 'QCryptoIVGenAlgorithm', '*ivgen-hash-alg': 'QCryptoHashAlgorithm', 'hash-alg': 'QCryptoHashAlgorithm', + 'detached-header': 'bool', 'payload-offset': 'int', 'master-key-iters': 'int', 'uuid': 'str', @@ -645,6 +654,8 @@ # The options that are available for all asymmetric key algorithms # when creating a new QCryptoAkCipher. # +# @alg: encryption cipher algorithm +# # Since: 7.1 ## { 'union': 'QCryptoAkCipherOptions', diff --git a/qapi/cxl.json b/qapi/cxl.json index 8cc4c72fa94..4281726deca 100644 --- a/qapi/cxl.json +++ b/qapi/cxl.json @@ -144,8 +144,8 @@ # @cxl-inject-memory-module-event: # # Inject an event record for a Memory Module Event (CXL r3.0 -# 8.2.9.2.1.3). This event includes a copy of the Device Health -# info at the time of the event. +# 8.2.9.2.1.3). This event includes a copy of the Device Health info +# at the time of the event. # # @path: CXL type 3 device canonical QOM path # diff --git a/qapi/dump.json b/qapi/dump.json index 5cbc237ad91..2fa9504d864 100644 --- a/qapi/dump.json +++ b/qapi/dump.json @@ -15,20 +15,20 @@ # # @elf: elf format # -# @kdump-zlib: makedumpfile flattened, kdump-compressed format with zlib -# compression +# @kdump-zlib: makedumpfile flattened, kdump-compressed format with +# zlib compression # # @kdump-lzo: makedumpfile flattened, kdump-compressed format with lzo # compression # -# @kdump-snappy: makedumpfile flattened, kdump-compressed format with snappy -# compression +# @kdump-snappy: makedumpfile flattened, kdump-compressed format with +# snappy compression # -# @kdump-raw-zlib: raw assembled kdump-compressed format with zlib compression -# (since 8.2) +# @kdump-raw-zlib: raw assembled kdump-compressed format with zlib +# compression (since 8.2) # -# @kdump-raw-lzo: raw assembled kdump-compressed format with lzo compression -# (since 8.2) +# @kdump-raw-lzo: raw assembled kdump-compressed format with lzo +# compression (since 8.2) # # @kdump-raw-snappy: raw assembled kdump-compressed format with snappy # compression (since 8.2) @@ -77,7 +77,7 @@ # # @detach: if true, QMP will return immediately rather than waiting # for the dump to finish. The user can track progress using -# "query-dump". (since 2.6). +# "query-dump". (since 2.6). # # @begin: if specified, the starting physical address. 
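Because @detach makes dump-guest-memory return immediately, callers normally follow up by polling query-dump, as the documentation notes. A possible sketch, again assuming a qmp(command, arguments) callable like the one built earlier; the output path and the "file:" protocol form are the caller's choice.

import time

def dump_and_wait(qmp, path, paging=False, poll_interval=1.0):
    """Start a detached guest-memory dump into a file and poll until it finishes."""
    reply = qmp("dump-guest-memory",
                {"paging": paging, "protocol": "file:" + path, "detach": True})
    if "error" in reply:
        return reply
    while True:
        status = qmp("query-dump", None)["return"]
        if status["status"] != "active":
            return status                            # "completed" or "failed"
        print("dumped %d of %d bytes" % (status["completed"], status["total"]))
        time.sleep(poll_interval)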
# @@ -92,15 +92,13 @@ # # Note: All boolean arguments default to false # -# Returns: nothing on success -# # Since: 1.2 # # Example: # -# -> { "execute": "dump-guest-memory", -# "arguments": { "paging": false, "protocol": "fd:dump" } } -# <- { "return": {} } +# -> { "execute": "dump-guest-memory", +# "arguments": { "paging": false, "protocol": "fd:dump" } } +# <- { "return": {} } ## { 'command': 'dump-guest-memory', 'data': { 'paging': 'bool', 'protocol': 'str', '*detach': 'bool', @@ -154,9 +152,9 @@ # # Example: # -# -> { "execute": "query-dump" } -# <- { "return": { "status": "active", "completed": 1024000, -# "total": 2048000 } } +# -> { "execute": "query-dump" } +# <- { "return": { "status": "active", "completed": 1024000, +# "total": 2048000 } } ## { 'command': 'query-dump', 'returns': 'DumpQueryResult' } @@ -175,10 +173,10 @@ # # Example: # -# <- { "event": "DUMP_COMPLETED", -# "data": { "result": { "total": 1090650112, "status": "completed", -# "completed": 1090650112 } }, -# "timestamp": { "seconds": 1648244171, "microseconds": 950316 } } +# <- { "event": "DUMP_COMPLETED", +# "data": { "result": { "total": 1090650112, "status": "completed", +# "completed": 1090650112 } }, +# "timestamp": { "seconds": 1648244171, "microseconds": 950316 } } ## { 'event': 'DUMP_COMPLETED' , 'data': { 'result': 'DumpQueryResult', '*error': 'str' } } @@ -186,7 +184,7 @@ ## # @DumpGuestMemoryCapability: # -# A list of the available formats for dump-guest-memory +# @formats: the available formats for dump-guest-memory # # Since: 2.0 ## @@ -206,9 +204,9 @@ # # Example: # -# -> { "execute": "query-dump-guest-memory-capability" } -# <- { "return": { "formats": -# ["elf", "kdump-zlib", "kdump-lzo", "kdump-snappy"] } } +# -> { "execute": "query-dump-guest-memory-capability" } +# <- { "return": { "formats": +# ["elf", "kdump-zlib", "kdump-lzo", "kdump-snappy"] } } ## { 'command': 'query-dump-guest-memory-capability', 'returns': 'DumpGuestMemoryCapability' } diff --git a/qapi/ebpf.json b/qapi/ebpf.json new file mode 100644 index 00000000000..e500b5a744e --- /dev/null +++ b/qapi/ebpf.json @@ -0,0 +1,64 @@ +# -*- Mode: Python -*- +# vim: filetype=python +# +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. + +## +# = eBPF Objects +# +# eBPF object is an ELF binary that contains the eBPF program and eBPF +# map description(BTF). Overall, eBPF object should contain the +# program and enough metadata to create/load eBPF with libbpf. As the +# eBPF maps/program should correspond to QEMU, the eBPF can't be used +# from different QEMU build. +# +# Currently, there is a possible eBPF for receive-side scaling (RSS). +## + +## +# @EbpfObject: +# +# An eBPF ELF object. +# +# @object: the eBPF object encoded in base64 +# +# Since: 9.0 +## +{ 'struct': 'EbpfObject', + 'data': {'object': 'str'}, + 'if': 'CONFIG_EBPF' } + +## +# @EbpfProgramID: +# +# The eBPF programs that can be gotten with request-ebpf. +# +# @rss: Receive side scaling, technology that allows steering traffic +# between queues by calculation hash. Users may set up +# indirection table and hash/packet types configurations. Used +# with virtio-net. +# +# Since: 9.0 +## +{ 'enum': 'EbpfProgramID', + 'if': 'CONFIG_EBPF', + 'data': [ { 'name': 'rss' } ] } + +## +# @request-ebpf: +# +# Retrieve an eBPF object that can be loaded with libbpf. Management +# applications (e.g. libvirt) may load it and pass file descriptors to +# QEMU, so they can run running QEMU without BPF capabilities. 
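request-ebpf hands back the ELF object base64-encoded, so a management application has to decode it before loading it with libbpf or passing file descriptors back to QEMU. A minimal sketch under the same qmp(command, arguments) assumption; the output filename is arbitrary.

import base64

def fetch_rss_ebpf(qmp, out_path="rss.bpf.o"):
    """Fetch the RSS eBPF object and write the decoded ELF to out_path."""
    reply = qmp("request-ebpf", {"id": "rss"})
    if "error" in reply:
        raise RuntimeError(reply["error"]["desc"])
    with open(out_path, "wb") as f:
        f.write(base64.b64decode(reply["return"]["object"]))
    return out_path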
+# +# @id: The ID of the program to return. +# +# Returns: eBPF object encoded in base64. +# +# Since: 9.0 +## +{ 'command': 'request-ebpf', + 'data': { 'id': 'EbpfProgramID' }, + 'returns': 'EbpfObject', + 'if': 'CONFIG_EBPF' } diff --git a/qapi/introspect.json b/qapi/introspect.json index 9173e60fdd0..b041b02ba8c 100644 --- a/qapi/introspect.json +++ b/qapi/introspect.json @@ -93,8 +93,6 @@ # particular order. (since 4.1 for object types, 4.2 for # commands, 5.0 for the rest) # -# Additional members depend on the value of @meta-type. -# # Since: 2.5 ## { 'union': 'SchemaInfo', @@ -261,7 +259,7 @@ # # @members: the alternate type's members, in no particular order. The # members' wire encoding is distinct, see -# docs/devel/qapi-code-gen.txt section Alternate types. +# :doc:`/devel/qapi-code-gen` section Alternate types. # # On the wire, this can be any of the members. # diff --git a/qapi/machine-target.json b/qapi/machine-target.json index 7b7149f81c6..29e695aa06c 100644 --- a/qapi/machine-target.json +++ b/qapi/machine-target.json @@ -124,11 +124,12 @@ ## # @query-cpu-model-comparison: # -# Compares two CPU models, returning how they compare in a specific -# configuration. The results indicates how both models compare -# regarding runnability. This result can be used by tooling to make -# decisions if a certain CPU model will run in a certain configuration -# or if a compatible CPU model has to be created by baselining. +# Compares two CPU models, @modela and @modelb, returning how they +# compare in a specific configuration. The results indicates how +# both models compare regarding runnability. This result can be +# used by tooling to make decisions if a certain CPU model will +# run in a certain configuration or if a compatible CPU model has +# to be created by baselining. # # Usually, a CPU model is compared against the maximum possible CPU # model of a certain configuration (e.g. the "host" model for KVM). @@ -154,10 +155,20 @@ # Some architectures may not support comparing CPU models. s390x # supports comparing CPU models. # -# Returns: a CpuModelBaselineInfo. Returns an error if comparing CPU -# models is not supported, if a model cannot be used, if a model -# contains an unknown cpu definition name, unknown properties or -# properties with wrong types. +# @modela: description of the first CPU model to compare, referred to as +# "model A" in CpuModelCompareResult +# +# @modelb: description of the second CPU model to compare, referred to as +# "model B" in CpuModelCompareResult +# +# Returns: a CpuModelCompareInfo describing how both CPU models +# compare +# +# Errors: +# - if comparing CPU models is not supported +# - if a model cannot be used +# - if a model contains an unknown cpu definition name, unknown +# properties or properties with wrong types. # # Note: this command isn't specific to s390x, but is only implemented # on this architecture currently. @@ -172,9 +183,9 @@ ## # @query-cpu-model-baseline: # -# Baseline two CPU models, creating a compatible third model. The -# created model will always be a static, migration-safe CPU model (see -# "static" CPU model expansion for details). +# Baseline two CPU models, @modela and @modelb, creating a compatible +# third model. The created model will always be a static, +# migration-safe CPU model (see "static" CPU model expansion for details). # # This interface can be used by tooling to create a compatible CPU # model out two CPU models. 
The created CPU model will be identical @@ -201,10 +212,17 @@ # Some architectures may not support baselining CPU models. s390x # supports baselining CPU models. # -# Returns: a CpuModelBaselineInfo. Returns an error if baselining CPU -# models is not supported, if a model cannot be used, if a model -# contains an unknown cpu definition name, unknown properties or -# properties with wrong types. +# @modela: description of the first CPU model to baseline +# +# @modelb: description of the second CPU model to baseline +# +# Returns: a CpuModelBaselineInfo describing the baselined CPU model +# +# Errors: +# - if baselining CPU models is not supported +# - if a model cannot be used +# - if a model contains an unknown cpu definition name, unknown +# properties or properties with wrong types. # # Note: this command isn't specific to s390x, but is only implemented # on this architecture currently. @@ -237,10 +255,10 @@ ## # @query-cpu-model-expansion: # -# Expands a given CPU model (or a combination of CPU model + -# additional options) to different granularities, allowing tooling to -# get an understanding what a specific CPU model looks like in QEMU -# under a certain configuration. +# Expands a given CPU model, @model, (or a combination of CPU model + +# additional options) to different granularities, specified by +# @type, allowing tooling to get an understanding what a specific +# CPU model looks like in QEMU under a certain configuration. # # This interface can be used to query the "host" CPU model. # @@ -263,11 +281,18 @@ # Some architectures may not support all expansion types. s390x # supports "full" and "static". Arm only supports "full". # -# Returns: a CpuModelExpansionInfo. Returns an error if expanding CPU -# models is not supported, if the model cannot be expanded, if the -# model contains an unknown CPU definition name, unknown -# properties or properties with a wrong type. Also returns an -# error if an expansion type is not supported. +# @model: description of the CPU model to expand +# +# @type: expansion type, specifying how to expand the CPU model +# +# Returns: a CpuModelExpansionInfo describing the expanded CPU model +# +# Errors: +# - if expanding CPU models is not supported +# - if the model cannot be expanded +# - if the model contains an unknown CPU definition name, unknown +# properties or properties with a wrong type +# - if an expansion type is not supported # # Since: 2.8 ## @@ -385,9 +410,9 @@ ## # @set-cpu-topology: # -# Modify the topology by moving the CPU inside the topology tree, -# or by changing a modifier attribute of a CPU. -# Absent values will not be modified. +# Modify the topology by moving the CPU inside the topology tree, or +# by changing a modifier attribute of a CPU. Absent values will not +# be modified. # # @core-id: the vCPU ID to be moved # @@ -399,14 +424,13 @@ # # @entitlement: entitlement to set # -# @dedicated: whether the provisioning of real to virtual CPU is dedicated +# @dedicated: whether the provisioning of real to virtual CPU is +# dedicated # # Features: # # @unstable: This command is experimental. # -# Returns: Nothing on success. -# # Since: 8.2 ## { 'command': 'set-cpu-topology', @@ -428,14 +452,15 @@ # Emitted when the guest asks to change the polarization. # # The guest can tell the host (via the PTF instruction) whether the -# CPUs should be provisioned using horizontal or vertical polarization. +# CPUs should be provisioned using horizontal or vertical +# polarization. 
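A common use of query-cpu-model-expansion is expanding the "host" model into a static, migration-safe description that tooling can reuse on a destination. A sketch under the same qmp(command, arguments) assumption; whether "static" expansion and the "host" model are available depends on the architecture, as the documentation above explains.

def expand_host_model(qmp, expansion_type="static"):
    """Expand the 'host' CPU model to a (name, props) pair."""
    reply = qmp("query-cpu-model-expansion",
                {"type": expansion_type, "model": {"name": "host"}})
    if "error" in reply:
        raise RuntimeError(reply["error"]["desc"])
    model = reply["return"]["model"]
    return model["name"], model.get("props", {})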
# -# On horizontal polarization the host is expected to provision all vCPUs -# equally. +# On horizontal polarization the host is expected to provision all +# vCPUs equally. # -# On vertical polarization the host can provision each vCPU differently. -# The guest will get information on the details of the provisioning -# the next time it uses the STSI(15) instruction. +# On vertical polarization the host can provision each vCPU +# differently. The guest will get information on the details of the +# provisioning the next time it uses the STSI(15) instruction. # # @polarization: polarization specified by the guest # @@ -447,9 +472,9 @@ # # Example: # -# <- { "event": "CPU_POLARIZATION_CHANGE", -# "data": { "polarization": "horizontal" }, -# "timestamp": { "seconds": 1401385907, "microseconds": 422329 } } +# <- { "event": "CPU_POLARIZATION_CHANGE", +# "data": { "polarization": "horizontal" }, +# "timestamp": { "seconds": 1401385907, "microseconds": 422329 } } ## { 'event': 'CPU_POLARIZATION_CHANGE', 'data': { 'polarization': 'CpuS390Polarization' }, diff --git a/qapi/machine.json b/qapi/machine.json index b6d634b30d5..e8b60641f23 100644 --- a/qapi/machine.json +++ b/qapi/machine.json @@ -106,32 +106,32 @@ # # Example: # -# -> { "execute": "query-cpus-fast" } -# <- { "return": [ -# { -# "thread-id": 25627, -# "props": { -# "core-id": 0, -# "thread-id": 0, -# "socket-id": 0 +# -> { "execute": "query-cpus-fast" } +# <- { "return": [ +# { +# "thread-id": 25627, +# "props": { +# "core-id": 0, +# "thread-id": 0, +# "socket-id": 0 +# }, +# "qom-path": "/machine/unattached/device[0]", +# "target":"x86_64", +# "cpu-index": 0 # }, -# "qom-path": "/machine/unattached/device[0]", -# "target":"x86_64", -# "cpu-index": 0 -# }, -# { -# "thread-id": 25628, -# "props": { -# "core-id": 0, -# "thread-id": 0, -# "socket-id": 1 -# }, -# "qom-path": "/machine/unattached/device[2]", -# "target":"x86_64", -# "cpu-index": 1 -# } -# ] -# } +# { +# "thread-id": 25628, +# "props": { +# "core-id": 0, +# "thread-id": 0, +# "socket-id": 1 +# }, +# "qom-path": "/machine/unattached/device[2]", +# "target":"x86_64", +# "cpu-index": 1 +# } +# ] +# } ## { 'command': 'query-cpus-fast', 'returns': [ 'CpuInfoFast' ] } @@ -258,8 +258,8 @@ # # Example: # -# -> { "execute": "query-uuid" } -# <- { "return": { "UUID": "550e8400-e29b-41d4-a716-446655440000" } } +# -> { "execute": "query-uuid" } +# <- { "return": { "UUID": "550e8400-e29b-41d4-a716-446655440000" } } ## { 'command': 'query-uuid', 'returns': 'UuidInfo', 'allow-preconfig': true } @@ -292,8 +292,8 @@ # # Example: # -# -> { "execute": "system_reset" } -# <- { "return": {} } +# -> { "execute": "system_reset" } +# <- { "return": {} } ## { 'command': 'system_reset' } @@ -311,8 +311,8 @@ # # Example: # -# -> { "execute": "system_powerdown" } -# <- { "return": {} } +# -> { "execute": "system_powerdown" } +# <- { "return": {} } ## { 'command': 'system_powerdown' } @@ -326,15 +326,13 @@ # # Since: 1.1 # -# Returns: nothing. -# # Note: prior to 4.0, this command does nothing in case the guest # isn't suspended. # # Example: # -# -> { "execute": "system_wakeup" } -# <- { "return": {} } +# -> { "execute": "system_wakeup" } +# <- { "return": {} } ## { 'command': 'system_wakeup' } @@ -377,8 +375,6 @@ # all CPUs (ppc64). The command fails when the guest doesn't support # injecting. 
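query-cpus-fast is the cheap way to enumerate vCPUs (it never interrupts the guest), which makes it handy for mapping vCPU indexes to the host threads backing them, e.g. for pinning. A sketch under the same qmp(command, arguments) assumption:

def vcpu_thread_map(qmp):
    """Map each vCPU index to the host thread id backing it."""
    cpus = qmp("query-cpus-fast", None)["return"]
    return {cpu["cpu-index"]: cpu["thread-id"] for cpu in cpus}

# For the query-cpus-fast example output above this yields {0: 25627, 1: 25628}.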
# -# Returns: If successful, nothing -# # Since: 0.14 # # Note: prior to 2.1, this command was only supported for x86 and s390 @@ -386,8 +382,8 @@ # # Example: # -# -> { "execute": "inject-nmi" } -# <- { "return": {} } +# -> { "execute": "inject-nmi" } +# <- { "return": {} } ## { 'command': 'inject-nmi' } @@ -415,8 +411,8 @@ # # Example: # -# -> { "execute": "query-kvm" } -# <- { "return": { "enabled": true, "present": true } } +# -> { "execute": "query-kvm" } +# <- { "return": { "enabled": true, "present": true } } ## { 'command': 'query-kvm', 'returns': 'KvmInfo' } @@ -443,6 +439,8 @@ # # A discriminated record of NUMA options. (for OptsVisitor) # +# @type: NUMA option type +# # Since: 2.1 ## { 'union': 'NumaOptions', @@ -776,19 +774,17 @@ # @cpu-index: the index of the virtual CPU to use for translating the # virtual address (defaults to CPU 0) # -# Returns: Nothing on success -# # Since: 0.14 # # Notes: Errors were not reliably returned until 1.1 # # Example: # -# -> { "execute": "memsave", -# "arguments": { "val": 10, -# "size": 100, -# "filename": "/tmp/virtual-mem-dump" } } -# <- { "return": {} } +# -> { "execute": "memsave", +# "arguments": { "val": 10, +# "size": 100, +# "filename": "/tmp/virtual-mem-dump" } } +# <- { "return": {} } ## { 'command': 'memsave', 'data': {'val': 'int', 'size': 'int', 'filename': 'str', '*cpu-index': 'int'} } @@ -804,19 +800,17 @@ # # @filename: the file to save the memory to as binary data # -# Returns: Nothing on success -# # Since: 0.14 # # Notes: Errors were not reliably returned until 1.1 # # Example: # -# -> { "execute": "pmemsave", -# "arguments": { "val": 10, -# "size": 100, -# "filename": "/tmp/physical-mem-dump" } } -# <- { "return": {} } +# -> { "execute": "pmemsave", +# "arguments": { "val": 10, +# "size": 100, +# "filename": "/tmp/physical-mem-dump" } } +# <- { "return": {} } ## { 'command': 'pmemsave', 'data': {'val': 'int', 'size': 'int', 'filename': 'str'} } @@ -873,29 +867,29 @@ # # Example: # -# -> { "execute": "query-memdev" } -# <- { "return": [ -# { -# "id": "mem1", -# "size": 536870912, -# "merge": false, -# "dump": true, -# "prealloc": false, -# "share": false, -# "host-nodes": [0, 1], -# "policy": "bind" -# }, -# { -# "size": 536870912, -# "merge": false, -# "dump": true, -# "prealloc": true, -# "share": false, -# "host-nodes": [2, 3], -# "policy": "preferred" +# -> { "execute": "query-memdev" } +# <- { "return": [ +# { +# "id": "mem1", +# "size": 536870912, +# "merge": false, +# "dump": true, +# "prealloc": false, +# "share": false, +# "host-nodes": [0, 1], +# "policy": "bind" +# }, +# { +# "size": 536870912, +# "merge": false, +# "dump": true, +# "prealloc": true, +# "share": false, +# "host-nodes": [2, 3], +# "policy": "preferred" +# } +# ] # } -# ] -# } ## { 'command': 'query-memdev', 'returns': ['Memdev'], 'allow-preconfig': true } @@ -926,13 +920,12 @@ # @socket-id: socket number within parent container the CPU belongs to # # @die-id: die number within the parent container the CPU belongs to -# (since 4.1) +# (since 4.1) # # @cluster-id: cluster number within the parent container the CPU # belongs to (since 7.1) # -# @core-id: core number within the parent container the CPU -# belongs to +# @core-id: core number within the parent container the CPU belongs to # # @thread-id: thread number within the core the CPU belongs to # @@ -988,47 +981,47 @@ # # Examples: # -# For pseries machine type started with -smp 2,cores=2,maxcpus=4 -cpu -# POWER8: -# -# -> { "execute": "query-hotpluggable-cpus" } -# <- {"return": [ -# { "props": 
{ "core-id": 8 }, "type": "POWER8-spapr-cpu-core", -# "vcpus-count": 1 }, -# { "props": { "core-id": 0 }, "type": "POWER8-spapr-cpu-core", -# "vcpus-count": 1, "qom-path": "/machine/unattached/device[0]"} -# ]}' -# -# For pc machine type started with -smp 1,maxcpus=2: -# -# -> { "execute": "query-hotpluggable-cpus" } -# <- {"return": [ -# { -# "type": "qemu64-x86_64-cpu", "vcpus-count": 1, -# "props": {"core-id": 0, "socket-id": 1, "thread-id": 0} -# }, -# { -# "qom-path": "/machine/unattached/device[0]", -# "type": "qemu64-x86_64-cpu", "vcpus-count": 1, -# "props": {"core-id": 0, "socket-id": 0, "thread-id": 0} -# } -# ]} -# -# For s390x-virtio-ccw machine type started with -smp 1,maxcpus=2 -cpu -# qemu (Since: 2.11): -# -# -> { "execute": "query-hotpluggable-cpus" } -# <- {"return": [ -# { -# "type": "qemu-s390x-cpu", "vcpus-count": 1, -# "props": { "core-id": 1 } -# }, -# { -# "qom-path": "/machine/unattached/device[0]", -# "type": "qemu-s390x-cpu", "vcpus-count": 1, -# "props": { "core-id": 0 } -# } -# ]} +# For pseries machine type started with -smp 2,cores=2,maxcpus=4 +# -cpu POWER8: +# +# -> { "execute": "query-hotpluggable-cpus" } +# <- {"return": [ +# { "props": { "core-id": 8 }, "type": "POWER8-spapr-cpu-core", +# "vcpus-count": 1 }, +# { "props": { "core-id": 0 }, "type": "POWER8-spapr-cpu-core", +# "vcpus-count": 1, "qom-path": "/machine/unattached/device[0]"} +# ]}' +# +# For pc machine type started with -smp 1,maxcpus=2: +# +# -> { "execute": "query-hotpluggable-cpus" } +# <- {"return": [ +# { +# "type": "qemu64-x86_64-cpu", "vcpus-count": 1, +# "props": {"core-id": 0, "socket-id": 1, "thread-id": 0} +# }, +# { +# "qom-path": "/machine/unattached/device[0]", +# "type": "qemu64-x86_64-cpu", "vcpus-count": 1, +# "props": {"core-id": 0, "socket-id": 0, "thread-id": 0} +# } +# ]} +# +# For s390x-virtio-ccw machine type started with -smp 1,maxcpus=2 +# -cpu qemu (Since: 2.11): +# +# -> { "execute": "query-hotpluggable-cpus" } +# <- {"return": [ +# { +# "type": "qemu-s390x-cpu", "vcpus-count": 1, +# "props": { "core-id": 1 } +# }, +# { +# "qom-path": "/machine/unattached/device[0]", +# "type": "qemu-s390x-cpu", "vcpus-count": 1, +# "props": { "core-id": 0 } +# } +# ]} ## { 'command': 'query-hotpluggable-cpus', 'returns': ['HotpluggableCPU'], 'allow-preconfig': true } @@ -1058,11 +1051,10 @@ # # From it we have: balloon_size = vm_ram_size - @value # -# Returns: -# - Nothing on success -# - If the balloon driver is enabled but not functional because the -# KVM kernel module cannot support it, KVMMissingCap -# - If no balloon device is present, DeviceNotActive +# Errors: +# - If the balloon driver is enabled but not functional because +# the KVM kernel module cannot support it, KVMMissingCap +# - If no balloon device is present, DeviceNotActive # # Notes: This command just issues a request to the guest. When it # returns, the balloon size may not have changed. A guest can @@ -1072,10 +1064,10 @@ # # Example: # -# -> { "execute": "balloon", "arguments": { "value": 536870912 } } -# <- { "return": {} } +# -> { "execute": "balloon", "arguments": { "value": 536870912 } } +# <- { "return": {} } # -# With a 2.5GiB guest this command inflated the ballon to 3GiB. +# With a 2.5GiB guest this command inflated the ballon to 3GiB. ## { 'command': 'balloon', 'data': {'value': 'int'} } @@ -1097,20 +1089,22 @@ # Return information about the balloon device. 
# # Returns: -# - @BalloonInfo on success -# - If the balloon driver is enabled but not functional because the -# KVM kernel module cannot support it, KVMMissingCap -# - If no balloon device is present, DeviceNotActive +# @BalloonInfo +# +# Errors: +# - If the balloon driver is enabled but not functional because +# the KVM kernel module cannot support it, KVMMissingCap +# - If no balloon device is present, DeviceNotActive # # Since: 0.14 # # Example: # -# -> { "execute": "query-balloon" } -# <- { "return": { -# "actual": 1073741824 -# } -# } +# -> { "execute": "query-balloon" } +# <- { "return": { +# "actual": 1073741824 +# } +# } ## { 'command': 'query-balloon', 'returns': 'BalloonInfo' } @@ -1130,9 +1124,9 @@ # # Example: # -# <- { "event": "BALLOON_CHANGE", -# "data": { "actual": 944766976 }, -# "timestamp": { "seconds": 1267020223, "microseconds": 435656 } } +# <- { "event": "BALLOON_CHANGE", +# "data": { "actual": 944766976 }, +# "timestamp": { "seconds": 1267020223, "microseconds": 435656 } } ## { 'event': 'BALLOON_CHANGE', 'data': { 'actual': 'int' } } @@ -1157,25 +1151,27 @@ ## # @query-hv-balloon-status-report: # -# Returns the hv-balloon driver data contained in the last received "STATUS" -# message from the guest. +# Returns the hv-balloon driver data contained in the last received +# "STATUS" message from the guest. # # Returns: -# - @HvBalloonInfo on success -# - If no hv-balloon device is present, guest memory status reporting -# is not enabled or no guest memory status report received yet, -# GenericError +# @HvBalloonInfo +# +# Errors: +# - If no hv-balloon device is present, guest memory status +# reporting is not enabled or no guest memory status report +# received yet, GenericError # # Since: 8.2 # # Example: # -# -> { "execute": "query-hv-balloon-status-report" } -# <- { "return": { -# "committed": 816640000, -# "available": 3333054464 -# } -# } +# -> { "execute": "query-hv-balloon-status-report" } +# <- { "return": { +# "committed": 816640000, +# "available": 3333054464 +# } +# } ## { 'command': 'query-hv-balloon-status-report', 'returns': 'HvBalloonInfo' } @@ -1191,10 +1187,9 @@ # # Example: # -# <- { "event": "HV_BALLOON_STATUS_REPORT", -# "data": { "committed": 816640000, "available": 3333054464 }, -# "timestamp": { "seconds": 1600295492, "microseconds": 661044 } } -# +# <- { "event": "HV_BALLOON_STATUS_REPORT", +# "data": { "committed": 816640000, "available": 3333054464 }, +# "timestamp": { "seconds": 1600295492, "microseconds": 661044 } } ## { 'event': 'HV_BALLOON_STATUS_REPORT', 'data': 'HvBalloonInfo' } @@ -1224,8 +1219,8 @@ # # Example: # -# -> { "execute": "query-memory-size-summary" } -# <- { "return": { "base-memory": 4294967296, "plugged-memory": 0 } } +# -> { "execute": "query-memory-size-summary" } +# <- { "return": { "base-memory": 4294967296, "plugged-memory": 0 } } # # Since: 2.11 ## @@ -1396,6 +1391,8 @@ ## # @PCDIMMDeviceInfoWrapper: # +# @data: PCDIMMDevice state information +# # Since: 2.1 ## { 'struct': 'PCDIMMDeviceInfoWrapper', @@ -1404,6 +1401,8 @@ ## # @VirtioPMEMDeviceInfoWrapper: # +# @data: VirtioPMEM state information +# # Since: 2.1 ## { 'struct': 'VirtioPMEMDeviceInfoWrapper', @@ -1412,6 +1411,8 @@ ## # @VirtioMEMDeviceInfoWrapper: # +# @data: VirtioMEMDevice state information +# # Since: 2.1 ## { 'struct': 'VirtioMEMDeviceInfoWrapper', @@ -1420,6 +1421,8 @@ ## # @SgxEPCDeviceInfoWrapper: # +# @data: Sgx EPC state information +# # Since: 6.2 ## { 'struct': 'SgxEPCDeviceInfoWrapper', @@ -1428,6 +1431,8 @@ ## # 
@HvBalloonDeviceInfoWrapper: # +# @data: hv-balloon provided memory state information +# # Since: 8.2 ## { 'struct': 'HvBalloonDeviceInfoWrapper', @@ -1438,6 +1443,8 @@ # # Union containing information about a memory device # +# @type: memory device type +# # Since: 2.1 ## { 'union': 'MemoryDeviceInfo', @@ -1491,18 +1498,18 @@ # # Example: # -# -> { "execute": "query-memory-devices" } -# <- { "return": [ { "data": -# { "addr": 5368709120, -# "hotpluggable": true, -# "hotplugged": true, -# "id": "d1", -# "memdev": "/objects/memX", -# "node": 0, -# "size": 1073741824, -# "slot": 0}, -# "type": "dimm" -# } ] } +# -> { "execute": "query-memory-devices" } +# <- { "return": [ { "data": +# { "addr": 5368709120, +# "hotpluggable": true, +# "hotplugged": true, +# "id": "d1", +# "memdev": "/objects/memX", +# "node": 0, +# "size": 1073741824, +# "slot": 0}, +# "type": "dimm" +# } ] } ## { 'command': 'query-memory-devices', 'returns': ['MemoryDeviceInfo'] } @@ -1525,10 +1532,10 @@ # # Example: # -# <- { "event": "MEMORY_DEVICE_SIZE_CHANGE", -# "data": { "id": "vm0", "size": 1073741824, -# "qom-path": "/machine/unattached/device[2]" }, -# "timestamp": { "seconds": 1588168529, "microseconds": 201316 } } +# <- { "event": "MEMORY_DEVICE_SIZE_CHANGE", +# "data": { "id": "vm0", "size": 1073741824, +# "qom-path": "/machine/unattached/device[2]" }, +# "timestamp": { "seconds": 1588168529, "microseconds": 201316 } } ## { 'event': 'MEMORY_DEVICE_SIZE_CHANGE', 'data': { '*id': 'str', 'size': 'size', 'qom-path' : 'str'} } @@ -1551,11 +1558,11 @@ # # Example: # -# <- { "event": "MEM_UNPLUG_ERROR", -# "data": { "device": "dimm1", -# "msg": "acpi: device unplug for unsupported device" -# }, -# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } +# <- { "event": "MEM_UNPLUG_ERROR", +# "data": { "device": "dimm1", +# "msg": "acpi: device unplug for unsupported device" +# }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } ## { 'event': 'MEM_UNPLUG_ERROR', 'data': { 'device': 'str', 'msg': 'str' }, @@ -1788,10 +1795,13 @@ # # @64: SMBIOS version 3.0 (64-bit) Entry Point # +# @auto: Either 2.x or 3.x SMBIOS version, 2.x if configuration can be +# described by it and 3.x otherwise (since: 9.0) +# # Since: 7.0 ## { 'enum': 'SmbiosEntryPointType', - 'data': [ '32', '64' ] } + 'data': [ '32', '64', 'auto' ] } ## # @MemorySizeConfiguration: @@ -1822,9 +1832,9 @@ # # Example: # -# -> { "execute": "dumpdtb" } -# "arguments": { "filename": "fdt.dtb" } } -# <- { "return": {} } +# -> { "execute": "dumpdtb" } +# "arguments": { "filename": "fdt.dtb" } } +# <- { "return": {} } ## { 'command': 'dumpdtb', 'data': { 'filename': 'str' }, diff --git a/qapi/meson.build b/qapi/meson.build index f81a37565ca..375d564277c 100644 --- a/qapi/meson.build +++ b/qapi/meson.build @@ -33,6 +33,7 @@ qapi_all_modules = [ 'crypto', 'cxl', 'dump', + 'ebpf', 'error', 'introspect', 'job', diff --git a/qapi/migration.json b/qapi/migration.json index 197d3faa43f..8c65b903288 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -23,8 +23,8 @@ # # @duplicate: number of duplicate (zero) pages (since 1.2) # -# @skipped: number of skipped zero pages. Always zero, only provided for -# compatibility (since 1.5) +# @skipped: number of skipped zero pages. 
Always zero, only provided +# for compatibility (since 1.5) # # @normal: number of normal pages (since 1.2) # @@ -68,7 +68,6 @@ # @deprecated: Member @skipped is always zero since 1.5.3 # # Since: 0.14 -# ## { 'struct': 'MigrationStats', 'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' , @@ -230,7 +229,7 @@ # throttled during auto-converge. This is only present when # auto-converge has started throttling guest cpus. (Since 2.7) # -# @error-desc: the human readable error description string. Clients +# @error-desc: the human readable error description string. Clients # should not attempt to parse the error strings. (Since 2.7) # # @postcopy-blocktime: total time when all vCPU were blocked during @@ -312,112 +311,112 @@ # # Examples: # -# 1. Before the first migration -# -# -> { "execute": "query-migrate" } -# <- { "return": {} } -# -# 2. Migration is done and has succeeded -# -# -> { "execute": "query-migrate" } -# <- { "return": { -# "status": "completed", -# "total-time":12345, -# "setup-time":12345, -# "downtime":12345, -# "ram":{ -# "transferred":123, -# "remaining":123, -# "total":246, -# "duplicate":123, -# "normal":123, -# "normal-bytes":123456, -# "dirty-sync-count":15 -# } -# } -# } -# -# 3. Migration is done and has failed -# -# -> { "execute": "query-migrate" } -# <- { "return": { "status": "failed" } } -# -# 4. Migration is being performed and is not a block migration: -# -# -> { "execute": "query-migrate" } -# <- { -# "return":{ -# "status":"active", -# "total-time":12345, -# "setup-time":12345, -# "expected-downtime":12345, -# "ram":{ -# "transferred":123, -# "remaining":123, -# "total":246, -# "duplicate":123, -# "normal":123, -# "normal-bytes":123456, -# "dirty-sync-count":15 -# } -# } -# } -# -# 5. Migration is being performed and is a block migration: -# -# -> { "execute": "query-migrate" } -# <- { -# "return":{ -# "status":"active", -# "total-time":12345, -# "setup-time":12345, -# "expected-downtime":12345, -# "ram":{ -# "total":1057024, -# "remaining":1053304, -# "transferred":3720, -# "duplicate":123, -# "normal":123, -# "normal-bytes":123456, -# "dirty-sync-count":15 -# }, -# "disk":{ -# "total":20971520, -# "remaining":20880384, -# "transferred":91136 -# } -# } -# } -# -# 6. Migration is being performed and XBZRLE is active: -# -# -> { "execute": "query-migrate" } -# <- { -# "return":{ -# "status":"active", -# "total-time":12345, -# "setup-time":12345, -# "expected-downtime":12345, -# "ram":{ -# "total":1057024, -# "remaining":1053304, -# "transferred":3720, -# "duplicate":10, -# "normal":3333, -# "normal-bytes":3412992, -# "dirty-sync-count":15 -# }, -# "xbzrle-cache":{ -# "cache-size":67108864, -# "bytes":20971520, -# "pages":2444343, -# "cache-miss":2244, -# "cache-miss-rate":0.123, -# "encoding-rate":80.1, -# "overflow":34434 +# 1. Before the first migration +# +# -> { "execute": "query-migrate" } +# <- { "return": {} } +# +# 2. Migration is done and has succeeded +# +# -> { "execute": "query-migrate" } +# <- { "return": { +# "status": "completed", +# "total-time":12345, +# "setup-time":12345, +# "downtime":12345, +# "ram":{ +# "transferred":123, +# "remaining":123, +# "total":246, +# "duplicate":123, +# "normal":123, +# "normal-bytes":123456, +# "dirty-sync-count":15 +# } # } -# } -# } +# } +# +# 3. Migration is done and has failed +# +# -> { "execute": "query-migrate" } +# <- { "return": { "status": "failed" } } +# +# 4. 
Migration is being performed and is not a block migration: +# +# -> { "execute": "query-migrate" } +# <- { +# "return":{ +# "status":"active", +# "total-time":12345, +# "setup-time":12345, +# "expected-downtime":12345, +# "ram":{ +# "transferred":123, +# "remaining":123, +# "total":246, +# "duplicate":123, +# "normal":123, +# "normal-bytes":123456, +# "dirty-sync-count":15 +# } +# } +# } +# +# 5. Migration is being performed and is a block migration: +# +# -> { "execute": "query-migrate" } +# <- { +# "return":{ +# "status":"active", +# "total-time":12345, +# "setup-time":12345, +# "expected-downtime":12345, +# "ram":{ +# "total":1057024, +# "remaining":1053304, +# "transferred":3720, +# "duplicate":123, +# "normal":123, +# "normal-bytes":123456, +# "dirty-sync-count":15 +# }, +# "disk":{ +# "total":20971520, +# "remaining":20880384, +# "transferred":91136 +# } +# } +# } +# +# 6. Migration is being performed and XBZRLE is active: +# +# -> { "execute": "query-migrate" } +# <- { +# "return":{ +# "status":"active", +# "total-time":12345, +# "setup-time":12345, +# "expected-downtime":12345, +# "ram":{ +# "total":1057024, +# "remaining":1053304, +# "transferred":3720, +# "duplicate":10, +# "normal":3333, +# "normal-bytes":3412992, +# "dirty-sync-count":15 +# }, +# "xbzrle-cache":{ +# "cache-size":67108864, +# "bytes":20971520, +# "pages":2444343, +# "cache-miss":2244, +# "cache-miss-rate":0.123, +# "encoding-rate":80.1, +# "overflow":34434 +# } +# } +# } ## { 'command': 'query-migrate', 'returns': 'MigrationInfo' } @@ -501,8 +500,8 @@ # # @background-snapshot: If enabled, the migration stream will be a # snapshot of the VM exactly at the point when the migration -# procedure starts. The VM RAM is saved with running VM. (since -# 6.0) +# procedure starts. The VM RAM is saved with running VM. +# (since 6.0) # # @zero-copy-send: Controls behavior on sending memory pages on # migration. When true, enables a zero-copy mechanism for sending @@ -531,10 +530,14 @@ # and can result in more stable read performance. Requires KVM # with accelerator property "dirty-ring-size" set. (Since 8.1) # +# @mapped-ram: Migrate using fixed offsets in the migration file for +# each RAM page. Requires a migration URI that supports seeking, +# such as a file. (since 9.0) +# # Features: # # @deprecated: Member @block is deprecated. Use blockdev-mirror with -# NBD instead. Member @compression is deprecated because it is +# NBD instead. Member @compress is deprecated because it is # unreliable and untested. It is recommended to use multifd # migration, which offers an alternative compression # implementation that is reliable and tested. 
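The status values shown in these examples ("active", "completed", "failed", an empty return before the first migration) are what a management application polls on the source side. A possible progress loop under the same qmp(command, arguments) assumption:

import time

def wait_for_migration(qmp, poll_interval=1.0):
    """Poll query-migrate until the migration reaches a terminal state."""
    while True:
        info = qmp("query-migrate", None)["return"]
        status = info.get("status", "none")          # {} before a migration has started
        ram = info.get("ram", {})
        print("status=%s transferred=%s remaining=%s"
              % (status, ram.get("transferred"), ram.get("remaining")))
        if status in ("completed", "failed", "cancelled"):
            return info
        time.sleep(poll_interval)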
@@ -555,7 +558,7 @@ { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] }, 'validate-uuid', 'background-snapshot', 'zero-copy-send', 'postcopy-preempt', 'switchover-ack', - 'dirty-limit'] } + 'dirty-limit', 'mapped-ram'] } ## # @MigrationCapabilityStatus: @@ -582,9 +585,9 @@ # # Example: # -# -> { "execute": "migrate-set-capabilities" , "arguments": -# { "capabilities": [ { "capability": "xbzrle", "state": true } ] } } -# <- { "return": {} } +# -> { "execute": "migrate-set-capabilities" , "arguments": +# { "capabilities": [ { "capability": "xbzrle", "state": true } ] } } +# <- { "return": {} } ## { 'command': 'migrate-set-capabilities', 'data': { 'capabilities': ['MigrationCapabilityStatus'] } } @@ -600,17 +603,17 @@ # # Example: # -# -> { "execute": "query-migrate-capabilities" } -# <- { "return": [ -# {"state": false, "capability": "xbzrle"}, -# {"state": false, "capability": "rdma-pin-all"}, -# {"state": false, "capability": "auto-converge"}, -# {"state": false, "capability": "zero-blocks"}, -# {"state": false, "capability": "compress"}, -# {"state": true, "capability": "events"}, -# {"state": false, "capability": "postcopy-ram"}, -# {"state": false, "capability": "x-colo"} -# ]} +# -> { "execute": "query-migrate-capabilities" } +# <- { "return": [ +# {"state": false, "capability": "xbzrle"}, +# {"state": false, "capability": "rdma-pin-all"}, +# {"state": false, "capability": "auto-converge"}, +# {"state": false, "capability": "zero-blocks"}, +# {"state": false, "capability": "compress"}, +# {"state": true, "capability": "events"}, +# {"state": false, "capability": "postcopy-ram"}, +# {"state": false, "capability": "x-colo"} +# ]} ## { 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']} @@ -634,25 +637,54 @@ ## # @MigMode: # -# @normal: the original form of migration. (since 8.2) -# -# @cpr-reboot: The migrate command saves state to a file, allowing one to -# quit qemu, reboot to an updated kernel, and restart an updated -# version of qemu. The caller must specify a migration URI -# that writes to and reads from a file. Unlike normal mode, -# the use of certain local storage options does not block the -# migration, but the caller must not modify guest block devices -# between the quit and restart. To avoid saving guest RAM to the -# file, the memory backend must be shared, and the @x-ignore-shared -# migration capability must be set. Guest RAM must be non-volatile -# across reboot, such as by backing it with a dax device, but this -# is not enforced. The restarted qemu arguments must match those -# used to initially start qemu, plus the -incoming option. -# (since 8.2) +# @normal: the original form of migration. (since 8.2) +# +# @cpr-reboot: The migrate command stops the VM and saves state to the +# URI. After quitting QEMU, the user resumes by running QEMU +# -incoming. +# +# This mode allows the user to quit QEMU, optionally update and +# reboot the OS, and restart QEMU. If the user reboots, the URI +# must persist across the reboot, such as by using a file. +# +# Unlike normal mode, the use of certain local storage options +# does not block the migration, but the user must not modify the +# contents of guest block devices between the quit and restart. +# +# This mode supports VFIO devices provided the user first puts the +# guest in the suspended runstate, such as by issuing +# guest-suspend-ram to the QEMU guest agent. 
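Putting the cpr-reboot description into practice, the caller switches the migration mode, optionally sets x-ignore-shared when RAM sits on a shared backend, and then migrates to a file URI that survives the reboot. A sketch under the same qmp(command, arguments) assumption; the state file path is an assumption and x-ignore-shared is an unstable capability.

def start_cpr_reboot(qmp, state_file, ignore_shared=False):
    """Switch to cpr-reboot mode and save state to a file for a later -incoming run."""
    qmp("migrate-set-parameters", {"mode": "cpr-reboot"})
    if ignore_shared:                    # only useful with a shared memory backend
        qmp("migrate-set-capabilities",
            {"capabilities": [{"capability": "x-ignore-shared", "state": True}]})
    return qmp("migrate", {"uri": "file:" + state_file})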
+# +# Best performance is achieved when the memory backend is shared +# and the @x-ignore-shared migration capability is set, but this +# is not required. Further, if the user reboots before restarting +# such a configuration, the shared memory must persist across the +# reboot, such as by backing it with a dax device. +# +# @cpr-reboot may not be used with postcopy, background-snapshot, +# or COLO. +# +# (since 8.2) ## { 'enum': 'MigMode', 'data': [ 'normal', 'cpr-reboot' ] } +## +# @ZeroPageDetection: +# +# @none: Do not perform zero page checking. +# +# @legacy: Perform zero page checking in main migration thread. +# +# @multifd: Perform zero page checking in multifd sender thread if +# multifd migration is enabled, else in the main migration thread +# as for @legacy. +# +# Since: 9.0 +## +{ 'enum': 'ZeroPageDetection', + 'data': [ 'none', 'legacy', 'multifd' ] } + ## # @BitmapMigrationBitmapAliasTransform: # @@ -748,15 +780,15 @@ # # @throttle-trigger-threshold: The ratio of bytes_dirty_period and # bytes_xfer_period to trigger throttling. It is expressed as -# percentage. The default value is 50. (Since 5.0) +# percentage. The default value is 50. (Since 5.0) # # @cpu-throttle-initial: Initial percentage of time guest cpus are # throttled when migration auto-converge is activated. The -# default value is 20. (Since 2.7) +# default value is 20. (Since 2.7) # # @cpu-throttle-increment: throttle percentage increase each time # auto-converge detects that migration is not making progress. -# The default value is 10. (Since 2.7) +# The default value is 10. (Since 2.7) # # @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At # the tail stage of throttling, the Guest is very sensitive to CPU @@ -775,16 +807,19 @@ # for establishing a TLS connection over the migration data # channel. On the outgoing side of the migration, the credentials # must be for a 'client' endpoint, while for the incoming side the -# credentials must be for a 'server' endpoint. Setting this will -# enable TLS for all migrations. The default is unset, resulting -# in unsecured migration at the QEMU level. (Since 2.7) +# credentials must be for a 'server' endpoint. Setting this to a +# non-empty string enables TLS for all migrations. An empty +# string means that QEMU will use plain text mode for migration, +# rather than TLS. (Since 2.7) +# +# @tls-hostname: migration target's hostname for validating the +# server's x509 certificate identity. If empty, QEMU will use the +# hostname from the migration URI, if any. A non-empty value is +# required when using x509 based TLS credentials and the migration +# URI does not include a hostname, such as fd: or exec: based +# migration. (Since 2.7) # -# @tls-hostname: hostname of the target host for the migration. This -# is required when using x509 based TLS credentials and the -# migration URI does not already include a hostname. For example -# if using fd: or exec: based migration, the hostname must be -# provided so that the server's x509 certificate identity can be -# validated. (Since 2.7) +# Note: empty value works only since 2.9. # # @tls-authz: ID of the 'authz' object subclass that provides access # control checking of the TLS x509 certificate distinguished name. @@ -792,18 +827,19 @@ # and recreated on the fly while the migration server is active. # If missing, it will default to denying access (Since 4.0) # -# @max-bandwidth: to set maximum speed for migration. maximum speed -# in bytes per second. 
(Since 2.8) +# @max-bandwidth: maximum speed for migration, in bytes per second. +# (Since 2.8) # # @avail-switchover-bandwidth: to set the available bandwidth that # migration can use during switchover phase. NOTE! This does not -# limit the bandwidth during switchover, but only for calculations when -# making decisions to switchover. By default, this value is zero, -# which means QEMU will estimate the bandwidth automatically. This can -# be set when the estimated value is not accurate, while the user is -# able to guarantee such bandwidth is available when switching over. -# When specified correctly, this can make the switchover decision much -# more accurate. (Since 8.2) +# limit the bandwidth during switchover, but only for calculations +# when making decisions to switchover. By default, this value is +# zero, which means QEMU will estimate the bandwidth +# automatically. This can be set when the estimated value is not +# accurate, while the user is able to guarantee such bandwidth is +# available when switching over. When specified correctly, this +# can make the switchover decision much more accurate. +# (Since 8.2) # # @downtime-limit: set maximum tolerated downtime for migration. # maximum downtime in milliseconds (Since 2.8) @@ -840,13 +876,13 @@ # migration, the compression level is an integer between 0 and 9, # where 0 means no compression, 1 means the best compression # speed, and 9 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) # # @multifd-zstd-level: Set the compression level to be used in live # migration, the compression level is an integer between 0 and 20, # where 0 means no compression, 1 means the best compression # speed, and 20 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) # # @block-bitmap-mapping: Maps block nodes and bitmaps on them to # aliases for the purpose of dirty bitmap migration. Such aliases @@ -865,14 +901,18 @@ # to their node name otherwise. (Since 5.2) # # @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty -# limit during live migration. Should be in the range 1 to 1000ms. -# Defaults to 1000ms. (Since 8.1) +# limit during live migration. Should be in the range 1 to +# 1000ms. Defaults to 1000ms. (Since 8.1) # # @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. # Defaults to 1. (Since 8.1) # -# @mode: Migration mode. See description in @MigMode. Default is 'normal'. -# (Since 8.2) +# @mode: Migration mode. See description in @MigMode. Default is +# 'normal'. (Since 8.2) +# +# @zero-page-detection: Whether and how to detect zero pages. +# See description in @ZeroPageDetection. Default is 'multifd'. +# (since 9.0) # # Features: # @@ -881,8 +921,8 @@ # @compress-threads, @decompress-threads and @compress-wait-thread # are deprecated because @compression is deprecated. # -# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period -# are experimental. +# @unstable: Members @x-checkpoint-delay and +# @x-vcpu-dirty-limit-period are experimental. 
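
For illustration, selecting the new zero-page detection policy described above would go through the usual migrate-set-parameters flow; the member name and the value are the ones defined in this schema, while the exchange itself is only a sketch:

-> { "execute": "migrate-set-parameters",
     "arguments": { "zero-page-detection": "legacy" } }
<- { "return": {} }
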
# # Since: 2.4 ## @@ -907,7 +947,8 @@ 'block-bitmap-mapping', { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] }, 'vcpu-dirty-limit', - 'mode'] } + 'mode', + 'zero-page-detection'] } ## # @MigrateSetParameters: @@ -924,28 +965,38 @@ # @announce-step: Increase in delay (in milliseconds) between # subsequent packets in the announcement (Since 4.0) # -# @compress-level: compression level +# @compress-level: Set the compression level to be used in live +# migration, the compression level is an integer between 0 and 9, +# where 0 means no compression, 1 means the best compression +# speed, and 9 means best compression ratio which will consume +# more CPU. # -# @compress-threads: compression thread count +# @compress-threads: Set compression thread count to be used in live +# migration, the compression thread count is an integer between 1 +# and 255. # # @compress-wait-thread: Controls behavior when all compression # threads are currently busy. If true (default), wait for a free # compression thread to become available; otherwise, send the page # uncompressed. (Since 3.1) # -# @decompress-threads: decompression thread count +# @decompress-threads: Set decompression thread count to be used in +# live migration, the decompression thread count is an integer +# between 1 and 255. Usually, decompression is at least 4 times as +# fast as compression, so set the decompress-threads to the number +# about 1/4 of compress-threads is adequate. # # @throttle-trigger-threshold: The ratio of bytes_dirty_period and # bytes_xfer_period to trigger throttling. It is expressed as -# percentage. The default value is 50. (Since 5.0) +# percentage. The default value is 50. (Since 5.0) # # @cpu-throttle-initial: Initial percentage of time guest cpus are # throttled when migration auto-converge is activated. The -# default value is 20. (Since 2.7) +# default value is 20. (Since 2.7) # # @cpu-throttle-increment: throttle percentage increase each time # auto-converge detects that migration is not making progress. -# The default value is 10. (Since 2.7) +# The default value is 10. (Since 2.7) # # @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At # the tail stage of throttling, the Guest is very sensitive to CPU @@ -967,37 +1018,42 @@ # credentials must be for a 'server' endpoint. Setting this to a # non-empty string enables TLS for all migrations. An empty # string means that QEMU will use plain text mode for migration, -# rather than TLS (Since 2.9) Previously (since 2.7), this was -# reported by omitting tls-creds instead. -# -# @tls-hostname: hostname of the target host for the migration. This -# is required when using x509 based TLS credentials and the -# migration URI does not already include a hostname. For example -# if using fd: or exec: based migration, the hostname must be -# provided so that the server's x509 certificate identity can be -# validated. (Since 2.7) An empty string means that QEMU will use -# the hostname associated with the migration URI, if any. (Since -# 2.9) Previously (since 2.7), this was reported by omitting -# tls-hostname instead. -# -# @max-bandwidth: to set maximum speed for migration. maximum speed -# in bytes per second. (Since 2.8) +# rather than TLS. This is the default. (Since 2.7) +# +# @tls-hostname: migration target's hostname for validating the +# server's x509 certificate identity. If empty, QEMU will use the +# hostname from the migration URI, if any. 
A non-empty value is +# required when using x509 based TLS credentials and the migration +# URI does not include a hostname, such as fd: or exec: based +# migration. (Since 2.7) +# +# Note: empty value works only since 2.9. +# +# @tls-authz: ID of the 'authz' object subclass that provides access +# control checking of the TLS x509 certificate distinguished name. +# This object is only resolved at time of use, so can be deleted +# and recreated on the fly while the migration server is active. +# If missing, it will default to denying access (Since 4.0) +# +# @max-bandwidth: maximum speed for migration, in bytes per second. +# (Since 2.8) # # @avail-switchover-bandwidth: to set the available bandwidth that # migration can use during switchover phase. NOTE! This does not -# limit the bandwidth during switchover, but only for calculations when -# making decisions to switchover. By default, this value is zero, -# which means QEMU will estimate the bandwidth automatically. This can -# be set when the estimated value is not accurate, while the user is -# able to guarantee such bandwidth is available when switching over. -# When specified correctly, this can make the switchover decision much -# more accurate. (Since 8.2) +# limit the bandwidth during switchover, but only for calculations +# when making decisions to switchover. By default, this value is +# zero, which means QEMU will estimate the bandwidth +# automatically. This can be set when the estimated value is not +# accurate, while the user is able to guarantee such bandwidth is +# available when switching over. When specified correctly, this +# can make the switchover decision much more accurate. +# (Since 8.2) # # @downtime-limit: set maximum tolerated downtime for migration. # maximum downtime in milliseconds (Since 2.8) # -# @x-checkpoint-delay: the delay time between two COLO checkpoints. -# (Since 2.8) +# @x-checkpoint-delay: The delay time (in ms) between two COLO +# checkpoints in periodic mode. (Since 2.8) # # @block-incremental: Affects how much storage is migrated when the # block migration capability is enabled. When false, the entire @@ -1018,8 +1074,8 @@ # postcopy. Defaults to 0 (unlimited). In bytes per second. # (Since 3.0) # -# @max-cpu-throttle: maximum cpu throttle percentage. The default -# value is 99. (Since 3.1) +# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99. +# (Since 3.1) # # @multifd-compression: Which compression method to use. Defaults to # none. (Since 5.0) @@ -1028,13 +1084,13 @@ # migration, the compression level is an integer between 0 and 9, # where 0 means no compression, 1 means the best compression # speed, and 9 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) # # @multifd-zstd-level: Set the compression level to be used in live # migration, the compression level is an integer between 0 and 20, # where 0 means no compression, 1 means the best compression # speed, and 20 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) # # @block-bitmap-mapping: Maps block nodes and bitmaps on them to # aliases for the purpose of dirty bitmap migration. Such aliases @@ -1053,14 +1109,18 @@ # to their node name otherwise. (Since 5.2) # # @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty -# limit during live migration. Should be in the range 1 to 1000ms. -# Defaults to 1000ms. (Since 8.1) +# limit during live migration. 
Should be in the range 1 to +# 1000ms. Defaults to 1000ms. (Since 8.1) # # @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. # Defaults to 1. (Since 8.1) # -# @mode: Migration mode. See description in @MigMode. Default is 'normal'. -# (Since 8.2) +# @mode: Migration mode. See description in @MigMode. Default is +# 'normal'. (Since 8.2) +# +# @zero-page-detection: Whether and how to detect zero pages. +# See description in @ZeroPageDetection. Default is 'multifd'. +# (since 9.0) # # Features: # @@ -1069,8 +1129,8 @@ # @compress-threads, @decompress-threads and @compress-wait-thread # are deprecated because @compression is deprecated. # -# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period -# are experimental. +# @unstable: Members @x-checkpoint-delay and +# @x-vcpu-dirty-limit-period are experimental. # # TODO: either fuse back into MigrationParameters, or make # MigrationParameters members mandatory @@ -1115,7 +1175,8 @@ '*x-vcpu-dirty-limit-period': { 'type': 'uint64', 'features': [ 'unstable' ] }, '*vcpu-dirty-limit': 'uint64', - '*mode': 'MigMode'} } + '*mode': 'MigMode', + '*zero-page-detection': 'ZeroPageDetection'} } ## # @migrate-set-parameters: @@ -1126,9 +1187,9 @@ # # Example: # -# -> { "execute": "migrate-set-parameters" , -# "arguments": { "multifd-channels": 5 } } -# <- { "return": {} } +# -> { "execute": "migrate-set-parameters" , +# "arguments": { "multifd-channels": 5 } } +# <- { "return": {} } ## { 'command': 'migrate-set-parameters', 'boxed': true, 'data': 'MigrateSetParameters' } @@ -1163,7 +1224,7 @@ # # @throttle-trigger-threshold: The ratio of bytes_dirty_period and # bytes_xfer_period to trigger throttling. It is expressed as -# percentage. The default value is 50. (Since 5.0) +# percentage. The default value is 50. (Since 5.0) # # @cpu-throttle-initial: Initial percentage of time guest cpus are # throttled when migration auto-converge is activated. (Since @@ -1192,34 +1253,33 @@ # must be for a 'client' endpoint, while for the incoming side the # credentials must be for a 'server' endpoint. An empty string # means that QEMU will use plain text mode for migration, rather -# than TLS (Since 2.7) Note: 2.8 reports this by omitting -# tls-creds instead. -# -# @tls-hostname: hostname of the target host for the migration. This -# is required when using x509 based TLS credentials and the -# migration URI does not already include a hostname. For example -# if using fd: or exec: based migration, the hostname must be -# provided so that the server's x509 certificate identity can be -# validated. (Since 2.7) An empty string means that QEMU will use -# the hostname associated with the migration URI, if any. (Since -# 2.9) Note: 2.8 reports this by omitting tls-hostname instead. +# than TLS. (Since 2.7) +# +# Note: 2.8 omits empty @tls-creds instead. +# +# @tls-hostname: migration target's hostname for validating the +# server's x509 certificate identity. If empty, QEMU will use the +# hostname from the migration URI, if any. (Since 2.7) +# +# Note: 2.8 omits empty @tls-hostname instead. # # @tls-authz: ID of the 'authz' object subclass that provides access # control checking of the TLS x509 certificate distinguished name. # (Since 4.0) # -# @max-bandwidth: to set maximum speed for migration. maximum speed -# in bytes per second. (Since 2.8) +# @max-bandwidth: maximum speed for migration, in bytes per second. +# (Since 2.8) # # @avail-switchover-bandwidth: to set the available bandwidth that # migration can use during switchover phase. NOTE! 
This does not -# limit the bandwidth during switchover, but only for calculations when -# making decisions to switchover. By default, this value is zero, -# which means QEMU will estimate the bandwidth automatically. This can -# be set when the estimated value is not accurate, while the user is -# able to guarantee such bandwidth is available when switching over. -# When specified correctly, this can make the switchover decision much -# more accurate. (Since 8.2) +# limit the bandwidth during switchover, but only for calculations +# when making decisions to switchover. By default, this value is +# zero, which means QEMU will estimate the bandwidth +# automatically. This can be set when the estimated value is not +# accurate, while the user is able to guarantee such bandwidth is +# available when switching over. When specified correctly, this +# can make the switchover decision much more accurate. +# (Since 8.2) # # @downtime-limit: set maximum tolerated downtime for migration. # maximum downtime in milliseconds (Since 2.8) @@ -1256,13 +1316,13 @@ # migration, the compression level is an integer between 0 and 9, # where 0 means no compression, 1 means the best compression # speed, and 9 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) # # @multifd-zstd-level: Set the compression level to be used in live # migration, the compression level is an integer between 0 and 20, # where 0 means no compression, 1 means the best compression # speed, and 20 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) # # @block-bitmap-mapping: Maps block nodes and bitmaps on them to # aliases for the purpose of dirty bitmap migration. Such aliases @@ -1281,14 +1341,18 @@ # to their node name otherwise. (Since 5.2) # # @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty -# limit during live migration. Should be in the range 1 to 1000ms. -# Defaults to 1000ms. (Since 8.1) +# limit during live migration. Should be in the range 1 to +# 1000ms. Defaults to 1000ms. (Since 8.1) # # @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. # Defaults to 1. (Since 8.1) # -# @mode: Migration mode. See description in @MigMode. Default is 'normal'. -# (Since 8.2) +# @mode: Migration mode. See description in @MigMode. Default is +# 'normal'. (Since 8.2) +# +# @zero-page-detection: Whether and how to detect zero pages. +# See description in @ZeroPageDetection. Default is 'multifd'. +# (since 9.0) # # Features: # @@ -1297,8 +1361,8 @@ # @compress-threads, @decompress-threads and @compress-wait-thread # are deprecated because @compression is deprecated. # -# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period -# are experimental. +# @unstable: Members @x-checkpoint-delay and +# @x-vcpu-dirty-limit-period are experimental. 
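
A comparable sketch for the @mode parameter documented above; "cpr-reboot" is one of the MigMode values defined earlier, and per its description the later migrate command would still need a URI that persists across the reboot, such as a file:

-> { "execute": "migrate-set-parameters",
     "arguments": { "mode": "cpr-reboot" } }
<- { "return": {} }
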
# # Since: 2.4 ## @@ -1340,7 +1404,8 @@ '*x-vcpu-dirty-limit-period': { 'type': 'uint64', 'features': [ 'unstable' ] }, '*vcpu-dirty-limit': 'uint64', - '*mode': 'MigMode'} } + '*mode': 'MigMode', + '*zero-page-detection': 'ZeroPageDetection'} } ## # @query-migrate-parameters: @@ -1353,15 +1418,15 @@ # # Example: # -# -> { "execute": "query-migrate-parameters" } -# <- { "return": { -# "multifd-channels": 2, -# "cpu-throttle-increment": 10, -# "cpu-throttle-initial": 20, -# "max-bandwidth": 33554432, -# "downtime-limit": 300 -# } -# } +# -> { "execute": "query-migrate-parameters" } +# <- { "return": { +# "multifd-channels": 2, +# "cpu-throttle-increment": 10, +# "cpu-throttle-initial": 20, +# "max-bandwidth": 33554432, +# "downtime-limit": 300 +# } +# } ## { 'command': 'query-migrate-parameters', 'returns': 'MigrationParameters' } @@ -1377,8 +1442,8 @@ # # Example: # -# -> { "execute": "migrate-start-postcopy" } -# <- { "return": {} } +# -> { "execute": "migrate-start-postcopy" } +# <- { "return": {} } ## { 'command': 'migrate-start-postcopy' } @@ -1393,9 +1458,9 @@ # # Example: # -# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001}, -# "event": "MIGRATION", -# "data": {"status": "completed"} } +# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001}, +# "event": "MIGRATION", +# "data": {"status": "completed"} } ## { 'event': 'MIGRATION', 'data': {'status': 'MigrationStatus'}} @@ -1412,8 +1477,8 @@ # # Example: # -# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225}, -# "event": "MIGRATION_PASS", "data": {"pass": 2} } +# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225}, +# "event": "MIGRATION_PASS", "data": {"pass": 2} } ## { 'event': 'MIGRATION_PASS', 'data': { 'pass': 'int' } } @@ -1496,8 +1561,8 @@ # # Example: # -# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172}, -# "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } } +# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172}, +# "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } } ## { 'event': 'COLO_EXIT', 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } } @@ -1539,8 +1604,8 @@ # # Example: # -# -> { "execute": "x-colo-lost-heartbeat" } -# <- { "return": {} } +# -> { "execute": "x-colo-lost-heartbeat" } +# <- { "return": {} } ## { 'command': 'x-colo-lost-heartbeat', 'features': [ 'unstable' ], @@ -1551,8 +1616,6 @@ # # Cancel the current executing migration process. # -# Returns: nothing on success -# # Notes: This command succeeds even if there is no migration process # running. # @@ -1560,8 +1623,8 @@ # # Example: # -# -> { "execute": "migrate_cancel" } -# <- { "return": {} } +# -> { "execute": "migrate_cancel" } +# <- { "return": {} } ## { 'command': 'migrate_cancel' } @@ -1572,15 +1635,13 @@ # # @state: The state the migration is currently expected to be in # -# Returns: nothing on success -# # Since: 2.11 # # Example: # -# -> { "execute": "migrate-continue" , "arguments": -# { "state": "pre-switchover" } } -# <- { "return": {} } +# -> { "execute": "migrate-continue" , "arguments": +# { "state": "pre-switchover" } } +# <- { "return": {} } ## { 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} } @@ -1597,7 +1658,7 @@ # # @file: Direct the migration stream to a file. 
# -# Since 8.2 +# Since: 8.2 ## { 'enum': 'MigrationAddressType', 'data': [ 'socket', 'exec', 'rdma', 'file' ] } @@ -1609,7 +1670,7 @@ # # @offset: The file offset where the migration stream will start # -# Since 8.2 +# Since: 8.2 ## { 'struct': 'FileMigrationArgs', 'data': { 'filename': 'str', @@ -1620,7 +1681,7 @@ # # @args: command (list head) and arguments to execute. # -# Since 8.2 +# Since: 8.2 ## { 'struct': 'MigrationExecCommand', 'data': {'args': [ 'str' ] } } @@ -1630,7 +1691,9 @@ # # Migration endpoint configuration. # -# Since 8.2 +# @transport: The migration stream transport mechanism +# +# Since: 8.2 ## { 'union': 'MigrationAddress', 'base': { 'transport' : 'MigrationAddressType'}, @@ -1648,7 +1711,7 @@ # # @main: Main outbound migration channel. # -# Since 8.1 +# Since: 8.1 ## { 'enum': 'MigrationChannelType', 'data': [ 'main' ] } @@ -1662,7 +1725,7 @@ # # @addr: Migration endpoint configuration on destination interface. # -# Since 8.1 +# Since: 8.1 ## { 'struct': 'MigrationChannel', 'data': { @@ -1686,74 +1749,73 @@ # @detach: this argument exists only for compatibility reasons and is # ignored by QEMU # -# @resume: resume one paused migration, default "off". (since 3.0) +# @resume: resume one paused migration, default "off". (since 3.0) # # Features: # # @deprecated: Members @inc and @blk are deprecated. Use # blockdev-mirror with NBD instead. # -# Returns: nothing on success -# # Since: 0.14 # # Notes: # -# 1. The 'query-migrate' command should be used to check migration's -# progress and final result (this information is provided by the -# 'status' member) +# 1. The 'query-migrate' command should be used to check +# migration's progress and final result (this information is +# provided by the 'status' member) # -# 2. All boolean arguments default to false +# 2. All boolean arguments default to false # -# 3. The user Monitor's "detach" argument is invalid in QMP and should -# not be used +# 3. The user Monitor's "detach" argument is invalid in QMP and +# should not be used # -# 4. The uri argument should have the Uniform Resource Identifier of -# default destination VM. This connection will be bound to default -# network. +# 4. The uri argument should have the Uniform Resource Identifier +# of default destination VM. This connection will be bound to +# default network. # -# 5. For now, number of migration streams is restricted to one, i.e -# number of items in 'channels' list is just 1. +# 5. For now, number of migration streams is restricted to one, +# i.e. number of items in 'channels' list is just 1. # -# 6. The 'uri' and 'channels' arguments are mutually exclusive; -# exactly one of the two should be present. +# 6. The 'uri' and 'channels' arguments are mutually exclusive; +# exactly one of the two should be present. 
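
As a complement to the examples that follow, a hedged sketch of enabling the mapped-ram capability added earlier in this patch before starting a file-backed migration such as the last example below; the pairing with the file transport is an assumption drawn from the two additions, and the exchange is illustrative:

-> { "execute": "migrate-set-capabilities",
     "arguments": { "capabilities": [
       { "capability": "mapped-ram", "state": true } ] } }
<- { "return": {} }
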
# # Example: # -# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } } -# <- { "return": {} } -# -> { "execute": "migrate", -# "arguments": { -# "channels": [ { "channel-type": "main", -# "addr": { "transport": "socket", -# "type": "inet", -# "host": "10.12.34.9", -# "port": "1050" } } ] } } -# <- { "return": {} } -# -# -> { "execute": "migrate", -# "arguments": { -# "channels": [ { "channel-type": "main", -# "addr": { "transport": "exec", -# "args": [ "/bin/nc", "-p", "6000", -# "/some/sock" ] } } ] } } -# <- { "return": {} } -# -# -> { "execute": "migrate", -# "arguments": { -# "channels": [ { "channel-type": "main", -# "addr": { "transport": "rdma", -# "host": "10.12.34.9", -# "port": "1050" } } ] } } -# <- { "return": {} } -# -# -> { "execute": "migrate", -# "arguments": { -# "channels": [ { "channel-type": "main", -# "addr": { "transport": "file", -# "filename": "/tmp/migfile", -# "offset": "0x1000" } } ] } } -# <- { "return": {} } +# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } } +# <- { "return": {} } +# +# -> { "execute": "migrate", +# "arguments": { +# "channels": [ { "channel-type": "main", +# "addr": { "transport": "socket", +# "type": "inet", +# "host": "10.12.34.9", +# "port": "1050" } } ] } } +# <- { "return": {} } +# +# -> { "execute": "migrate", +# "arguments": { +# "channels": [ { "channel-type": "main", +# "addr": { "transport": "exec", +# "args": [ "/bin/nc", "-p", "6000", +# "/some/sock" ] } } ] } } +# <- { "return": {} } +# +# -> { "execute": "migrate", +# "arguments": { +# "channels": [ { "channel-type": "main", +# "addr": { "transport": "rdma", +# "host": "10.12.34.9", +# "port": "1050" } } ] } } +# <- { "return": {} } +# +# -> { "execute": "migrate", +# "arguments": { +# "channels": [ { "channel-type": "main", +# "addr": { "transport": "file", +# "filename": "/tmp/migfile", +# "offset": "0x1000" } } ] } } +# <- { "return": {} } # ## { 'command': 'migrate', @@ -1775,57 +1837,55 @@ # @channels: list of migration stream channels with each stream in the # list connected to a destination interface endpoint. # -# Returns: nothing on success -# # Since: 2.3 # # Notes: # -# 1. It's a bad idea to use a string for the uri, but it needs -# to stay compatible with -incoming and the format of the uri -# is already exposed above libvirt. +# 1. It's a bad idea to use a string for the uri, but it needs to +# stay compatible with -incoming and the format of the uri is +# already exposed above libvirt. # -# 2. QEMU must be started with -incoming defer to allow -# migrate-incoming to be used. +# 2. QEMU must be started with -incoming defer to allow +# migrate-incoming to be used. # -# 3. The uri format is the same as for -incoming +# 3. The uri format is the same as for -incoming # -# 5. For now, number of migration streams is restricted to one, i.e -# number of items in 'channels' list is just 1. +# 4. For now, number of migration streams is restricted to one, +# i.e. number of items in 'channels' list is just 1. # -# 4. The 'uri' and 'channels' arguments are mutually exclusive; -# exactly one of the two should be present. +# 5. The 'uri' and 'channels' arguments are mutually exclusive; +# exactly one of the two should be present. 
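
To round out the examples that follow, a sketch of the file transport on the incoming side, reusing the FileMigrationArgs members defined above; the filename and offset are placeholders:

-> { "execute": "migrate-incoming",
     "arguments": {
       "channels": [ { "channel-type": "main",
                       "addr": { "transport": "file",
                                 "filename": "/tmp/migfile",
                                 "offset": "0x1000" } } ] } }
<- { "return": {} }
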
# # Example: # -# -> { "execute": "migrate-incoming", -# "arguments": { "uri": "tcp::4446" } } -# <- { "return": {} } -# -# -> { "execute": "migrate", -# "arguments": { -# "channels": [ { "channel-type": "main", -# "addr": { "transport": "socket", -# "type": "inet", -# "host": "10.12.34.9", -# "port": "1050" } } ] } } -# <- { "return": {} } -# -# -> { "execute": "migrate", -# "arguments": { -# "channels": [ { "channel-type": "main", -# "addr": { "transport": "exec", -# "args": [ "/bin/nc", "-p", "6000", -# "/some/sock" ] } } ] } } -# <- { "return": {} } -# -# -> { "execute": "migrate", -# "arguments": { -# "channels": [ { "channel-type": "main", -# "addr": { "transport": "rdma", -# "host": "10.12.34.9", -# "port": "1050" } } ] } } -# <- { "return": {} } +# -> { "execute": "migrate-incoming", +# "arguments": { "uri": "tcp:0:4446" } } +# <- { "return": {} } +# +# -> { "execute": "migrate-incoming", +# "arguments": { +# "channels": [ { "channel-type": "main", +# "addr": { "transport": "socket", +# "type": "inet", +# "host": "10.12.34.9", +# "port": "1050" } } ] } } +# <- { "return": {} } +# +# -> { "execute": "migrate-incoming", +# "arguments": { +# "channels": [ { "channel-type": "main", +# "addr": { "transport": "exec", +# "args": [ "/bin/nc", "-p", "6000", +# "/some/sock" ] } } ] } } +# <- { "return": {} } +# +# -> { "execute": "migrate-incoming", +# "arguments": { +# "channels": [ { "channel-type": "main", +# "addr": { "transport": "rdma", +# "host": "10.12.34.9", +# "port": "1050" } } ] } } +# <- { "return": {} } ## { 'command': 'migrate-incoming', 'data': {'*uri': 'str', @@ -1844,15 +1904,13 @@ # @live: Optional argument to ask QEMU to treat this command as part # of a live migration. Default to true. (since 2.11) # -# Returns: Nothing on success -# # Since: 1.1 # # Example: # -# -> { "execute": "xen-save-devices-state", -# "arguments": { "filename": "/tmp/save" } } -# <- { "return": {} } +# -> { "execute": "xen-save-devices-state", +# "arguments": { "filename": "/tmp/save" } } +# <- { "return": {} } ## { 'command': 'xen-save-devices-state', 'data': {'filename': 'str', '*live':'bool' } } @@ -1864,15 +1922,13 @@ # # @enable: true to enable, false to disable. # -# Returns: nothing -# # Since: 1.3 # # Example: # -# -> { "execute": "xen-set-global-dirty-log", -# "arguments": { "enable": true } } -# <- { "return": {} } +# -> { "execute": "xen-set-global-dirty-log", +# "arguments": { "enable": true } } +# <- { "return": {} } ## { 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } } @@ -1890,9 +1946,9 @@ # # Example: # -# -> { "execute": "xen-load-devices-state", -# "arguments": { "filename": "/tmp/resume" } } -# <- { "return": {} } +# -> { "execute": "xen-load-devices-state", +# "arguments": { "filename": "/tmp/resume" } } +# <- { "return": {} } ## { 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} } @@ -1905,16 +1961,14 @@ # # @primary: true for primary or false for secondary. # -# @failover: true to do failover, false to stop. but cannot be -# specified if 'enable' is true. default value is false. -# -# Returns: nothing. +# @failover: true to do failover, false to stop. Cannot be specified +# if 'enable' is true. Default value is false. 
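
A sketch of the @failover case just described, typically issued on the secondary side to stop replication and fail over; the argument names are those in the schema and the exchange is illustrative:

-> { "execute": "xen-set-replication",
     "arguments": { "enable": false, "primary": false, "failover": true } }
<- { "return": {} }
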
# # Example: # -# -> { "execute": "xen-set-replication", -# "arguments": {"enable": true, "primary": false} } -# <- { "return": {} } +# -> { "execute": "xen-set-replication", +# "arguments": {"enable": true, "primary": false} } +# <- { "return": {} } # # Since: 2.9 ## @@ -1947,8 +2001,8 @@ # # Example: # -# -> { "execute": "query-xen-replication-status" } -# <- { "return": { "error": false } } +# -> { "execute": "query-xen-replication-status" } +# <- { "return": { "error": false } } # # Since: 2.9 ## @@ -1961,12 +2015,10 @@ # # Xen uses this command to notify replication to trigger a checkpoint. # -# Returns: nothing. -# # Example: # -# -> { "execute": "xen-colo-do-checkpoint" } -# <- { "return": {} } +# -> { "execute": "xen-colo-do-checkpoint" } +# <- { "return": {} } # # Since: 2.9 ## @@ -2003,8 +2055,8 @@ # # Example: # -# -> { "execute": "query-colo-status" } -# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } } +# -> { "execute": "query-colo-status" } +# <- { "return": { "mode": "primary", "last-mode": "none", "reason": "request" } } # # Since: 3.1 ## @@ -2019,13 +2071,11 @@ # # @uri: the URI to be used for the recovery of migration stream. # -# Returns: nothing. -# # Example: # -# -> { "execute": "migrate-recover", -# "arguments": { "uri": "tcp:192.168.1.200:12345" } } -# <- { "return": {} } +# -> { "execute": "migrate-recover", +# "arguments": { "uri": "tcp:192.168.1.200:12345" } } +# <- { "return": {} } # # Since: 3.0 ## @@ -2038,12 +2088,10 @@ # # Pause a migration. Currently it only supports postcopy. # -# Returns: nothing. -# # Example: # -# -> { "execute": "migrate-pause" } -# <- { "return": {} } +# -> { "execute": "migrate-pause" } +# <- { "return": {} } # # Since: 3.0 ## @@ -2063,9 +2111,9 @@ # # Example: # -# <- { "event": "UNPLUG_PRIMARY", -# "data": { "device-id": "hostdev0" }, -# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } +# <- { "event": "UNPLUG_PRIMARY", +# "data": { "device-id": "hostdev0" }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } ## { 'event': 'UNPLUG_PRIMARY', 'data': { 'device-id': 'str' } } @@ -2126,8 +2174,7 @@ # # @millisecond: value is in milliseconds # -# Since 8.2 -# +# Since: 8.2 ## { 'enum': 'TimeUnit', 'data': ['second', 'millisecond'] } @@ -2209,7 +2256,7 @@ # will not increase dirty page rate anymore. # # @calc-time-unit: time unit in which @calc-time is specified. -# By default it is seconds. (Since 8.2) +# By default it is seconds. (Since 8.2) # # @sample-pages: number of sampled pages per each GiB of guest memory. # Default value is 512. For 4KiB guest pages this corresponds to @@ -2224,16 +2271,16 @@ # # Example: # -# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1, -# 'sample-pages': 512} } -# <- { "return": {} } +# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1, +# 'sample-pages': 512} } +# <- { "return": {} } # -# Measure dirty rate using dirty bitmap for 500 milliseconds: +# Measure dirty rate using dirty bitmap for 500 milliseconds: # -# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500, -# "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} } +# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 500, +# "calc-time-unit": "millisecond", "mode": "dirty-bitmap"} } # -# <- { "return": {} } +# <- { "return": {} } ## { 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64', '*calc-time-unit': 'TimeUnit', @@ -2246,23 +2293,23 @@ # Query results of the most recent invocation of @calc-dirty-rate. 
# # @calc-time-unit: time unit in which to report calculation time. -# By default it is reported in seconds. (Since 8.2) +# By default it is reported in seconds. (Since 8.2) # # Since: 5.2 # # Examples: # -# 1. Measurement is in progress: +# 1. Measurement is in progress: # -# <- {"status": "measuring", "sample-pages": 512, -# "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10, -# "calc-time-unit": "second"} +# <- {"status": "measuring", "sample-pages": 512, +# "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10, +# "calc-time-unit": "second"} # -# 2. Measurement has been completed: +# 2. Measurement has been completed: # -# <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108, -# "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10, -# "calc-time-unit": "second"} +# <- {"status": "measured", "sample-pages": 512, "dirty-rate": 108, +# "mode": "page-sampling", "start-time": 1693900454, "calc-time": 10, +# "calc-time-unit": "second"} ## { 'command': 'query-dirty-rate', 'data': {'*calc-time-unit': 'TimeUnit' }, 'returns': 'DirtyRateInfo' } @@ -2303,10 +2350,10 @@ # # Example: # -# -> {"execute": "set-vcpu-dirty-limit"} -# "arguments": { "dirty-rate": 200, -# "cpu-index": 1 } } -# <- { "return": {} } +# -> {"execute": "set-vcpu-dirty-limit"} +# "arguments": { "dirty-rate": 200, +# "cpu-index": 1 } } +# <- { "return": {} } ## { 'command': 'set-vcpu-dirty-limit', 'data': { '*cpu-index': 'int', @@ -2327,9 +2374,9 @@ # # Example: # -# -> {"execute": "cancel-vcpu-dirty-limit"}, -# "arguments": { "cpu-index": 1 } } -# <- { "return": {} } +# -> {"execute": "cancel-vcpu-dirty-limit"}, +# "arguments": { "cpu-index": 1 } } +# <- { "return": {} } ## { 'command': 'cancel-vcpu-dirty-limit', 'data': { '*cpu-index': 'int'} } @@ -2344,10 +2391,10 @@ # # Example: # -# -> {"execute": "query-vcpu-dirty-limit"} -# <- {"return": [ -# { "limit-rate": 60, "current-rate": 3, "cpu-index": 0}, -# { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]} +# -> {"execute": "query-vcpu-dirty-limit"} +# <- {"return": [ +# { "limit-rate": 60, "current-rate": 3, "cpu-index": 0}, +# { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]} ## { 'command': 'query-vcpu-dirty-limit', 'returns': [ 'DirtyLimitInfo' ] } @@ -2372,9 +2419,7 @@ # # Returns information of migration threads # -# data: migration thread name -# -# Returns: information about migration threads +# Returns: @MigrationThreadInfo # # Since: 7.2 ## @@ -2408,44 +2453,42 @@ # # If @tag already exists, an error will be reported # -# Returns: nothing -# # Example: # -# -> { "execute": "snapshot-save", -# "arguments": { -# "job-id": "snapsave0", -# "tag": "my-snap", -# "vmstate": "disk0", -# "devices": ["disk0", "disk1"] -# } -# } -# <- { "return": { } } -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1432121972, "microseconds": 744001}, -# "data": {"status": "created", "id": "snapsave0"}} -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1432122172, "microseconds": 744001}, -# "data": {"status": "running", "id": "snapsave0"}} -# <- {"event": "STOP", -# "timestamp": {"seconds": 1432122372, "microseconds": 744001} } -# <- {"event": "RESUME", -# "timestamp": {"seconds": 1432122572, "microseconds": 744001} } -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1432122772, "microseconds": 744001}, -# "data": {"status": "waiting", "id": "snapsave0"}} -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1432122972, "microseconds": 744001}, -# "data": {"status": 
"pending", "id": "snapsave0"}} -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1432123172, "microseconds": 744001}, -# "data": {"status": "concluded", "id": "snapsave0"}} -# -> {"execute": "query-jobs"} -# <- {"return": [{"current-progress": 1, -# "status": "concluded", -# "total-progress": 1, -# "type": "snapshot-save", -# "id": "snapsave0"}]} +# -> { "execute": "snapshot-save", +# "arguments": { +# "job-id": "snapsave0", +# "tag": "my-snap", +# "vmstate": "disk0", +# "devices": ["disk0", "disk1"] +# } +# } +# <- { "return": { } } +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1432121972, "microseconds": 744001}, +# "data": {"status": "created", "id": "snapsave0"}} +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1432122172, "microseconds": 744001}, +# "data": {"status": "running", "id": "snapsave0"}} +# <- {"event": "STOP", +# "timestamp": {"seconds": 1432122372, "microseconds": 744001} } +# <- {"event": "RESUME", +# "timestamp": {"seconds": 1432122572, "microseconds": 744001} } +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1432122772, "microseconds": 744001}, +# "data": {"status": "waiting", "id": "snapsave0"}} +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1432122972, "microseconds": 744001}, +# "data": {"status": "pending", "id": "snapsave0"}} +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1432123172, "microseconds": 744001}, +# "data": {"status": "concluded", "id": "snapsave0"}} +# -> {"execute": "query-jobs"} +# <- {"return": [{"current-progress": 1, +# "status": "concluded", +# "total-progress": 1, +# "type": "snapshot-save", +# "id": "snapsave0"}]} # # Since: 6.0 ## @@ -2480,44 +2523,42 @@ # device nodes that can have changed since the original @snapshot-save # command execution. 
# -# Returns: nothing -# # Example: # -# -> { "execute": "snapshot-load", -# "arguments": { -# "job-id": "snapload0", -# "tag": "my-snap", -# "vmstate": "disk0", -# "devices": ["disk0", "disk1"] -# } -# } -# <- { "return": { } } -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1472124172, "microseconds": 744001}, -# "data": {"status": "created", "id": "snapload0"}} -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1472125172, "microseconds": 744001}, -# "data": {"status": "running", "id": "snapload0"}} -# <- {"event": "STOP", -# "timestamp": {"seconds": 1472125472, "microseconds": 744001} } -# <- {"event": "RESUME", -# "timestamp": {"seconds": 1472125872, "microseconds": 744001} } -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1472126172, "microseconds": 744001}, -# "data": {"status": "waiting", "id": "snapload0"}} -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1472127172, "microseconds": 744001}, -# "data": {"status": "pending", "id": "snapload0"}} -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1472128172, "microseconds": 744001}, -# "data": {"status": "concluded", "id": "snapload0"}} -# -> {"execute": "query-jobs"} -# <- {"return": [{"current-progress": 1, -# "status": "concluded", -# "total-progress": 1, -# "type": "snapshot-load", -# "id": "snapload0"}]} +# -> { "execute": "snapshot-load", +# "arguments": { +# "job-id": "snapload0", +# "tag": "my-snap", +# "vmstate": "disk0", +# "devices": ["disk0", "disk1"] +# } +# } +# <- { "return": { } } +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1472124172, "microseconds": 744001}, +# "data": {"status": "created", "id": "snapload0"}} +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1472125172, "microseconds": 744001}, +# "data": {"status": "running", "id": "snapload0"}} +# <- {"event": "STOP", +# "timestamp": {"seconds": 1472125472, "microseconds": 744001} } +# <- {"event": "RESUME", +# "timestamp": {"seconds": 1472125872, "microseconds": 744001} } +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1472126172, "microseconds": 744001}, +# "data": {"status": "waiting", "id": "snapload0"}} +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1472127172, "microseconds": 744001}, +# "data": {"status": "pending", "id": "snapload0"}} +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1472128172, "microseconds": 744001}, +# "data": {"status": "concluded", "id": "snapload0"}} +# -> {"execute": "query-jobs"} +# <- {"return": [{"current-progress": 1, +# "status": "concluded", +# "total-progress": 1, +# "type": "snapshot-load", +# "id": "snapload0"}]} # # Since: 6.0 ## @@ -2543,39 +2584,37 @@ # to determine completion and to fetch details of any errors that # arise. 
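
A sketch of the error path mentioned above, assuming the usual JobInfo shape in which a failed job carries an optional "error" string; the message shown is a placeholder:

-> { "execute": "query-jobs" }
<- { "return": [ { "current-progress": 1,
                   "status": "concluded",
                   "total-progress": 1,
                   "type": "snapshot-delete",
                   "id": "snapdelete0",
                   "error": "Snapshot 'my-snap' not found" } ] }
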
# -# Returns: nothing -# # Example: # -# -> { "execute": "snapshot-delete", -# "arguments": { -# "job-id": "snapdelete0", -# "tag": "my-snap", -# "devices": ["disk0", "disk1"] -# } -# } -# <- { "return": { } } -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1442124172, "microseconds": 744001}, -# "data": {"status": "created", "id": "snapdelete0"}} -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1442125172, "microseconds": 744001}, -# "data": {"status": "running", "id": "snapdelete0"}} -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1442126172, "microseconds": 744001}, -# "data": {"status": "waiting", "id": "snapdelete0"}} -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1442127172, "microseconds": 744001}, -# "data": {"status": "pending", "id": "snapdelete0"}} -# <- {"event": "JOB_STATUS_CHANGE", -# "timestamp": {"seconds": 1442128172, "microseconds": 744001}, -# "data": {"status": "concluded", "id": "snapdelete0"}} -# -> {"execute": "query-jobs"} -# <- {"return": [{"current-progress": 1, -# "status": "concluded", -# "total-progress": 1, -# "type": "snapshot-delete", -# "id": "snapdelete0"}]} +# -> { "execute": "snapshot-delete", +# "arguments": { +# "job-id": "snapdelete0", +# "tag": "my-snap", +# "devices": ["disk0", "disk1"] +# } +# } +# <- { "return": { } } +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1442124172, "microseconds": 744001}, +# "data": {"status": "created", "id": "snapdelete0"}} +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1442125172, "microseconds": 744001}, +# "data": {"status": "running", "id": "snapdelete0"}} +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1442126172, "microseconds": 744001}, +# "data": {"status": "waiting", "id": "snapdelete0"}} +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1442127172, "microseconds": 744001}, +# "data": {"status": "pending", "id": "snapdelete0"}} +# <- {"event": "JOB_STATUS_CHANGE", +# "timestamp": {"seconds": 1442128172, "microseconds": 744001}, +# "data": {"status": "concluded", "id": "snapdelete0"}} +# -> {"execute": "query-jobs"} +# <- {"return": [{"current-progress": 1, +# "status": "concluded", +# "total-progress": 1, +# "type": "snapshot-delete", +# "id": "snapdelete0"}]} # # Since: 6.0 ## diff --git a/qapi/misc-target.json b/qapi/misc-target.json index 88291453ba4..4e0a6492a9a 100644 --- a/qapi/misc-target.json +++ b/qapi/misc-target.json @@ -13,8 +13,8 @@ # # Example: # -# -> { "execute": "rtc-reset-reinjection" } -# <- { "return": {} } +# -> { "execute": "rtc-reset-reinjection" } +# <- { "return": {} } ## { 'command': 'rtc-reset-reinjection', 'if': 'TARGET_I386' } @@ -91,10 +91,10 @@ # # Example: # -# -> { "execute": "query-sev" } -# <- { "return": { "enabled": true, "api-major" : 0, "api-minor" : 0, -# "build-id" : 0, "policy" : 0, "state" : "running", -# "handle" : 1 } } +# -> { "execute": "query-sev" } +# <- { "return": { "enabled": true, "api-major" : 0, "api-minor" : 0, +# "build-id" : 0, "policy" : 0, "state" : "running", +# "handle" : 1 } } ## { 'command': 'query-sev', 'returns': 'SevInfo', 'if': 'TARGET_I386' } @@ -122,8 +122,8 @@ # # Example: # -# -> { "execute": "query-sev-launch-measure" } -# <- { "return": { "data": "4l8LXeNlSPUDlXPJG5966/8%YZ" } } +# -> { "execute": "query-sev-launch-measure" } +# <- { "return": { "data": "4l8LXeNlSPUDlXPJG5966/8%YZ" } } ## { 'command': 'query-sev-launch-measure', 'returns': 'SevLaunchMeasureInfo', 'if': 'TARGET_I386' } @@ -167,10 
+167,10 @@ # # Example: # -# -> { "execute": "query-sev-capabilities" } -# <- { "return": { "pdh": "8CCDD8DDD", "cert-chain": "888CCCDDDEE", -# "cpu0-id": "2lvmGwo+...61iEinw==", -# "cbitpos": 47, "reduced-phys-bits": 1}} +# -> { "execute": "query-sev-capabilities" } +# <- { "return": { "pdh": "8CCDD8DDD", "cert-chain": "888CCCDDDEE", +# "cpu0-id": "2lvmGwo+...61iEinw==", +# "cbitpos": 47, "reduced-phys-bits": 1}} ## { 'command': 'query-sev-capabilities', 'returns': 'SevCapability', 'if': 'TARGET_I386' } @@ -221,9 +221,9 @@ # # Example: # -# -> { "execute" : "query-sev-attestation-report", -# "arguments": { "mnonce": "aaaaaaa" } } -# <- { "return" : { "data": "aaaaaaaabbbddddd"} } +# -> { "execute" : "query-sev-attestation-report", +# "arguments": { "mnonce": "aaaaaaa" } } +# <- { "return" : { "data": "aaaaaaaabbbddddd"} } ## { 'command': 'query-sev-attestation-report', 'data': { 'mnonce': 'str' }, @@ -237,15 +237,13 @@ # # @filename: the path to the file to dump to # -# This command is only supported on s390 architecture. -# # Since: 2.5 # # Example: # -# -> { "execute": "dump-skeys", -# "arguments": { "filename": "/tmp/skeys" } } -# <- { "return": {} } +# -> { "execute": "dump-skeys", +# "arguments": { "filename": "/tmp/skeys" } } +# <- { "return": {} } ## { 'command': 'dump-skeys', 'data': { 'filename': 'str' }, @@ -288,9 +286,9 @@ # # Example: # -# -> { "execute": "query-gic-capabilities" } -# <- { "return": [{ "version": 2, "emulated": true, "kernel": false }, -# { "version": 3, "emulated": false, "kernel": true } ] } +# -> { "execute": "query-gic-capabilities" } +# <- { "return": [{ "version": 2, "emulated": true, "kernel": false }, +# { "version": 3, "emulated": false, "kernel": true } ] } ## { 'command': 'query-gic-capabilities', 'returns': ['GICCapability'], 'if': 'TARGET_ARM' } @@ -346,11 +344,11 @@ # # Example: # -# -> { "execute": "query-sgx" } -# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, -# "flc": true, -# "sections": [{"node": 0, "size": 67108864}, -# {"node": 1, "size": 29360128}]} } +# -> { "execute": "query-sgx" } +# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, +# "flc": true, +# "sections": [{"node": 0, "size": 67108864}, +# {"node": 1, "size": 29360128}]} } ## { 'command': 'query-sgx', 'returns': 'SGXInfo', 'if': 'TARGET_I386' } @@ -365,11 +363,11 @@ # # Example: # -# -> { "execute": "query-sgx-capabilities" } -# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, -# "flc": true, -# "section" : [{"node": 0, "size": 67108864}, -# {"node": 1, "size": 29360128}]} } +# -> { "execute": "query-sgx-capabilities" } +# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, +# "flc": true, +# "section" : [{"node": 0, "size": 67108864}, +# {"node": 1, "size": 29360128}]} } ## { 'command': 'query-sgx-capabilities', 'returns': 'SGXInfo', 'if': 'TARGET_I386' } @@ -440,28 +438,28 @@ # # Example: # -# -> { "execute": "xen-event-list" } -# <- { "return": [ -# { -# "pending": false, -# "port": 1, -# "vcpu": 1, -# "remote-domain": "qemu", -# "masked": false, -# "type": "interdomain", -# "target": 1 -# }, -# { -# "pending": false, -# "port": 2, -# "vcpu": 0, -# "remote-domain": "", -# "masked": false, -# "type": "virq", -# "target": 0 -# } -# ] -# } +# -> { "execute": "xen-event-list" } +# <- { "return": [ +# { +# "pending": false, +# "port": 1, +# "vcpu": 1, +# "remote-domain": "qemu", +# "masked": false, +# "type": "interdomain", +# "target": 1 +# }, +# { +# "pending": false, +# "port": 2, +# "vcpu": 0, +# "remote-domain": "", +# "masked": 
false, +# "type": "virq", +# "target": 0 +# } +# ] +# } ## { 'command': 'xen-event-list', 'returns': ['EvtchnInfo'], @@ -474,15 +472,12 @@ # # @port: The port number # -# Returns: -# - Nothing on success. -# # Since: 8.0 # # Example: # -# -> { "execute": "xen-event-inject", "arguments": { "port": 1 } } -# <- { "return": { } } +# -> { "execute": "xen-event-inject", "arguments": { "port": 1 } } +# <- { "return": { } } ## { 'command': 'xen-event-inject', 'data': { 'port': 'uint32' }, diff --git a/qapi/misc.json b/qapi/misc.json index cda2effa815..ec30e5c570a 100644 --- a/qapi/misc.json +++ b/qapi/misc.json @@ -28,15 +28,13 @@ # # @tls: whether to perform TLS. Only applies to the "spice" protocol # -# Returns: nothing on success. -# # Since: 0.14 # # Example: # -# -> { "execute": "add_client", "arguments": { "protocol": "vnc", -# "fdname": "myclient" } } -# <- { "return": {} } +# -> { "execute": "add_client", "arguments": { "protocol": "vnc", +# "fdname": "myclient" } } +# <- { "return": {} } ## { 'command': 'add_client', 'data': { 'protocol': 'str', 'fdname': 'str', '*skipauth': 'bool', @@ -64,8 +62,8 @@ # # Example: # -# -> { "execute": "query-name" } -# <- { "return": { "name": "qemu-name" } } +# -> { "execute": "query-name" } +# <- { "return": { "name": "qemu-name" } } ## { 'command': 'query-name', 'returns': 'NameInfo', 'allow-preconfig': true } @@ -115,18 +113,18 @@ # # Example: # -# -> { "execute": "query-iothreads" } -# <- { "return": [ -# { -# "id":"iothread0", -# "thread-id":3134 -# }, -# { -# "id":"iothread1", -# "thread-id":3135 -# } -# ] -# } +# -> { "execute": "query-iothreads" } +# <- { "return": [ +# { +# "id":"iothread0", +# "thread-id":3134 +# }, +# { +# "id":"iothread1", +# "thread-id":3135 +# } +# ] +# } ## { 'command': 'query-iothreads', 'returns': ['IOThreadInfo'], 'allow-preconfig': true } @@ -134,7 +132,7 @@ ## # @stop: # -# Stop all guest VCPU execution. +# Stop guest VM execution. # # Since: 0.14 # @@ -143,32 +141,37 @@ # the guest remains paused once migration finishes, as if the -S # option was passed on the command line. # +# In the "suspended" state, it will completely stop the VM and +# cause a transition to the "paused" state. (Since 9.0) +# # Example: # -# -> { "execute": "stop" } -# <- { "return": {} } +# -> { "execute": "stop" } +# <- { "return": {} } ## { 'command': 'stop' } ## # @cont: # -# Resume guest VCPU execution. +# Resume guest VM execution. # # Since: 0.14 # -# Returns: If successful, nothing -# # Notes: This command will succeed if the guest is currently running. # It will also succeed if the guest is in the "inmigrate" state; # in this case, the effect of the command is to make sure the # guest starts once migration finishes, removing the effect of the # -S command line option if it was passed. # +# If the VM was previously suspended, and not been reset or woken, +# this command will transition back to the "suspended" state. 
+# (Since 9.0) +# # Example: # -# -> { "execute": "cont" } -# <- { "return": {} } +# -> { "execute": "cont" } +# <- { "return": {} } ## { 'command': 'cont' } @@ -189,12 +192,10 @@ # # Since: 3.0 # -# Returns: nothing -# # Example: # -# -> { "execute": "x-exit-preconfig" } -# <- { "return": {} } +# -> { "execute": "x-exit-preconfig" } +# <- { "return": {} } ## { 'command': 'x-exit-preconfig', 'allow-preconfig': true, 'features': [ 'unstable' ] } @@ -233,9 +234,9 @@ # # Example: # -# -> { "execute": "human-monitor-command", -# "arguments": { "command-line": "info kvm" } } -# <- { "return": "kvm support: enabled\r\n" } +# -> { "execute": "human-monitor-command", +# "arguments": { "command-line": "info kvm" } } +# <- { "return": "kvm support: enabled\r\n" } ## { 'command': 'human-monitor-command', 'data': {'command-line': 'str', '*cpu-index': 'int'}, @@ -249,8 +250,6 @@ # # @fdname: file descriptor name # -# Returns: Nothing on success -# # Since: 0.14 # # Notes: If @fdname already exists, the file descriptor assigned to it @@ -261,8 +260,8 @@ # # Example: # -# -> { "execute": "getfd", "arguments": { "fdname": "fd1" } } -# <- { "return": {} } +# -> { "execute": "getfd", "arguments": { "fdname": "fd1" } } +# <- { "return": {} } ## { 'command': 'getfd', 'data': {'fdname': 'str'}, 'if': 'CONFIG_POSIX' } @@ -278,8 +277,6 @@ # # @fdname: file descriptor name # -# Returns: Nothing on success -# # Since: 8.0 # # Notes: If @fdname already exists, the file descriptor assigned to it @@ -290,8 +287,8 @@ # # Example: # -# -> { "execute": "get-win32-socket", "arguments": { "info": "abcd123..", fdname": "skclient" } } -# <- { "return": {} } +# -> { "execute": "get-win32-socket", "arguments": { "info": "abcd123..", fdname": "skclient" } } +# <- { "return": {} } ## { 'command': 'get-win32-socket', 'data': {'info': 'str', 'fdname': 'str'}, 'if': 'CONFIG_WIN32' } @@ -302,14 +299,12 @@ # # @fdname: file descriptor name # -# Returns: Nothing on success -# # Since: 0.14 # # Example: # -# -> { "execute": "closefd", "arguments": { "fdname": "fd1" } } -# <- { "return": {} } +# -> { "execute": "closefd", "arguments": { "fdname": "fd1" } } +# <- { "return": {} } ## { 'command': 'closefd', 'data': {'fdname': 'str'} } @@ -337,20 +332,23 @@ # @opaque: A free-form string that can be used to describe the fd. # # Returns: -# - @AddfdInfo on success -# - If file descriptor was not received, GenericError -# - If @fdset-id is a negative value, GenericError +# @AddfdInfo # -# Notes: The list of fd sets is shared by all monitor connections. +# Errors: +# - If file descriptor was not received, GenericError +# - If @fdset-id is a negative value, GenericError # -# If @fdset-id is not specified, a new fd set will be created. +# Notes: +# The list of fd sets is shared by all monitor connections. +# +# If @fdset-id is not specified, a new fd set will be created. # # Since: 1.2 # # Example: # -# -> { "execute": "add-fd", "arguments": { "fdset-id": 1 } } -# <- { "return": { "fdset-id": 1, "fd": 3 } } +# -> { "execute": "add-fd", "arguments": { "fdset-id": 1 } } +# <- { "return": { "fdset-id": 1, "fd": 3 } } ## { 'command': 'add-fd', 'data': { '*fdset-id': 'int', @@ -366,21 +364,21 @@ # # @fd: The file descriptor that is to be removed. # -# Returns: -# - Nothing on success -# - If @fdset-id or @fd is not found, GenericError +# Errors: +# - If @fdset-id or @fd is not found, GenericError # # Since: 1.2 # -# Notes: The list of fd sets is shared by all monitor connections. +# Notes: +# The list of fd sets is shared by all monitor connections. 
# -# If @fd is not specified, all file descriptors in @fdset-id will be -# removed. +# If @fd is not specified, all file descriptors in @fdset-id will +# be removed. # # Example: # -# -> { "execute": "remove-fd", "arguments": { "fdset-id": 1, "fd": 3 } } -# <- { "return": {} } +# -> { "execute": "remove-fd", "arguments": { "fdset-id": 1, "fd": 3 } } +# <- { "return": {} } ## { 'command': 'remove-fd', 'data': {'fdset-id': 'int', '*fd': 'int'} } @@ -425,34 +423,34 @@ # # Example: # -# -> { "execute": "query-fdsets" } -# <- { "return": [ -# { -# "fds": [ -# { -# "fd": 30, -# "opaque": "rdonly:/path/to/file" -# }, +# -> { "execute": "query-fdsets" } +# <- { "return": [ # { -# "fd": 24, -# "opaque": "rdwr:/path/to/file" -# } -# ], -# "fdset-id": 1 -# }, -# { -# "fds": [ -# { -# "fd": 28 +# "fds": [ +# { +# "fd": 30, +# "opaque": "rdonly:/path/to/file" +# }, +# { +# "fd": 24, +# "opaque": "rdwr:/path/to/file" +# } +# ], +# "fdset-id": 1 # }, # { -# "fd": 29 +# "fds": [ +# { +# "fd": 28 +# }, +# { +# "fd": 29 +# } +# ], +# "fdset-id": 0 # } -# ], -# "fdset-id": 0 +# ] # } -# ] -# } ## { 'command': 'query-fdsets', 'returns': ['FdsetInfo'] } @@ -519,31 +517,33 @@ # @option: option name # # Returns: list of @CommandLineOptionInfo for all options (or for the -# given @option). Returns an error if the given @option doesn't -# exist. +# given @option). +# +# Errors: +# - if the given @option doesn't exist # # Since: 1.5 # # Example: # -# -> { "execute": "query-command-line-options", -# "arguments": { "option": "option-rom" } } -# <- { "return": [ -# { -# "parameters": [ -# { -# "name": "romfile", -# "type": "string" -# }, -# { -# "name": "bootindex", -# "type": "number" -# } -# ], -# "option": "option-rom" -# } -# ] -# } +# -> { "execute": "query-command-line-options", +# "arguments": { "option": "option-rom" } } +# <- { "return": [ +# { +# "parameters": [ +# { +# "name": "romfile", +# "type": "string" +# }, +# { +# "name": "bootindex", +# "type": "number" +# } +# ], +# "option": "option-rom" +# } +# ] +# } ## {'command': 'query-command-line-options', 'data': {'*option': 'str'}, @@ -568,9 +568,9 @@ # # Example: # -# <- { "event": "RTC_CHANGE", -# "data": { "offset": 78 }, -# "timestamp": { "seconds": 1267020223, "microseconds": 435656 } } +# <- { "event": "RTC_CHANGE", +# "data": { "offset": 78 }, +# "timestamp": { "seconds": 1267020223, "microseconds": 435656 } } ## { 'event': 'RTC_CHANGE', 'data': { 'offset': 'int', 'qom-path': 'str' } } @@ -595,12 +595,12 @@ # # Example: # -# <- { "event": "VFU_CLIENT_HANGUP", -# "data": { "vfu-id": "vfu1", -# "vfu-qom-path": "/objects/vfu1", -# "dev-id": "sas1", -# "dev-qom-path": "/machine/peripheral/sas1" }, -# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } +# <- { "event": "VFU_CLIENT_HANGUP", +# "data": { "vfu-id": "vfu1", +# "vfu-qom-path": "/objects/vfu1", +# "dev-id": "sas1", +# "dev-qom-path": "/machine/peripheral/sas1" }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } ## { 'event': 'VFU_CLIENT_HANGUP', 'data': { 'vfu-id': 'str', 'vfu-qom-path': 'str', diff --git a/qapi/net.json b/qapi/net.json index 8095b68fa83..0f5a259475e 100644 --- a/qapi/net.json +++ b/qapi/net.json @@ -6,7 +6,6 @@ # = Net devices ## -{ 'include': 'common.json' } { 'include': 'sockets.json' } ## @@ -18,8 +17,8 @@ # # @up: true to set the link status to be up # -# Returns: Nothing on success If @name is not a valid network device, -# DeviceNotFound +# Errors: +# - If @name is not a valid network device, DeviceNotFound # # Since: 0.14 # @@ -29,9 
+28,9 @@ # # Example: # -# -> { "execute": "set_link", -# "arguments": { "name": "e1000.0", "up": false } } -# <- { "return": {} } +# -> { "execute": "set_link", +# "arguments": { "name": "e1000.0", "up": false } } +# <- { "return": {} } ## { 'command': 'set_link', 'data': {'name': 'str', 'up': 'bool'} } @@ -44,15 +43,15 @@ # # Since: 0.14 # -# Returns: Nothing on success If @type is not a valid network backend, -# DeviceNotFound +# Errors: +# - If @type is not a valid network backend, DeviceNotFound # # Example: # -# -> { "execute": "netdev_add", -# "arguments": { "type": "user", "id": "netdev1", -# "dnssearch": [ { "str": "example.org" } ] } } -# <- { "return": {} } +# -> { "execute": "netdev_add", +# "arguments": { "type": "user", "id": "netdev1", +# "dnssearch": [ { "str": "example.org" } ] } } +# <- { "return": {} } ## { 'command': 'netdev_add', 'data': 'Netdev', 'boxed': true, 'allow-preconfig': true } @@ -64,15 +63,15 @@ # # @id: the name of the network backend to remove # -# Returns: Nothing on success If @id is not a valid network backend, -# DeviceNotFound +# Errors: +# - If @id is not a valid network backend, DeviceNotFound # # Since: 0.14 # # Example: # -# -> { "execute": "netdev_del", "arguments": { "id": "netdev1" } } -# <- { "return": {} } +# -> { "execute": "netdev_del", "arguments": { "id": "netdev1" } } +# <- { "return": {} } ## { 'command': 'netdev_del', 'data': {'id': 'str'}, 'allow-preconfig': true } @@ -102,6 +101,17 @@ '*addr': 'str', '*vectors': 'uint32' } } +## +# @String: +# +# A fat type wrapping 'str', to be embedded in lists. +# +# Since: 1.2 +## +{ 'struct': 'String', + 'data': { + 'str': 'str' } } + ## # @NetdevUserOptions: # @@ -415,8 +425,8 @@ # # @skb: generic mode, no driver support necessary # -# @native: DRV mode, program is attached to a driver, packets are passed to -# the socket without allocation of skb. +# @native: DRV mode, program is attached to a driver, packets are +# passed to the socket without allocation of skb. # # Since: 8.2 ## @@ -431,23 +441,26 @@ # # @ifname: The name of an existing network interface. # -# @mode: Attach mode for a default XDP program. If not specified, then -# 'native' will be tried first, then 'skb'. +# @mode: Attach mode for a default XDP program. If not specified, +# then 'native' will be tried first, then 'skb'. # # @force-copy: Force XDP copy mode even if device supports zero-copy. # (default: false) # -# @queues: number of queues to be used for multiqueue interfaces (default: 1). +# @queues: number of queues to be used for multiqueue interfaces +# (default: 1). # -# @start-queue: Use @queues starting from this queue number (default: 0). +# @start-queue: Use @queues starting from this queue number +# (default: 0). # -# @inhibit: Don't load a default XDP program, use one already loaded to -# the interface (default: false). Requires @sock-fds. +# @inhibit: Don't load a default XDP program, use one already loaded +# to the interface (default: false). Requires @sock-fds. # -# @sock-fds: A colon (:) separated list of file descriptors for already open -# but not bound AF_XDP sockets in the queue order. One fd per queue. -# These descriptors should already be added into XDP socket map for -# corresponding queues. Requires @inhibit. +# @sock-fds: A colon (:) separated list of file descriptors for +# already open but not bound AF_XDP sockets in the queue order. +# One fd per queue. These descriptors should already be added +# into XDP socket map for corresponding queues. Requires +# @inhibit. 
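To make the AF_XDP option set above concrete, a short sketch of the corresponding netdev_add arguments; the interface and backend id are placeholders, and the union branch is assumed to be spelled 'af-xdp' like the backend name:

    import json

    # @ifname is the only member above without a default; the rest
    # shows some of the defaults being overridden.
    args = {
        "type": "af-xdp",      # assumed Netdev union branch name
        "id": "net0",          # placeholder backend id
        "ifname": "eth0",      # placeholder host interface
        "mode": "native",      # pin the mode instead of the
                               # native-then-skb fallback
        "queues": 2,
        "start-queue": 0,
    }
    print(json.dumps({"execute": "netdev_add", "arguments": args}, indent=2))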
# # Since: 8.2 ## @@ -815,40 +828,42 @@ # @name: net client name # # Returns: list of @RxFilterInfo for all NICs (or for the given NIC). -# Returns an error if the given @name doesn't exist, or given NIC -# doesn't support rx-filter querying, or given net client isn't a -# NIC. +# +# Errors: +# - if the given @name doesn't exist +# - if the given NIC doesn't support rx-filter querying +# - if the given net client isn't a NIC # # Since: 1.6 # # Example: # -# -> { "execute": "query-rx-filter", "arguments": { "name": "vnet0" } } -# <- { "return": [ -# { -# "promiscuous": true, -# "name": "vnet0", -# "main-mac": "52:54:00:12:34:56", -# "unicast": "normal", -# "vlan": "normal", -# "vlan-table": [ -# 4, -# 0 -# ], -# "unicast-table": [ -# ], -# "multicast": "normal", -# "multicast-overflow": false, -# "unicast-overflow": false, -# "multicast-table": [ -# "01:00:5e:00:00:01", -# "33:33:00:00:00:01", -# "33:33:ff:12:34:56" -# ], -# "broadcast-allowed": false -# } -# ] -# } +# -> { "execute": "query-rx-filter", "arguments": { "name": "vnet0" } } +# <- { "return": [ +# { +# "promiscuous": true, +# "name": "vnet0", +# "main-mac": "52:54:00:12:34:56", +# "unicast": "normal", +# "vlan": "normal", +# "vlan-table": [ +# 4, +# 0 +# ], +# "unicast-table": [ +# ], +# "multicast": "normal", +# "multicast-overflow": false, +# "unicast-overflow": false, +# "multicast-table": [ +# "01:00:5e:00:00:01", +# "33:33:00:00:00:01", +# "33:33:ff:12:34:56" +# ], +# "broadcast-allowed": false +# } +# ] +# } ## { 'command': 'query-rx-filter', 'data': { '*name': 'str' }, @@ -868,10 +883,10 @@ # # Example: # -# <- { "event": "NIC_RX_FILTER_CHANGED", -# "data": { "name": "vnet0", -# "path": "/machine/peripheral/vnet0/virtio-backend" }, -# "timestamp": { "seconds": 1368697518, "microseconds": 326866 } } +# <- { "event": "NIC_RX_FILTER_CHANGED", +# "data": { "name": "vnet0", +# "path": "/machine/peripheral/vnet0/virtio-backend" }, +# "timestamp": { "seconds": 1368697518, "microseconds": 326866 } } ## { 'event': 'NIC_RX_FILTER_CHANGED', 'data': { '*name': 'str', 'path': 'str' } } @@ -917,11 +932,11 @@ # # Example: # -# -> { "execute": "announce-self", -# "arguments": { -# "initial": 50, "max": 550, "rounds": 10, "step": 50, -# "interfaces": ["vn2", "vn3"], "id": "bob" } } -# <- { "return": {} } +# -> { "execute": "announce-self", +# "arguments": { +# "initial": 50, "max": 550, "rounds": 10, "step": 50, +# "interfaces": ["vn2", "vn3"], "id": "bob" } } +# <- { "return": {} } # # Since: 4.0 ## @@ -942,9 +957,9 @@ # # Example: # -# <- { "event": "FAILOVER_NEGOTIATED", -# "data": { "device-id": "net1" }, -# "timestamp": { "seconds": 1368697518, "microseconds": 326866 } } +# <- { "event": "FAILOVER_NEGOTIATED", +# "data": { "device-id": "net1" }, +# "timestamp": { "seconds": 1368697518, "microseconds": 326866 } } ## { 'event': 'FAILOVER_NEGOTIATED', 'data': {'device-id': 'str'} } @@ -962,16 +977,16 @@ # # Examples: # -# <- { "event": "NETDEV_STREAM_CONNECTED", -# "data": { "netdev-id": "netdev0", -# "addr": { "port": "47666", "ipv6": true, -# "host": "::1", "type": "inet" } }, -# "timestamp": { "seconds": 1666269863, "microseconds": 311222 } } +# <- { "event": "NETDEV_STREAM_CONNECTED", +# "data": { "netdev-id": "netdev0", +# "addr": { "port": "47666", "ipv6": true, +# "host": "::1", "type": "inet" } }, +# "timestamp": { "seconds": 1666269863, "microseconds": 311222 } } # -# <- { "event": "NETDEV_STREAM_CONNECTED", -# "data": { "netdev-id": "netdev0", -# "addr": { "path": "/tmp/qemu0", "type": "unix" } }, -# "timestamp": { "seconds": 
1666269706, "microseconds": 413651 } } +# <- { "event": "NETDEV_STREAM_CONNECTED", +# "data": { "netdev-id": "netdev0", +# "addr": { "path": "/tmp/qemu0", "type": "unix" } }, +# "timestamp": { "seconds": 1666269706, "microseconds": 413651 } } ## { 'event': 'NETDEV_STREAM_CONNECTED', 'data': { 'netdev-id': 'str', @@ -988,9 +1003,9 @@ # # Example: # -# <- { 'event': 'NETDEV_STREAM_DISCONNECTED', -# 'data': {'netdev-id': 'netdev0'}, -# 'timestamp': {'seconds': 1663330937, 'microseconds': 526695} } +# <- { 'event': 'NETDEV_STREAM_DISCONNECTED', +# 'data': {'netdev-id': 'netdev0'}, +# 'timestamp': {'seconds': 1663330937, 'microseconds': 526695} } ## { 'event': 'NETDEV_STREAM_DISCONNECTED', 'data': { 'netdev-id': 'str' } } diff --git a/qapi/pci.json b/qapi/pci.json index 086c7730528..08bf6958634 100644 --- a/qapi/pci.json +++ b/qapi/pci.json @@ -184,132 +184,132 @@ # # Example: # -# -> { "execute": "query-pci" } -# <- { "return": [ -# { -# "bus": 0, -# "devices": [ -# { -# "bus": 0, -# "qdev_id": "", -# "slot": 0, -# "class_info": { -# "class": 1536, -# "desc": "Host bridge" -# }, -# "id": { -# "device": 32902, -# "vendor": 4663 -# }, -# "function": 0, -# "regions": [ -# ] -# }, -# { -# "bus": 0, -# "qdev_id": "", -# "slot": 1, -# "class_info": { -# "class": 1537, -# "desc": "ISA bridge" -# }, -# "id": { -# "device": 32902, -# "vendor": 28672 -# }, -# "function": 0, -# "regions": [ -# ] -# }, -# { -# "bus": 0, -# "qdev_id": "", -# "slot": 1, -# "class_info": { -# "class": 257, -# "desc": "IDE controller" -# }, -# "id": { -# "device": 32902, -# "vendor": 28688 -# }, -# "function": 1, -# "regions": [ -# { -# "bar": 4, -# "size": 16, -# "address": 49152, -# "type": "io" -# } -# ] -# }, -# { -# "bus": 0, -# "qdev_id": "", -# "slot": 2, -# "class_info": { -# "class": 768, -# "desc": "VGA controller" -# }, -# "id": { -# "device": 4115, -# "vendor": 184 -# }, -# "function": 0, -# "regions": [ -# { -# "prefetch": true, -# "mem_type_64": false, -# "bar": 0, -# "size": 33554432, -# "address": 4026531840, -# "type": "memory" -# }, -# { -# "prefetch": false, -# "mem_type_64": false, -# "bar": 1, -# "size": 4096, -# "address": 4060086272, -# "type": "memory" -# }, -# { -# "prefetch": false, -# "mem_type_64": false, -# "bar": 6, -# "size": 65536, -# "address": -1, -# "type": "memory" -# } -# ] -# }, -# { -# "bus": 0, -# "qdev_id": "", -# "irq": 11, -# "slot": 4, -# "class_info": { -# "class": 1280, -# "desc": "RAM controller" -# }, -# "id": { -# "device": 6900, -# "vendor": 4098 -# }, -# "function": 0, -# "regions": [ -# { -# "bar": 0, -# "size": 32, -# "address": 49280, -# "type": "io" -# } -# ] -# } -# ] -# } -# ] -# } +# -> { "execute": "query-pci" } +# <- { "return": [ +# { +# "bus": 0, +# "devices": [ +# { +# "bus": 0, +# "qdev_id": "", +# "slot": 0, +# "class_info": { +# "class": 1536, +# "desc": "Host bridge" +# }, +# "id": { +# "device": 32902, +# "vendor": 4663 +# }, +# "function": 0, +# "regions": [ +# ] +# }, +# { +# "bus": 0, +# "qdev_id": "", +# "slot": 1, +# "class_info": { +# "class": 1537, +# "desc": "ISA bridge" +# }, +# "id": { +# "device": 32902, +# "vendor": 28672 +# }, +# "function": 0, +# "regions": [ +# ] +# }, +# { +# "bus": 0, +# "qdev_id": "", +# "slot": 1, +# "class_info": { +# "class": 257, +# "desc": "IDE controller" +# }, +# "id": { +# "device": 32902, +# "vendor": 28688 +# }, +# "function": 1, +# "regions": [ +# { +# "bar": 4, +# "size": 16, +# "address": 49152, +# "type": "io" +# } +# ] +# }, +# { +# "bus": 0, +# "qdev_id": "", +# "slot": 2, +# "class_info": { +# "class": 768, 
+# "desc": "VGA controller" +# }, +# "id": { +# "device": 4115, +# "vendor": 184 +# }, +# "function": 0, +# "regions": [ +# { +# "prefetch": true, +# "mem_type_64": false, +# "bar": 0, +# "size": 33554432, +# "address": 4026531840, +# "type": "memory" +# }, +# { +# "prefetch": false, +# "mem_type_64": false, +# "bar": 1, +# "size": 4096, +# "address": 4060086272, +# "type": "memory" +# }, +# { +# "prefetch": false, +# "mem_type_64": false, +# "bar": 6, +# "size": 65536, +# "address": -1, +# "type": "memory" +# } +# ] +# }, +# { +# "bus": 0, +# "qdev_id": "", +# "irq": 11, +# "slot": 4, +# "class_info": { +# "class": 1280, +# "desc": "RAM controller" +# }, +# "id": { +# "device": 6900, +# "vendor": 4098 +# }, +# "function": 0, +# "regions": [ +# { +# "bar": 0, +# "size": 32, +# "address": 49280, +# "type": "io" +# } +# ] +# } +# ] +# } +# ] +# } # # Note: This example has been shortened as the real response is too # long. diff --git a/qapi/pragma.json b/qapi/pragma.json index 0aa4eeddd38..59fbe74b8ce 100644 --- a/qapi/pragma.json +++ b/qapi/pragma.json @@ -31,6 +31,61 @@ 'query-tpm-models', 'query-tpm-types', 'ringbuf-read' ], + # Types, commands, and events with undocumented members / arguments: + 'documentation-exceptions': [ + 'AbortWrapper', + 'AudiodevDriver', + 'BlkdebugEvent', + 'BlockDirtyBitmapAddWrapper', + 'BlockDirtyBitmapMergeWrapper', + 'BlockDirtyBitmapWrapper', + 'BlockdevBackupWrapper', + 'BlockdevDriver', + 'BlockdevQcow2EncryptionFormat', + 'BlockdevSnapshotInternalWrapper', + 'BlockdevSnapshotSyncWrapper', + 'BlockdevSnapshotWrapper', + 'BlockdevVmdkAdapterType', + 'ChardevBackendKind', + 'CpuS390Entitlement', + 'CpuS390Polarization', + 'CpuS390State', + 'CxlCorErrorType', + 'DisplayProtocol', + 'DriveBackupWrapper', + 'DummyBlockCoreForceArrays', + 'DummyForceArrays', + 'DummyVirtioForceArrays', + 'GrabToggleKeys', + 'HotKeyMod', + 'ImageInfoSpecificKind', + 'InputAxis', + 'InputButton', + 'IscsiHeaderDigest', + 'IscsiTransport', + 'JSONType', + 'KeyValueKind', + 'MemoryDeviceInfoKind', + 'NetClientDriver', + 'ObjectType', + 'PciMemoryRegion', + 'QCryptoAkCipherKeyType', + 'QCryptodevBackendServiceType', + 'QKeyCode', + 'RbdAuthMode', + 'RbdImageEncryptionFormat', + 'String', + 'StringWrapper', + 'SysEmuTarget', + 'ThrottleGroupProperties', + 'VncPrimaryAuth', + 'VncVencryptSubAuth', + 'X86CPURegister32', + 'XDbgBlockGraph', + 'YankInstanceType', + 'blockdev-reopen', + 'query-rocker', + 'query-rocker-ports' ], # Externally visible types whose member names may use uppercase 'member-name-exceptions': [ # visible in: 'ACPISlotType', # query-acpi-ospm-status diff --git a/qapi/qapi-schema.json b/qapi/qapi-schema.json index c01ec335e68..8304d456258 100644 --- a/qapi/qapi-schema.json +++ b/qapi/qapi-schema.json @@ -53,6 +53,7 @@ { 'include': 'char.json' } { 'include': 'dump.json' } { 'include': 'net.json' } +{ 'include': 'ebpf.json' } { 'include': 'rdma.json' } { 'include': 'rocker.json' } { 'include': 'tpm.json' } diff --git a/qapi/qapi-type-helpers.c b/qapi/qapi-type-helpers.c index f76b34f647e..266da013ad6 100644 --- a/qapi/qapi-type-helpers.c +++ b/qapi/qapi-type-helpers.c @@ -21,3 +21,17 @@ HumanReadableText *human_readable_text_from_str(GString *str) return ret; } + +char **strv_from_str_list(const strList *list) +{ + const strList *tail; + int i = 0; + char **strv = g_new(char *, QAPI_LIST_LENGTH(list) + 1); + + for (tail = list; tail != NULL; tail = tail->next) { + strv[i++] = g_strdup(tail->value); + } + strv[i] = NULL; + + return strv; +} diff --git 
a/qapi/qapi-util.c b/qapi/qapi-util.c index 63596e11c56..65a7d184372 100644 --- a/qapi/qapi-util.c +++ b/qapi/qapi-util.c @@ -112,7 +112,7 @@ bool qapi_bool_parse(const char *name, const char *value, bool *obj, Error **err * It may be prefixed by __RFQDN_ (downstream extension), where RFQDN * may contain only letters, digits, hyphen and period. * The special exception for enumeration names is not implemented. - * See docs/devel/qapi-code-gen.txt for more on QAPI naming rules. + * See docs/devel/qapi-code-gen.rst for more on QAPI naming rules. * Keep this consistent with scripts/qapi-gen.py! * If @complete, the parse fails unless it consumes @str completely. * Return its length on success, -1 on failure. diff --git a/qapi/qdev.json b/qapi/qdev.json index 6bc5a733b86..facaa0bc6a2 100644 --- a/qapi/qdev.json +++ b/qapi/qdev.json @@ -53,22 +53,22 @@ # # Notes: # -# 1. Additional arguments depend on the type. +# 1. Additional arguments depend on the type. # -# 2. For detailed information about this command, please refer to the -# 'docs/qdev-device-use.txt' file. +# 2. For detailed information about this command, please refer to +# the 'docs/qdev-device-use.txt' file. # -# 3. It's possible to list device properties by running QEMU with the -# "-device DEVICE,help" command-line argument, where DEVICE is the -# device's name +# 3. It's possible to list device properties by running QEMU with +# the "-device DEVICE,help" command-line argument, where DEVICE +# is the device's name # # Example: # -# -> { "execute": "device_add", -# "arguments": { "driver": "e1000", "id": "net1", -# "bus": "pci.0", -# "mac": "52:54:00:12:34:56" } } -# <- { "return": {} } +# -> { "execute": "device_add", +# "arguments": { "driver": "e1000", "id": "net1", +# "bus": "pci.0", +# "mac": "52:54:00:12:34:56" } } +# <- { "return": {} } # # TODO: This command effectively bypasses QAPI completely due to its # "additional arguments" business. It shouldn't have been added @@ -89,8 +89,8 @@ # # @id: the device's ID or QOM path # -# Returns: Nothing on success If @id is not a valid device, -# DeviceNotFound +# Errors: +# - If @id is not a valid device, DeviceNotFound # # Notes: When this command completes, the device may not be removed # from the guest. 
Hot removal is an operation that requires guest @@ -106,13 +106,13 @@ # # Examples: # -# -> { "execute": "device_del", -# "arguments": { "id": "net1" } } -# <- { "return": {} } +# -> { "execute": "device_del", +# "arguments": { "id": "net1" } } +# <- { "return": {} } # -# -> { "execute": "device_del", -# "arguments": { "id": "/machine/peripheral-anon/device[0]" } } -# <- { "return": {} } +# -> { "execute": "device_del", +# "arguments": { "id": "/machine/peripheral-anon/device[0]" } } +# <- { "return": {} } ## { 'command': 'device_del', 'data': {'id': 'str'} } @@ -132,10 +132,10 @@ # # Example: # -# <- { "event": "DEVICE_DELETED", -# "data": { "device": "virtio-net-pci-0", -# "path": "/machine/peripheral/virtio-net-pci-0" }, -# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } +# <- { "event": "DEVICE_DELETED", +# "data": { "device": "virtio-net-pci-0", +# "path": "/machine/peripheral/virtio-net-pci-0" }, +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } ## { 'event': 'DEVICE_DELETED', 'data': { '*device': 'str', 'path': 'str' } } @@ -154,10 +154,10 @@ # # Example: # -# <- { "event": "DEVICE_UNPLUG_GUEST_ERROR", -# "data": { "device": "core1", -# "path": "/machine/peripheral/core1" }, -# "timestamp": { "seconds": 1615570772, "microseconds": 202844 } } +# <- { "event": "DEVICE_UNPLUG_GUEST_ERROR", +# "data": { "device": "core1", +# "path": "/machine/peripheral/core1" }, +# "timestamp": { "seconds": 1615570772, "microseconds": 202844 } } ## { 'event': 'DEVICE_UNPLUG_GUEST_ERROR', 'data': { '*device': 'str', 'path': 'str' } } diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c index 176b549473c..f3488afeef7 100644 --- a/qapi/qmp-dispatch.c +++ b/qapi/qmp-dispatch.c @@ -212,8 +212,7 @@ QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *requ * executing the command handler so that it can make progress if it * involves an AIO_WAIT_WHILE(). */ - aio_co_schedule(qemu_get_aio_context(), qemu_coroutine_self()); - qemu_coroutine_yield(); + aio_co_reschedule_self(qemu_get_aio_context()); } monitor_set_cur(qemu_coroutine_self(), cur_mon); @@ -227,9 +226,7 @@ QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *requ * Move back to iohandler_ctx so that nested event loops for * qemu_aio_context don't start new monitor commands. */ - aio_co_schedule(iohandler_get_aio_context(), - qemu_coroutine_self()); - qemu_coroutine_yield(); + aio_co_reschedule_self(iohandler_get_aio_context()); } } else { /* diff --git a/qapi/qom.json b/qapi/qom.json index c53ef978ff7..85e6b4f84a2 100644 --- a/qapi/qom.json +++ b/qapi/qom.json @@ -61,12 +61,12 @@ # # Example: # -# -> { "execute": "qom-list", -# "arguments": { "path": "/chardevs" } } -# <- { "return": [ { "name": "type", "type": "string" }, -# { "name": "parallel0", "type": "child" }, -# { "name": "serial0", "type": "child" }, -# { "name": "mon0", "type": "child" } ] } +# -> { "execute": "qom-list", +# "arguments": { "path": "/chardevs" } } +# <- { "return": [ { "name": "type", "type": "string" }, +# { "name": "parallel0", "type": "child" }, +# { "name": "serial0", "type": "child" }, +# { "name": "mon0", "type": "child" } ] } ## { 'command': 'qom-list', 'data': { 'path': 'str' }, @@ -106,19 +106,19 @@ # # Examples: # -# 1. Use absolute path +# 1. 
Use absolute path # -# -> { "execute": "qom-get", -# "arguments": { "path": "/machine/unattached/device[0]", -# "property": "hotplugged" } } -# <- { "return": false } +# -> { "execute": "qom-get", +# "arguments": { "path": "/machine/unattached/device[0]", +# "property": "hotplugged" } } +# <- { "return": false } # -# 2. Use partial path +# 2. Use partial path # -# -> { "execute": "qom-get", -# "arguments": { "path": "unattached/sysbus", -# "property": "type" } } -# <- { "return": "System" } +# -> { "execute": "qom-get", +# "arguments": { "path": "unattached/sysbus", +# "property": "type" } } +# <- { "return": "System" } ## { 'command': 'qom-get', 'data': { 'path': 'str', 'property': 'str' }, @@ -141,11 +141,11 @@ # # Example: # -# -> { "execute": "qom-set", -# "arguments": { "path": "/machine", -# "property": "graphics", -# "value": false } } -# <- { "return": {} } +# -> { "execute": "qom-set", +# "arguments": { "path": "/machine", +# "property": "graphics", +# "value": false } } +# <- { "return": {} } ## { 'command': 'qom-set', 'data': { 'path': 'str', 'property': 'str', 'value': 'any' }, @@ -649,14 +649,14 @@ # # @offset: the offset into the target file that the region starts at. # You can use this option to back multiple regions with a single -# file. Must be a multiple of the page size. +# file. Must be a multiple of the page size. # (default: 0) (since 8.1) # # @discard-data: if true, the file contents can be destroyed when QEMU # exits, to avoid unnecessarily flushing data to the backing file. # Note that @discard-data is only an optimization, and QEMU might # not discard file contents if it aborts unexpectedly or is -# terminated using SIGKILL. (default: false) +# terminated using SIGKILL. (default: false) # # @mem-path: the path to either a shared memory or huge page # filesystem mount @@ -668,19 +668,20 @@ # @readonly: if true, the backing file is opened read-only; if false, # it is opened read-write. (default: false) # -# @rom: whether to create Read Only Memory (ROM) that cannot be modified -# by the VM. Any write attempts to such ROM will be denied. Most -# use cases want writable RAM instead of ROM. However, selected use -# cases, like R/O NVDIMMs, can benefit from ROM. If set to 'on', -# create ROM; if set to 'off', create writable RAM; if set to -# 'auto', the value of the @readonly property is used. This -# property is primarily helpful when we want to have proper RAM in -# configurations that would traditionally create ROM before this -# property was introduced: VM templating, where we want to open a -# file readonly (@readonly set to true) and mark the memory to be -# private for QEMU (@share set to false). For this use case, we need -# writable RAM instead of ROM, and want to set this property to 'off'. -# (default: auto, since 8.2) +# @rom: whether to create Read Only Memory (ROM) that cannot be +# modified by the VM. Any write attempts to such ROM will be +# denied. Most use cases want writable RAM instead of ROM. +# However, selected use cases, like R/O NVDIMMs, can benefit from +# ROM. If set to 'on', create ROM; if set to 'off', create +# writable RAM; if set to 'auto', the value of the @readonly +# property is used. This property is primarily helpful when we +# want to have proper RAM in configurations that would +# traditionally create ROM before this property was introduced: VM +# templating, where we want to open a file readonly (@readonly set +# to true) and mark the memory to be private for QEMU (@share set +# to false). 
For this use case, we need writable RAM instead of +# ROM, and want to set this property to 'off'. (default: auto, +# since 8.2) # # Since: 2.1 ## @@ -794,6 +795,37 @@ { 'struct': 'VfioUserServerProperties', 'data': { 'socket': 'SocketAddress', 'device': 'str' } } +## +# @IOMMUFDProperties: +# +# Properties for iommufd objects. +# +# @fd: file descriptor name previously passed via 'getfd' command, +# which represents a pre-opened /dev/iommu. This allows the +# iommufd object to be shared across several subsystems (VFIO, +# VDPA, ...), and the file descriptor to be shared with other +# process, e.g. DPDK. (default: QEMU opens /dev/iommu by itself) +# +# Since: 9.0 +## +{ 'struct': 'IOMMUFDProperties', + 'data': { '*fd': 'str' } } + +## +# @AcpiGenericInitiatorProperties: +# +# Properties for acpi-generic-initiator objects. +# +# @pci-dev: PCI device ID to be associated with the node +# +# @node: NUMA node associated with the PCI device +# +# Since: 9.0 +## +{ 'struct': 'AcpiGenericInitiatorProperties', + 'data': { 'pci-dev': 'str', + 'node': 'uint32' } } + ## # @RngProperties: # @@ -911,6 +943,7 @@ ## { 'enum': 'ObjectType', 'data': [ + 'acpi-generic-initiator', 'authz-list', 'authz-listfile', 'authz-pam', @@ -934,6 +967,7 @@ 'input-barrier', { 'name': 'input-linux', 'if': 'CONFIG_LINUX' }, + 'iommufd', 'iothread', 'main-loop', { 'name': 'memory-backend-epc', @@ -981,6 +1015,7 @@ 'id': 'str' }, 'discriminator': 'qom-type', 'data': { + 'acpi-generic-initiator': 'AcpiGenericInitiatorProperties', 'authz-list': 'AuthZListProperties', 'authz-listfile': 'AuthZListFileProperties', 'authz-pam': 'AuthZPAMProperties', @@ -1003,6 +1038,7 @@ 'input-barrier': 'InputBarrierProperties', 'input-linux': { 'type': 'InputLinuxProperties', 'if': 'CONFIG_LINUX' }, + 'iommufd': 'IOMMUFDProperties', 'iothread': 'IothreadProperties', 'main-loop': 'MainLoopProperties', 'memory-backend-epc': { 'type': 'MemoryBackendEpcProperties', @@ -1037,17 +1073,17 @@ # # Create a QOM object. 
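Tying the new iommufd object above to the getfd command documented earlier in this patch, a sketch of the QMP exchange; the fd name and object id are placeholders:

    import json

    # The IOMMUFDProperties hunk above says @fd is "a file descriptor
    # name previously passed via 'getfd'", so the flow is: hand the
    # already-open /dev/iommu descriptor to QEMU (getfd expects it as
    # SCM_RIGHTS ancillary data, as in the earlier sketch), then create
    # the object that refers to it by name.
    cmds = [
        {"execute": "getfd", "arguments": {"fdname": "iommu-fd"}},
        {"execute": "object-add",
         "arguments": {"qom-type": "iommufd", "id": "iommufd0",
                       "fd": "iommu-fd"}},
    ]
    for cmd in cmds:
        print(json.dumps(cmd))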
# -# Returns: Nothing on success Error if @qom-type is not a valid class -# name +# Errors: +# - Error if @qom-type is not a valid class name # # Since: 2.0 # # Example: # -# -> { "execute": "object-add", -# "arguments": { "qom-type": "rng-random", "id": "rng1", -# "filename": "/dev/hwrng" } } -# <- { "return": {} } +# -> { "execute": "object-add", +# "arguments": { "qom-type": "rng-random", "id": "rng1", +# "filename": "/dev/hwrng" } } +# <- { "return": {} } ## { 'command': 'object-add', 'data': 'ObjectOptions', 'boxed': true, 'allow-preconfig': true } @@ -1059,15 +1095,15 @@ # # @id: the name of the QOM object to remove # -# Returns: Nothing on success Error if @id is not a valid id for a QOM -# object +# Errors: +# - Error if @id is not a valid id for a QOM object # # Since: 2.0 # # Example: # -# -> { "execute": "object-del", "arguments": { "id": "rng1" } } -# <- { "return": {} } +# -> { "execute": "object-del", "arguments": { "id": "rng1" } } +# <- { "return": {} } ## { 'command': 'object-del', 'data': {'id': 'str'}, 'allow-preconfig': true } diff --git a/qapi/rdma.json b/qapi/rdma.json index 23ebcf7885e..195c001850b 100644 --- a/qapi/rdma.json +++ b/qapi/rdma.json @@ -23,13 +23,13 @@ # # Example: # -# <- {"timestamp": {"seconds": 1541579657, "microseconds": 986760}, -# "event": "RDMA_GID_STATUS_CHANGED", -# "data": -# {"netdev": "bridge0", -# "interface-id": 15880512517475447892, -# "gid-status": true, -# "subnet-prefix": 33022}} +# <- {"timestamp": {"seconds": 1541579657, "microseconds": 986760}, +# "event": "RDMA_GID_STATUS_CHANGED", +# "data": +# {"netdev": "bridge0", +# "interface-id": 15880512517475447892, +# "gid-status": true, +# "subnet-prefix": 33022}} ## { 'event': 'RDMA_GID_STATUS_CHANGED', 'data': { 'netdev' : 'str', diff --git a/qapi/replay.json b/qapi/replay.json index 289b2d36580..d3559f9c8f7 100644 --- a/qapi/replay.json +++ b/qapi/replay.json @@ -56,8 +56,8 @@ # # Example: # -# -> { "execute": "query-replay" } -# <- { "return": { "mode": "play", "filename": "log.rr", "icount": 220414 } } +# -> { "execute": "query-replay" } +# <- { "return": { "mode": "play", "filename": "log.rr", "icount": 220414 } } ## { 'command': 'query-replay', 'returns': 'ReplayInfo' } @@ -78,8 +78,8 @@ # # Example: # -# -> { "execute": "replay-break", "arguments": { "icount": 220414 } } -# <- { "return": {} } +# -> { "execute": "replay-break", "arguments": { "icount": 220414 } } +# <- { "return": {} } ## { 'command': 'replay-break', 'data': { 'icount': 'int' } } @@ -93,8 +93,8 @@ # # Example: # -# -> { "execute": "replay-delete-break" } -# <- { "return": {} } +# -> { "execute": "replay-delete-break" } +# <- { "return": {} } ## { 'command': 'replay-delete-break' } @@ -105,8 +105,8 @@ # replaying the execution. The command automatically loads nearest # snapshot and replays the execution to find the desired instruction. # When there is no preceding snapshot or the execution is not -# replayed, then the command fails. icount for the reference may be -# obtained with @query-replay command. +# replayed, then the command fails. Instruction count can be obtained +# with the @query-replay command. 
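Since the reworded text above points at query-replay as the source of the instruction count, a small sketch of the resulting round trip; the rewind step is arbitrary and only the message payloads are shown:

    import json

    def rewind_request(current_icount, step=100000):
        # Ask replay-seek to go back roughly @step instructions; it
        # loads the nearest prior snapshot and replays forward.
        target = max(current_icount - step, 0)
        return {"execute": "replay-seek", "arguments": {"icount": target}}

    # query-replay reply as in the example above:
    reply = {"return": {"mode": "play", "filename": "log.rr",
                        "icount": 220414}}
    print(json.dumps(rewind_request(reply["return"]["icount"])))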
# # @icount: target instruction count # @@ -114,7 +114,7 @@ # # Example: # -# -> { "execute": "replay-seek", "arguments": { "icount": 220414 } } -# <- { "return": {} } +# -> { "execute": "replay-seek", "arguments": { "icount": 220414 } } +# <- { "return": {} } ## { 'command': 'replay-seek', 'data': { 'icount': 'int' } } diff --git a/qapi/rocker.json b/qapi/rocker.json index 31ce0b36f69..5635cf174fd 100644 --- a/qapi/rocker.json +++ b/qapi/rocker.json @@ -32,8 +32,8 @@ # # Example: # -# -> { "execute": "query-rocker", "arguments": { "name": "sw1" } } -# <- { "return": {"name": "sw1", "ports": 2, "id": 1327446905938}} +# -> { "execute": "query-rocker", "arguments": { "name": "sw1" } } +# <- { "return": {"name": "sw1", "ports": 2, "id": 1327446905938}} ## { 'command': 'query-rocker', 'data': { 'name': 'str' }, @@ -100,12 +100,12 @@ # # Example: # -# -> { "execute": "query-rocker-ports", "arguments": { "name": "sw1" } } -# <- { "return": [ {"duplex": "full", "enabled": true, "name": "sw1.1", -# "autoneg": "off", "link-up": true, "speed": 10000}, -# {"duplex": "full", "enabled": true, "name": "sw1.2", -# "autoneg": "off", "link-up": true, "speed": 10000} -# ]} +# -> { "execute": "query-rocker-ports", "arguments": { "name": "sw1" } } +# <- { "return": [ {"duplex": "full", "enabled": true, "name": "sw1.1", +# "autoneg": "off", "link-up": true, "speed": 10000}, +# {"duplex": "full", "enabled": true, "name": "sw1.2", +# "autoneg": "off", "link-up": true, "speed": 10000} +# ]} ## { 'command': 'query-rocker-ports', 'data': { 'name': 'str' }, @@ -242,16 +242,16 @@ # # Example: # -# -> { "execute": "query-rocker-of-dpa-flows", -# "arguments": { "name": "sw1" } } -# <- { "return": [ {"key": {"in-pport": 0, "priority": 1, "tbl-id": 0}, -# "hits": 138, -# "cookie": 0, -# "action": {"goto-tbl": 10}, -# "mask": {"in-pport": 4294901760} -# }, -# {...more...}, -# ]} +# -> { "execute": "query-rocker-of-dpa-flows", +# "arguments": { "name": "sw1" } } +# <- { "return": [ {"key": {"in-pport": 0, "priority": 1, "tbl-id": 0}, +# "hits": 138, +# "cookie": 0, +# "action": {"goto-tbl": 10}, +# "mask": {"in-pport": 4294901760} +# }, +# {...more...}, +# ]} ## { 'command': 'query-rocker-of-dpa-flows', 'data': { 'name': 'str', '*tbl-id': 'uint32' }, @@ -317,21 +317,21 @@ # # Example: # -# -> { "execute": "query-rocker-of-dpa-groups", -# "arguments": { "name": "sw1" } } -# <- { "return": [ {"type": 0, "out-pport": 2, -# "pport": 2, "vlan-id": 3841, -# "pop-vlan": 1, "id": 251723778}, -# {"type": 0, "out-pport": 0, -# "pport": 0, "vlan-id": 3841, -# "pop-vlan": 1, "id": 251723776}, -# {"type": 0, "out-pport": 1, -# "pport": 1, "vlan-id": 3840, -# "pop-vlan": 1, "id": 251658241}, -# {"type": 0, "out-pport": 0, -# "pport": 0, "vlan-id": 3840, -# "pop-vlan": 1, "id": 251658240} -# ]} +# -> { "execute": "query-rocker-of-dpa-groups", +# "arguments": { "name": "sw1" } } +# <- { "return": [ {"type": 0, "out-pport": 2, +# "pport": 2, "vlan-id": 3841, +# "pop-vlan": 1, "id": 251723778}, +# {"type": 0, "out-pport": 0, +# "pport": 0, "vlan-id": 3841, +# "pop-vlan": 1, "id": 251723776}, +# {"type": 0, "out-pport": 1, +# "pport": 1, "vlan-id": 3840, +# "pop-vlan": 1, "id": 251658241}, +# {"type": 0, "out-pport": 0, +# "pport": 0, "vlan-id": 3840, +# "pop-vlan": 1, "id": 251658240} +# ]} ## { 'command': 'query-rocker-of-dpa-groups', 'data': { 'name': 'str', '*type': 'uint8' }, diff --git a/qapi/run-state.json b/qapi/run-state.json index f216ba54ec4..f8773f23b29 100644 --- a/qapi/run-state.json +++ b/qapi/run-state.json @@ -91,7 +91,7 @@ 
# # @snapshot-load: A snapshot is being loaded by the record & replay # subsystem. This value is used only within QEMU. It doesn't -# occur in QMP. (since 7.2) +# occur in QMP. (since 7.2) ## { 'enum': 'ShutdownCause', # Beware, shutdown_caused_by_guest() depends on enumeration order @@ -102,46 +102,32 @@ ## # @StatusInfo: # -# Information about VCPU run state +# Information about VM run state # # @running: true if all VCPUs are runnable, false if not runnable # -# @singlestep: true if using TCG with one guest instruction per -# translation block -# # @status: the virtual machine @RunState # -# Features: -# -# @deprecated: Member 'singlestep' is deprecated (with no -# replacement). -# # Since: 0.14 -# -# Notes: @singlestep is enabled on the command line with '-accel -# tcg,one-insn-per-tb=on', or with the HMP 'one-insn-per-tb' -# command. ## { 'struct': 'StatusInfo', 'data': {'running': 'bool', - 'singlestep': { 'type': 'bool', 'features': [ 'deprecated' ]}, 'status': 'RunState'} } ## # @query-status: # -# Query the run status of all VCPUs +# Query the run status of the VM # -# Returns: @StatusInfo reflecting all VCPUs +# Returns: @StatusInfo reflecting the VM # # Since: 0.14 # # Example: # -# -> { "execute": "query-status" } -# <- { "return": { "running": true, -# "singlestep": false, -# "status": "running" } } +# -> { "execute": "query-status" } +# <- { "return": { "running": true, +# "status": "running" } } ## { 'command': 'query-status', 'returns': 'StatusInfo', 'allow-preconfig': true } @@ -155,10 +141,10 @@ # @guest: If true, the shutdown was triggered by a guest request (such # as a guest-initiated ACPI shutdown request or other # hardware-specific action) rather than a host request (such as -# sending qemu a SIGINT). (since 2.10) +# sending qemu a SIGINT). (since 2.10) # -# @reason: The @ShutdownCause which resulted in the SHUTDOWN. (since -# 4.0) +# @reason: The @ShutdownCause which resulted in the SHUTDOWN. +# (since 4.0) # # Note: If the command-line option "-no-shutdown" has been specified, # qemu will not exit, and a STOP event will eventually follow the @@ -168,9 +154,9 @@ # # Example: # -# <- { "event": "SHUTDOWN", -# "data": { "guest": true, "reason": "guest-shutdown" }, -# "timestamp": { "seconds": 1267040730, "microseconds": 682951 } } +# <- { "event": "SHUTDOWN", +# "data": { "guest": true, "reason": "guest-shutdown" }, +# "timestamp": { "seconds": 1267040730, "microseconds": 682951 } } ## { 'event': 'SHUTDOWN', 'data': { 'guest': 'bool', 'reason': 'ShutdownCause' } } @@ -184,8 +170,8 @@ # # Example: # -# <- { "event": "POWERDOWN", -# "timestamp": { "seconds": 1267040730, "microseconds": 682951 } } +# <- { "event": "POWERDOWN", +# "timestamp": { "seconds": 1267040730, "microseconds": 682951 } } ## { 'event': 'POWERDOWN' } @@ -197,17 +183,17 @@ # @guest: If true, the reset was triggered by a guest request (such as # a guest-initiated ACPI reboot request or other hardware-specific # action) rather than a host request (such as the QMP command -# system_reset). (since 2.10) +# system_reset). (since 2.10) # -# @reason: The @ShutdownCause of the RESET. (since 4.0) +# @reason: The @ShutdownCause of the RESET. 
(since 4.0) # # Since: 0.12 # # Example: # -# <- { "event": "RESET", -# "data": { "guest": false, "reason": "guest-reset" }, -# "timestamp": { "seconds": 1267041653, "microseconds": 9518 } } +# <- { "event": "RESET", +# "data": { "guest": false, "reason": "guest-reset" }, +# "timestamp": { "seconds": 1267041653, "microseconds": 9518 } } ## { 'event': 'RESET', 'data': { 'guest': 'bool', 'reason': 'ShutdownCause' } } @@ -220,8 +206,8 @@ # # Example: # -# <- { "event": "STOP", -# "timestamp": { "seconds": 1267041730, "microseconds": 281295 } } +# <- { "event": "STOP", +# "timestamp": { "seconds": 1267041730, "microseconds": 281295 } } ## { 'event': 'STOP' } @@ -234,8 +220,8 @@ # # Example: # -# <- { "event": "RESUME", -# "timestamp": { "seconds": 1271770767, "microseconds": 582542 } } +# <- { "event": "RESUME", +# "timestamp": { "seconds": 1271770767, "microseconds": 582542 } } ## { 'event': 'RESUME' } @@ -249,8 +235,8 @@ # # Example: # -# <- { "event": "SUSPEND", -# "timestamp": { "seconds": 1344456160, "microseconds": 309119 } } +# <- { "event": "SUSPEND", +# "timestamp": { "seconds": 1344456160, "microseconds": 309119 } } ## { 'event': 'SUSPEND' } @@ -268,8 +254,8 @@ # # Example: # -# <- { "event": "SUSPEND_DISK", -# "timestamp": { "seconds": 1344456160, "microseconds": 309119 } } +# <- { "event": "SUSPEND_DISK", +# "timestamp": { "seconds": 1344456160, "microseconds": 309119 } } ## { 'event': 'SUSPEND_DISK' } @@ -283,8 +269,8 @@ # # Example: # -# <- { "event": "WAKEUP", -# "timestamp": { "seconds": 1344522075, "microseconds": 745528 } } +# <- { "event": "WAKEUP", +# "timestamp": { "seconds": 1344522075, "microseconds": 745528 } } ## { 'event': 'WAKEUP' } @@ -305,9 +291,9 @@ # # Example: # -# <- { "event": "WATCHDOG", -# "data": { "action": "reset" }, -# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } +# <- { "event": "WATCHDOG", +# "data": { "action": "reset" }, +# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } ## { 'event': 'WATCHDOG', 'data': { 'action': 'WatchdogAction' } } @@ -390,9 +376,17 @@ ## # @watchdog-set-action: # -# Set watchdog action +# Set watchdog action. +# +# @action: @WatchdogAction action taken when watchdog timer expires. # # Since: 2.11 +# +# Example: +# +# -> { "execute": "watchdog-set-action", +# "arguments": { "action": "inject-nmi" } } +# <- { "return": {} } ## { 'command': 'watchdog-set-action', 'data' : {'action': 'WatchdogAction'} } @@ -408,21 +402,18 @@ # # @panic: @PanicAction action taken on guest panic. # -# @watchdog: @WatchdogAction action taken when watchdog timer expires -# . -# -# Returns: Nothing on success. +# @watchdog: @WatchdogAction action taken when watchdog timer expires. 
# # Since: 6.0 # # Example: # -# -> { "execute": "set-action", -# "arguments": { "reboot": "shutdown", -# "shutdown" : "pause", -# "panic": "pause", -# "watchdog": "inject-nmi" } } -# <- { "return": {} } +# -> { "execute": "set-action", +# "arguments": { "reboot": "shutdown", +# "shutdown" : "pause", +# "panic": "pause", +# "watchdog": "inject-nmi" } } +# <- { "return": {} } ## { 'command': 'set-action', 'data': { '*reboot': 'RebootAction', @@ -444,9 +435,9 @@ # # Example: # -# <- { "event": "GUEST_PANICKED", -# "data": { "action": "pause" }, -# "timestamp": { "seconds": 1648245231, "microseconds": 900001 } } +# <- { "event": "GUEST_PANICKED", +# "data": { "action": "pause" }, +# "timestamp": { "seconds": 1648245231, "microseconds": 900001 } } ## { 'event': 'GUEST_PANICKED', 'data': { 'action': 'GuestPanicAction', '*info': 'GuestPanicInformation' } } @@ -464,9 +455,9 @@ # # Example: # -# <- { "event": "GUEST_CRASHLOADED", -# "data": { "action": "run" }, -# "timestamp": { "seconds": 1648245259, "microseconds": 893771 } } +# <- { "event": "GUEST_CRASHLOADED", +# "data": { "action": "run" }, +# "timestamp": { "seconds": 1648245259, "microseconds": 893771 } } ## { 'event': 'GUEST_CRASHLOADED', 'data': { 'action': 'GuestPanicAction', '*info': 'GuestPanicInformation' } } @@ -521,6 +512,22 @@ # # Hyper-V specific guest panic information (HV crash MSRs) # +# @arg1: for Windows, STOP code for the guest crash. For Linux, +# an error code. +# +# @arg2: for Windows, first argument of the STOP. For Linux, the +# guest OS ID, which has the kernel version in bits 16-47 +# and 0x8100 in bits 48-63. +# +# @arg3: for Windows, second argument of the STOP. For Linux, the +# program counter of the guest. +# +# @arg4: for Windows, third argument of the STOP. For Linux, the +# RAX register (x86) or the stack pointer (aarch64) of the guest. +# +# @arg5: for Windows, fourth argument of the STOP. For x86 Linux, the +# stack pointer of the guest. +# # Since: 2.9 ## {'struct': 'GuestPanicInformationHyperV', @@ -584,22 +591,20 @@ # # @recipient: recipient is defined as @MemoryFailureRecipient. # -# @action: action that has been taken. action is defined as -# @MemoryFailureAction. +# @action: action that has been taken. # -# @flags: flags for MemoryFailureAction. action is defined as -# @MemoryFailureFlags. +# @flags: flags for MemoryFailureAction. # # Since: 5.2 # # Example: # -# <- { "event": "MEMORY_FAILURE", -# "data": { "recipient": "hypervisor", -# "action": "fatal", -# "flags": { "action-required": false, -# "recursive": false } }, -# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } +# <- { "event": "MEMORY_FAILURE", +# "data": { "recipient": "hypervisor", +# "action": "fatal", +# "flags": { "action-required": false, +# "recursive": false } }, +# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } ## { 'event': 'MEMORY_FAILURE', 'data': { 'recipient': 'MemoryFailureRecipient', diff --git a/qapi/sockets.json b/qapi/sockets.json index 62131545255..aa97c897687 100644 --- a/qapi/sockets.json +++ b/qapi/sockets.json @@ -5,8 +5,6 @@ # = Socket data types ## -{ 'include': 'common.json' } - ## # @NetworkAddressFamily: # @@ -60,7 +58,7 @@ # @keep-alive: enable keep-alive when connecting to this socket. Not # supported for passive sockets. (Since 4.2) # -# @mptcp: enable multi-path TCP. (Since 6.1) +# @mptcp: enable multi-path TCP. (Since 6.1) # # Since: 1.3 ## @@ -116,9 +114,28 @@ 'cid': 'str', 'port': 'str' } } +## +# @FdSocketAddress: +# +# A file descriptor name or number. 
+# +# @str: decimal is for file descriptor number, otherwise it's a file +# descriptor name. Named file descriptors are permitted in +# monitor commands, in combination with the 'getfd' command. +# Decimal file descriptors are permitted at startup or other +# contexts where no monitor context is active. +# +# Since: 1.2 +## +{ 'struct': 'FdSocketAddress', + 'data': { + 'str': 'str' } } + ## # @InetSocketAddressWrapper: # +# @data: internet domain socket address +# # Since: 1.3 ## { 'struct': 'InetSocketAddressWrapper', @@ -127,6 +144,8 @@ ## # @UnixSocketAddressWrapper: # +# @data: UNIX domain socket address +# # Since: 1.3 ## { 'struct': 'UnixSocketAddressWrapper', @@ -135,18 +154,22 @@ ## # @VsockSocketAddressWrapper: # +# @data: VSOCK domain socket address +# # Since: 2.8 ## { 'struct': 'VsockSocketAddressWrapper', 'data': { 'data': 'VsockSocketAddress' } } ## -# @StringWrapper: +# @FdSocketAddressWrapper: +# +# @data: file descriptor name or number # # Since: 1.3 ## -{ 'struct': 'StringWrapper', - 'data': { 'data': 'String' } } +{ 'struct': 'FdSocketAddressWrapper', + 'data': { 'data': 'FdSocketAddress' } } ## # @SocketAddressLegacy: @@ -154,6 +177,8 @@ # Captures the address of a socket, which could also be a named file # descriptor # +# @type: Transport type +# # Note: This type is deprecated in favor of SocketAddress. The # difference between SocketAddressLegacy and SocketAddress is that # the latter has fewer {} on the wire. @@ -167,7 +192,7 @@ 'inet': 'InetSocketAddressWrapper', 'unix': 'UnixSocketAddressWrapper', 'vsock': 'VsockSocketAddressWrapper', - 'fd': 'StringWrapper' } } + 'fd': 'FdSocketAddressWrapper' } } ## # @SocketAddressType: @@ -180,11 +205,7 @@ # # @vsock: VMCI address # -# @fd: decimal is for file descriptor number, otherwise a file -# descriptor name. Named file descriptors are permitted in -# monitor commands, in combination with the 'getfd' command. -# Decimal file descriptors are permitted at startup or other -# contexts where no monitor context is active. +# @fd: Socket file descriptor # # Since: 2.9 ## @@ -194,7 +215,7 @@ ## # @SocketAddress: # -# Captures the address of a socket, which could also be a named file +# Captures the address of a socket, which could also be a socket file # descriptor # # @type: Transport type @@ -207,4 +228,4 @@ 'data': { 'inet': 'InetSocketAddress', 'unix': 'UnixSocketAddress', 'vsock': 'VsockSocketAddress', - 'fd': 'String' } } + 'fd': 'FdSocketAddress' } } diff --git a/qapi/stats.json b/qapi/stats.json index 01791e86d5f..578b52c7ef7 100644 --- a/qapi/stats.json +++ b/qapi/stats.json @@ -114,11 +114,13 @@ # # The arguments to the query-stats command; specifies a target for # which to request statistics and optionally the required subset of -# information for that target: +# information for that target. # -# - which vCPUs to request statistics for -# - which providers to request statistics from -# - which named values to return within each provider +# @target: the kind of objects to query. Note that each possible +# target may enable additional filtering options +# +# @providers: which providers to request statistics from, and optionally +# which named values to return within each provider # # Since: 7.1 ## @@ -134,6 +136,8 @@ # # @scalar: single unsigned 64-bit integers. # +# @boolean: single boolean value. +# # @list: list of unsigned 64-bit integers (used for histograms). # # Since: 7.1 @@ -252,6 +256,8 @@ # # Return the schema for all available runtime-collected statistics. 
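For readers comparing the reworked 'fd' branch in the sockets.json hunk above: SocketAddressLegacy keeps the wrapper object that the flat SocketAddress drops, which is what the existing "fewer {} on the wire" note refers to. A sketch of the same fd address in both encodings, with a made-up descriptor name:

    import json

    FD_NAME = "migrate-fd"   # placeholder, e.g. previously named via 'getfd'

    # SocketAddressLegacy: simple union, branch payload nested in "data".
    legacy = {"type": "fd", "data": {"str": FD_NAME}}

    # SocketAddress: flat union, the FdSocketAddress member sits next
    # to "type".
    flat = {"type": "fd", "str": FD_NAME}

    print(json.dumps(legacy))
    print(json.dumps(flat))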
# +# @provider: a provider to restrict the query to. +# # Note: runtime-collected statistics and their names fall outside # QEMU's usual deprecation policies. QEMU will try to keep the # set of available data stable, together with their names, but diff --git a/qapi/string-output-visitor.c b/qapi/string-output-visitor.c index c0cb72dbe4d..5115536b153 100644 --- a/qapi/string-output-visitor.c +++ b/qapi/string-output-visitor.c @@ -65,6 +65,7 @@ struct StringOutputVisitor } range_start, range_end; GList *ranges; void *list; /* Only needed for sanity checking the caller */ + unsigned int struct_nesting; }; static StringOutputVisitor *to_sov(Visitor *v) @@ -144,6 +145,10 @@ static bool print_type_int64(Visitor *v, const char *name, int64_t *obj, StringOutputVisitor *sov = to_sov(v); GList *l; + if (sov->struct_nesting) { + return true; + } + switch (sov->list_mode) { case LM_NONE: string_output_append(sov, *obj); @@ -231,6 +236,10 @@ static bool print_type_size(Visitor *v, const char *name, uint64_t *obj, uint64_t val; char *out, *psize; + if (sov->struct_nesting) { + return true; + } + if (!sov->human) { out = g_strdup_printf("%"PRIu64, *obj); string_output_set(sov, out); @@ -250,6 +259,11 @@ static bool print_type_bool(Visitor *v, const char *name, bool *obj, Error **errp) { StringOutputVisitor *sov = to_sov(v); + + if (sov->struct_nesting) { + return true; + } + string_output_set(sov, g_strdup(*obj ? "true" : "false")); return true; } @@ -260,6 +274,10 @@ static bool print_type_str(Visitor *v, const char *name, char **obj, StringOutputVisitor *sov = to_sov(v); char *out; + if (sov->struct_nesting) { + return true; + } + if (sov->human) { out = *obj ? g_strdup_printf("\"%s\"", *obj) : g_strdup(""); } else { @@ -273,6 +291,11 @@ static bool print_type_number(Visitor *v, const char *name, double *obj, Error **errp) { StringOutputVisitor *sov = to_sov(v); + + if (sov->struct_nesting) { + return true; + } + string_output_set(sov, g_strdup_printf("%.17g", *obj)); return true; } @@ -283,6 +306,10 @@ static bool print_type_null(Visitor *v, const char *name, QNull **obj, StringOutputVisitor *sov = to_sov(v); char *out; + if (sov->struct_nesting) { + return true; + } + if (sov->human) { out = g_strdup(""); } else { @@ -292,12 +319,37 @@ static bool print_type_null(Visitor *v, const char *name, QNull **obj, return true; } +static bool start_struct(Visitor *v, const char *name, void **obj, + size_t size, Error **errp) +{ + StringOutputVisitor *sov = to_sov(v); + + sov->struct_nesting++; + return true; +} + +static void end_struct(Visitor *v, void **obj) +{ + StringOutputVisitor *sov = to_sov(v); + + if (--sov->struct_nesting) { + return; + } + + /* TODO actually print struct fields */ + string_output_set(sov, g_strdup("")); +} + static bool start_list(Visitor *v, const char *name, GenericList **list, size_t size, Error **errp) { StringOutputVisitor *sov = to_sov(v); + if (sov->struct_nesting) { + return true; + } + /* we can't traverse a list in a list */ assert(sov->list_mode == LM_NONE); /* We don't support visits without a list */ @@ -315,6 +367,10 @@ static GenericList *next_list(Visitor *v, GenericList *tail, size_t size) StringOutputVisitor *sov = to_sov(v); GenericList *ret = tail->next; + if (sov->struct_nesting) { + return ret; + } + if (ret && !ret->next) { sov->list_mode = LM_END; } @@ -325,6 +381,10 @@ static void end_list(Visitor *v, void **obj) { StringOutputVisitor *sov = to_sov(v); + if (sov->struct_nesting) { + return; + } + assert(sov->list == obj); assert(sov->list_mode == LM_STARTED 
|| sov->list_mode == LM_END || @@ -379,6 +439,8 @@ Visitor *string_output_visitor_new(bool human, char **result) v->visitor.type_str = print_type_str; v->visitor.type_number = print_type_number; v->visitor.type_null = print_type_null; + v->visitor.start_struct = start_struct; + v->visitor.end_struct = end_struct; v->visitor.start_list = start_list; v->visitor.next_list = next_list; v->visitor.end_list = end_list; diff --git a/qapi/tpm.json b/qapi/tpm.json index a754455ca55..1577b5c259d 100644 --- a/qapi/tpm.json +++ b/qapi/tpm.json @@ -33,8 +33,8 @@ # # Example: # -# -> { "execute": "query-tpm-models" } -# <- { "return": [ "tpm-tis", "tpm-crb", "tpm-spapr" ] } +# -> { "execute": "query-tpm-models" } +# <- { "return": [ "tpm-tis", "tpm-crb", "tpm-spapr" ] } ## { 'command': 'query-tpm-models', 'returns': ['TpmModel'], 'if': 'CONFIG_TPM' } @@ -64,8 +64,8 @@ # # Example: # -# -> { "execute": "query-tpm-types" } -# <- { "return": [ "passthrough", "emulator" ] } +# -> { "execute": "query-tpm-types" } +# <- { "return": [ "passthrough", "emulator" ] } ## { 'command': 'query-tpm-types', 'returns': ['TpmType'], 'if': 'CONFIG_TPM' } @@ -102,6 +102,8 @@ ## # @TPMPassthroughOptionsWrapper: # +# @data: Information about the TPM passthrough type +# # Since: 1.5 ## { 'struct': 'TPMPassthroughOptionsWrapper', @@ -111,6 +113,8 @@ ## # @TPMEmulatorOptionsWrapper: # +# @data: Information about the TPM emulator type +# # Since: 2.11 ## { 'struct': 'TPMEmulatorOptionsWrapper', @@ -162,27 +166,25 @@ # # Return information about the TPM device # -# Returns: @TPMInfo on success -# # Since: 1.5 # # Example: # -# -> { "execute": "query-tpm" } -# <- { "return": -# [ -# { "model": "tpm-tis", -# "options": -# { "type": "passthrough", -# "data": -# { "cancel-path": "/sys/class/misc/tpm0/device/cancel", -# "path": "/dev/tpm0" -# } -# }, -# "id": "tpm0" +# -> { "execute": "query-tpm" } +# <- { "return": +# [ +# { "model": "tpm-tis", +# "options": +# { "type": "passthrough", +# "data": +# { "cancel-path": "/sys/class/misc/tpm0/device/cancel", +# "path": "/dev/tpm0" +# } +# }, +# "id": "tpm0" +# } +# ] # } -# ] -# } ## { 'command': 'query-tpm', 'returns': ['TPMInfo'], 'if': 'CONFIG_TPM' } diff --git a/qapi/trace.json b/qapi/trace.json index 2077d7e117b..043d12f83e0 100644 --- a/qapi/trace.json +++ b/qapi/trace.json @@ -66,9 +66,9 @@ # # Example: # -# -> { "execute": "trace-event-get-state", -# "arguments": { "name": "qemu_memalign" } } -# <- { "return": [ { "name": "qemu_memalign", "state": "disabled", "vcpu": false } ] } +# -> { "execute": "trace-event-get-state", +# "arguments": { "name": "qemu_memalign" } } +# <- { "return": [ { "name": "qemu_memalign", "state": "disabled", "vcpu": false } ] } ## { 'command': 'trace-event-get-state', 'data': {'name': 'str', @@ -96,9 +96,9 @@ # # Example: # -# -> { "execute": "trace-event-set-state", -# "arguments": { "name": "qemu_memalign", "enable": true } } -# <- { "return": {} } +# -> { "execute": "trace-event-set-state", +# "arguments": { "name": "qemu_memalign", "enable": true } } +# <- { "return": {} } ## { 'command': 'trace-event-set-state', 'data': {'name': 'str', 'enable': 'bool', '*ignore-unavailable': 'bool', diff --git a/qapi/transaction.json b/qapi/transaction.json index cffee2de28d..5749c133d4a 100644 --- a/qapi/transaction.json +++ b/qapi/transaction.json @@ -158,6 +158,8 @@ # A discriminated record of operations that can be performed with # @transaction. 
# +# @type: the operation to be performed +# # Since: 1.1 ## { 'union': 'TransactionAction', @@ -232,9 +234,8 @@ # execution of the transaction. See @TransactionProperties for # additional detail. # -# Returns: nothing on success -# -# Errors depend on the operations of the transaction +# Errors: +# Any errors from commands in the transaction # # Note: The transaction aborts on the first failure. Therefore, there # will be information on only one failed operation returned in an @@ -245,24 +246,24 @@ # # Example: # -# -> { "execute": "transaction", -# "arguments": { "actions": [ -# { "type": "blockdev-snapshot-sync", "data" : { "device": "ide-hd0", -# "snapshot-file": "/some/place/my-image", -# "format": "qcow2" } }, -# { "type": "blockdev-snapshot-sync", "data" : { "node-name": "myfile", -# "snapshot-file": "/some/place/my-image2", -# "snapshot-node-name": "node3432", -# "mode": "existing", -# "format": "qcow2" } }, -# { "type": "blockdev-snapshot-sync", "data" : { "device": "ide-hd1", -# "snapshot-file": "/some/place/my-image2", -# "mode": "existing", -# "format": "qcow2" } }, -# { "type": "blockdev-snapshot-internal-sync", "data" : { -# "device": "ide-hd2", -# "name": "snapshot0" } } ] } } -# <- { "return": {} } +# -> { "execute": "transaction", +# "arguments": { "actions": [ +# { "type": "blockdev-snapshot-sync", "data" : { "device": "ide-hd0", +# "snapshot-file": "/some/place/my-image", +# "format": "qcow2" } }, +# { "type": "blockdev-snapshot-sync", "data" : { "node-name": "myfile", +# "snapshot-file": "/some/place/my-image2", +# "snapshot-node-name": "node3432", +# "mode": "existing", +# "format": "qcow2" } }, +# { "type": "blockdev-snapshot-sync", "data" : { "device": "ide-hd1", +# "snapshot-file": "/some/place/my-image2", +# "mode": "existing", +# "format": "qcow2" } }, +# { "type": "blockdev-snapshot-internal-sync", "data" : { +# "device": "ide-hd2", +# "name": "snapshot0" } } ] } } +# <- { "return": {} } ## { 'command': 'transaction', 'data': { 'actions': [ 'TransactionAction' ], diff --git a/qapi/ui.json b/qapi/ui.json index a0158baf231..f610bce118a 100644 --- a/qapi/ui.json +++ b/qapi/ui.json @@ -63,7 +63,7 @@ ## # @SetPasswordOptionsVnc: # -# Options for set_password specific to the VNC procotol. +# Options for set_password specific to the VNC protocol. # # @display: The id of the display where the password should be # changed. Defaults to the first. @@ -78,17 +78,16 @@ # # Set the password of a remote display server. # -# Returns: -# - Nothing on success +# Errors: # - If Spice is not enabled, DeviceNotFound # # Since: 0.14 # # Example: # -# -> { "execute": "set_password", "arguments": { "protocol": "vnc", -# "password": "secret" } } -# <- { "return": {} } +# -> { "execute": "set_password", "arguments": { "protocol": "vnc", +# "password": "secret" } } +# <- { "return": {} } ## { 'command': 'set_password', 'boxed': true, 'data': 'SetPasswordOptions' } @@ -125,7 +124,7 @@ ## # @ExpirePasswordOptionsVnc: # -# Options for expire_password specific to the VNC procotol. +# Options for expire_password specific to the VNC protocol. # # @display: The id of the display where the expiration should be # changed. Defaults to the first. @@ -140,8 +139,7 @@ # # Expire the password of a remote display server. 
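The @display member documented above for both the VNC set_password and expire_password variants is not exercised by the existing examples; a sketch that targets a specific display, with made-up display id and password:

    import json

    cmds = [
        # SetPasswordOptionsVnc: pick the display by id instead of the
        # default (first) one.
        {"execute": "set_password",
         "arguments": {"protocol": "vnc", "display": "vnc2",
                       "password": "secret"}},
        # ExpirePasswordOptionsVnc: same display selector, "+60" as in
        # the existing expire_password example.
        {"execute": "expire_password",
         "arguments": {"protocol": "vnc", "display": "vnc2",
                       "time": "+60"}},
    ]
    for cmd in cmds:
        print(json.dumps(cmd))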
# -# Returns: -# - Nothing on success +# Errors: # - If @protocol is 'spice' and Spice is not active, # DeviceNotFound # @@ -149,9 +147,9 @@ # # Example: # -# -> { "execute": "expire_password", "arguments": { "protocol": "vnc", -# "time": "+60" } } -# <- { "return": {} } +# -> { "execute": "expire_password", "arguments": { "protocol": "vnc", +# "time": "+60" } } +# <- { "return": {} } ## { 'command': 'expire_password', 'boxed': true, 'data': 'ExpirePasswordOptions' } @@ -183,19 +181,17 @@ # @head: head to use in case the device supports multiple heads. If # this parameter is missing, head #0 will be used. Also note that # the head can only be specified in conjunction with the device -# ID. (Since 2.12) +# ID. (Since 2.12) # # @format: image format for screendump. (default: ppm) (Since 7.1) # -# Returns: Nothing on success -# # Since: 0.14 # # Example: # -# -> { "execute": "screendump", -# "arguments": { "filename": "/tmp/image" } } -# <- { "return": {} } +# -> { "execute": "screendump", +# "arguments": { "filename": "/tmp/image" } } +# <- { "return": {} } ## { 'command': 'screendump', 'data': {'filename': 'str', '*device': 'str', '*head': 'int', @@ -294,7 +290,7 @@ # @enabled: true if the SPICE server is enabled, false otherwise # # @migrated: true if the last guest migration completed and spice -# migration had completed as well. false otherwise. (since 1.4) +# migration had completed as well, false otherwise (since 1.4) # # @host: The hostname the SPICE server is bound to. This depends on # the name resolution on the host and may be an IP address. @@ -307,7 +303,7 @@ # # @auth: the current authentication type used by the server # -# - 'none' if no authentication is being used +# - 'none' if no authentication is being used # - 'spice' uses SASL or direct TLS authentication, depending on # command line options # @@ -337,38 +333,38 @@ # # Example: # -# -> { "execute": "query-spice" } -# <- { "return": { -# "enabled": true, -# "auth": "spice", -# "port": 5920, -# "migrated":false, -# "tls-port": 5921, -# "host": "0.0.0.0", -# "mouse-mode":"client", -# "channels": [ -# { -# "port": "54924", -# "family": "ipv4", -# "channel-type": 1, -# "connection-id": 1804289383, -# "host": "127.0.0.1", -# "channel-id": 0, -# "tls": true -# }, -# { -# "port": "36710", -# "family": "ipv4", -# "channel-type": 4, -# "connection-id": 1804289383, -# "host": "127.0.0.1", -# "channel-id": 0, -# "tls": false -# }, -# [ ... more channels follow ... ] -# ] -# } -# } +# -> { "execute": "query-spice" } +# <- { "return": { +# "enabled": true, +# "auth": "spice", +# "port": 5920, +# "migrated":false, +# "tls-port": 5921, +# "host": "0.0.0.0", +# "mouse-mode":"client", +# "channels": [ +# { +# "port": "54924", +# "family": "ipv4", +# "channel-type": 1, +# "connection-id": 1804289383, +# "host": "127.0.0.1", +# "channel-id": 0, +# "tls": true +# }, +# { +# "port": "36710", +# "family": "ipv4", +# "channel-type": 4, +# "connection-id": 1804289383, +# "host": "127.0.0.1", +# "channel-id": 0, +# "tls": false +# }, +# [ ... more channels follow ... 
] +# ] +# } +# } ## { 'command': 'query-spice', 'returns': 'SpiceInfo', 'if': 'CONFIG_SPICE' } @@ -386,12 +382,12 @@ # # Example: # -# <- { "timestamp": {"seconds": 1290688046, "microseconds": 388707}, -# "event": "SPICE_CONNECTED", -# "data": { -# "server": { "port": "5920", "family": "ipv4", "host": "127.0.0.1"}, -# "client": {"port": "52873", "family": "ipv4", "host": "127.0.0.1"} -# }} +# <- { "timestamp": {"seconds": 1290688046, "microseconds": 388707}, +# "event": "SPICE_CONNECTED", +# "data": { +# "server": { "port": "5920", "family": "ipv4", "host": "127.0.0.1"}, +# "client": {"port": "52873", "family": "ipv4", "host": "127.0.0.1"} +# }} ## { 'event': 'SPICE_CONNECTED', 'data': { 'server': 'SpiceBasicInfo', @@ -412,14 +408,14 @@ # # Example: # -# <- { "timestamp": {"seconds": 1290688046, "microseconds": 417172}, -# "event": "SPICE_INITIALIZED", -# "data": {"server": {"auth": "spice", "port": "5921", -# "family": "ipv4", "host": "127.0.0.1"}, -# "client": {"port": "49004", "family": "ipv4", "channel-type": 3, -# "connection-id": 1804289383, "host": "127.0.0.1", -# "channel-id": 0, "tls": true} -# }} +# <- { "timestamp": {"seconds": 1290688046, "microseconds": 417172}, +# "event": "SPICE_INITIALIZED", +# "data": {"server": {"auth": "spice", "port": "5921", +# "family": "ipv4", "host": "127.0.0.1"}, +# "client": {"port": "49004", "family": "ipv4", "channel-type": 3, +# "connection-id": 1804289383, "host": "127.0.0.1", +# "channel-id": 0, "tls": true} +# }} ## { 'event': 'SPICE_INITIALIZED', 'data': { 'server': 'SpiceServerInfo', @@ -439,12 +435,12 @@ # # Example: # -# <- { "timestamp": {"seconds": 1290688046, "microseconds": 388707}, -# "event": "SPICE_DISCONNECTED", -# "data": { -# "server": { "port": "5920", "family": "ipv4", "host": "127.0.0.1"}, -# "client": {"port": "52873", "family": "ipv4", "host": "127.0.0.1"} -# }} +# <- { "timestamp": {"seconds": 1290688046, "microseconds": 388707}, +# "event": "SPICE_DISCONNECTED", +# "data": { +# "server": { "port": "5920", "family": "ipv4", "host": "127.0.0.1"}, +# "client": {"port": "52873", "family": "ipv4", "host": "127.0.0.1"} +# }} ## { 'event': 'SPICE_DISCONNECTED', 'data': { 'server': 'SpiceBasicInfo', @@ -460,8 +456,8 @@ # # Example: # -# <- { "timestamp": {"seconds": 1290688046, "microseconds": 417172}, -# "event": "SPICE_MIGRATE_COMPLETED" } +# <- { "timestamp": {"seconds": 1290688046, "microseconds": 417172}, +# "event": "SPICE_MIGRATE_COMPLETED" } ## { 'event': 'SPICE_MIGRATE_COMPLETED', 'if': 'CONFIG_SPICE' } @@ -668,23 +664,23 @@ # # Example: # -# -> { "execute": "query-vnc" } -# <- { "return": { -# "enabled":true, -# "host":"0.0.0.0", -# "service":"50402", -# "auth":"vnc", -# "family":"ipv4", -# "clients":[ -# { -# "host":"127.0.0.1", -# "service":"50401", -# "family":"ipv4", -# "websocket":false -# } -# ] -# } -# } +# -> { "execute": "query-vnc" } +# <- { "return": { +# "enabled":true, +# "host":"0.0.0.0", +# "service":"50402", +# "auth":"vnc", +# "family":"ipv4", +# "clients":[ +# { +# "host":"127.0.0.1", +# "service":"50401", +# "family":"ipv4", +# "websocket":false +# } +# ] +# } +# } ## { 'command': 'query-vnc', 'returns': 'VncInfo', 'if': 'CONFIG_VNC' } @@ -733,13 +729,13 @@ # # Example: # -# <- { "event": "VNC_CONNECTED", -# "data": { -# "server": { "auth": "sasl", "family": "ipv4", "websocket": false, -# "service": "5901", "host": "0.0.0.0" }, -# "client": { "family": "ipv4", "service": "58425", -# "host": "127.0.0.1", "websocket": false } }, -# "timestamp": { "seconds": 1262976601, "microseconds": 975795 } } +# <- 
{ "event": "VNC_CONNECTED", +# "data": { +# "server": { "auth": "sasl", "family": "ipv4", "websocket": false, +# "service": "5901", "host": "0.0.0.0" }, +# "client": { "family": "ipv4", "service": "58425", +# "host": "127.0.0.1", "websocket": false } }, +# "timestamp": { "seconds": 1262976601, "microseconds": 975795 } } ## { 'event': 'VNC_CONNECTED', 'data': { 'server': 'VncServerInfo', @@ -760,13 +756,13 @@ # # Example: # -# <- { "event": "VNC_INITIALIZED", -# "data": { -# "server": { "auth": "sasl", "family": "ipv4", "websocket": false, -# "service": "5901", "host": "0.0.0.0"}, -# "client": { "family": "ipv4", "service": "46089", "websocket": false, -# "host": "127.0.0.1", "sasl_username": "luiz" } }, -# "timestamp": { "seconds": 1263475302, "microseconds": 150772 } } +# <- { "event": "VNC_INITIALIZED", +# "data": { +# "server": { "auth": "sasl", "family": "ipv4", "websocket": false, +# "service": "5901", "host": "0.0.0.0"}, +# "client": { "family": "ipv4", "service": "46089", "websocket": false, +# "host": "127.0.0.1", "sasl_username": "luiz" } }, +# "timestamp": { "seconds": 1263475302, "microseconds": 150772 } } ## { 'event': 'VNC_INITIALIZED', 'data': { 'server': 'VncServerInfo', @@ -786,13 +782,13 @@ # # Example: # -# <- { "event": "VNC_DISCONNECTED", -# "data": { -# "server": { "auth": "sasl", "family": "ipv4", "websocket": false, -# "service": "5901", "host": "0.0.0.0" }, -# "client": { "family": "ipv4", "service": "58425", "websocket": false, -# "host": "127.0.0.1", "sasl_username": "luiz" } }, -# "timestamp": { "seconds": 1262976601, "microseconds": 975795 } } +# <- { "event": "VNC_DISCONNECTED", +# "data": { +# "server": { "auth": "sasl", "family": "ipv4", "websocket": false, +# "service": "5901", "host": "0.0.0.0" }, +# "client": { "family": "ipv4", "service": "58425", "websocket": false, +# "host": "127.0.0.1", "sasl_username": "luiz" } }, +# "timestamp": { "seconds": 1262976601, "microseconds": 975795 } } ## { 'event': 'VNC_DISCONNECTED', 'data': { 'server': 'VncServerInfo', @@ -834,22 +830,22 @@ # # Example: # -# -> { "execute": "query-mice" } -# <- { "return": [ -# { -# "name":"QEMU Microsoft Mouse", -# "index":0, -# "current":false, -# "absolute":false -# }, -# { -# "name":"QEMU PS/2 Mouse", -# "index":1, -# "current":true, -# "absolute":true -# } -# ] -# } +# -> { "execute": "query-mice" } +# <- { "return": [ +# { +# "name":"QEMU Microsoft Mouse", +# "index":0, +# "current":false, +# "absolute":false +# }, +# { +# "name":"QEMU PS/2 Mouse", +# "index":1, +# "current":true, +# "absolute":true +# } +# ] +# } ## { 'command': 'query-mice', 'returns': ['MouseInfo'] } @@ -990,6 +986,8 @@ ## # @IntWrapper: # +# @data: a numeric key code +# # Since: 1.3 ## { 'struct': 'IntWrapper', @@ -998,6 +996,8 @@ ## # @QKeyCodeWrapper: # +# @data: An enumeration of key name +# # Since: 1.3 ## { 'struct': 'QKeyCodeWrapper', @@ -1008,6 +1008,8 @@ # # Represents a keyboard key. # +# @type: key encoding +# # Since: 1.3 ## { 'union': 'KeyValue', @@ -1030,19 +1032,18 @@ # @hold-time: time to delay key up events, milliseconds. 
Defaults to # 100 # -# Returns: -# - Nothing on success +# Errors: # - If key is unknown or redundant, GenericError # # Since: 1.3 # # Example: # -# -> { "execute": "send-key", -# "arguments": { "keys": [ { "type": "qcode", "data": "ctrl" }, -# { "type": "qcode", "data": "alt" }, -# { "type": "qcode", "data": "delete" } ] } } -# <- { "return": {} } +# -> { "execute": "send-key", +# "arguments": { "keys": [ { "type": "qcode", "data": "ctrl" }, +# { "type": "qcode", "data": "alt" }, +# { "type": "qcode", "data": "delete" } ] } } +# <- { "return": {} } ## { 'command': 'send-key', 'data': { 'keys': ['KeyValue'], '*hold-time': 'int' } } @@ -1079,6 +1080,16 @@ # # Type of a multi-touch event. # +# @begin: A new touch event sequence has just started. +# +# @update: A touch event sequence has been updated. +# +# @end: A touch event sequence has finished. +# +# @cancel: A touch event sequence has been canceled. +# +# @data: Absolute position data. +# # Since: 8.1 ## { 'enum' : 'InputMultiTouchType', @@ -1136,6 +1147,8 @@ # # MultiTouch input event. # +# @type: The type of multi-touch event. +# # @slot: Which slot has generated the event. # # @tracking-id: ID to correlate this event with previously generated @@ -1175,6 +1188,8 @@ ## # @InputKeyEventWrapper: # +# @data: Keyboard input event +# # Since: 2.0 ## { 'struct': 'InputKeyEventWrapper', @@ -1183,6 +1198,8 @@ ## # @InputBtnEventWrapper: # +# @data: Pointer button input event +# # Since: 2.0 ## { 'struct': 'InputBtnEventWrapper', @@ -1191,6 +1208,8 @@ ## # @InputMoveEventWrapper: # +# @data: Pointer motion input event +# # Since: 2.0 ## { 'struct': 'InputMoveEventWrapper', @@ -1199,6 +1218,8 @@ ## # @InputMultiTouchEventWrapper: # +# @data: MultiTouch input event +# # Since: 8.1 ## { 'struct': 'InputMultiTouchEventWrapper', @@ -1245,8 +1266,6 @@ # # @events: List of InputEvent union. # -# Returns: Nothing on success. -# # Since: 2.6 # # Note: The consoles are visible in the qom tree, under @@ -1256,39 +1275,39 @@ # # Examples: # -# 1. Press left mouse button. +# 1. Press left mouse button. # -# -> { "execute": "input-send-event", -# "arguments": { "device": "video0", -# "events": [ { "type": "btn", -# "data" : { "down": true, "button": "left" } } ] } } -# <- { "return": {} } +# -> { "execute": "input-send-event", +# "arguments": { "device": "video0", +# "events": [ { "type": "btn", +# "data" : { "down": true, "button": "left" } } ] } } +# <- { "return": {} } # -# -> { "execute": "input-send-event", -# "arguments": { "device": "video0", -# "events": [ { "type": "btn", -# "data" : { "down": false, "button": "left" } } ] } } -# <- { "return": {} } +# -> { "execute": "input-send-event", +# "arguments": { "device": "video0", +# "events": [ { "type": "btn", +# "data" : { "down": false, "button": "left" } } ] } } +# <- { "return": {} } # -# 2. Press ctrl-alt-del. +# 2. Press ctrl-alt-del. 
# -# -> { "execute": "input-send-event", -# "arguments": { "events": [ -# { "type": "key", "data" : { "down": true, -# "key": {"type": "qcode", "data": "ctrl" } } }, -# { "type": "key", "data" : { "down": true, -# "key": {"type": "qcode", "data": "alt" } } }, -# { "type": "key", "data" : { "down": true, -# "key": {"type": "qcode", "data": "delete" } } } ] } } -# <- { "return": {} } +# -> { "execute": "input-send-event", +# "arguments": { "events": [ +# { "type": "key", "data" : { "down": true, +# "key": {"type": "qcode", "data": "ctrl" } } }, +# { "type": "key", "data" : { "down": true, +# "key": {"type": "qcode", "data": "alt" } } }, +# { "type": "key", "data" : { "down": true, +# "key": {"type": "qcode", "data": "delete" } } } ] } } +# <- { "return": {} } # -# 3. Move mouse pointer to absolute coordinates (20000, 400). +# 3. Move mouse pointer to absolute coordinates (20000, 400). # -# -> { "execute": "input-send-event" , -# "arguments": { "events": [ -# { "type": "abs", "data" : { "axis": "x", "value" : 20000 } }, -# { "type": "abs", "data" : { "axis": "y", "value" : 400 } } ] } } -# <- { "return": {} } +# -> { "execute": "input-send-event" , +# "arguments": { "events": [ +# { "type": "abs", "data" : { "axis": "x", "value" : 20000 } }, +# { "type": "abs", "data" : { "axis": "y", "value" : 400 } } ] } } +# <- { "return": {} } ## { 'command': 'input-send-event', 'data': { '*device': 'str', @@ -1307,7 +1326,7 @@ # display device can notify the guest on window resizes # (virtio-gpu) this will default to "on", assuming the guest will # resize the display to match the window size then. Otherwise it -# defaults to "off". (Since 3.1) +# defaults to "off". (Since 3.1) # # @show-tabs: Display the tab bar for switching between the various # graphical interfaces (e.g. VGA and virtual console character @@ -1410,9 +1429,12 @@ # codes match their position on non-Mac keyboards and you can use # Meta/Super and Alt where you expect them. (default: off) # -# @zoom-to-fit: Zoom guest display to fit into the host window. When -# turned off the host window will be resized instead. Defaults to -# "off". (Since 8.2) +# @zoom-to-fit: Zoom guest display to fit into the host window. When +# turned off the host window will be resized instead. Defaults to +# "off". (Since 8.2) +# +# @zoom-interpolation: Apply interpolation to smooth output when +# zoom-to-fit is enabled. Defaults to "off". (Since 9.0) # # Since: 7.0 ## @@ -1421,7 +1443,8 @@ '*left-command-key': 'bool', '*full-grab': 'bool', '*swap-opt-cmd': 'bool', - '*zoom-to-fit': 'bool' + '*zoom-to-fit': 'bool', + '*zoom-interpolation': 'bool' } } ## @@ -1591,15 +1614,13 @@ # # Reload display configuration. # -# Returns: Nothing on success. -# # Since: 6.0 # # Example: # -# -> { "execute": "display-reload", -# "arguments": { "type": "vnc", "tls-certs": true } } -# <- { "return": {} } +# -> { "execute": "display-reload", +# "arguments": { "type": "vnc", "tls-certs": true } } +# <- { "return": {} } ## { 'command': 'display-reload', 'data': 'DisplayReloadOptions', @@ -1650,17 +1671,15 @@ # # Update display configuration. # -# Returns: Nothing on success. 
-# # Since: 7.1 # # Example: # -# -> { "execute": "display-update", -# "arguments": { "type": "vnc", "addresses": -# [ { "type": "inet", "host": "0.0.0.0", -# "port": "5901" } ] } } -# <- { "return": {} } +# -> { "execute": "display-update", +# "arguments": { "type": "vnc", "addresses": +# [ { "type": "inet", "host": "0.0.0.0", +# "port": "5901" } ] } } +# <- { "return": {} } ## { 'command': 'display-update', 'data': 'DisplayUpdateOptions', @@ -1687,11 +1706,11 @@ # # Example: # -# -> { "execute": "client_migrate_info", -# "arguments": { "protocol": "spice", -# "hostname": "virt42.lab.kraxel.org", -# "port": 1234 } } -# <- { "return": {} } +# -> { "execute": "client_migrate_info", +# "arguments": { "protocol": "spice", +# "hostname": "virt42.lab.kraxel.org", +# "port": 1234 } } +# <- { "return": {} } ## { 'command': 'client_migrate_info', 'data': { 'protocol': 'str', 'hostname': 'str', '*port': 'int', diff --git a/qapi/virtio.json b/qapi/virtio.json index e6dcee7b83d..74fc27c7029 100644 --- a/qapi/virtio.json +++ b/qapi/virtio.json @@ -36,30 +36,30 @@ # # Example: # -# -> { "execute": "x-query-virtio" } -# <- { "return": [ -# { -# "name": "virtio-input", -# "path": "/machine/peripheral-anon/device[4]/virtio-backend" -# }, -# { -# "name": "virtio-crypto", -# "path": "/machine/peripheral/crypto0/virtio-backend" -# }, -# { -# "name": "virtio-scsi", -# "path": "/machine/peripheral-anon/device[2]/virtio-backend" -# }, -# { -# "name": "virtio-net", -# "path": "/machine/peripheral-anon/device[1]/virtio-backend" -# }, -# { -# "name": "virtio-serial", -# "path": "/machine/peripheral-anon/device[0]/virtio-backend" -# } -# ] -# } +# -> { "execute": "x-query-virtio" } +# <- { "return": [ +# { +# "name": "virtio-input", +# "path": "/machine/peripheral-anon/device[4]/virtio-backend" +# }, +# { +# "name": "virtio-crypto", +# "path": "/machine/peripheral/crypto0/virtio-backend" +# }, +# { +# "name": "virtio-scsi", +# "path": "/machine/peripheral-anon/device[2]/virtio-backend" +# }, +# { +# "name": "virtio-net", +# "path": "/machine/peripheral-anon/device[1]/virtio-backend" +# }, +# { +# "name": "virtio-serial", +# "path": "/machine/peripheral-anon/device[0]/virtio-backend" +# } +# ] +# } ## { 'command': 'x-query-virtio', 'returns': [ 'VirtioInfo' ], @@ -205,229 +205,229 @@ # # Examples: # -# 1. Poll for the status of virtio-crypto (no vhost-crypto active) -# -# -> { "execute": "x-query-virtio-status", -# "arguments": { "path": "/machine/peripheral/crypto0/virtio-backend" } -# } -# <- { "return": { -# "device-endian": "little", -# "bus-name": "", -# "disable-legacy-check": false, -# "name": "virtio-crypto", -# "started": true, -# "device-id": 20, -# "backend-features": { -# "transports": [], -# "dev-features": [] -# }, -# "start-on-kick": false, -# "isr": 1, -# "broken": false, -# "status": { -# "statuses": [ -# "VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found", -# "VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device", -# "VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete", -# "VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready" -# ] -# }, -# "num-vqs": 2, -# "guest-features": { -# "dev-features": [], -# "transports": [ -# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", -# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", -# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)" -# ] -# }, -# "host-features": { -# "unknown-dev-features": 1073741824, -# "dev-features": [], -# "transports": [ -# "VIRTIO_RING_F_EVENT_IDX: Used & avail. 
event fields enabled", -# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", -# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)", -# "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts", -# "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. descs. on VQ" -# ] -# }, -# "use-guest-notifier-mask": true, -# "vm-running": true, -# "queue-sel": 1, -# "disabled": false, -# "vhost-started": false, -# "use-started": true -# } -# } -# -# 2. Poll for the status of virtio-net (vhost-net is active) -# -# -> { "execute": "x-query-virtio-status", -# "arguments": { "path": "/machine/peripheral-anon/device[1]/virtio-backend" } -# } -# <- { "return": { -# "device-endian": "little", -# "bus-name": "", -# "disabled-legacy-check": false, -# "name": "virtio-net", -# "started": true, -# "device-id": 1, -# "vhost-dev": { -# "n-tmp-sections": 4, -# "n-mem-sections": 4, -# "max-queues": 1, -# "backend-cap": 2, -# "log-size": 0, +# 1. Poll for the status of virtio-crypto (no vhost-crypto active) +# +# -> { "execute": "x-query-virtio-status", +# "arguments": { "path": "/machine/peripheral/crypto0/virtio-backend" } +# } +# <- { "return": { +# "device-endian": "little", +# "bus-name": "", +# "disable-legacy-check": false, +# "name": "virtio-crypto", +# "started": true, +# "device-id": 20, # "backend-features": { +# "transports": [], +# "dev-features": [] +# }, +# "start-on-kick": false, +# "isr": 1, +# "broken": false, +# "status": { +# "statuses": [ +# "VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found", +# "VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device", +# "VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete", +# "VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready" +# ] +# }, +# "num-vqs": 2, +# "guest-features": { +# "dev-features": [], +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)" +# ] +# }, +# "host-features": { +# "unknown-dev-features": 1073741824, # "dev-features": [], -# "transports": [] +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)", +# "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts", +# "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. descs. on VQ" +# ] # }, -# "nvqs": 2, -# "protocol-features": { -# "protocols": [] +# "use-guest-notifier-mask": true, +# "vm-running": true, +# "queue-sel": 1, +# "disabled": false, +# "vhost-started": false, +# "use-started": true +# } +# } +# +# 2. 
Poll for the status of virtio-net (vhost-net is active) +# +# -> { "execute": "x-query-virtio-status", +# "arguments": { "path": "/machine/peripheral-anon/device[1]/virtio-backend" } +# } +# <- { "return": { +# "device-endian": "little", +# "bus-name": "", +# "disabled-legacy-check": false, +# "name": "virtio-net", +# "started": true, +# "device-id": 1, +# "vhost-dev": { +# "n-tmp-sections": 4, +# "n-mem-sections": 4, +# "max-queues": 1, +# "backend-cap": 2, +# "log-size": 0, +# "backend-features": { +# "dev-features": [], +# "transports": [] +# }, +# "nvqs": 2, +# "protocol-features": { +# "protocols": [] +# }, +# "vq-index": 0, +# "log-enabled": false, +# "acked-features": { +# "dev-features": [ +# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers" +# ], +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)" +# ] +# }, +# "features": { +# "dev-features": [ +# "VHOST_F_LOG_ALL: Logging write descriptors supported", +# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers" +# ], +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_IOMMU_PLATFORM: Device can be used on IOMMU platform", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)", +# "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts", +# "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. descs. on VQ" +# ] +# } # }, -# "vq-index": 0, -# "log-enabled": false, -# "acked-features": { +# "backend-features": { # "dev-features": [ -# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers" +# "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features negotiation supported", +# "VIRTIO_NET_F_GSO: Handling GSO-type packets supported", +# "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control channel", +# "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets supported", +# "VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported", +# "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported", +# "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported", +# "VIRTIO_NET_F_CTRL_VQ: Control channel available", +# "VIRTIO_NET_F_STATUS: Configuration status field available", +# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers", +# "VIRTIO_NET_F_HOST_UFO: Device can receive UFO", +# "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN", +# "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6", +# "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4", +# "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO", +# "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN", +# "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6", +# "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4", +# "VIRTIO_NET_F_MAC: Device has given MAC address", +# "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading reconfig. supported", +# "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial checksum supported", +# "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum supported" # ], # "transports": [ # "VIRTIO_RING_F_EVENT_IDX: Used & avail. 
event fields enabled", # "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", -# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)" +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)", +# "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts", +# "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. descs. on VQ" +# ] +# }, +# "start-on-kick": false, +# "isr": 1, +# "broken": false, +# "status": { +# "statuses": [ +# "VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found", +# "VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device", +# "VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete", +# "VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready" # ] # }, -# "features": { +# "num-vqs": 3, +# "guest-features": { # "dev-features": [ -# "VHOST_F_LOG_ALL: Logging write descriptors supported", -# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers" +# "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control channel", +# "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets supported", +# "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported", +# "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported", +# "VIRTIO_NET_F_CTRL_VQ: Control channel available", +# "VIRTIO_NET_F_STATUS: Configuration status field available", +# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers", +# "VIRTIO_NET_F_HOST_UFO: Device can receive UFO", +# "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN", +# "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6", +# "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4", +# "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO", +# "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN", +# "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6", +# "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4", +# "VIRTIO_NET_F_MAC: Device has given MAC address", +# "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading reconfig. supported", +# "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial checksum supported", +# "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum supported" +# ], +# "transports": [ +# "VIRTIO_RING_F_EVENT_IDX: Used & avail. 
event fields enabled", +# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", +# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)" +# ] +# }, +# "host-features": { +# "dev-features": [ +# "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features negotiation supported", +# "VIRTIO_NET_F_GSO: Handling GSO-type packets supported", +# "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control channel", +# "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets supported", +# "VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported", +# "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported", +# "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported", +# "VIRTIO_NET_F_CTRL_VQ: Control channel available", +# "VIRTIO_NET_F_STATUS: Configuration status field available", +# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers", +# "VIRTIO_NET_F_HOST_UFO: Device can receive UFO", +# "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN", +# "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6", +# "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4", +# "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO", +# "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN", +# "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6", +# "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4", +# "VIRTIO_NET_F_MAC: Device has given MAC address", +# "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading reconfig. supported", +# "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial checksum supported", +# "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum supported" # ], # "transports": [ # "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", # "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", -# "VIRTIO_F_IOMMU_PLATFORM: Device can be used on IOMMU platform", # "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)", # "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts", # "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. descs. on VQ" -# ] -# } -# }, -# "backend-features": { -# "dev-features": [ -# "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features negotiation supported", -# "VIRTIO_NET_F_GSO: Handling GSO-type packets supported", -# "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control channel", -# "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets supported", -# "VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported", -# "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported", -# "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported", -# "VIRTIO_NET_F_CTRL_VQ: Control channel available", -# "VIRTIO_NET_F_STATUS: Configuration status field available", -# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers", -# "VIRTIO_NET_F_HOST_UFO: Device can receive UFO", -# "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN", -# "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6", -# "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4", -# "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO", -# "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN", -# "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6", -# "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4", -# "VIRTIO_NET_F_MAC: Device has given MAC address", -# "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading reconfig. 
supported", -# "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial checksum supported", -# "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum supported" -# ], -# "transports": [ -# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", -# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", -# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)", -# "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts", -# "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. descs. on VQ" -# ] -# }, -# "start-on-kick": false, -# "isr": 1, -# "broken": false, -# "status": { -# "statuses": [ -# "VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found", -# "VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device", -# "VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete", -# "VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready" -# ] -# }, -# "num-vqs": 3, -# "guest-features": { -# "dev-features": [ -# "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control channel", -# "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets supported", -# "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported", -# "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported", -# "VIRTIO_NET_F_CTRL_VQ: Control channel available", -# "VIRTIO_NET_F_STATUS: Configuration status field available", -# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers", -# "VIRTIO_NET_F_HOST_UFO: Device can receive UFO", -# "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN", -# "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6", -# "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4", -# "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO", -# "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN", -# "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6", -# "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4", -# "VIRTIO_NET_F_MAC: Device has given MAC address", -# "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading reconfig. supported", -# "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial checksum supported", -# "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum supported" -# ], -# "transports": [ -# "VIRTIO_RING_F_EVENT_IDX: Used & avail. 
event fields enabled", -# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", -# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)" -# ] -# }, -# "host-features": { -# "dev-features": [ -# "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features negotiation supported", -# "VIRTIO_NET_F_GSO: Handling GSO-type packets supported", -# "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control channel", -# "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets supported", -# "VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported", -# "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported", -# "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported", -# "VIRTIO_NET_F_CTRL_VQ: Control channel available", -# "VIRTIO_NET_F_STATUS: Configuration status field available", -# "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers", -# "VIRTIO_NET_F_HOST_UFO: Device can receive UFO", -# "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN", -# "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6", -# "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4", -# "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO", -# "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN", -# "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6", -# "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4", -# "VIRTIO_NET_F_MAC: Device has given MAC address", -# "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading reconfig. supported", -# "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial checksum supported", -# "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum supported" -# ], -# "transports": [ -# "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", -# "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported", -# "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)", -# "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts", -# "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. descs. on VQ" -# ] -# }, -# "use-guest-notifier-mask": true, -# "vm-running": true, -# "queue-sel": 2, -# "disabled": false, -# "vhost-started": true, -# "use-started": true -# } -# } +# ] +# }, +# "use-guest-notifier-mask": true, +# "vm-running": true, +# "queue-sel": 2, +# "disabled": false, +# "vhost-started": true, +# "use-started": true +# } +# } ## { 'command': 'x-query-virtio-status', 'data': { 'path': 'str' }, @@ -570,52 +570,52 @@ # # Examples: # -# 1. Get VirtQueueStatus for virtio-vsock (vhost-vsock running) -# -# -> { "execute": "x-query-virtio-queue-status", -# "arguments": { "path": "/machine/peripheral/vsock0/virtio-backend", -# "queue": 1 } -# } -# <- { "return": { -# "signalled-used": 0, -# "inuse": 0, -# "name": "vhost-vsock", -# "vring-align": 4096, -# "vring-desc": 5217370112, -# "signalled-used-valid": false, -# "vring-num-default": 128, -# "vring-avail": 5217372160, -# "queue-index": 1, -# "last-avail-idx": 0, -# "vring-used": 5217372480, -# "used-idx": 0, -# "vring-num": 128 -# } -# } -# -# 2. 
Get VirtQueueStatus for virtio-serial (no vhost) -# -# -> { "execute": "x-query-virtio-queue-status", -# "arguments": { "path": "/machine/peripheral-anon/device[0]/virtio-backend", -# "queue": 20 } -# } -# <- { "return": { -# "signalled-used": 0, -# "inuse": 0, -# "name": "virtio-serial", -# "vring-align": 4096, -# "vring-desc": 5182074880, -# "signalled-used-valid": false, -# "vring-num-default": 128, -# "vring-avail": 5182076928, -# "queue-index": 20, -# "last-avail-idx": 0, -# "vring-used": 5182077248, -# "used-idx": 0, -# "shadow-avail-idx": 0, -# "vring-num": 128 -# } -# } +# 1. Get VirtQueueStatus for virtio-vsock (vhost-vsock running) +# +# -> { "execute": "x-query-virtio-queue-status", +# "arguments": { "path": "/machine/peripheral/vsock0/virtio-backend", +# "queue": 1 } +# } +# <- { "return": { +# "signalled-used": 0, +# "inuse": 0, +# "name": "vhost-vsock", +# "vring-align": 4096, +# "vring-desc": 5217370112, +# "signalled-used-valid": false, +# "vring-num-default": 128, +# "vring-avail": 5217372160, +# "queue-index": 1, +# "last-avail-idx": 0, +# "vring-used": 5217372480, +# "used-idx": 0, +# "vring-num": 128 +# } +# } +# +# 2. Get VirtQueueStatus for virtio-serial (no vhost) +# +# -> { "execute": "x-query-virtio-queue-status", +# "arguments": { "path": "/machine/peripheral-anon/device[0]/virtio-backend", +# "queue": 20 } +# } +# <- { "return": { +# "signalled-used": 0, +# "inuse": 0, +# "name": "virtio-serial", +# "vring-align": 4096, +# "vring-desc": 5182074880, +# "signalled-used-valid": false, +# "vring-num-default": 128, +# "vring-avail": 5182076928, +# "queue-index": 20, +# "last-avail-idx": 0, +# "vring-used": 5182077248, +# "used-idx": 0, +# "shadow-avail-idx": 0, +# "vring-num": 128 +# } +# } ## { 'command': 'x-query-virtio-queue-status', 'data': { 'path': 'str', 'queue': 'uint16' }, @@ -642,15 +642,17 @@ # # @num: vhost_virtqueue num # -# @desc-phys: vhost_virtqueue desc_phys (descriptor area phys. addr.) +# @desc-phys: vhost_virtqueue desc_phys (descriptor area physical +# address) # # @desc-size: vhost_virtqueue desc_size # -# @avail-phys: vhost_virtqueue avail_phys (driver area phys. addr.) +# @avail-phys: vhost_virtqueue avail_phys (driver area physical +# address) # # @avail-size: vhost_virtqueue avail_size # -# @used-phys: vhost_virtqueue used_phys (device area phys. addr.) +# @used-phys: vhost_virtqueue used_phys (device area physical address) # # @used-size: vhost_virtqueue used_size # @@ -690,51 +692,51 @@ # # Examples: # -# 1. Get vhost_virtqueue status for vhost-crypto -# -# -> { "execute": "x-query-virtio-vhost-queue-status", -# "arguments": { "path": "/machine/peripheral/crypto0/virtio-backend", -# "queue": 0 } -# } -# <- { "return": { -# "avail-phys": 5216124928, -# "name": "virtio-crypto", -# "used-phys": 5216127040, -# "avail-size": 2054, -# "desc-size": 16384, -# "used-size": 8198, -# "desc": 140141447430144, -# "num": 1024, -# "call": 0, -# "avail": 140141447446528, -# "desc-phys": 5216108544, -# "used": 140141447448640, -# "kick": 0 -# } -# } -# -# 2. 
Get vhost_virtqueue status for vhost-vsock -# -# -> { "execute": "x-query-virtio-vhost-queue-status", -# "arguments": { "path": "/machine/peripheral/vsock0/virtio-backend", -# "queue": 0 } -# } -# <- { "return": { -# "avail-phys": 5182261248, -# "name": "vhost-vsock", -# "used-phys": 5182261568, -# "avail-size": 262, -# "desc-size": 2048, -# "used-size": 1030, -# "desc": 140141413580800, -# "num": 128, -# "call": 0, -# "avail": 140141413582848, -# "desc-phys": 5182259200, -# "used": 140141413583168, -# "kick": 0 -# } -# } +# 1. Get vhost_virtqueue status for vhost-crypto +# +# -> { "execute": "x-query-virtio-vhost-queue-status", +# "arguments": { "path": "/machine/peripheral/crypto0/virtio-backend", +# "queue": 0 } +# } +# <- { "return": { +# "avail-phys": 5216124928, +# "name": "virtio-crypto", +# "used-phys": 5216127040, +# "avail-size": 2054, +# "desc-size": 16384, +# "used-size": 8198, +# "desc": 140141447430144, +# "num": 1024, +# "call": 0, +# "avail": 140141447446528, +# "desc-phys": 5216108544, +# "used": 140141447448640, +# "kick": 0 +# } +# } +# +# 2. Get vhost_virtqueue status for vhost-vsock +# +# -> { "execute": "x-query-virtio-vhost-queue-status", +# "arguments": { "path": "/machine/peripheral/vsock0/virtio-backend", +# "queue": 0 } +# } +# <- { "return": { +# "avail-phys": 5182261248, +# "name": "vhost-vsock", +# "used-phys": 5182261568, +# "avail-size": 262, +# "desc-size": 2048, +# "used-size": 1030, +# "desc": 140141413580800, +# "num": 128, +# "call": 0, +# "avail": 140141413582848, +# "desc-phys": 5182259200, +# "used": 140141413583168, +# "kick": 0 +# } +# } ## { 'command': 'x-query-virtio-vhost-queue-status', 'data': { 'path': 'str', 'queue': 'uint16' }, @@ -839,92 +841,141 @@ # # Examples: # -# 1. Introspect on virtio-net's VirtQueue 0 at index 5 -# -# -> { "execute": "x-query-virtio-queue-element", -# "arguments": { "path": "/machine/peripheral-anon/device[1]/virtio-backend", -# "queue": 0, -# "index": 5 } -# } -# <- { "return": { -# "index": 5, -# "name": "virtio-net", -# "descs": [ -# { -# "flags": ["write"], -# "len": 1536, -# "addr": 5257305600 +# 1. Introspect on virtio-net's VirtQueue 0 at index 5 +# +# -> { "execute": "x-query-virtio-queue-element", +# "arguments": { "path": "/machine/peripheral-anon/device[1]/virtio-backend", +# "queue": 0, +# "index": 5 } +# } +# <- { "return": { +# "index": 5, +# "name": "virtio-net", +# "descs": [ +# { +# "flags": ["write"], +# "len": 1536, +# "addr": 5257305600 +# } +# ], +# "avail": { +# "idx": 256, +# "flags": 0, +# "ring": 5 +# }, +# "used": { +# "idx": 13, +# "flags": 0 # } -# ], -# "avail": { -# "idx": 256, -# "flags": 0, -# "ring": 5 -# }, -# "used": { -# "idx": 13, -# "flags": 0 # } -# } -# } -# -# 2. Introspect on virtio-crypto's VirtQueue 1 at head -# -# -> { "execute": "x-query-virtio-queue-element", -# "arguments": { "path": "/machine/peripheral/crypto0/virtio-backend", -# "queue": 1 } -# } -# <- { "return": { -# "index": 0, -# "name": "virtio-crypto", -# "descs": [ -# { -# "flags": [], -# "len": 0, -# "addr": 8080268923184214134 +# } +# +# 2. 
Introspect on virtio-crypto's VirtQueue 1 at head +# +# -> { "execute": "x-query-virtio-queue-element", +# "arguments": { "path": "/machine/peripheral/crypto0/virtio-backend", +# "queue": 1 } +# } +# <- { "return": { +# "index": 0, +# "name": "virtio-crypto", +# "descs": [ +# { +# "flags": [], +# "len": 0, +# "addr": 8080268923184214134 +# } +# ], +# "avail": { +# "idx": 280, +# "flags": 0, +# "ring": 0 +# }, +# "used": { +# "idx": 280, +# "flags": 0 # } -# ], -# "avail": { -# "idx": 280, -# "flags": 0, -# "ring": 0 -# }, -# "used": { -# "idx": 280, -# "flags": 0 # } -# } -# } -# -# 3. Introspect on virtio-scsi's VirtQueue 2 at head -# -# -> { "execute": "x-query-virtio-queue-element", -# "arguments": { "path": "/machine/peripheral-anon/device[2]/virtio-backend", -# "queue": 2 } -# } -# <- { "return": { -# "index": 19, -# "name": "virtio-scsi", -# "descs": [ -# { -# "flags": ["used", "indirect", "write"], -# "len": 4099327944, -# "addr": 12055409292258155293 +# } +# +# 3. Introspect on virtio-scsi's VirtQueue 2 at head +# +# -> { "execute": "x-query-virtio-queue-element", +# "arguments": { "path": "/machine/peripheral-anon/device[2]/virtio-backend", +# "queue": 2 } +# } +# <- { "return": { +# "index": 19, +# "name": "virtio-scsi", +# "descs": [ +# { +# "flags": ["used", "indirect", "write"], +# "len": 4099327944, +# "addr": 12055409292258155293 +# } +# ], +# "avail": { +# "idx": 1147, +# "flags": 0, +# "ring": 19 +# }, +# "used": { +# "idx": 280, +# "flags": 0 # } -# ], -# "avail": { -# "idx": 1147, -# "flags": 0, -# "ring": 19 -# }, -# "used": { -# "idx": 280, -# "flags": 0 # } -# } -# } +# } ## { 'command': 'x-query-virtio-queue-element', 'data': { 'path': 'str', 'queue': 'uint16', '*index': 'uint16' }, 'returns': 'VirtioQueueElement', 'features': [ 'unstable' ] } + +## +# @IOThreadVirtQueueMapping: +# +# Describes the subset of virtqueues assigned to an IOThread. +# +# @iothread: the id of IOThread object +# +# @vqs: an optional array of virtqueue indices that will be handled by +# this IOThread. When absent, virtqueues are assigned round-robin +# across all IOThreadVirtQueueMappings provided. Either all +# IOThreadVirtQueueMappings must have @vqs or none of them must +# have it. +# +# Since: 9.0 +## + +{ 'struct': 'IOThreadVirtQueueMapping', + 'data': { 'iothread': 'str', '*vqs': ['uint16'] } } + +## +# @DummyVirtioForceArrays: +# +# Not used by QMP; hack to let us use IOThreadVirtQueueMappingList +# internally +# +# Since: 9.0 +## + +{ 'struct': 'DummyVirtioForceArrays', + 'data': { 'unused-iothread-vq-mapping': ['IOThreadVirtQueueMapping'] } } + +## +# @GranuleMode: +# +# @4k: granule page size of 4KiB +# +# @8k: granule page size of 8KiB +# +# @16k: granule page size of 16KiB +# +# @64k: granule page size of 64KiB +# +# @host: granule matches the host page size +# +# Since: 9.0 +## +{ 'enum': 'GranuleMode', + 'data': [ '4k', '8k', '16k', '64k', 'host' ] } diff --git a/qapi/yank.json b/qapi/yank.json index 87ec7cab968..89f2f4d199b 100644 --- a/qapi/yank.json +++ b/qapi/yank.json @@ -49,6 +49,8 @@ # A yank instance can be yanked with the @yank qmp command to recover # from a hanging QEMU. # +# @type: yank instance type +# # Currently implemented yank instances: # # - nbd block device: Yanking it will shut down the connection to the @@ -74,21 +76,20 @@ # Try to recover from hanging QEMU by yanking the specified instances. # See @YankInstance for more information. # -# Takes a list of @YankInstance as argument. 
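As an aside on the IOThreadVirtQueueMapping struct introduced above: it is consumed through a device property rather than a QMP command. A hedged sketch only — the consuming device and the ``iothread-vq-mapping`` property name are assumptions about the virtio-blk frontend, not something this hunk defines — such a mapping could be passed via the JSON form of ``-device``::

    -object iothread,id=iot0 -object iothread,id=iot1 \
    -blockdev driver=file,filename=disk.img,node-name=disk0 \
    -device '{"driver": "virtio-blk-pci", "drive": "disk0", "num-queues": 4,
              "iothread-vq-mapping": [{"iothread": "iot0", "vqs": [0, 2]},
                                      {"iothread": "iot1", "vqs": [1, 3]}]}'

Per the schema comment, either every mapping carries ``vqs`` (as in this sketch) or none of them does, in which case virtqueues are distributed round-robin across the listed IOThreads.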
+# @instances: the instances to be yanked # -# Returns: -# - Nothing on success -# - @DeviceNotFound error, if any of the YankInstances doesn't exist +# Errors: +# - If any of the YankInstances doesn't exist, DeviceNotFound # # Example: # -# -> { "execute": "yank", -# "arguments": { -# "instances": [ -# { "type": "block-node", -# "node-name": "nbd0" } -# ] } } -# <- { "return": {} } +# -> { "execute": "yank", +# "arguments": { +# "instances": [ +# { "type": "block-node", +# "node-name": "nbd0" } +# ] } } +# <- { "return": {} } # # Since: 6.0 ## @@ -105,11 +106,11 @@ # # Example: # -# -> { "execute": "query-yank" } -# <- { "return": [ -# { "type": "block-node", -# "node-name": "nbd0" } -# ] } +# -> { "execute": "query-yank" } +# <- { "return": [ +# { "type": "block-node", +# "node-name": "nbd0" } +# ] } # # Since: 6.0 ## diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx index 068692d13eb..c9dd70a8920 100644 --- a/qemu-img-cmds.hx +++ b/qemu-img-cmds.hx @@ -1,3 +1,5 @@ +HXCOMM See docs/devel/docs.rst for the format of this file. +HXCOMM HXCOMM Keep the list of subcommands sorted by name. HXCOMM Use DEFHEADING() to define headings in both help text and rST HXCOMM Text between SRST and ERST are copied to rST version and diff --git a/qemu-img.c b/qemu-img.c index 5a77f677193..7668f86769f 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -960,7 +960,6 @@ static int img_commit(int argc, char **argv) Error *local_err = NULL; CommonBlockJobCBInfo cbi; bool image_opts = false; - AioContext *aio_context; int64_t rate_limit = 0; fmt = NULL; @@ -1078,12 +1077,9 @@ static int img_commit(int argc, char **argv) .bs = bs, }; - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); commit_active_start("commit", bs, base_bs, JOB_DEFAULT, rate_limit, BLOCKDEV_ON_ERROR_REPORT, NULL, common_block_job_cb, &cbi, false, &local_err); - aio_context_release(aio_context); if (local_err) { goto done; } diff --git a/qemu-io.c b/qemu-io.c index 050c70835f9..6cb1e00385e 100644 --- a/qemu-io.c +++ b/qemu-io.c @@ -414,15 +414,7 @@ static void prep_fetchline(void *opaque) static int do_qemuio_command(const char *cmd) { - int ret; - AioContext *ctx = - qemuio_blk ? blk_get_aio_context(qemuio_blk) : qemu_get_aio_context(); - - aio_context_acquire(ctx); - ret = qemuio_command(qemuio_blk, cmd); - aio_context_release(ctx); - - return ret; + return qemuio_command(qemuio_blk, cmd); } static int command_loop(void) diff --git a/qemu-nbd.c b/qemu-nbd.c index 186e6468b1a..d7b3ccab21c 100644 --- a/qemu-nbd.c +++ b/qemu-nbd.c @@ -114,6 +114,7 @@ static void usage(const char *name) " --tls-creds=ID use id of an earlier --object to provide TLS\n" " --tls-authz=ID use id of an earlier --object to provide\n" " authorization\n" +" --tls-hostname=HOSTNAME override hostname used to check x509 certificate\n" " -T, --trace [[enable=]][,events=][,file=]\n" " specify tracing options\n" " --fork fork off the server process and exit the parent\n" @@ -1123,9 +1124,7 @@ int main(int argc, char **argv) qdict_put_str(raw_opts, "file", bs->node_name); qdict_put_int(raw_opts, "offset", dev_offset); - aio_context_acquire(qemu_get_aio_context()); bs = bdrv_open(NULL, NULL, raw_opts, flags, &error_fatal); - aio_context_release(qemu_get_aio_context()); blk_remove_bs(blk); blk_insert_bs(blk, bs, &error_fatal); diff --git a/qemu-options.hx b/qemu-options.hx index b6b4ad9e676..8ce85d45598 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -1,3 +1,5 @@ +HXCOMM See docs/devel/docs.rst for the format of this file. 
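The ``--tls-hostname`` option added to the qemu-nbd usage text above overrides the name used for x509 certificate verification, which matters when qemu-nbd itself connects out, e.g. with ``--list``. An illustrative sketch only; the credentials directory, address and hostname are placeholders::

    qemu-nbd --object tls-creds-x509,id=tls0,dir=/path/to/client-creds,endpoint=client \
             --list -b 10.0.0.1 --tls-creds tls0 --tls-hostname nbd.example.com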
+HXCOMM
 HXCOMM Use DEFHEADING() to define headings in both help text and rST.
 HXCOMM Text between SRST and ERST is copied to the rST version and
 HXCOMM discarded from C version.
@@ -149,14 +151,14 @@ SRST
     platform and configuration dependent.
 
     ``interleave-granularity=granularity`` sets the granularity of
-    interleave. Default 256KiB. Only 256KiB, 512KiB, 1024KiB, 2048KiB
-    4096KiB, 8192KiB and 16384KiB granularities supported.
+    interleave. Default 256 (bytes). Only 256, 512, 1k, 2k,
+    4k, 8k and 16k granularities supported.
 
     Example:
 
     ::
 
-        -machine cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=128G,cxl-fmw.0.interleave-granularity=512k
+        -machine cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=128G,cxl-fmw.0.interleave-granularity=512
 ERST
 
 DEF("M", HAS_ARG, QEMU_OPTION_M,
@@ -188,7 +190,8 @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel,
     " dirty-ring-size=n (KVM dirty ring GFN count, default 0)\n"
     " eager-split-size=n (KVM Eager Page Split chunk size, default 0, disabled. ARM only)\n"
     " notify-vmexit=run|internal-error|disable,notify-window=n (enable notify VM exit and set notify window, x86 only)\n"
-    " thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL)
+    " thread=single|multi (enable multi-threaded TCG)\n"
+    " device=path (KVM device path, default /dev/kvm)\n", QEMU_ARCH_ALL)
 SRST
 ``-accel name[,prop=value[,...]]``
     This is used to enable an accelerator. Depending on the target
@@ -269,6 +272,11 @@ SRST
     open up for a specified of time (i.e. notify-window).
     Default: notify-vmexit=run,notify-window=0.
 
+    ``device=path``
+        Sets the path to the KVM device node. Defaults to ``/dev/kvm``. This
+        option can be used to pass the KVM device to use via a file descriptor
+        by setting the value to ``/dev/fdset/NN``.
+
 ERST
 
 DEF("smp", HAS_ARG, QEMU_OPTION_smp,
@@ -1164,6 +1172,17 @@ SRST
     Please also refer to the wiki page for general scenarios of VT-d
     emulation in QEMU: https://wiki.qemu.org/Features/VT-d.
 
+``-device virtio-iommu-pci[,option=...]``
+    This is only supported by ``-machine q35`` (x86_64) and ``-machine virt`` (ARM).
+    It supports the following options:
+
+    ``granule=val`` (possible values are 4k, 8k, 16k, 64k and host; default: host)
+        This decides the default granule to be exposed by the
+        virtio-iommu. If host, the granule matches the host page size.
+
+    ``aw-bits=val`` (val between 32 and 64, default depends on machine)
+        This decides the address width of the IOVA address space.
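Tying the two qemu-options.hx additions above together, a hedged sketch of how they might appear on one command line; the fdset number, the inherited file descriptor and the aw-bits value are placeholders, and fd 3 is assumed to have been opened on /dev/kvm by the invoking process::

    qemu-system-x86_64 -machine q35 \
      -add-fd fd=3,set=4 \
      -accel kvm,device=/dev/fdset/4 \
      -device virtio-iommu-pci,granule=4k,aw-bits=48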
+ ERST DEF("name", HAS_ARG, QEMU_OPTION_name, @@ -2077,7 +2096,7 @@ DEF("display", HAS_ARG, QEMU_OPTION_display, #if defined(CONFIG_GTK) "-display gtk[,full-screen=on|off][,gl=on|off][,grab-on-hover=on|off]\n" " [,show-tabs=on|off][,show-cursor=on|off][,window-close=on|off]\n" - " [,show-menubar=on|off]\n" + " [,show-menubar=on|off][,zoom-to-fit=on|off]\n" #endif #if defined(CONFIG_VNC) "-display vnc=[,]\n" @@ -2087,6 +2106,8 @@ DEF("display", HAS_ARG, QEMU_OPTION_display, #endif #if defined(CONFIG_COCOA) "-display cocoa[,full-grab=on|off][,swap-opt-cmd=on|off]\n" + " [,show-cursor=on|off][,left-command-key=on|off]\n" + " [,full-screen=on|off][,zoom-to-fit=on|off]\n" #endif #if defined(CONFIG_OPENGL) "-display egl-headless[,rendernode=]\n" @@ -2094,9 +2115,6 @@ DEF("display", HAS_ARG, QEMU_OPTION_display, #if defined(CONFIG_DBUS_DISPLAY) "-display dbus[,addr=]\n" " [,gl=on|core|es|off][,rendernode=]\n" -#endif -#if defined(CONFIG_COCOA) - "-display cocoa[,show-cursor=on|off][,left-command-key=on|off]\n" #endif "-display none\n" " select display backend type\n" @@ -2191,10 +2209,26 @@ SRST provides drop-down menus and other UI elements to configure and control the VM during runtime. Valid parameters are: + ``full-grab=on|off`` : Capture all key presses, including system combos. + This requires accessibility permissions, since it + performs a global grab on key events. + (default: off) See + https://support.apple.com/en-in/guide/mac-help/mh32356/mac + + ``swap-opt-cmd=on|off`` : Swap the Option and Command keys so that their + key codes match their position on non-Mac + keyboards and you can use Meta/Super and Alt + where you expect them. (default: off) + ``show-cursor=on|off`` : Force showing the mouse cursor ``left-command-key=on|off`` : Disable forwarding left command key to host + ``full-screen=on|off`` : Start in fullscreen mode + + ``zoom-to-fit=on|off`` : Expand video output to the window size, + defaults to "off" + ``egl-headless[,rendernode=]`` Offload all OpenGL operations to a local DRI device. For any graphical display, this display needs to be paired with either @@ -2445,7 +2479,7 @@ SRST ``to=L`` With this option, QEMU will try next available VNC displays, - until the number L, if the origianlly defined "-vnc display" is + until the number L, if the originally defined "-vnc display" is not available, e.g. port 5900+display is already used by another application. By default, to=0. @@ -2618,7 +2652,8 @@ SRST ``-win2k-hack`` Use it when installing Windows 2000 to avoid a disk full bug. After Windows 2000 is installed, you no longer need this option (this - option slows down the IDE transfers). + option slows down the IDE transfers). Synonym of ``-global + ide-device.win2k-install-hack=on``. ERST DEF("no-fd-bootchk", 0, QEMU_OPTION_no_fd_bootchk, @@ -2627,23 +2662,7 @@ DEF("no-fd-bootchk", 0, QEMU_OPTION_no_fd_bootchk, SRST ``-no-fd-bootchk`` Disable boot signature checking for floppy disks in BIOS. May be - needed to boot from old floppy disks. -ERST - -DEF("no-acpi", 0, QEMU_OPTION_no_acpi, - "-no-acpi disable ACPI\n", QEMU_ARCH_I386 | QEMU_ARCH_ARM) -SRST -``-no-acpi`` - Disable ACPI (Advanced Configuration and Power Interface) support. - Use it if your guest OS complains about ACPI problems (PC target - machine only). -ERST - -DEF("no-hpet", 0, QEMU_OPTION_no_hpet, - "-no-hpet disable HPET\n", QEMU_ARCH_I386) -SRST -``-no-hpet`` - Disable HPET support. Deprecated, use '-machine hpet=off' instead. + needed to boot from old floppy disks. Synonym of ``-m fd-bootchk=off``. 
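As a compact illustration of the Cocoa display parameters documented above, combined with the ``zoom-interpolation`` property added to the QAPI schema earlier in this patch (assuming the backend accepts it with the same spelling on the command line)::

    qemu-system-aarch64 -machine virt \
      -display cocoa,full-grab=on,swap-opt-cmd=on,zoom-to-fit=on,zoom-interpolation=on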
ERST DEF("acpitable", HAS_ARG, QEMU_OPTION_acpitable, @@ -2679,7 +2698,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios, " specify SMBIOS type 3 fields\n" "-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str]\n" " [,asset=str][,part=str][,max-speed=%d][,current-speed=%d]\n" - " [,processor-id=%d]\n" + " [,processor-family=%d,processor-id=%d]\n" " specify SMBIOS type 4 fields\n" "-smbios type=8[,external_reference=str][,internal_reference=str][,connector_type=%d][,port_type=%d]\n" " specify SMBIOS type 8 fields\n" @@ -2690,7 +2709,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios, " specify SMBIOS type 17 fields\n" "-smbios type=41[,designation=str][,kind=str][,instance=%d][,pcidev=str]\n" " specify SMBIOS type 41 fields\n", - QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH) + QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH | QEMU_ARCH_RISCV) SRST ``-smbios file=binary`` Load SMBIOS entry from binary file. @@ -2707,9 +2726,12 @@ SRST ``-smbios type=3[,manufacturer=str][,version=str][,serial=str][,asset=str][,sku=str]`` Specify SMBIOS type 3 fields -``-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str][,asset=str][,part=str][,processor-id=%d]`` +``-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str][,asset=str][,part=str][,processor-family=%d][,processor-id=%d]`` Specify SMBIOS type 4 fields +``-smbios type=9[,slot_designation=str][,slot_type=%d][,slot_data_bus_width=%d][,current_usage=%d][,slot_length=%d][,slot_id=%d][,slot_characteristics1=%d][,slot_characteristics12=%d][,pci_device=str]`` + Specify SMBIOS type 9 fields + ``-smbios type=11[,value=str][,path=filename]`` Specify SMBIOS type 11 fields @@ -3093,6 +3115,8 @@ SRST server. The files in dir will be exposed as the root of a TFTP server. The TFTP client on the guest must be configured in binary mode (use the command ``bin`` of the Unix TFTP client). + The built-in TFTP server is read-only; it does not implement any + command for writing files. QEMU will not write to this directory. ``tftp-server-name=name`` In BOOTP reply, broadcast name as the "TFTP server name" @@ -3987,7 +4011,7 @@ ERST DEF("initrd", HAS_ARG, QEMU_OPTION_initrd, \ "-initrd file use 'file' as initial ram disk\n", QEMU_ARCH_ALL) -SRST +SRST(initrd) ``-initrd file`` Use file as initial ram disk. @@ -4086,9 +4110,13 @@ DEF("fw_cfg", HAS_ARG, QEMU_OPTION_fwcfg, SRST ``-fw_cfg [name=]name,file=file`` Add named fw\_cfg entry with contents from file file. + If the filename contains comma, you must double it (for instance, + "file=my,,file" to use file "my,file"). ``-fw_cfg [name=]name,string=str`` Add named fw\_cfg entry with contents from string str. + If the string contains comma, you must double it (for instance, + "string=my,,string" to use file "my,string"). The terminating NUL character of the contents of str will not be included as part of the fw\_cfg item data. To insert contents with @@ -4115,7 +4143,7 @@ SRST default device is ``vc`` in graphical mode and ``stdio`` in non graphical mode. - This option can be used several times to simulate up to 4 serial + This option can be used several times to simulate multiple serial ports. You can use ``-serial none`` to suppress the creation of default @@ -4362,14 +4390,6 @@ SRST from a script. ERST -DEF("singlestep", 0, QEMU_OPTION_singlestep, \ - "-singlestep deprecated synonym for -accel tcg,one-insn-per-tb=on\n", QEMU_ARCH_ALL) -SRST -``-singlestep`` - This is a deprecated synonym for the TCG accelerator property - ``one-insn-per-tb``. 
-ERST - DEF("preconfig", 0, QEMU_OPTION_preconfig, \ "--preconfig pause QEMU before machine is initialized (experimental)\n", QEMU_ARCH_ALL) @@ -4795,18 +4815,6 @@ SRST ``-nodefaults`` option will disable all those default devices. ERST -#ifndef _WIN32 -DEF("chroot", HAS_ARG, QEMU_OPTION_chroot, \ - "-chroot dir chroot to dir just before starting the VM (deprecated)\n", - QEMU_ARCH_ALL) -#endif -SRST -``-chroot dir`` - Deprecated, use '-run-with chroot=...' instead. - Immediately before starting guest execution, chroot to the specified - directory. Especially useful in combination with -runas. -ERST - #ifndef _WIN32 DEF("runas", HAS_ARG, QEMU_OPTION_runas, \ "-runas user change to user id user just before starting the VM\n" \ @@ -4980,16 +4988,6 @@ HXCOMM Internal use DEF("qtest", HAS_ARG, QEMU_OPTION_qtest, "", QEMU_ARCH_ALL) DEF("qtest-log", HAS_ARG, QEMU_OPTION_qtest_log, "", QEMU_ARCH_ALL) -#ifdef __linux__ -DEF("async-teardown", 0, QEMU_OPTION_asyncteardown, - "-async-teardown enable asynchronous teardown\n", - QEMU_ARCH_ALL) -SRST -``-async-teardown`` - This option is deprecated and should no longer be used. The new option - ``-run-with async-teardown=on`` is a replacement. -ERST -#endif #ifdef CONFIG_POSIX DEF("run-with", HAS_ARG, QEMU_OPTION_run_with, "-run-with [async-teardown=on|off][,chroot=dir]\n" @@ -5232,6 +5230,18 @@ SRST The ``share`` boolean option is on by default with memfd. + ``-object iommufd,id=id[,fd=fd]`` + Creates an iommufd backend which allows control of DMA mapping + through the ``/dev/iommu`` device. + + The ``id`` parameter is a unique ID which frontends (such as + vfio-pci of vdpa) will use to connect with the iommufd backend. + + The ``fd`` parameter is an optional pre-opened file descriptor + resulting from ``/dev/iommu`` opening. Usually the iommufd is shared + across all subsystems, bringing the benefit of centralized + reference counting. + ``-object rng-builtin,id=id`` Creates a random number generator backend which obtains entropy from QEMU builtin functions. 
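A hedged sketch of how the ``iommufd`` backend described above might be wired to a frontend; the ``iommufd`` property on vfio-pci and the host PCI address are assumptions made for illustration rather than something this hunk documents::

    qemu-system-x86_64 -object iommufd,id=iommufd0 \
      -device vfio-pci,host=0000:02:00.0,iommufd=iommufd0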
The ``id`` parameter is a unique ID @@ -5477,7 +5487,7 @@ SRST KVM COLO primary: - -netdev tap,id=hn0,vhost=off,script=/etc/qemu-ifup,downscript=/etc/qemu-ifdown + -netdev tap,id=hn0,vhost=off -device e1000,id=e0,netdev=hn0,mac=52:a4:00:12:78:66 -chardev socket,id=mirror0,host=3.3.3.3,port=9003,server=on,wait=off -chardev socket,id=compare1,host=3.3.3.3,port=9004,server=on,wait=off @@ -5492,7 +5502,7 @@ SRST -object colo-compare,id=comp0,primary_in=compare0-0,secondary_in=compare1,outdev=compare_out0,iothread=iothread1 secondary: - -netdev tap,id=hn0,vhost=off,script=/etc/qemu-ifup,down script=/etc/qemu-ifdown + -netdev tap,id=hn0,vhost=off -device e1000,netdev=hn0,mac=52:a4:00:12:78:66 -chardev socket,id=red0,host=3.3.3.3,port=9003 -chardev socket,id=red1,host=3.3.3.3,port=9004 @@ -5503,7 +5513,7 @@ SRST Xen COLO primary: - -netdev tap,id=hn0,vhost=off,script=/etc/qemu-ifup,downscript=/etc/qemu-ifdown + -netdev tap,id=hn0,vhost=off -device e1000,id=e0,netdev=hn0,mac=52:a4:00:12:78:66 -chardev socket,id=mirror0,host=3.3.3.3,port=9003,server=on,wait=off -chardev socket,id=compare1,host=3.3.3.3,port=9004,server=on,wait=off @@ -5516,10 +5526,10 @@ SRST -object filter-redirector,netdev=hn0,id=redire0,queue=rx,indev=compare_out -object filter-redirector,netdev=hn0,id=redire1,queue=rx,outdev=compare0 -object iothread,id=iothread1 - -object colo-compare,id=comp0,primary_in=compare0-0,secondary_in=compare1,outdev=compare_out0,notify_dev=nofity_way,iothread=iothread1 + -object colo-compare,id=comp0,primary_in=compare0-0,secondary_in=compare1,outdev=compare_out0,notify_dev=notify_way,iothread=iothread1 secondary: - -netdev tap,id=hn0,vhost=off,script=/etc/qemu-ifup,down script=/etc/qemu-ifdown + -netdev tap,id=hn0,vhost=off -device e1000,netdev=hn0,mac=52:a4:00:12:78:66 -chardev socket,id=red0,host=3.3.3.3,port=9003 -chardev socket,id=red1,host=3.3.3.3,port=9004 diff --git a/qga/commands-posix.c b/qga/commands-posix.c index 6169bbf7a01..26008db497e 100644 --- a/qga/commands-posix.c +++ b/qga/commands-posix.c @@ -45,9 +45,12 @@ #include #include #include -#if defined(__NetBSD__) || defined(__OpenBSD__) +#if defined(__NetBSD__) || defined(__OpenBSD__) || defined(CONFIG_SOLARIS) #include #include +#if !defined(ETHER_ADDR_LEN) && defined(ETHERADDRL) +#define ETHER_ADDR_LEN ETHERADDRL +#endif #else #include #endif diff --git a/qga/commands-win32.c b/qga/commands-win32.c index 697c65507ca..6242737b005 100644 --- a/qga/commands-win32.c +++ b/qga/commands-win32.c @@ -935,6 +935,8 @@ static GuestDiskAddressList *build_guest_disk_info(char *guid, Error **errp) DWORD last_err = GetLastError(); if (last_err == ERROR_MORE_DATA) { /* Try once more with big enough buffer */ + size = sizeof(VOLUME_DISK_EXTENTS) + + (sizeof(DISK_EXTENT) * (extents->NumberOfDiskExtents - 1)); g_free(extents); extents = g_malloc0(size); if (!DeviceIoControl( @@ -2118,49 +2120,47 @@ GuestUserList *qmp_guest_get_users(Error **errp) typedef struct _ga_matrix_lookup_t { int major; int minor; - char const *version; - char const *version_id; + const char *version; + const char *version_id; } ga_matrix_lookup_t; -static ga_matrix_lookup_t const WIN_VERSION_MATRIX[2][7] = { - { - /* Desktop editions */ - { 5, 0, "Microsoft Windows 2000", "2000"}, - { 5, 1, "Microsoft Windows XP", "xp"}, - { 6, 0, "Microsoft Windows Vista", "vista"}, - { 6, 1, "Microsoft Windows 7" "7"}, - { 6, 2, "Microsoft Windows 8", "8"}, - { 6, 3, "Microsoft Windows 8.1", "8.1"}, - { 0, 0, 0} - },{ - /* Server editions */ - { 5, 2, "Microsoft Windows Server 2003", 
"2003"}, - { 6, 0, "Microsoft Windows Server 2008", "2008"}, - { 6, 1, "Microsoft Windows Server 2008 R2", "2008r2"}, - { 6, 2, "Microsoft Windows Server 2012", "2012"}, - { 6, 3, "Microsoft Windows Server 2012 R2", "2012r2"}, - { 0, 0, 0}, - { 0, 0, 0} - } +static const ga_matrix_lookup_t WIN_CLIENT_VERSION_MATRIX[] = { + { 5, 0, "Microsoft Windows 2000", "2000"}, + { 5, 1, "Microsoft Windows XP", "xp"}, + { 6, 0, "Microsoft Windows Vista", "vista"}, + { 6, 1, "Microsoft Windows 7" "7"}, + { 6, 2, "Microsoft Windows 8", "8"}, + { 6, 3, "Microsoft Windows 8.1", "8.1"}, + { } +}; + +static const ga_matrix_lookup_t WIN_SERVER_VERSION_MATRIX[] = { + { 5, 2, "Microsoft Windows Server 2003", "2003"}, + { 6, 0, "Microsoft Windows Server 2008", "2008"}, + { 6, 1, "Microsoft Windows Server 2008 R2", "2008r2"}, + { 6, 2, "Microsoft Windows Server 2012", "2012"}, + { 6, 3, "Microsoft Windows Server 2012 R2", "2012r2"}, + { }, }; typedef struct _ga_win_10_0_t { int first_build; - char const *version; - char const *version_id; + const char *version; + const char *version_id; } ga_win_10_0_t; -static ga_win_10_0_t const WIN_10_0_SERVER_VERSION_MATRIX[4] = { +static const ga_win_10_0_t WIN_10_0_SERVER_VERSION_MATRIX[] = { {14393, "Microsoft Windows Server 2016", "2016"}, {17763, "Microsoft Windows Server 2019", "2019"}, {20344, "Microsoft Windows Server 2022", "2022"}, - {0, 0} + {26040, "MIcrosoft Windows Server 2025", "2025"}, + { } }; -static ga_win_10_0_t const WIN_10_0_CLIENT_VERSION_MATRIX[3] = { +static const ga_win_10_0_t WIN_10_0_CLIENT_VERSION_MATRIX[] = { {10240, "Microsoft Windows 10", "10"}, {22000, "Microsoft Windows 11", "11"}, - {0, 0} + { } }; static void ga_get_win_version(RTL_OSVERSIONINFOEXW *info, Error **errp) @@ -2183,16 +2183,17 @@ static void ga_get_win_version(RTL_OSVERSIONINFOEXW *info, Error **errp) return; } -static char *ga_get_win_name(OSVERSIONINFOEXW const *os_version, bool id) +static char *ga_get_win_name(const OSVERSIONINFOEXW *os_version, bool id) { DWORD major = os_version->dwMajorVersion; DWORD minor = os_version->dwMinorVersion; DWORD build = os_version->dwBuildNumber; int tbl_idx = (os_version->wProductType != VER_NT_WORKSTATION); - ga_matrix_lookup_t const *table = WIN_VERSION_MATRIX[tbl_idx]; - ga_win_10_0_t const *win_10_0_table = tbl_idx ? + const ga_matrix_lookup_t *table = tbl_idx ? + WIN_SERVER_VERSION_MATRIX : WIN_CLIENT_VERSION_MATRIX; + const ga_win_10_0_t *win_10_0_table = tbl_idx ? 
WIN_10_0_SERVER_VERSION_MATRIX : WIN_10_0_CLIENT_VERSION_MATRIX; - ga_win_10_0_t const *win_10_0_version = NULL; + const ga_win_10_0_t *win_10_0_version = NULL; while (table->version != NULL) { if (major == 10 && minor == 0) { while (win_10_0_table->version != NULL) { diff --git a/qga/main.c b/qga/main.c index 8668b9f3d39..bdf53445848 100644 --- a/qga/main.c +++ b/qga/main.c @@ -261,9 +261,9 @@ QEMU_COPYRIGHT "\n" " -s, --service service commands: install, uninstall, vss-install, vss-uninstall\n" #endif " -b, --block-rpcs comma-separated list of RPCs to disable (no spaces,\n" -" use \"help\" to list available RPCs)\n" +" use \"--block-rpcs=help\" to list available RPCs)\n" " -a, --allow-rpcs comma-separated list of RPCs to enable (no spaces,\n" -" use \"help\" to list available RPCs)\n" +" use \"--allow-rpcs=help\" to list available RPCs)\n" " -D, --dump-conf dump a qemu-ga config file based on current config\n" " options / command-line parameters to stdout\n" " -r, --retry-path attempt re-opening path if it's unavailable or closed\n" diff --git a/qga/meson.build b/qga/meson.build index ff7a8496e48..1c3d2a3d1b7 100644 --- a/qga/meson.build +++ b/qga/meson.build @@ -7,7 +7,7 @@ if not have_ga endif have_qga_vss = get_option('qga_vss') \ - .require(targetos == 'windows', + .require(host_os == 'windows', error_message: 'VSS support requires Windows') \ .require('cpp' in all_languages, error_message: 'VSS support requires a C++ compiler') \ @@ -67,29 +67,31 @@ qga_ss.add(files( 'main.c', 'cutils.c', )) -qga_ss.add(when: 'CONFIG_POSIX', if_true: files( - 'channel-posix.c', - 'commands-posix.c', - 'commands-posix-ssh.c', -)) -qga_ss.add(when: 'CONFIG_LINUX', if_true: files( - 'commands-linux.c', -)) -qga_ss.add(when: 'CONFIG_BSD', if_true: files( - 'commands-bsd.c', -)) -qga_ss.add(when: 'CONFIG_WIN32', if_true: files( - 'channel-win32.c', - 'commands-win32.c', - 'service-win32.c', - 'vss-win32.c' -)) +if host_os == 'windows' + qga_ss.add(files( + 'channel-win32.c', + 'commands-win32.c', + 'service-win32.c', + 'vss-win32.c' + )) +else + qga_ss.add(files( + 'channel-posix.c', + 'commands-posix.c', + 'commands-posix-ssh.c', + )) + if host_os == 'linux' + qga_ss.add(files('commands-linux.c')) + elif host_os in bsd_oses + qga_ss.add(files('commands-bsd.c')) + endif +endif -qga_ss = qga_ss.apply(config_targetos, strict: false) +qga_ss = qga_ss.apply({}) gen_tlb = [] qga_libs = [] -if targetos == 'windows' +if host_os == 'windows' qga_libs += ['-lws2_32', '-lwinmm', '-lpowrprof', '-lwtsapi32', '-lwininet', '-liphlpapi', '-lnetapi32', '-lsetupapi', '-lcfgmgr32'] if have_qga_vss @@ -99,7 +101,7 @@ if targetos == 'windows' endif qga_objs = [] -if targetos == 'windows' +if host_os == 'windows' windmc = find_program('windmc', required: true) windres = find_program('windres', required: true) @@ -121,7 +123,7 @@ qga = executable('qemu-ga', qga_ss.sources() + qga_objs, install: true) all_qga += qga -if targetos == 'windows' +if host_os == 'windows' qemu_ga_msi_arch = { 'x86': ['-D', 'Arch=32'], 'x86_64': ['-a', 'x64', '-D', 'Arch=64'] @@ -140,7 +142,7 @@ if targetos == 'windows' qemu_ga_msi_vss = ['-D', 'InstallVss'] deps += qga_vss endif - if glib.version() < '2.73.2' + if glib.version().version_compare('<2.73.2') libpcre = 'libpcre1' else libpcre = 'libpcre2' @@ -183,7 +185,7 @@ test_env.set('G_TEST_BUILDDIR', meson.current_build_dir()) # the leak detector in build-oss-fuzz Gitlab CI test. 
we should re-enable # this when an alternative is implemented or when the underlying glib # issue is identified/fix -#if targetos != 'windows' +#if host_os != 'windows' if false srcs = [files('commands-posix-ssh.c')] i = 0 diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json index 876e2a8ea85..d5af1550077 100644 --- a/qga/qapi-schema.json +++ b/qga/qapi-schema.json @@ -33,7 +33,10 @@ 'guest-get-time', 'guest-set-vcpus', 'guest-sync', - 'guest-sync-delimited' ] } } + 'guest-sync-delimited' ], + # Types and commands with undocumented members: + 'documentation-exceptions': [ + 'GuestNVMeSmart' ] } } ## # @guest-sync-delimited: @@ -150,8 +153,6 @@ # @time: time of nanoseconds, relative to the Epoch of 1970-01-01 in # UTC. # -# Returns: Nothing on success. -# # Since: 1.5 ## { 'command': 'guest-set-time', @@ -227,7 +228,7 @@ # # @mode: open mode, as per fopen(), "r" is the default. # -# Returns: Guest file handle on success. +# Returns: Guest file handle # # Since: 0.15.0 ## @@ -242,8 +243,6 @@ # # @handle: filehandle returned by guest-file-open # -# Returns: Nothing on success. -# # Since: 0.15.0 ## { 'command': 'guest-file-close', @@ -278,7 +277,7 @@ # @count: maximum number of bytes to read (default is 4KB, maximum is # 48MB) # -# Returns: @GuestFileRead on success. +# Returns: @GuestFileRead # # Since: 0.15.0 ## @@ -313,7 +312,7 @@ # @count: bytes to write (actual bytes, after base64-decode), default # is all content in buf-b64 buffer after base64 decoding # -# Returns: @GuestFileWrite on success. +# Returns: @GuestFileWrite # # Since: 0.15.0 ## @@ -380,7 +379,7 @@ # # @whence: Symbolic or numeric code for interpreting offset # -# Returns: @GuestFileSeek on success. +# Returns: @GuestFileSeek # # Since: 0.15.0 ## @@ -396,8 +395,6 @@ # # @handle: filehandle returned by guest-file-open # -# Returns: Nothing on success. -# # Since: 0.15.0 ## { 'command': 'guest-file-flush', @@ -440,15 +437,16 @@ # command succeeded, you may call @guest-fsfreeze-thaw later to # unfreeze. # +# On error, all filesystems will be thawed. If no filesystems are +# frozen as a result of this call, then @guest-fsfreeze-status will +# remain "thawed" and calling @guest-fsfreeze-thaw is not necessary. +# +# Returns: Number of file systems currently frozen. +# # Note: On Windows, the command is implemented with the help of a # Volume Shadow-copy Service DLL helper. The frozen state is # limited for up to 10 seconds by VSS. # -# Returns: Number of file systems currently frozen. On error, all -# filesystems will be thawed. If no filesystems are frozen as a -# result of this call, then @guest-fsfreeze-status will remain -# "thawed" and calling @guest-fsfreeze-thaw is not necessary. -# # Since: 0.15.0 ## { 'command': 'guest-fsfreeze-freeze', @@ -460,12 +458,13 @@ # Sync and freeze specified guest filesystems. See also # @guest-fsfreeze-freeze. # +# On error, all filesystems will be thawed. +# # @mountpoints: an array of mountpoints of filesystems to be frozen. # If omitted, every mounted filesystem is frozen. Invalid mount # points are ignored. # -# Returns: Number of file systems currently frozen. On error, all -# filesystems will be thawed. +# Returns: Number of file systems currently frozen. # # Since: 2.2 ## @@ -558,9 +557,8 @@ # could also exit (or set its status to "shutdown") due to other # reasons. 
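The guest-agent commands whose documentation is touched above are driven from the host as line-delimited JSON, in the same style as QMP. As a rough, non-normative sketch of the ``guest-fsfreeze-freeze`` contract spelled out here (the return value is the number of file systems frozen, and a failed call leaves everything thawed), assuming the agent's chardev is exposed as a UNIX socket at a made-up path:

    import json
    import socket

    GA_SOCKET = "/tmp/qga.sock"   # assumption: host-side UNIX socket of the qemu-ga chardev

    def ga_command(execute, arguments=None):
        """Send one guest-agent command and return its 'return' member."""
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(GA_SOCKET)
            cmd = {"execute": execute}
            if arguments is not None:
                cmd["arguments"] = arguments
            s.sendall(json.dumps(cmd).encode() + b"\n")
            reply = json.loads(s.makefile().readline())
        if "error" in reply:
            raise RuntimeError(reply["error"])
        return reply.get("return")

    # Number of file systems frozen; on error everything is thawed again,
    # so the caller does not need a cleanup path of its own.
    print("frozen:", ga_command("guest-fsfreeze-freeze"))
    print("status:", ga_command("guest-fsfreeze-status"))
    print("thawed:", ga_command("guest-fsfreeze-thaw"))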
# -# The following errors may be returned: -# -# - If suspend to disk is not supported, Unsupported +# Errors: +# - If suspend to disk is not supported, Unsupported # # Notes: It's strongly recommended to issue the guest-sync command # before sending commands when the guest resumes @@ -595,9 +593,8 @@ # 2. Issue the query-status QMP command to confirm the VM status is # "suspended" # -# The following errors may be returned: -# -# - If suspend to ram is not supported, Unsupported +# Errors: +# - If suspend to ram is not supported, Unsupported # # Notes: It's strongly recommended to issue the guest-sync command # before sending commands when the guest resumes @@ -631,9 +628,8 @@ # 2. Issue the query-status QMP command to confirm the VM status is # "suspended" # -# The following errors may be returned: -# -# - If hybrid suspend is not supported, Unsupported +# Errors: +# - If hybrid suspend is not supported, Unsupported # # Notes: It's strongly recommended to issue the guest-sync command # before sending commands when the guest resumes @@ -729,7 +725,7 @@ # # Get list of guest IP addresses, MAC addresses and netmasks. # -# Returns: List of GuestNetworkInterface on success. +# Returns: List of GuestNetworkInterface # # Since: 1.1 ## @@ -776,14 +772,15 @@ # Attempt to reconfigure (currently: enable/disable) logical # processors inside the guest. # -# The input list is processed node by node in order. In each node -# @logical-id is used to look up the guest VCPU, for which @online -# specifies the requested state. The set of distinct @logical-id's is -# only required to be a subset of the guest-supported identifiers. -# There's no restriction on list length or on repeating the same -# @logical-id (with possibly different @online field). Preferably the -# input list should describe a modified subset of @guest-get-vcpus' -# return value. +# @vcpus: The logical processors to be reconfigured. This list is +# processed node by node in order. In each node @logical-id is +# used to look up the guest VCPU, for which @online specifies the +# requested state. The set of distinct @logical-id's is only +# required to be a subset of the guest-supported identifiers. +# There's no restriction on list length or on repeating the same +# @logical-id (with possibly different @online field). Preferably +# the input list should describe a modified subset of +# @guest-get-vcpus' return value. # # Returns: The length of the initial sublist that has been # successfully processed. The guest agent maximizes this value. @@ -792,9 +789,6 @@ # - 0: # if the @vcpus list was empty on input. Guest state has not # been changed. Otherwise, -# - Error: -# processing the first node of @vcpus failed for the reason -# returned. Guest state has not been changed. Otherwise, # - < length(@vcpus): # more than zero initial nodes have been processed, but not the # entire @vcpus list. Guest state has changed accordingly. To @@ -804,6 +798,10 @@ # - length(@vcpus): # call successful. # +# Errors: +# - If the reconfiguration of the first node in @vcpus failed. +# Guest state has not been changed. +# # Since: 1.5 ## { 'command': 'guest-set-vcpus', @@ -934,6 +932,8 @@ # NVMe smart information, based on NVMe specification, section # # +# TODO: document members briefly +# # Since: 7.1 ## { 'struct': 'GuestNVMeSmart', @@ -968,7 +968,7 @@ # # Disk type related smart information. 
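The ``guest-set-vcpus`` return-value contract restated above (the count of initially processed nodes, with a first-node failure now reported as an error instead) is easy to misread on the client side; a small self-contained illustration, with an invented request and an invented return value:

    # Hypothetical request submitted to guest-set-vcpus and hypothetical result
    vcpus = [{"logical-id": 1, "online": True},
             {"logical-id": 2, "online": True},
             {"logical-id": 3, "online": False}]
    processed = 2     # length of the initial sublist that was applied

    if processed == len(vcpus):
        print("every node was processed")
    elif processed > 0:
        # Guest state changed for vcpus[:processed]; why processing stopped at
        # the next node can be probed with guest-get-vcpus before retrying.
        print("stopped before:", vcpus[processed])
    else:
        print("input list was empty, guest state unchanged")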
# -# - @nvme: NVMe disk smart +# @type: disk bus type # # Since: 7.1 ## @@ -1073,8 +1073,6 @@ # transmission, even if already crypt()d, to ensure it is 8-bit safe # when passed as JSON. # -# Returns: Nothing on success. -# # Since: 2.3 ## { 'command': 'guest-set-user-password', @@ -1163,22 +1161,24 @@ # Attempt to reconfigure (currently: enable/disable) state of memory # blocks inside the guest. # -# The input list is processed node by node in order. In each node -# @phys-index is used to look up the guest MEMORY BLOCK, for which -# @online specifies the requested state. The set of distinct -# @phys-index's is only required to be a subset of the guest-supported -# identifiers. There's no restriction on list length or on repeating -# the same @phys-index (with possibly different @online field). -# Preferably the input list should describe a modified subset of -# @guest-get-memory-blocks' return value. +# @mem-blks: The memory blocks to be reconfigured. This list is +# processed node by node in order. In each node @phys-index is +# used to look up the guest MEMORY BLOCK, for which @online +# specifies the requested state. The set of distinct +# @phys-index's is only required to be a subset of the +# guest-supported identifiers. There's no restriction on list +# length or on repeating the same @phys-index (with possibly +# different @online field). Preferably the input list should +# describe a modified subset of @guest-get-memory-blocks' return +# value. # # Returns: The operation results, it is a list of # @GuestMemoryBlockResponse, which is corresponding to the input # list. # -# Note: it will return NULL if the @mem-blks list was empty on -# input, or there is an error, and in this case, guest state will -# not be changed. +# Note: it will return an empty list if the @mem-blks list was +# empty on input, or there is an error, and in this case, guest +# state will not be changed. # # Since: 2.3 ## @@ -1220,13 +1220,13 @@ # @signal: signal number (linux) or unhandled exception code (windows) # if the process was abnormally terminated. # -# @out-data: base64-encoded stdout of the process. This field will only -# be populated after the process exits. +# @out-data: base64-encoded stdout of the process. This field will +# only be populated after the process exits. # -# @err-data: base64-encoded stderr of the process. Note: @out-data and -# @err-data are present only if 'capture-output' was specified for -# 'guest-exec'. This field will only be populated after the process -# exits. +# @err-data: base64-encoded stderr of the process. Note: @out-data +# and @err-data are present only if 'capture-output' was specified +# for 'guest-exec'. This field will only be populated after the +# process exits. # # @out-truncated: true if stdout was not fully captured due to size # limitation. @@ -1249,7 +1249,7 @@ # # @pid: pid returned from guest-exec # -# Returns: GuestExecStatus on success. +# Returns: GuestExecStatus # # Since: 2.5 ## @@ -1273,12 +1273,16 @@ # An enumeration of guest-exec capture modes. # # @none: do not capture any output +# # @stdout: only capture stdout +# # @stderr: only capture stderr +# # @separated: capture both stdout and stderr, but separated into -# GuestExecStatus out-data and err-data, respectively -# @merged: capture both stdout and stderr, but merge together -# into out-data. not effective on windows guests. +# GuestExecStatus out-data and err-data, respectively +# +# @merged: capture both stdout and stderr, but merge together into +# out-data. 
Not effective on windows guests. # # Since: 8.0 ## @@ -1291,8 +1295,9 @@ # # Controls what guest-exec output gets captures. # -# @flag: captures both stdout and stderr if true. Equivalent -# to GuestExecCaptureOutputMode::all. (since 2.5) +# @flag: captures both stdout and stderr if true. Equivalent to +# GuestExecCaptureOutputMode::all. (since 2.5) +# # @mode: capture mode; preferred interface # # Since: 8.0 @@ -1315,9 +1320,9 @@ # @input-data: data to be passed to process stdin (base64 encoded) # # @capture-output: bool flag to enable capture of stdout/stderr of -# running process. defaults to false. +# running process. Defaults to false. # -# Returns: PID on success. +# Returns: PID # # Since: 2.5 ## @@ -1346,7 +1351,7 @@ # or even present in DNS or some other name service at all. It need # not even be unique on your local network or site, but usually it is. # -# Returns: the host name of the machine on success +# Returns: the host name of the machine # # Since: 2.10 ## @@ -1487,6 +1492,8 @@ ## # @GuestDeviceType: +# +# @pci: PCI device ## { 'enum': 'GuestDeviceType', 'data': [ 'pci' ] } @@ -1506,7 +1513,9 @@ ## # @GuestDeviceId: # -# Id of the device - @pci: PCI ID, since: 5.2 +# Id of the device +# +# @type: device type # # Since: 5.2 ## @@ -1565,11 +1574,11 @@ ## # @guest-ssh-get-authorized-keys: # -# @username: the user account to add the authorized keys -# # Return the public keys from user .ssh/authorized_keys on Unix # systems (not implemented for other systems). # +# @username: the user account to add the authorized keys +# # Returns: @GuestAuthorizedKeys # # Since: 5.2 @@ -1582,6 +1591,9 @@ ## # @guest-ssh-add-authorized-keys: # +# Append public keys to user .ssh/authorized_keys on Unix systems (not +# implemented for other systems). +# # @username: the user account to add the authorized keys # # @keys: the public keys to add (in OpenSSH/sshd(8) authorized_keys @@ -1589,11 +1601,6 @@ # # @reset: ignore the existing content, set it with the given keys only # -# Append public keys to user .ssh/authorized_keys on Unix systems (not -# implemented for other systems). -# -# Returns: Nothing on success. -# # Since: 5.2 ## { 'command': 'guest-ssh-add-authorized-keys', @@ -1603,16 +1610,14 @@ ## # @guest-ssh-remove-authorized-keys: # -# @username: the user account to remove the authorized keys -# -# @keys: the public keys to remove (in OpenSSH/sshd(8) authorized_keys -# format) -# # Remove public keys from the user .ssh/authorized_keys on Unix # systems (not implemented for other systems). It's not an error if # the key is already missing. # -# Returns: Nothing on success. +# @username: the user account to remove the authorized keys +# +# @keys: the public keys to remove (in OpenSSH/sshd(8) authorized_keys +# format) # # Since: 5.2 ## @@ -1688,6 +1693,8 @@ # @major: major device number of disk # # @minor: minor device number of disk +# +# @stats: I/O statistics ## { 'struct': 'GuestDiskStatsInfo', 'data': {'name': 'str', @@ -1711,7 +1718,9 @@ ## # @GuestCpuStatsType: # -# An enumeration of OS type +# Guest operating systems supporting CPU statistics +# +# @linux: Linux # # Since: 7.1 ## @@ -1768,7 +1777,7 @@ # # Get statistics of each CPU in millisecond. 
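The ``guest-exec``/``guest-exec-status`` split documented above (only a PID comes back immediately, and ``out-data``/``err-data`` are populated after the process exits) can be exercised with a sketch like the one below; the helper and socket path repeat the assumptions of the earlier fsfreeze example and are not part of this patch:

    import base64
    import json
    import socket
    import time

    GA_SOCKET = "/tmp/qga.sock"   # assumption, as in the earlier sketch

    def ga_command(execute, arguments=None):
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(GA_SOCKET)
            s.sendall(json.dumps({"execute": execute,
                                  "arguments": arguments or {}}).encode() + b"\n")
            return json.loads(s.makefile().readline())["return"]

    # Start the process with output capture; guest-exec only returns a PID.
    pid = ga_command("guest-exec", {"path": "/bin/ls", "arg": ["/"],
                                    "capture-output": True})["pid"]

    # Poll guest-exec-status until 'exited' is true, then decode out-data,
    # which is base64 encoded and only present once the process has exited.
    while True:
        status = ga_command("guest-exec-status", {"pid": pid})
        if status["exited"]:
            break
        time.sleep(0.2)
    print(base64.b64decode(status.get("out-data", "")).decode())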
# -# - @linux: Linux style CPU statistics +# @type: guest operating system # # Since: 7.1 ## diff --git a/qom/object.c b/qom/object.c index 95c0dc8285f..d4a001cf411 100644 --- a/qom/object.c +++ b/qom/object.c @@ -138,9 +138,46 @@ static TypeImpl *type_new(const TypeInfo *info) return ti; } +static bool type_name_is_valid(const char *name) +{ + const int slen = strlen(name); + int plen; + + g_assert(slen > 1); + + /* + * Ideally, the name should start with a letter - however, we've got + * too many names starting with a digit already, so allow digits here, + * too (except '0' which is not used yet) + */ + if (!g_ascii_isalnum(name[0]) || name[0] == '0') { + return false; + } + + plen = strspn(name, "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789-_."); + + /* Allow some legacy names with '+' in it for compatibility reasons */ + if (name[plen] == '+') { + if (plen >= 17 && g_str_has_prefix(name, "Sun-UltraSparc-I")) { + /* Allow "Sun-UltraSparc-IV+" and "Sun-UltraSparc-IIIi+" */ + return true; + } + } + + return plen == slen; +} + static TypeImpl *type_register_internal(const TypeInfo *info) { TypeImpl *ti; + + if (!type_name_is_valid(info->name)) { + fprintf(stderr, "Registering '%s' with illegal type name\n", info->name); + abort(); + } + ti = type_new(info); type_table_add(ti); @@ -2192,6 +2229,22 @@ Object *object_resolve_path_at(Object *parent, const char *path) return object_resolve_abs_path(parent, parts, TYPE_OBJECT); } +Object *object_resolve_type_unambiguous(const char *typename, Error **errp) +{ + bool ambig; + Object *o = object_resolve_path_type("", typename, &ambig); + + if (ambig) { + error_setg(errp, "More than one object of type %s", typename); + return NULL; + } + if (!o) { + error_setg(errp, "No object found of type %s", typename); + return NULL; + } + return o; +} + typedef struct StringProperty { char *(*get)(Object *, Error **); diff --git a/replay/replay-char.c b/replay/replay-char.c index a31aded032e..72b1f832dde 100644 --- a/replay/replay-char.c +++ b/replay/replay-char.c @@ -113,8 +113,7 @@ void replay_char_write_event_load(int *res, int *offset) *offset = replay_get_dword(); replay_finish_event(); } else { - error_report("Missing character write event in the replay log"); - exit(1); + replay_sync_error("Missing character write event in the replay log"); } } @@ -135,8 +134,7 @@ int replay_char_read_all_load(uint8_t *buf) replay_finish_event(); return res; } else { - error_report("Missing character read all event in the replay log"); - exit(1); + replay_sync_error("Missing character read all event in the replay log"); } } diff --git a/replay/replay-debugging.c b/replay/replay-debugging.c index 3e60549a4ae..82c66fff262 100644 --- a/replay/replay-debugging.c +++ b/replay/replay-debugging.c @@ -144,7 +144,6 @@ static char *replay_find_nearest_snapshot(int64_t icount, char *ret = NULL; int rv; int nb_sns, i; - AioContext *aio_context; *snapshot_icount = -1; @@ -152,11 +151,8 @@ static char *replay_find_nearest_snapshot(int64_t icount, if (!bs) { goto fail; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); nb_sns = bdrv_snapshot_list(bs, &sn_tab); - aio_context_release(aio_context); for (i = 0; i < nb_sns; i++) { rv = bdrv_all_has_snapshot(sn_tab[i].name, false, NULL, NULL); diff --git a/replay/replay-internal.c b/replay/replay-internal.c index 77d0c82327e..13fcbdd8f42 100644 --- a/replay/replay-internal.c +++ b/replay/replay-internal.c @@ -175,11 +175,12 @@ void replay_fetch_data_kind(void) if (replay_file) { if 
(!replay_state.has_unread_data) { replay_state.data_kind = replay_get_byte(); + replay_state.current_event++; if (replay_state.data_kind == EVENT_INSTRUCTION) { replay_state.instruction_count = replay_get_dword(); } replay_check_error(); - replay_state.has_unread_data = 1; + replay_state.has_unread_data = true; if (replay_state.data_kind >= EVENT_COUNT) { error_report("Replay: unknown event kind %d", replay_state.data_kind); @@ -191,7 +192,7 @@ void replay_fetch_data_kind(void) void replay_finish_event(void) { - replay_state.has_unread_data = 0; + replay_state.has_unread_data = false; replay_fetch_data_kind(); } @@ -216,7 +217,7 @@ void replay_mutex_lock(void) { if (replay_mode != REPLAY_MODE_NONE) { unsigned long id; - g_assert(!qemu_mutex_iothread_locked()); + g_assert(!bql_locked()); g_assert(!replay_mutex_locked()); qemu_mutex_lock(&lock); id = mutex_tail++; diff --git a/replay/replay-internal.h b/replay/replay-internal.h index b6836354ac5..75249b76936 100644 --- a/replay/replay-internal.h +++ b/replay/replay-internal.h @@ -25,7 +25,12 @@ typedef enum ReplayAsyncEventKind { REPLAY_ASYNC_COUNT } ReplayAsyncEventKind; -/* Any changes to order/number of events will need to bump REPLAY_VERSION */ +/* + * Any changes to order/number of events will need to bump + * REPLAY_VERSION to prevent confusion with old logs. Also don't + * forget to update replay_event_name() to make your debugging life + * easier. + */ enum ReplayEvents { /* for instruction event */ EVENT_INSTRUCTION, @@ -63,26 +68,33 @@ enum ReplayEvents { EVENT_COUNT }; +/** + * typedef ReplayState - global tracking Replay state + * + * This structure tracks where we are in the current ReplayState + * including the logged events from the recorded replay stream. Some + * of the data is also stored/restored from VMStateDescription when VM + * save/restore events take place. + * + * @cached_clock: Cached clocks values + * @current_icount: number of processed instructions + * @instruction_count: number of instructions until next event + * @current_event: current event index + * @data_kind: current event + * @has_unread_data: true if event not yet processed + * @file_offset: offset into replay log at replay snapshot + * @block_request_id: current serialised block request id + * @read_event_id: current async read event id + */ typedef struct ReplayState { - /*! Cached clock values. */ int64_t cached_clock[REPLAY_CLOCK_COUNT]; - /*! Current icount - number of processed instructions. */ uint64_t current_icount; - /*! Number of instructions to be executed before other events happen. */ int instruction_count; - /*! Type of the currently executed event. */ + unsigned int current_event; unsigned int data_kind; - /*! Flag which indicates that event is not processed yet. */ - unsigned int has_unread_data; - /*! Temporary variable for saving current log offset. */ + bool has_unread_data; uint64_t file_offset; - /*! Next block operation id. - This counter is global, because requests from different - block devices should not get overlapping ids. */ uint64_t block_request_id; - /*! Prior value of the host clock */ - uint64_t host_clock_last; - /*! Asynchronous event id read from the log */ uint64_t read_event_id; } ReplayState; extern ReplayState replay_state; @@ -183,6 +195,16 @@ void replay_event_net_save(void *opaque); /*! Reads network from the file. 
*/ void *replay_event_net_load(void); +/* Diagnostics */ + +/** + * replay_sync_error(): report sync error and exit + * + * When we reach an error condition we want to report it centrally so + * we can also dump some useful information into the logs. + */ +G_NORETURN void replay_sync_error(const char *error); + /* VMState-related functions */ /* Registers replay VMState. diff --git a/replay/replay-snapshot.c b/replay/replay-snapshot.c index 10a7cf79927..ccb4d89dda7 100644 --- a/replay/replay-snapshot.c +++ b/replay/replay-snapshot.c @@ -47,16 +47,17 @@ static int replay_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_replay = { .name = "replay", - .version_id = 2, - .minimum_version_id = 2, + .version_id = 3, + .minimum_version_id = 3, .pre_save = replay_pre_save, .post_load = replay_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64_ARRAY(cached_clock, ReplayState, REPLAY_CLOCK_COUNT), VMSTATE_UINT64(current_icount, ReplayState), VMSTATE_INT32(instruction_count, ReplayState), + VMSTATE_UINT32(current_event, ReplayState), VMSTATE_UINT32(data_kind, ReplayState), - VMSTATE_UINT32(has_unread_data, ReplayState), + VMSTATE_BOOL(has_unread_data, ReplayState), VMSTATE_UINT64(file_offset, ReplayState), VMSTATE_UINT64(block_request_id, ReplayState), VMSTATE_UINT64(read_event_id, ReplayState), diff --git a/replay/replay.c b/replay/replay.c index 0f7d766efe8..a2c576c16e7 100644 --- a/replay/replay.c +++ b/replay/replay.c @@ -38,6 +38,107 @@ static GSList *replay_blockers; uint64_t replay_break_icount = -1ULL; QEMUTimer *replay_break_timer; +/* Pretty print event names */ + +static const char *replay_async_event_name(ReplayAsyncEventKind event) +{ + switch (event) { +#define ASYNC_EVENT(_x) case REPLAY_ASYNC_EVENT_ ## _x: return "ASYNC_EVENT_"#_x + ASYNC_EVENT(BH); + ASYNC_EVENT(BH_ONESHOT); + ASYNC_EVENT(INPUT); + ASYNC_EVENT(INPUT_SYNC); + ASYNC_EVENT(CHAR_READ); + ASYNC_EVENT(BLOCK); + ASYNC_EVENT(NET); +#undef ASYNC_EVENT + default: + g_assert_not_reached(); + } +} + +static const char *replay_clock_event_name(ReplayClockKind clock) +{ + switch (clock) { +#define CLOCK_EVENT(_x) case REPLAY_CLOCK_ ## _x: return "CLOCK_" #_x + CLOCK_EVENT(HOST); + CLOCK_EVENT(VIRTUAL_RT); +#undef CLOCK_EVENT + default: + g_assert_not_reached(); + } +} + +/* Pretty print shutdown event names */ +static const char *replay_shutdown_event_name(ShutdownCause cause) +{ + switch (cause) { +#define SHUTDOWN_EVENT(_x) case SHUTDOWN_CAUSE_ ## _x: return "SHUTDOWN_CAUSE_" #_x + SHUTDOWN_EVENT(NONE); + SHUTDOWN_EVENT(HOST_ERROR); + SHUTDOWN_EVENT(HOST_QMP_QUIT); + SHUTDOWN_EVENT(HOST_QMP_SYSTEM_RESET); + SHUTDOWN_EVENT(HOST_SIGNAL); + SHUTDOWN_EVENT(HOST_UI); + SHUTDOWN_EVENT(GUEST_SHUTDOWN); + SHUTDOWN_EVENT(GUEST_RESET); + SHUTDOWN_EVENT(GUEST_PANIC); + SHUTDOWN_EVENT(SUBSYSTEM_RESET); + SHUTDOWN_EVENT(SNAPSHOT_LOAD); +#undef SHUTDOWN_EVENT + default: + g_assert_not_reached(); + } +} + +static const char *replay_checkpoint_event_name(enum ReplayCheckpoint checkpoint) +{ + switch (checkpoint) { +#define CHECKPOINT_EVENT(_x) case CHECKPOINT_ ## _x: return "CHECKPOINT_" #_x + CHECKPOINT_EVENT(CLOCK_WARP_START); + CHECKPOINT_EVENT(CLOCK_WARP_ACCOUNT); + CHECKPOINT_EVENT(RESET_REQUESTED); + CHECKPOINT_EVENT(SUSPEND_REQUESTED); + CHECKPOINT_EVENT(CLOCK_VIRTUAL); + CHECKPOINT_EVENT(CLOCK_HOST); + CHECKPOINT_EVENT(CLOCK_VIRTUAL_RT); + CHECKPOINT_EVENT(INIT); + CHECKPOINT_EVENT(RESET); +#undef CHECKPOINT_EVENT + default: + g_assert_not_reached(); + } +} + 
+static const char *replay_event_name(enum ReplayEvents event) +{ + /* First deal with the simple ones */ + switch (event) { +#define EVENT(_x) case EVENT_ ## _x: return "EVENT_"#_x + EVENT(INSTRUCTION); + EVENT(INTERRUPT); + EVENT(EXCEPTION); + EVENT(CHAR_WRITE); + EVENT(CHAR_READ_ALL); + EVENT(AUDIO_OUT); + EVENT(AUDIO_IN); + EVENT(RANDOM); +#undef EVENT + default: + if (event >= EVENT_ASYNC && event <= EVENT_ASYNC_LAST) { + return replay_async_event_name(event - EVENT_ASYNC); + } else if (event >= EVENT_SHUTDOWN && event <= EVENT_SHUTDOWN_LAST) { + return replay_shutdown_event_name(event - EVENT_SHUTDOWN); + } else if (event >= EVENT_CLOCK && event <= EVENT_CLOCK_LAST) { + return replay_clock_event_name(event - EVENT_CLOCK); + } else if (event >= EVENT_CHECKPOINT && event <= EVENT_CHECKPOINT_LAST) { + return replay_checkpoint_event_name(event - EVENT_CHECKPOINT); + } + } + + g_assert_not_reached(); +} + bool replay_next_event_is(int event) { bool res = false; @@ -226,6 +327,15 @@ bool replay_has_event(void) return res; } +G_NORETURN void replay_sync_error(const char *error) +{ + error_report("%s (insn total %"PRId64"/%d left, event %d is %s)", error, + replay_state.current_icount, replay_state.instruction_count, + replay_state.current_event, + replay_event_name(replay_state.data_kind)); + abort(); +} + static void replay_enable(const char *fname, int mode) { const char *fmode = NULL; @@ -258,6 +368,7 @@ static void replay_enable(const char *fname, int mode) replay_state.data_kind = -1; replay_state.instruction_count = 0; replay_state.current_icount = 0; + replay_state.current_event = 0; replay_state.has_unread_data = 0; /* skip file header for RECORD and check it for PLAY */ @@ -338,6 +449,27 @@ void replay_start(void) replay_enable_events(); } +/* + * For none/record the answer is yes. + */ +bool replay_can_wait(void) +{ + if (replay_mode == REPLAY_MODE_PLAY) { + /* + * For playback we shouldn't ever be at a point we wait. If + * the instruction count has reached zero and we have an + * unconsumed event we should go around again and consume it. 
+ */ + if (replay_state.instruction_count == 0 && replay_state.has_unread_data) { + return false; + } else { + replay_sync_error("Playback shouldn't have to iowait"); + } + } + return true; +} + + void replay_finish(void) { if (replay_mode == REPLAY_MODE_NONE) { @@ -379,7 +511,7 @@ void replay_add_blocker(const char *feature) { Error *reason = NULL; - error_setg(&reason, "Record/replay feature is not supported for '%s'", + error_setg(&reason, "Record/replay is not supported with %s", feature); replay_blockers = g_slist_prepend(replay_blockers, reason); } diff --git a/roms/Makefile b/roms/Makefile index 67f709ba2dd..dfed2b216a1 100644 --- a/roms/Makefile +++ b/roms/Makefile @@ -41,8 +41,8 @@ x86_64_cross_prefix := $(call find-cross-prefix,x86_64) riscv32_cross_prefix := $(call find-cross-prefix,riscv32) riscv64_cross_prefix := $(call find-cross-prefix,riscv64) -# tag our seabios builds -SEABIOS_EXTRAVERSION="-prebuilt.qemu.org" +# tag our firmware builds +FIRMWARE_EXTRAVERSION = -prebuilt.qemu.org # # EfiRom utility is shipped with edk2 / tianocore, in BaseTools/ @@ -52,6 +52,8 @@ SEABIOS_EXTRAVERSION="-prebuilt.qemu.org" # EDK2_EFIROM = edk2/BaseTools/Source/C/bin/EfiRom +-include edk2-version + default help: @echo "nothing is build by default" @echo "available build targets:" @@ -68,6 +70,7 @@ default help: @echo " opensbi32-generic -- update OpenSBI for 32-bit generic machine" @echo " opensbi64-generic -- update OpenSBI for 64-bit generic machine" @echo " qboot -- update qboot" + @echo " hppa-firmware -- update 32- and 64-bit hppa firmware" @echo " clean -- delete the files generated by the previous" \ "build targets" @@ -90,12 +93,12 @@ build-seabios-config-%: config.% mkdir -p seabios/builds/$* cp $< seabios/builds/$*/.config $(MAKE) -C seabios \ - EXTRAVERSION=$(SEABIOS_EXTRAVERSION) \ + EXTRAVERSION=$(FIRMWARE_EXTRAVERSION) \ CROSS_PREFIX=$(x86_64_cross_prefix) \ KCONFIG_CONFIG=$(CURDIR)/seabios/builds/$*/.config \ OUT=$(CURDIR)/seabios/builds/$*/ oldnoconfig $(MAKE) -C seabios \ - EXTRAVERSION=$(SEABIOS_EXTRAVERSION) \ + EXTRAVERSION=$(FIRMWARE_EXTRAVERSION) \ CROSS_PREFIX=$(x86_64_cross_prefix) \ KCONFIG_CONFIG=$(CURDIR)/seabios/builds/$*/.config \ OUT=$(CURDIR)/seabios/builds/$*/ all @@ -146,10 +149,19 @@ skiboot: $(MAKE) -C skiboot CROSS=$(powerpc64_cross_prefix) cp skiboot/skiboot.lid ../pc-bios/skiboot.lid -efi: +edk2-version: edk2 + if test -e edk2/.git; then \ + echo "EDK2_STABLE = $$(cd edk2; git describe --tags --match 'edk2-stable*')" > $@; \ + echo "EDK2_DATE = $$(cd edk2; git log -1 --pretty='format:%cd' --date='format:%m/%d/%Y')" >> $@; \ + else \ + touch $@; \ + fi + +efi: edk2-version $(PYTHON) edk2-build.py --config edk2-build.config \ - --version-override "edk2-stable202302-for-qemu" \ - --release-date "03/01/2023" + --version-override "$(EDK2_STABLE)$(FIRMWARE_EXTRAVERSION)" \ + --release-date "$(EDK2_DATE)" \ + --silent --no-logs rm -f ../pc-bios/edk2-*.fd.bz2 bzip2 --verbose ../pc-bios/edk2-*.fd @@ -177,10 +189,16 @@ npcm7xx_bootrom: $(MAKE) -C vbootrom CROSS_COMPILE=$(arm_cross_prefix) cp vbootrom/npcm7xx_bootrom.bin ../pc-bios/npcm7xx_bootrom.bin +hppa-firmware: + $(MAKE) -C seabios-hppa parisc + cp seabios-hppa/out/hppa-firmware.img ../pc-bios/ + cp seabios-hppa/out-64/hppa-firmware64.img ../pc-bios/ + clean: rm -rf seabios/.config seabios/out seabios/builds $(MAKE) -C ipxe/src veryclean $(MAKE) -C edk2/BaseTools clean + rm -rf edk2/Conf/{.cache,BuildEnv.sh,build_rule.txt,target.txt,tools_def.txt} $(MAKE) -C SLOF clean rm -rf u-boot/build-e500 $(MAKE) -C 
u-boot-sam460ex distclean @@ -189,3 +207,4 @@ clean: $(MAKE) -C opensbi clean $(MAKE) -C qboot clean $(MAKE) -C vbootrom clean + $(MAKE) -C seabios-hppa clean diff --git a/roms/edk2-build.config b/roms/edk2-build.config index 0d367dbdb77..cc9b2115420 100644 --- a/roms/edk2-build.config +++ b/roms/edk2-build.config @@ -18,6 +18,7 @@ CAVIUM_ERRATUM_27456 = TRUE [opts.ovmf.sb.smm] SECURE_BOOT_ENABLE = TRUE SMM_REQUIRE = TRUE +BUILD_SHELL = FALSE [opts.armvirt.silent] DEBUG_PRINT_ERROR_LEVEL = 0x80000000 @@ -32,9 +33,6 @@ PcdDxeNxMemoryProtectionPolicy = 0xC000000000007FD1 # shim.efi has broken MemAttr code PcdUninstallMemAttrProtocol = TRUE -[pcds.workaround.202308] -PcdFirstTimeWakeUpAPsBySipi = FALSE - #################################################################################### # i386 @@ -66,19 +64,17 @@ desc = ovmf build (64-bit) conf = OvmfPkg/OvmfPkgX64.dsc arch = X64 opts = common -pcds = workaround.202308 plat = OvmfX64 dest = ../pc-bios cpy1 = FV/OVMF_CODE.fd edk2-x86_64-code.fd [build.ovmf.x86_64.secure] desc = ovmf build (64-bit, secure boot) -conf = OvmfPkg/OvmfPkgIa32X64.dsc -arch = IA32 X64 +conf = OvmfPkg/OvmfPkgX64.dsc +arch = X64 opts = common ovmf.sb.smm -pcds = workaround.202308 -plat = Ovmf3264 +plat = OvmfX64 dest = ../pc-bios cpy1 = FV/OVMF_CODE.fd edk2-x86_64-secure-code.fd @@ -87,7 +83,6 @@ desc = ovmf build for microvm conf = OvmfPkg/Microvm/MicrovmX64.dsc arch = X64 opts = common -pcds = workaround.202308 plat = MicrovmX64 dest = ../pc-bios cpy1 = FV/MICROVM.fd edk2-x86_64-microvm.fd diff --git a/roms/edk2-version b/roms/edk2-version new file mode 100644 index 00000000000..1594ed8c4de --- /dev/null +++ b/roms/edk2-version @@ -0,0 +1,2 @@ +EDK2_STABLE = edk2-stable202402 +EDK2_DATE = 02/14/2024 diff --git a/roms/opensbi b/roms/opensbi index 057eb10b6d5..a2b255b8891 160000 --- a/roms/opensbi +++ b/roms/opensbi @@ -1 +1 @@ -Subproject commit 057eb10b6d523540012e6947d5c9f63e95244e94 +Subproject commit a2b255b88918715173942f2c5e1f97ac9e90c877 diff --git a/roms/seabios-hppa b/roms/seabios-hppa index e4eac85880e..03774edaad3 160000 --- a/roms/seabios-hppa +++ b/roms/seabios-hppa @@ -1 +1 @@ -Subproject commit e4eac85880e8677f96d8b9e94de9f2eec9c0751f +Subproject commit 03774edaad3bfae090ac96ca5450353c641637d1 diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py index de506cb8bf1..8a254a5b6a2 100755 --- a/scripts/analyze-migration.py +++ b/scripts/analyze-migration.py @@ -151,17 +151,12 @@ def read(self): addr &= ~(self.TARGET_PAGE_SIZE - 1) if flags & self.RAM_SAVE_FLAG_MEM_SIZE: - while True: + total_length = addr + while total_length > 0: namelen = self.file.read8() - # We assume that no RAM chunk is big enough to ever - # hit the first byte of the address, so when we see - # a zero here we know it has to be an address, not the - # length of the next block. 
- if namelen == 0: - self.file.file.seek(-1, 1) - break self.name = self.file.readstr(len = namelen) len = self.file.read64() + total_length -= len self.sizeinfo[self.name] = '0x%016x' % len if self.write_memory: print(self.name) @@ -263,6 +258,34 @@ def getDict(self): return "" +class S390StorageAttributes(object): + STATTR_FLAG_EOS = 0x01 + STATTR_FLAG_MORE = 0x02 + STATTR_FLAG_ERROR = 0x04 + STATTR_FLAG_DONE = 0x08 + + def __init__(self, file, version_id, device, section_key): + if version_id != 0: + raise Exception("Unknown storage_attributes version %d" % version_id) + + self.file = file + self.section_key = section_key + + def read(self): + while True: + addr_flags = self.file.read64() + flags = addr_flags & 0xfff + if (flags & (self.STATTR_FLAG_DONE | self.STATTR_FLAG_EOS)): + return + if (flags & self.STATTR_FLAG_ERROR): + raise Exception("Error in migration stream") + count = self.file.read64() + self.file.readvar(count) + + def getDict(self): + return "" + + class ConfigurationSection(object): def __init__(self, file, desc): self.file = file @@ -544,8 +567,11 @@ class MigrationDump(object): QEMU_VM_SECTION_FOOTER= 0x7e def __init__(self, filename): - self.section_classes = { ( 'ram', 0 ) : [ RamSection, None ], - ( 'spapr/htab', 0) : ( HTABSection, None ) } + self.section_classes = { + ( 'ram', 0 ) : [ RamSection, None ], + ( 's390-storage_attributes', 0 ) : [ S390StorageAttributes, None], + ( 'spapr/htab', 0) : ( HTABSection, None ) + } self.filename = filename self.vmsd_desc = None diff --git a/scripts/block-coroutine-wrapper.py b/scripts/block-coroutine-wrapper.py index a38e5833fb3..dbbde99e39e 100644 --- a/scripts/block-coroutine-wrapper.py +++ b/scripts/block-coroutine-wrapper.py @@ -92,8 +92,6 @@ def __init__(self, wrapper_type: str, return_type: str, name: str, f"{self.name}") self.target_name = f'{subsystem}_{subname}' - self.ctx = self.gen_ctx() - self.get_result = 's->ret = ' self.ret = 'return s.ret;' self.co_ret = 'return ' @@ -167,7 +165,7 @@ def create_mixed_wrapper(func: FuncDecl) -> str: {func.co_ret}{name}({ func.gen_list('{name}') }); }} else {{ {struct_name} s = {{ - .poll_state.ctx = {func.ctx}, + .poll_state.ctx = qemu_get_current_aio_context(), .poll_state.in_progress = true, { func.gen_block(' .{name} = {name},') } @@ -191,7 +189,7 @@ def create_co_wrapper(func: FuncDecl) -> str: {func.return_type} {func.name}({ func.gen_list('{decl}') }) {{ {struct_name} s = {{ - .poll_state.ctx = {func.ctx}, + .poll_state.ctx = qemu_get_current_aio_context(), .poll_state.in_progress = true, { func.gen_block(' .{name} = {name},') } @@ -261,8 +259,8 @@ def gen_no_co_wrapper(func: FuncDecl) -> str: graph_lock=' bdrv_graph_rdlock_main_loop();' graph_unlock=' bdrv_graph_rdunlock_main_loop();' elif func.graph_wrlock: - graph_lock=' bdrv_graph_wrlock(NULL);' - graph_unlock=' bdrv_graph_wrunlock(NULL);' + graph_lock=' bdrv_graph_wrlock();' + graph_unlock=' bdrv_graph_wrunlock();' return f"""\ /* @@ -278,12 +276,9 @@ def gen_no_co_wrapper(func: FuncDecl) -> str: static void {name}_bh(void *opaque) {{ {struct_name} *s = opaque; - AioContext *ctx = {func.gen_ctx('s->')}; {graph_lock} - aio_context_acquire(ctx); {func.get_result}{name}({ func.gen_list('s->{name}') }); - aio_context_release(ctx); {graph_unlock} aio_co_wake(s->co); diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 6e4100d2a41..70268950741 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -35,6 +35,9 @@ my $root; my %debug; my $help = 0; +my $codespell = 0; +my $codespellfile = 
"/usr/share/codespell/dictionary.txt"; +my $user_codespellfile = ""; sub help { my ($exitcode) = @_; @@ -66,6 +69,9 @@ sub help { is all off) --test-only=WORD report only warnings/errors containing WORD literally + --codespell Use the codespell dictionary for spelling/typos + (default: $codespellfile) + --codespellfile Use this codespell dictionary --color[=WHEN] Use colors 'always', 'never', or only when output is a terminal ('auto'). Default is 'auto'. -h, --help, --version display this help and exit @@ -85,28 +91,50 @@ sub help { } GetOptions( - 'q|quiet+' => \$quiet, - 'tree!' => \$tree, - 'signoff!' => \$chk_signoff, - 'patch!' => \$chk_patch, - 'branch!' => \$chk_branch, - 'emacs!' => \$emacs, - 'terse!' => \$terse, - 'f|file!' => \$file, - 'strict!' => \$no_warnings, - 'root=s' => \$root, - 'summary!' => \$summary, - 'mailback!' => \$mailback, - 'summary-file!' => \$summary_file, - - 'debug=s' => \%debug, - 'test-only=s' => \$tst_only, - 'color=s' => \$color, - 'no-color' => sub { $color = 'never'; }, - 'h|help' => \$help, - 'version' => \$help + 'q|quiet+' => \$quiet, + 'tree!' => \$tree, + 'signoff!' => \$chk_signoff, + 'patch!' => \$chk_patch, + 'branch!' => \$chk_branch, + 'emacs!' => \$emacs, + 'terse!' => \$terse, + 'f|file!' => \$file, + 'strict!' => \$no_warnings, + 'root=s' => \$root, + 'summary!' => \$summary, + 'mailback!' => \$mailback, + 'summary-file!' => \$summary_file, + 'debug=s' => \%debug, + 'test-only=s' => \$tst_only, + 'codespell!' => \$codespell, + 'codespellfile=s' => \$user_codespellfile, + 'color=s' => \$color, + 'no-color' => sub { $color = 'never'; }, + 'h|help' => \$help, + 'version' => \$help ) or help(1); +if ($user_codespellfile) { + # Use the user provided codespell file unconditionally + $codespellfile = $user_codespellfile; +} elsif (!(-f $codespellfile)) { + # If /usr/share/codespell/dictionary.txt is not present, try to find it + # under codespell's install directory: /data/dictionary.txt + if (($codespell || $help) && which("python3") ne "") { + my $python_codespell_dict = << "EOF"; + +import os.path as op +import codespell_lib +codespell_dir = op.dirname(codespell_lib.__file__) +codespell_file = op.join(codespell_dir, 'data', 'dictionary.txt') +print(codespell_file, end='') +EOF + + my $codespell_dict = `python3 -c "$python_codespell_dict" 2> /dev/null`; + $codespellfile = $codespell_dict if (-f $codespell_dict); + } +} + help(0) if ($help); my $exit = 0; @@ -337,6 +365,36 @@ sub help { qr{guintptr}, ); +# Load common spelling mistakes and build regular expression list. +my $misspellings; +my %spelling_fix; + +if ($codespell) { + if (open(my $spelling, '<', $codespellfile)) { + while (<$spelling>) { + my $line = $_; + + $line =~ s/\s*\n?$//g; + $line =~ s/^\s*//g; + + next if ($line =~ m/^\s*#/); + next if ($line =~ m/^\s*$/); + next if ($line =~ m/, disabled/i); + + $line =~ s/,.*$//; + + my ($suspect, $fix) = split(/->/, $line); + + $spelling_fix{$suspect} = $fix; + } + close($spelling); + } else { + warn "No codespell typos will be found - file '$codespellfile': $!\n"; + } +} + +$misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix; + # This can be modified by sub possible. Since it can be empty, be careful # about regexes that always match, because they can cause infinite loops. 
our @modifierList = ( @@ -477,6 +535,18 @@ sub top_of_kernel_tree { return 1; } +sub which { + my ($bin) = @_; + + foreach my $path (split(/:/, $ENV{PATH})) { + if (-e "$path/$bin") { + return "$path/$bin"; + } + } + + return ""; +} + sub expand_tabs { my ($str) = @_; @@ -1585,6 +1655,21 @@ sub process { WARN("8-bit UTF-8 used in possible commit log\n" . $herecurr); } +# Check for various typo / spelling mistakes + if (defined($misspellings) && + ($in_commit_log || $line =~ /^(?:\+|Subject:)/i)) { + while ($rawline =~ /(?:^|[^\w\-'`])($misspellings)(?:[^\w\-'`]|$)/gi) { + my $typo = $1; + my $blank = copy_spacing($rawline); + my $ptr = substr($blank, 0, $-[1]) . "^" x length($typo); + my $hereptr = "$hereline$ptr\n"; + my $typo_fix = $spelling_fix{lc($typo)}; + $typo_fix = ucfirst($typo_fix) if ($typo =~ /^[A-Z]/); + $typo_fix = uc($typo_fix) if ($typo =~ /^[A-Z]+$/); + WARN("'$typo' may be misspelled - perhaps '$typo_fix'?\n" . $hereptr); + } + } + # ignore non-hunk lines and lines being removed next if (!$hunk_line || $line =~ /^-/); diff --git a/scripts/ci/gitlab-pipeline-status b/scripts/ci/gitlab-pipeline-status index e3343b05108..39f3c22c665 100755 --- a/scripts/ci/gitlab-pipeline-status +++ b/scripts/ci/gitlab-pipeline-status @@ -131,7 +131,7 @@ def create_parser(): 'checks of the pipeline status. Defaults ' 'to %(default)s')) parser.add_argument('-w', '--wait', action='store_true', default=False, - help=('Wether to wait, instead of checking only once ' + help=('Whether to wait, instead of checking only once ' 'the status of a pipeline')) parser.add_argument('-p', '--project-id', type=int, default=11167699, help=('The GitLab project ID. Defaults to the project ' diff --git a/scripts/clean-includes b/scripts/clean-includes index 58e1607a82e..bdbf4040240 100755 --- a/scripts/clean-includes +++ b/scripts/clean-includes @@ -51,7 +51,7 @@ GIT=no DUPHEAD=no # Extended regular expression defining files to ignore when using --all -XDIRREGEX='^(tests/tcg|tests/multiboot|pc-bios)' +XDIRREGEX='^(tests/tcg|tests/multiboot|tests/fp|tests/plugin|tests/uefi-test-tools|pc-bios|subprojects|contrib/plugins|tools/ebpf|ebpf/rss.bpf.skeleton.h|linux-user/(mips64|x86_64)/(cpu_loop|signal).c)' while true do diff --git a/scripts/coverity-scan/run-coverity-scan b/scripts/coverity-scan/run-coverity-scan index d56c9b66776..43cf770f5e3 100755 --- a/scripts/coverity-scan/run-coverity-scan +++ b/scripts/coverity-scan/run-coverity-scan @@ -28,6 +28,7 @@ # project settings, if you have maintainer access there. # Command line options: +# --check-upload-only : return success if upload is possible # --dry-run : run the tools, but don't actually do the upload # --docker : create and work inside a container # --docker-engine : specify the container engine to use (docker/podman/auto); @@ -57,18 +58,18 @@ # putting it in a file and using --tokenfile. Everything else has # a reasonable default if this is run from a git tree. -check_upload_permissions() { - # Check whether we can do an upload to the server; will exit the script - # with status 1 if the check failed (usually a bad token); - # will exit the script with status 0 if the check indicated that we - # can't upload yet (ie we are at quota) - # Assumes that COVERITY_TOKEN, PROJNAME and DRYRUN have been initialized. 
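The shell helper added below answers the quota question by pattern-matching the raw JSON reply with ``case``; for reference, the same check done with a real JSON parser, using the endpoint and POST fields the script already relies on (the token, plus the ``PROJNAME`` default of QEMU), might look like:

    import json
    import urllib.parse
    import urllib.request

    def upload_permitted(token: str, project: str = "QEMU") -> bool:
        """Return True if Coverity Scan will accept another upload right now."""
        data = urllib.parse.urlencode({"token": token, "project": project}).encode()
        url = "https://scan.coverity.com/api/upload_permitted"
        with urllib.request.urlopen(url, data=data) as resp:
            reply = json.load(resp)
        # The reply carries either "upload_permitted": true, or a
        # "next_upload_permitted_at" timestamp once the quota is used up.
        if reply.get("upload_permitted"):
            return True
        if "next_upload_permitted_at" in reply:
            return False
        raise RuntimeError("unexpected upload_permitted reply: %r" % reply)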
+upload_permitted() { + # Check whether we can do an upload to the server; will exit *the script* + # with status 99 if the check failed (usually a bad token); + # will return from the function with status 1 if the check indicated + # that we can't upload yet (ie we are at quota) + # Assumes that COVERITY_TOKEN and PROJNAME have been initialized. echo "Checking upload permissions..." if ! up_perm="$(wget https://scan.coverity.com/api/upload_permitted --post-data "token=$COVERITY_TOKEN&project=$PROJNAME" -q -O -)"; then echo "Coverity Scan API access denied: bad token?" - exit 1 + exit 99 fi # Really up_perm is a JSON response with either @@ -76,25 +77,40 @@ check_upload_permissions() { # We do some hacky string parsing instead of properly parsing it. case "$up_perm" in *upload_permitted*true*) - echo "Coverity Scan: upload permitted" + return 0 ;; *next_upload_permitted_at*) - if [ "$DRYRUN" = yes ]; then - echo "Coverity Scan: upload quota reached, continuing dry run" - else - echo "Coverity Scan: upload quota reached; stopping here" - # Exit success as this isn't a build error. - exit 0 - fi + return 1 ;; *) echo "Coverity Scan upload check: unexpected result $up_perm" - exit 1 + exit 99 ;; esac } +check_upload_permissions() { + # Check whether we can do an upload to the server; will exit the script + # with status 99 if the check failed (usually a bad token); + # will exit the script with status 0 if the check indicated that we + # can't upload yet (ie we are at quota) + # Assumes that COVERITY_TOKEN, PROJNAME and DRYRUN have been initialized. + + if upload_permitted; then + echo "Coverity Scan: upload permitted" + else + if [ "$DRYRUN" = yes ]; then + echo "Coverity Scan: upload quota reached, continuing dry run" + else + echo "Coverity Scan: upload quota reached; stopping here" + # Exit success as this isn't a build error. + exit 0 + fi + fi +} + + build_docker_image() { # build docker container including the coverity-scan tools echo "Building docker container..." @@ -152,9 +168,14 @@ update_coverity_tools () { DRYRUN=no UPDATE=yes DOCKER=no +PROJNAME=QEMU while [ "$#" -ge 1 ]; do case "$1" in + --check-upload-only) + shift + DRYRUN=check + ;; --dry-run) shift DRYRUN=yes @@ -251,6 +272,11 @@ if [ -z "$COVERITY_TOKEN" ]; then exit 1 fi +if [ "$DRYRUN" = check ]; then + upload_permitted + exit $? 
+fi + if [ -z "$COVERITY_BUILD_CMD" ]; then NPROC=$(nproc) COVERITY_BUILD_CMD="make -j$NPROC" @@ -266,7 +292,6 @@ if [ -z "$SRCDIR" ]; then SRCDIR="$PWD" fi -PROJNAME=QEMU TARBALL=cov-int.tar.xz if [ "$UPDATE" = only ]; then diff --git a/scripts/cpu-x86-uarch-abi.py b/scripts/cpu-x86-uarch-abi.py index 052ddd75142..7360e55c6e0 100644 --- a/scripts/cpu-x86-uarch-abi.py +++ b/scripts/cpu-x86-uarch-abi.py @@ -179,7 +179,6 @@ models[name]["delta"][level] = delta def print_uarch_abi_csv(): - print("# Automatically generated from '%s'" % __file__) print("Model,baseline,v2,v3,v4") for name in models.keys(): print(name, end="") diff --git a/scripts/feature_to_c.py b/scripts/feature_to_c.py index e04d6b2df7f..807af0e685c 100644 --- a/scripts/feature_to_c.py +++ b/scripts/feature_to_c.py @@ -50,7 +50,9 @@ def writeliteral(indent, bytes): sys.stderr.write(f'unexpected start tag: {element.tag}\n') exit(1) + feature_name = element.attrib['name'] regnum = 0 + regnames = [] regnums = [] tags = ['feature'] for event, element in events: @@ -67,6 +69,7 @@ def writeliteral(indent, bytes): if 'regnum' in element.attrib: regnum = int(element.attrib['regnum']) + regnames.append(element.attrib['name']) regnums.append(regnum) regnum += 1 @@ -85,6 +88,15 @@ def writeliteral(indent, bytes): writeliteral(8, bytes(os.path.basename(input), 'utf-8')) sys.stdout.write(',\n') writeliteral(8, read) - sys.stdout.write(f',\n {num_regs},\n }},\n') + sys.stdout.write(',\n') + writeliteral(8, bytes(feature_name, 'utf-8')) + sys.stdout.write(',\n (const char * const []) {\n') + + for index, regname in enumerate(regnames): + sys.stdout.write(f' [{regnums[index] - base_reg}] =\n') + writeliteral(16, bytes(regname, 'utf-8')) + sys.stdout.write(',\n') + + sys.stdout.write(f' }},\n {num_regs},\n }},\n') sys.stdout.write(' { NULL }\n};\n') diff --git a/scripts/make-release b/scripts/make-release index 9c570b87f4a..6e0433de24d 100755 --- a/scripts/make-release +++ b/scripts/make-release @@ -47,5 +47,5 @@ meson subprojects download $SUBPROJECTS CryptoPkg/Library/OpensslLib/openssl \ MdeModulePkg/Library/BrotliCustomDecompressLib/brotli) popd -tar --exclude=.git -cjf ${destination}.tar.bz2 ${destination} +tar --exclude=.git -cJf ${destination}.tar.xz ${destination} rm -rf ${destination} diff --git a/scripts/mtest2make.py b/scripts/mtest2make.py index 179dd548718..eb01a05ddbd 100644 --- a/scripts/mtest2make.py +++ b/scripts/mtest2make.py @@ -27,7 +27,8 @@ def names(self, base): .speed.slow = $(foreach s,$(sort $(filter-out %-thorough, $1)), --suite $s) .speed.thorough = $(foreach s,$(sort $1), --suite $s) -.mtestargs = --no-rebuild -t 0 +TIMEOUT_MULTIPLIER = 1 +.mtestargs = --no-rebuild -t $(TIMEOUT_MULTIPLIER) ifneq ($(SPEED), quick) .mtestargs += --setup $(SPEED) endif diff --git a/scripts/qapi/parser.py b/scripts/qapi/parser.py index bf31018aef0..d8f76060b8c 100644 --- a/scripts/qapi/parser.py +++ b/scripts/qapi/parser.py @@ -71,7 +71,7 @@ class QAPISchemaParser: Parse QAPI schema source. Parse a JSON-esque schema file and process directives. See - qapi-code-gen.txt section "Schema Syntax" for the exact syntax. + qapi-code-gen.rst section "Schema Syntax" for the exact syntax. Grammatical validation is handled later by `expr.check_exprs()`. :param fname: Source file name. 
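The qapi/parser.py hunks that follow rework ``get_doc()`` around a couple of regular expressions that classify each documentation-comment line. A toy classification pass using those same patterns (the sample lines describe an invented command and exist only for illustration):

    import re

    # The patterns used by the rewritten parser below
    at_name_colon = re.compile(r'@([^:]*): *')
    tagged_section = re.compile(r'(Returns|Errors|Since|Notes?|Examples?|TODO): *')

    sample = [
        "@guest-frobnicate:",                       # symbol line (hypothetical command)
        "",
        "Frobnicate something inside the guest.",   # untagged body paragraph
        "@mode: how hard to frobnicate",            # member description
        "Returns: number of frobnicated items",     # tagged section
        "Since: 9.1",
    ]

    for line in sample:
        if m := at_name_colon.match(line):
            print("name:", m.group(1))
        elif m := tagged_section.match(line):
            print("section:", m.group(1))
        elif line:
            print("body:", line)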
@@ -134,8 +134,8 @@ def _parse(self) -> None: info = self.info if self.tok == '#': self.reject_expr_doc(cur_doc) - for cur_doc in self.get_doc(info): - self.docs.append(cur_doc) + cur_doc = self.get_doc() + self.docs.append(cur_doc) continue expr = self.get_expr() @@ -238,6 +238,8 @@ def check_list_str(name: str, value: object) -> List[str]: pragma.command_name_exceptions = check_list_str(name, value) elif name == 'command-returns-exceptions': pragma.command_returns_exceptions = check_list_str(name, value) + elif name == 'documentation-exceptions': + pragma.documentation_exceptions = check_list_str(name, value) elif name == 'member-name-exceptions': pragma.member_name_exceptions = check_list_str(name, value) else: @@ -412,39 +414,177 @@ def get_expr(self) -> _ExprValue: self, "expected '{', '[', string, or boolean") return expr - def get_doc(self, info: QAPISourceInfo) -> List['QAPIDoc']: + def get_doc_line(self) -> Optional[str]: + if self.tok != '#': + raise QAPIParseError( + self, "documentation comment must end with '##'") + assert isinstance(self.val, str) + if self.val.startswith('##'): + # End of doc comment + if self.val != '##': + raise QAPIParseError( + self, "junk after '##' at end of documentation comment") + return None + if self.val == '#': + return '' + if self.val[1] != ' ': + raise QAPIParseError(self, "missing space after #") + return self.val[2:].rstrip() + + @staticmethod + def _match_at_name_colon(string: str) -> Optional[Match[str]]: + return re.match(r'@([^:]*): *', string) + + def get_doc_indented(self, doc: 'QAPIDoc') -> Optional[str]: + self.accept(False) + line = self.get_doc_line() + while line == '': + doc.append_line(line) + self.accept(False) + line = self.get_doc_line() + if line is None: + return line + indent = must_match(r'\s*', line).end() + if not indent: + return line + doc.append_line(line[indent:]) + prev_line_blank = False + while True: + self.accept(False) + line = self.get_doc_line() + if line is None: + return line + if self._match_at_name_colon(line): + return line + cur_indent = must_match(r'\s*', line).end() + if line != '' and cur_indent < indent: + if prev_line_blank: + return line + raise QAPIParseError( + self, + "unexpected de-indent (expected at least %d spaces)" % + indent) + doc.append_line(line[indent:]) + prev_line_blank = True + + def get_doc_paragraph(self, doc: 'QAPIDoc') -> Optional[str]: + while True: + self.accept(False) + line = self.get_doc_line() + if line is None: + return line + if line == '': + return line + doc.append_line(line) + + def get_doc(self) -> 'QAPIDoc': if self.val != '##': raise QAPIParseError( self, "junk after '##' at start of documentation comment") - - docs = [] - cur_doc = QAPIDoc(self, info) + info = self.info self.accept(False) - while self.tok == '#': - assert isinstance(self.val, str) - if self.val.startswith('##'): - # End of doc comment - if self.val != '##': + line = self.get_doc_line() + if line is not None and line.startswith('@'): + # Definition documentation + if not line.endswith(':'): + raise QAPIParseError(self, "line should end with ':'") + # Invalid names are not checked here, but the name + # provided *must* match the following definition, + # which *is* validated in expr.py. 
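As an aside on the rewritten doc-comment parser: get_doc_line() strips the leading "# " from each line of a "##" block, and the lines are then classified by two patterns — "@name:" introduces an argument or feature description via _match_at_name_colon(), while "Returns/Errors/Since/Notes/Examples/TODO:" introduces a tagged section (that second pattern appears a little further down in get_doc()). A short illustration, not part of the patch, with made-up sample lines:

    # Illustration only: how the new doc-comment patterns classify lines.
    import re

    at_name_colon = re.compile(r'@([^:]*): *')
    tagged = re.compile(r'(Returns|Errors|Since|Notes?|Examples?|TODO): *')

    for line in ('@node: name of the block node',
                 'Returns: nothing on success',
                 'plain paragraph text'):
        if m := at_name_colon.match(line):
            print('argument/feature %-7s -> %r' % (m.group(1), line[m.end():]))
        elif m := tagged.match(line):
            print('tagged section  %-7s -> %r' % (m.group(1), line[m.end():]))
        else:
            print('untagged body line      -> %r' % line)

Anything matching neither pattern is appended to the current untagged section, and get_doc_indented() enforces that continuation lines keep at least the indentation of the first description line.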
+ symbol = line[1:-1] + if not symbol: + raise QAPIParseError(self, "name required after '@'") + doc = QAPIDoc(info, symbol) + self.accept(False) + line = self.get_doc_line() + no_more_args = False + + while line is not None: + # Blank lines + while line == '': + self.accept(False) + line = self.get_doc_line() + if line is None: + break + # Non-blank line, first of a section + if line == 'Features:': + if doc.features: + raise QAPIParseError( + self, "duplicated 'Features:' line") + self.accept(False) + line = self.get_doc_line() + while line == '': + self.accept(False) + line = self.get_doc_line() + while (line is not None + and (match := self._match_at_name_colon(line))): + doc.new_feature(self.info, match.group(1)) + text = line[match.end():] + if text: + doc.append_line(text) + line = self.get_doc_indented(doc) + if not doc.features: + raise QAPIParseError( + self, 'feature descriptions expected') + no_more_args = True + elif match := self._match_at_name_colon(line): + # description + if no_more_args: + raise QAPIParseError( + self, + "description of '@%s:' follows a section" + % match.group(1)) + while (line is not None + and (match := self._match_at_name_colon(line))): + doc.new_argument(self.info, match.group(1)) + text = line[match.end():] + if text: + doc.append_line(text) + line = self.get_doc_indented(doc) + no_more_args = True + elif match := re.match( + r'(Returns|Errors|Since|Notes?|Examples?|TODO): *', + line): + # tagged section + doc.new_tagged_section(self.info, match.group(1)) + text = line[match.end():] + if text: + doc.append_line(text) + line = self.get_doc_indented(doc) + no_more_args = True + elif line.startswith('='): raise QAPIParseError( self, - "junk after '##' at end of documentation comment") - cur_doc.end_comment() - docs.append(cur_doc) - self.accept() - return docs - if self.val.startswith('# ='): - if cur_doc.symbol: + "unexpected '=' markup in definition documentation") + else: + # tag-less paragraph + doc.ensure_untagged_section(self.info) + doc.append_line(line) + line = self.get_doc_paragraph(doc) + else: + # Free-form documentation + doc = QAPIDoc(info) + doc.ensure_untagged_section(self.info) + first = True + while line is not None: + if match := self._match_at_name_colon(line): raise QAPIParseError( self, - "unexpected '=' markup in definition documentation") - if cur_doc.body.text: - cur_doc.end_comment() - docs.append(cur_doc) - cur_doc = QAPIDoc(self, info) - cur_doc.append(self.val) - self.accept(False) + "'@%s:' not allowed in free-form documentation" + % match.group(1)) + if line.startswith('='): + if not first: + raise QAPIParseError( + self, + "'=' heading must come first in a comment block") + doc.append_line(line) + self.accept(False) + line = self.get_doc_line() + first = False - raise QAPIParseError(self, "documentation comment must end with '##'") + self.accept(False) + doc.end() + return doc class QAPIDoc: @@ -467,281 +607,110 @@ class QAPIDoc: """ class Section: - # pylint: disable=too-few-public-methods - def __init__(self, parser: QAPISchemaParser, - name: Optional[str] = None): - # parser, for error messages about indentation - self._parser = parser - # optional section name (argument/member or section name) - self.name = name - # section text without section name + def __init__(self, info: QAPISourceInfo, + tag: Optional[str] = None): + # section source info, i.e. where it begins + self.info = info + # section tag, if any ('Returns', '@name', ...) 
+ self.tag = tag + # section text without tag self.text = '' - # indentation to strip (None means indeterminate) - self._indent = None if self.name else 0 - - def append(self, line: str) -> None: - line = line.rstrip() - - if line: - indent = must_match(r'\s*', line).end() - if self._indent is None: - # indeterminate indentation - if self.text != '': - # non-blank, non-first line determines indentation - self._indent = indent - elif indent < self._indent: - raise QAPIParseError( - self._parser, - "unexpected de-indent (expected at least %d spaces)" % - self._indent) - line = line[self._indent:] + def append_line(self, line: str) -> None: self.text += line + '\n' class ArgSection(Section): - def __init__(self, parser: QAPISchemaParser, - name: str): - super().__init__(parser, name) + def __init__(self, info: QAPISourceInfo, tag: str): + super().__init__(info, tag) self.member: Optional['QAPISchemaMember'] = None def connect(self, member: 'QAPISchemaMember') -> None: self.member = member - class NullSection(Section): - """ - Immutable dummy section for use at the end of a doc block. - """ - # pylint: disable=too-few-public-methods - def append(self, line: str) -> None: - assert False, "Text appended after end_comment() called." - - def __init__(self, parser: QAPISchemaParser, info: QAPISourceInfo): - # self._parser is used to report errors with QAPIParseError. The - # resulting error position depends on the state of the parser. - # It happens to be the beginning of the comment. More or less - # servicable, but action at a distance. - self._parser = parser + def __init__(self, info: QAPISourceInfo, symbol: Optional[str] = None): + # info points to the doc comment block's first line self.info = info - self.symbol: Optional[str] = None - self.body = QAPIDoc.Section(parser) - # dicts mapping parameter/feature names to their ArgSection - self.args: Dict[str, QAPIDoc.ArgSection] = OrderedDict() - self.features: Dict[str, QAPIDoc.ArgSection] = OrderedDict() + # definition doc's symbol, None for free-form doc + self.symbol: Optional[str] = symbol + # the sections in textual order + self.all_sections: List[QAPIDoc.Section] = [QAPIDoc.Section(info)] + # the body section + self.body: Optional[QAPIDoc.Section] = self.all_sections[0] + # dicts mapping parameter/feature names to their description + self.args: Dict[str, QAPIDoc.ArgSection] = {} + self.features: Dict[str, QAPIDoc.ArgSection] = {} + # a command's "Returns" and "Errors" section + self.returns: Optional[QAPIDoc.Section] = None + self.errors: Optional[QAPIDoc.Section] = None + # "Since" section + self.since: Optional[QAPIDoc.Section] = None + # sections other than .body, .args, .features self.sections: List[QAPIDoc.Section] = [] - # the current section - self._section = self.body - self._append_line = self._append_body_line - - def has_section(self, name: str) -> bool: - """Return True if we have a section with this name.""" - for i in self.sections: - if i.name == name: - return True - return False - - def append(self, line: str) -> None: - """ - Parse a comment line and add it to the documentation. 
- - The way that the line is dealt with depends on which part of - the documentation we're parsing right now: - * The body section: ._append_line is ._append_body_line - * An argument section: ._append_line is ._append_args_line - * A features section: ._append_line is ._append_features_line - * An additional section: ._append_line is ._append_various_line - """ - line = line[1:] - if not line: - self._append_freeform(line) - return - if line[0] != ' ': - raise QAPIParseError(self._parser, "missing space after #") - line = line[1:] - self._append_line(line) - - def end_comment(self) -> None: - self._switch_section(QAPIDoc.NullSection(self._parser)) - - @staticmethod - def _match_at_name_colon(string: str) -> Optional[Match[str]]: - return re.match(r'@([^:]*): *', string) - - @staticmethod - def _match_section_tag(string: str) -> Optional[Match[str]]: - return re.match(r'(Returns|Since|Notes?|Examples?|TODO): *', string) - - def _append_body_line(self, line: str) -> None: - """ - Process a line of documentation text in the body section. - - If this a symbol line and it is the section's first line, this - is a definition documentation block for that symbol. - - If it's a definition documentation block, another symbol line - begins the argument section for the argument named by it, and - a section tag begins an additional section. Start that - section and append the line to it. - - Else, append the line to the current section. - """ - # FIXME not nice: things like '# @foo:' and '# @foo: ' aren't - # recognized, and get silently treated as ordinary text - if not self.symbol and not self.body.text and line.startswith('@'): - if not line.endswith(':'): - raise QAPIParseError(self._parser, "line should end with ':'") - self.symbol = line[1:-1] - # Invalid names are not checked here, but the name provided MUST - # match the following definition, which *is* validated in expr.py. - if not self.symbol: - raise QAPIParseError( - self._parser, "name required after '@'") - elif self.symbol: - # This is a definition documentation block - if self._match_at_name_colon(line): - self._append_line = self._append_args_line - self._append_args_line(line) - elif line == 'Features:': - self._append_line = self._append_features_line - elif self._match_section_tag(line): - self._append_line = self._append_various_line - self._append_various_line(line) - else: - self._append_freeform(line) - else: - # This is a free-form documentation block - self._append_freeform(line) - - def _append_args_line(self, line: str) -> None: - """ - Process a line of documentation text in an argument section. - - A symbol line begins the next argument section, a section tag - section or a non-indented line after a blank line begins an - additional section. Start that section and append the line to - it. - - Else, append the line to the current section. 
- - """ - match = self._match_at_name_colon(line) - if match: - line = line[match.end():] - self._start_args_section(match.group(1)) - elif self._match_section_tag(line): - self._append_line = self._append_various_line - self._append_various_line(line) - return - elif (self._section.text.endswith('\n\n') - and line and not line[0].isspace()): - if line == 'Features:': - self._append_line = self._append_features_line - else: - self._start_section() - self._append_line = self._append_various_line - self._append_various_line(line) - return - - self._append_freeform(line) + def end(self) -> None: + for section in self.all_sections: + section.text = section.text.strip('\n') + if section.tag is not None and section.text == '': + raise QAPISemError( + section.info, "text required after '%s:'" % section.tag) - def _append_features_line(self, line: str) -> None: - match = self._match_at_name_colon(line) - if match: - line = line[match.end():] - self._start_features_section(match.group(1)) - elif self._match_section_tag(line): - self._append_line = self._append_various_line - self._append_various_line(line) - return - elif (self._section.text.endswith('\n\n') - and line and not line[0].isspace()): - self._start_section() - self._append_line = self._append_various_line - self._append_various_line(line) + def ensure_untagged_section(self, info: QAPISourceInfo) -> None: + if self.all_sections and not self.all_sections[-1].tag: + # extend current section + self.all_sections[-1].text += '\n' return + # start new section + section = self.Section(info) + self.sections.append(section) + self.all_sections.append(section) + + def new_tagged_section(self, info: QAPISourceInfo, tag: str) -> None: + section = self.Section(info, tag) + if tag == 'Returns': + if self.returns: + raise QAPISemError( + info, "duplicated '%s' section" % tag) + self.returns = section + elif tag == 'Errors': + if self.errors: + raise QAPISemError( + info, "duplicated '%s' section" % tag) + self.errors = section + elif tag == 'Since': + if self.since: + raise QAPISemError( + info, "duplicated '%s' section" % tag) + self.since = section + self.sections.append(section) + self.all_sections.append(section) - self._append_freeform(line) - - def _append_various_line(self, line: str) -> None: - """ - Process a line of documentation text in an additional section. - - A symbol line is an error. - - A section tag begins an additional section. Start that - section and append the line to it. - - Else, append the line to the current section. 
- """ - match = self._match_at_name_colon(line) - if match: - raise QAPIParseError(self._parser, - "description of '@%s:' follows a section" - % match.group(1)) - match = self._match_section_tag(line) - if match: - line = line[match.end():] - self._start_section(match.group(1)) - - self._append_freeform(line) - - def _start_symbol_section( - self, - symbols_dict: Dict[str, 'QAPIDoc.ArgSection'], - name: str) -> None: - # FIXME invalid names other than the empty string aren't flagged + def _new_description(self, info: QAPISourceInfo, name: str, + desc: Dict[str, ArgSection]) -> None: if not name: - raise QAPIParseError(self._parser, "invalid parameter name") - if name in symbols_dict: - raise QAPIParseError(self._parser, - "'%s' parameter name duplicated" % name) - assert not self.sections - new_section = QAPIDoc.ArgSection(self._parser, name) - self._switch_section(new_section) - symbols_dict[name] = new_section - - def _start_args_section(self, name: str) -> None: - self._start_symbol_section(self.args, name) - - def _start_features_section(self, name: str) -> None: - self._start_symbol_section(self.features, name) - - def _start_section(self, name: Optional[str] = None) -> None: - if name in ('Returns', 'Since') and self.has_section(name): - raise QAPIParseError(self._parser, - "duplicated '%s' section" % name) - new_section = QAPIDoc.Section(self._parser, name) - self._switch_section(new_section) - self.sections.append(new_section) - - def _switch_section(self, new_section: 'QAPIDoc.Section') -> None: - text = self._section.text = self._section.text.strip('\n') - - # Only the 'body' section is allowed to have an empty body. - # All other sections, including anonymous ones, must have text. - if self._section != self.body and not text: - # We do not create anonymous sections unless there is - # something to put in them; this is a parser bug. 
- assert self._section.name - raise QAPIParseError( - self._parser, - "empty doc section '%s'" % self._section.name) + raise QAPISemError(info, "invalid parameter name") + if name in desc: + raise QAPISemError(info, "'%s' parameter name duplicated" % name) + section = self.ArgSection(info, '@' + name) + self.all_sections.append(section) + desc[name] = section - self._section = new_section + def new_argument(self, info: QAPISourceInfo, name: str) -> None: + self._new_description(info, name, self.args) - def _append_freeform(self, line: str) -> None: - match = re.match(r'(@\S+:)', line) - if match: - raise QAPIParseError(self._parser, - "'%s' not allowed in free-form documentation" - % match.group(1)) - self._section.append(line) + def new_feature(self, info: QAPISourceInfo, name: str) -> None: + self._new_description(info, name, self.features) + + def append_line(self, line: str) -> None: + self.all_sections[-1].append_line(line) def connect_member(self, member: 'QAPISchemaMember') -> None: if member.name not in self.args: - # Undocumented TODO outlaw - self.args[member.name] = QAPIDoc.ArgSection(self._parser, - member.name) + if self.symbol not in member.info.pragma.documentation_exceptions: + raise QAPISemError(member.info, + "%s '%s' lacks documentation" + % (member.role, member.name)) + self.args[member.name] = QAPIDoc.ArgSection( + self.info, '@' + member.name) self.args[member.name].connect(member) def connect_feature(self, feature: 'QAPISchemaFeature') -> None: @@ -752,9 +721,20 @@ def connect_feature(self, feature: 'QAPISchemaFeature') -> None: self.features[feature.name].connect(feature) def check_expr(self, expr: QAPIExpression) -> None: - if self.has_section('Returns') and 'command' not in expr: - raise QAPISemError(self.info, - "'Returns:' is only valid for commands") + if 'command' in expr: + if self.returns and 'returns' not in expr: + raise QAPISemError( + self.returns.info, + "'Returns' section, but command doesn't return anything") + else: + if self.returns: + raise QAPISemError( + self.returns.info, + "'Returns' section is only valid for commands") + if self.errors: + raise QAPISemError( + self.returns.info, + "'Errors' section is only valid for commands") def check(self) -> None: @@ -765,7 +745,7 @@ def check_args_section( if not section.member] if bogus: raise QAPISemError( - self.info, + args[bogus[0]].info, "documented %s%s '%s' %s not exist" % ( what, "s" if len(bogus) > 1 else "", diff --git a/scripts/qapi/schema.py b/scripts/qapi/schema.py index 6a836950a9a..8ba5665bc68 100644 --- a/scripts/qapi/schema.py +++ b/scripts/qapi/schema.py @@ -95,10 +95,6 @@ def connect_doc(self, doc=None): for f in self.features: doc.connect_feature(f) - def check_doc(self): - if self.doc: - self.doc.check() - def _set_module(self, schema, info): assert self._checked fname = info.fname if info else QAPISchemaModule.BUILTIN_MODULE_NAME @@ -1223,9 +1219,10 @@ def check(self): for ent in self._entity_list: ent.check(self) ent.connect_doc() - ent.check_doc() for ent in self._entity_list: ent.set_module(self) + for doc in self.docs: + doc.check() def visit(self, visitor): visitor.visit_begin(self) diff --git a/scripts/qapi/source.py b/scripts/qapi/source.py index 04193cc9643..7b379fdc925 100644 --- a/scripts/qapi/source.py +++ b/scripts/qapi/source.py @@ -24,6 +24,8 @@ def __init__(self) -> None: self.command_name_exceptions: List[str] = [] # Commands allowed to return a non-dictionary self.command_returns_exceptions: List[str] = [] + # Types, commands, and events with undocumented members + 
self.documentation_exceptions: List[str] = [] # Types whose member names may violate case conventions self.member_name_exceptions: List[str] = [] diff --git a/scripts/replay-dump.py b/scripts/replay-dump.py index b89dc29555a..d668193e793 100755 --- a/scripts/replay-dump.py +++ b/scripts/replay-dump.py @@ -21,6 +21,7 @@ import argparse import struct from collections import namedtuple +from os import path # This mirrors some of the global replay state which some of the # stream loading refers to. Some decoders may read the next event so @@ -82,6 +83,12 @@ def read_qword(fin): "Read a 64 bit word" return struct.unpack('>Q', fin.read(8))[0] +def read_array(fin): + "Read a sized array" + size = read_dword(fin) + data = fin.read(size) + return data + # Generic decoder structure Decoder = namedtuple("Decoder", "eid name fn") @@ -115,6 +122,11 @@ def decode_unimp(eid, name, _unused_dumpfile): print("%s not handled - will now stop" % (name)) return False +def decode_plain(eid, name, _unused_dumpfile): + "Plain events without additional data" + print_event(eid, name, "no data") + return True + # Checkpoint decoder def swallow_async_qword(eid, name, dumpfile): "Swallow a qword of data without looking at it" @@ -145,10 +157,19 @@ def decode_async(eid, name, dumpfile): return call_decode(async_decode_table, async_event_kind, dumpfile) +total_insns = 0 def decode_instruction(eid, name, dumpfile): + global total_insns ins_diff = read_dword(dumpfile) - print_event(eid, name, "0x%x" % (ins_diff)) + total_insns += ins_diff + print_event(eid, name, "+ %d -> %d" % (ins_diff, total_insns)) + return True + +def decode_char_write(eid, name, dumpfile): + res = read_dword(dumpfile) + offset = read_dword(dumpfile) + print_event(eid, name, "%d -> %d" % (offset, res)) return True def decode_audio_out(eid, name, dumpfile): @@ -189,14 +210,19 @@ def decode_clock(eid, name, dumpfile): print_event(eid, name, "0x%x" % (clock_data)) return True +def decode_random(eid, name, dumpfile): + ret = read_dword(dumpfile) + data = read_array(dumpfile) + print_event(eid, "%d bytes of random data" % len(data)) + return True # pre-MTTCG merge v5_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction), Decoder(1, "EVENT_INTERRUPT", decode_interrupt), - Decoder(2, "EVENT_EXCEPTION", decode_unimp), + Decoder(2, "EVENT_EXCEPTION", decode_plain), Decoder(3, "EVENT_ASYNC", decode_async), Decoder(4, "EVENT_SHUTDOWN", decode_unimp), - Decoder(5, "EVENT_CHAR_WRITE", decode_unimp), + Decoder(5, "EVENT_CHAR_WRITE", decode_char_write), Decoder(6, "EVENT_CHAR_READ_ALL", decode_unimp), Decoder(7, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp), Decoder(8, "EVENT_CLOCK_HOST", decode_clock), @@ -215,10 +241,10 @@ def decode_clock(eid, name, dumpfile): # post-MTTCG merge, AUDIO support added v6_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction), Decoder(1, "EVENT_INTERRUPT", decode_interrupt), - Decoder(2, "EVENT_EXCEPTION", decode_unimp), + Decoder(2, "EVENT_EXCEPTION", decode_plain), Decoder(3, "EVENT_ASYNC", decode_async), Decoder(4, "EVENT_SHUTDOWN", decode_unimp), - Decoder(5, "EVENT_CHAR_WRITE", decode_unimp), + Decoder(5, "EVENT_CHAR_WRITE", decode_char_write), Decoder(6, "EVENT_CHAR_READ_ALL", decode_unimp), Decoder(7, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp), Decoder(8, "EVENT_AUDIO_OUT", decode_audio_out), @@ -250,7 +276,7 @@ def decode_clock(eid, name, dumpfile): Decoder(10, "EVENT_SHUTDOWN_GUEST_RESET", decode_unimp), Decoder(11, "EVENT_SHUTDOWN_GUEST_PANIC", decode_unimp), Decoder(12, "EVENT_SHUTDOWN___MAX", 
decode_unimp), - Decoder(13, "EVENT_CHAR_WRITE", decode_unimp), + Decoder(13, "EVENT_CHAR_WRITE", decode_char_write), Decoder(14, "EVENT_CHAR_READ_ALL", decode_unimp), Decoder(15, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp), Decoder(16, "EVENT_AUDIO_OUT", decode_audio_out), @@ -268,6 +294,48 @@ def decode_clock(eid, name, dumpfile): Decoder(28, "EVENT_CP_RESET", decode_checkpoint), ] +v12_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction), + Decoder(1, "EVENT_INTERRUPT", decode_interrupt), + Decoder(2, "EVENT_EXCEPTION", decode_plain), + Decoder(3, "EVENT_ASYNC", decode_async), + Decoder(4, "EVENT_ASYNC", decode_async), + Decoder(5, "EVENT_ASYNC", decode_async), + Decoder(6, "EVENT_ASYNC", decode_async), + Decoder(6, "EVENT_ASYNC", decode_async), + Decoder(8, "EVENT_ASYNC", decode_async), + Decoder(9, "EVENT_ASYNC", decode_async), + Decoder(10, "EVENT_ASYNC", decode_async), + Decoder(11, "EVENT_SHUTDOWN", decode_unimp), + Decoder(12, "EVENT_SHUTDOWN_HOST_ERR", decode_unimp), + Decoder(13, "EVENT_SHUTDOWN_HOST_QMP_QUIT", decode_unimp), + Decoder(14, "EVENT_SHUTDOWN_HOST_QMP_RESET", decode_unimp), + Decoder(14, "EVENT_SHUTDOWN_HOST_SIGNAL", decode_unimp), + Decoder(15, "EVENT_SHUTDOWN_HOST_UI", decode_unimp), + Decoder(16, "EVENT_SHUTDOWN_GUEST_SHUTDOWN", decode_unimp), + Decoder(17, "EVENT_SHUTDOWN_GUEST_RESET", decode_unimp), + Decoder(18, "EVENT_SHUTDOWN_GUEST_PANIC", decode_unimp), + Decoder(19, "EVENT_SHUTDOWN_GUEST_SUBSYSTEM_RESET", decode_unimp), + Decoder(20, "EVENT_SHUTDOWN_GUEST_SNAPSHOT_LOAD", decode_unimp), + Decoder(21, "EVENT_SHUTDOWN___MAX", decode_unimp), + Decoder(22, "EVENT_CHAR_WRITE", decode_char_write), + Decoder(23, "EVENT_CHAR_READ_ALL", decode_unimp), + Decoder(24, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp), + Decoder(25, "EVENT_AUDIO_IN", decode_unimp), + Decoder(26, "EVENT_AUDIO_OUT", decode_audio_out), + Decoder(27, "EVENT_RANDOM", decode_random), + Decoder(28, "EVENT_CLOCK_HOST", decode_clock), + Decoder(29, "EVENT_CLOCK_VIRTUAL_RT", decode_clock), + Decoder(30, "EVENT_CP_CLOCK_WARP_START", decode_checkpoint), + Decoder(31, "EVENT_CP_CLOCK_WARP_ACCOUNT", decode_checkpoint), + Decoder(32, "EVENT_CP_RESET_REQUESTED", decode_checkpoint), + Decoder(33, "EVENT_CP_SUSPEND_REQUESTED", decode_checkpoint), + Decoder(34, "EVENT_CP_CLOCK_VIRTUAL", decode_checkpoint), + Decoder(35, "EVENT_CP_CLOCK_HOST", decode_checkpoint), + Decoder(36, "EVENT_CP_CLOCK_VIRTUAL_RT", decode_checkpoint), + Decoder(37, "EVENT_CP_INIT", decode_checkpoint_init), + Decoder(38, "EVENT_CP_RESET", decode_checkpoint), +] + def parse_arguments(): "Grab arguments for script" parser = argparse.ArgumentParser() @@ -278,14 +346,18 @@ def parse_arguments(): def decode_file(filename): "Decode a record/replay dump" dumpfile = open(filename, "rb") - + dumpsize = path.getsize(filename) # read and throwaway the header version = read_dword(dumpfile) junk = read_qword(dumpfile) + # see REPLAY_VERSION print("HEADER: version 0x%x" % (version)) - if version == 0xe02007: + if version == 0xe0200c: + event_decode_table = v12_event_table + replay_state.checkpoint_start = 30 + elif version == 0xe02007: event_decode_table = v7_event_table replay_state.checkpoint_start = 12 elif version == 0xe02006: @@ -299,8 +371,13 @@ def decode_file(filename): decode_ok = True while decode_ok: event = read_event(dumpfile) - decode_ok = call_decode(event_decode_table, event, dumpfile) + decode_ok = call_decode(event_decode_table, event, + dumpfile) + except Exception as inst: + print(f"error {inst}") + finally: + 
print(f"Reached {dumpfile.tell()} of {dumpsize} bytes") dumpfile.close() if __name__ == "__main__": diff --git a/scripts/tracetool.py b/scripts/tracetool.py index ab7653a5ce0..5de9ce96d30 100755 --- a/scripts/tracetool.py +++ b/scripts/tracetool.py @@ -44,12 +44,9 @@ def error_opt(msg = None): --help This help message. --list-backends Print list of available backends. --check-backends Check if the given backend is valid. - --binary Full path to QEMU binary. - --target-type QEMU emulator target type ('system' or 'user'). - --target-name QEMU emulator target name. - --group Name of the event group - --probe-prefix Prefix for dtrace probe names - (default: qemu--).\ + --binary Full path to QEMU binary (required for 'stap' backend). + --group Name of the event group. + --probe-prefix Prefix for dtrace probe names (required for 'stap' backend). """ % { "script" : _SCRIPT, "backends" : backend_descr, @@ -67,7 +64,7 @@ def main(args): long_opts = ["backends=", "format=", "help", "list-backends", "check-backends", "group="] - long_opts += ["binary=", "target-type=", "target-name=", "probe-prefix="] + long_opts += ["binary=", "probe-prefix="] try: opts, args = getopt.getopt(args[1:], "", long_opts) @@ -79,8 +76,6 @@ def main(args): arg_format = "" arg_group = None binary = None - target_type = None - target_name = None probe_prefix = None for opt, arg in opts: if opt == "--help": @@ -102,10 +97,6 @@ def main(args): elif opt == "--binary": binary = arg - elif opt == '--target-type': - target_type = arg - elif opt == '--target-name': - target_name = arg elif opt == '--probe-prefix': probe_prefix = arg @@ -127,13 +118,8 @@ def main(args): if arg_format == "stap": if binary is None: error_opt("--binary is required for SystemTAP tapset generator") - if probe_prefix is None and target_type is None: - error_opt("--target-type is required for SystemTAP tapset generator") - if probe_prefix is None and target_name is None: - error_opt("--target-name is required for SystemTAP tapset generator") - if probe_prefix is None: - probe_prefix = ".".join(["qemu", target_type, target_name]) + error_opt("--probe-prefix is required for SystemTAP tapset generator") if len(args) < 2: error_opt("missing trace-events and output filepaths") diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh index 34295c0fe55..a0006eec6fd 100755 --- a/scripts/update-linux-headers.sh +++ b/scripts/update-linux-headers.sh @@ -156,6 +156,9 @@ for arch in $ARCHLIST; do cp_portable "$tmpdir/bootparam.h" \ "$output/include/standard-headers/asm-$arch" fi + if [ $arch = riscv ]; then + cp "$tmpdir/include/asm/ptrace.h" "$output/linux-headers/asm-riscv/" + fi done rm -rf "$output/linux-headers/linux" diff --git a/scsi/meson.build b/scsi/meson.build index 53f3a1f7169..cdb91e11b0e 100644 --- a/scsi/meson.build +++ b/scsi/meson.build @@ -1,4 +1,6 @@ block_ss.add(files('utils.c')) -block_ss.add(when: 'CONFIG_LINUX', - if_true: files('pr-manager.c', 'pr-manager-helper.c'), - if_false: files('pr-manager-stub.c')) +if host_os == 'linux' + block_ss.add(files('pr-manager.c', 'pr-manager-helper.c')) +else + block_ss.add(files('pr-manager-stub.c')) +endif diff --git a/semihosting/arm-compat-semi.c b/semihosting/arm-compat-semi.c index 329ea112607..d78c6428b90 100644 --- a/semihosting/arm-compat-semi.c +++ b/semihosting/arm-compat-semi.c @@ -214,7 +214,7 @@ static target_ulong syscall_err; static inline uint32_t get_swi_errno(CPUState *cs) { #ifdef CONFIG_USER_ONLY - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); 
return ts->swi_errno; #else @@ -226,7 +226,7 @@ static void common_semi_cb(CPUState *cs, uint64_t ret, int err) { if (err) { #ifdef CONFIG_USER_ONLY - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); ts->swi_errno = err; #else syscall_err = err; @@ -586,7 +586,7 @@ void do_common_semihosting(CPUState *cs) #if !defined(CONFIG_USER_ONLY) const char *cmdline; #else - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); #endif GET_ARG(0); GET_ARG(1); @@ -664,7 +664,7 @@ void do_common_semihosting(CPUState *cs) target_ulong retvals[4]; int i; #ifdef CONFIG_USER_ONLY - TaskState *ts = cs->opaque; + TaskState *ts = get_task_state(cs); target_ulong limit; #else LayoutInfo info = common_semi_find_bases(cs); diff --git a/semihosting/console.c b/semihosting/console.c index 5d61e8207e2..60102bbab66 100644 --- a/semihosting/console.c +++ b/semihosting/console.c @@ -43,7 +43,7 @@ static SemihostingConsole console; static int console_can_read(void *opaque) { SemihostingConsole *c = opaque; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); return (int)fifo8_num_free(&c->fifo); } @@ -58,7 +58,7 @@ static void console_wake_up(gpointer data, gpointer user_data) static void console_read(void *opaque, const uint8_t *buf, int size) { SemihostingConsole *c = opaque; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); while (size-- && !fifo8_is_full(&c->fifo)) { fifo8_push(&c->fifo, *buf++); } @@ -70,7 +70,7 @@ bool qemu_semihosting_console_ready(void) { SemihostingConsole *c = &console; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); return !fifo8_is_empty(&c->fifo); } @@ -78,7 +78,7 @@ void qemu_semihosting_console_block_until_ready(CPUState *cs) { SemihostingConsole *c = &console; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); /* Block if the fifo is completely empty. 
*/ if (fifo8_is_empty(&c->fifo)) { diff --git a/semihosting/uaccess.c b/semihosting/uaccess.c index 5d889f92638..dc587d73bc4 100644 --- a/semihosting/uaccess.c +++ b/semihosting/uaccess.c @@ -26,7 +26,7 @@ void *uaccess_lock_user(CPUArchState *env, target_ulong addr, ssize_t uaccess_strlen_user(CPUArchState *env, target_ulong addr) { - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = cpu_mmu_index(env_cpu(env), false); size_t len = 0; while (1) { diff --git a/storage-daemon/meson.build b/storage-daemon/meson.build index 5e90cd32b40..46267b63e72 100644 --- a/storage-daemon/meson.build +++ b/storage-daemon/meson.build @@ -5,7 +5,7 @@ qsd_ss.add(blockdev, chardev, qmp, qom, qemuutil, gnutls) subdir('qapi') if have_tools - qsd_ss = qsd_ss.apply(config_targetos, strict: false) + qsd_ss = qsd_ss.apply({}) qsd = executable('qemu-storage-daemon', qsd_ss.sources(), dependencies: qsd_ss.dependencies(), diff --git a/stubs/colo.c b/stubs/colo.c index 08c9f982d58..f8c069b7394 100644 --- a/stubs/colo.c +++ b/stubs/colo.c @@ -2,7 +2,6 @@ #include "qemu/notify.h" #include "net/colo-compare.h" #include "migration/colo.h" -#include "migration/migration.h" #include "qemu/error-report.h" #include "qapi/qapi-commands-migration.h" diff --git a/stubs/icount.c b/stubs/icount.c index 6df8c2bf7d4..9f9a59f55b9 100644 --- a/stubs/icount.c +++ b/stubs/icount.c @@ -4,37 +4,20 @@ /* icount - Instruction Counter API */ -int use_icount; +ICountMode use_icount = ICOUNT_DISABLED; -void icount_update(CPUState *cpu) -{ - abort(); -} -void icount_configure(QemuOpts *opts, Error **errp) +bool icount_configure(QemuOpts *opts, Error **errp) { /* signal error */ error_setg(errp, "cannot configure icount, TCG support not available"); + + return false; } int64_t icount_get_raw(void) { abort(); return 0; } -int64_t icount_get(void) -{ - abort(); - return 0; -} -int64_t icount_to_ns(int64_t icount) -{ - abort(); - return 0; -} -int64_t icount_round(int64_t count) -{ - abort(); - return 0; -} void icount_start_warp_timer(void) { abort(); @@ -43,7 +26,7 @@ void icount_account_warp_timer(void) { abort(); } - void icount_notify_exit(void) { + abort(); } diff --git a/stubs/iothread-lock.c b/stubs/iothread-lock.c index 5b45b7fc8b9..d7890e5581c 100644 --- a/stubs/iothread-lock.c +++ b/stubs/iothread-lock.c @@ -1,15 +1,15 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" -bool qemu_mutex_iothread_locked(void) +bool bql_locked(void) { return false; } -void qemu_mutex_lock_iothread_impl(const char *file, int line) +void bql_lock_impl(const char *file, int line) { } -void qemu_mutex_unlock_iothread(void) +void bql_unlock(void) { } diff --git a/stubs/xen-hw-stub.c b/stubs/xen-hw-stub.c index 7d7ffe83a93..6cf0e9a4c1d 100644 --- a/stubs/xen-hw-stub.c +++ b/stubs/xen-hw-stub.c @@ -24,10 +24,6 @@ int xen_set_pci_link_route(uint8_t link, uint8_t irq) return -1; } -void xen_hvm_inject_msi(uint64_t addr, uint32_t data) -{ -} - int xen_is_pirq_msi(uint32_t msi_data) { return 0; diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c index 6684057370d..a879149fefa 100644 --- a/subprojects/libvhost-user/libvhost-user.c +++ b/subprojects/libvhost-user/libvhost-user.c @@ -43,6 +43,8 @@ #include #include #include +#include +#include #ifdef __NR_userfaultfd #include @@ -195,30 +197,58 @@ vu_panic(VuDev *dev, const char *msg, ...) */ } +/* Search for a memory region that covers this guest physical address. 
*/ +static VuDevRegion * +vu_gpa_to_mem_region(VuDev *dev, uint64_t guest_addr) +{ + int low = 0; + int high = dev->nregions - 1; + + /* + * Memory regions cannot overlap in guest physical address space. Each + * GPA belongs to exactly one memory region, so there can only be one + * match. + * + * We store our memory regions ordered by GPA and can simply perform a + * binary search. + */ + while (low <= high) { + unsigned int mid = low + (high - low) / 2; + VuDevRegion *cur = &dev->regions[mid]; + + if (guest_addr >= cur->gpa && guest_addr < cur->gpa + cur->size) { + return cur; + } + if (guest_addr >= cur->gpa + cur->size) { + low = mid + 1; + } + if (guest_addr < cur->gpa) { + high = mid - 1; + } + } + return NULL; +} + /* Translate guest physical address to our virtual address. */ void * vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr) { - unsigned int i; + VuDevRegion *r; if (*plen == 0) { return NULL; } - /* Find matching memory region. */ - for (i = 0; i < dev->nregions; i++) { - VuDevRegion *r = &dev->regions[i]; - - if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) { - if ((guest_addr + *plen) > (r->gpa + r->size)) { - *plen = r->gpa + r->size - guest_addr; - } - return (void *)(uintptr_t) - guest_addr - r->gpa + r->mmap_addr + r->mmap_offset; - } + r = vu_gpa_to_mem_region(dev, guest_addr); + if (!r) { + return NULL; } - return NULL; + if ((guest_addr + *plen) > (r->gpa + r->size)) { + *plen = r->gpa + r->size - guest_addr; + } + return (void *)(uintptr_t)guest_addr - r->gpa + r->mmap_addr + + r->mmap_offset; } /* Translate qemu virtual address to our virtual address. */ @@ -240,6 +270,221 @@ qva_to_va(VuDev *dev, uint64_t qemu_addr) return NULL; } +static void +vu_remove_all_mem_regs(VuDev *dev) +{ + unsigned int i; + + for (i = 0; i < dev->nregions; i++) { + VuDevRegion *r = &dev->regions[i]; + + munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset); + } + dev->nregions = 0; +} + +static bool +map_ring(VuDev *dev, VuVirtq *vq) +{ + vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr); + vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr); + vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr); + + DPRINT("Setting virtq addresses:\n"); + DPRINT(" vring_desc at %p\n", vq->vring.desc); + DPRINT(" vring_used at %p\n", vq->vring.used); + DPRINT(" vring_avail at %p\n", vq->vring.avail); + + return !(vq->vring.desc && vq->vring.used && vq->vring.avail); +} + +static bool +vu_is_vq_usable(VuDev *dev, VuVirtq *vq) +{ + if (unlikely(dev->broken)) { + return false; + } + + if (likely(vq->vring.avail)) { + return true; + } + + /* + * In corner cases, we might temporarily remove a memory region that + * mapped a ring. When removing a memory region we make sure to + * unmap any rings that would be impacted. Let's try to remap if we + * already succeeded mapping this ring once. 
+ */ + if (!vq->vra.desc_user_addr || !vq->vra.used_user_addr || + !vq->vra.avail_user_addr) { + return false; + } + if (map_ring(dev, vq)) { + vu_panic(dev, "remapping queue on access"); + return false; + } + return true; +} + +static void +unmap_rings(VuDev *dev, VuDevRegion *r) +{ + int i; + + for (i = 0; i < dev->max_queues; i++) { + VuVirtq *vq = &dev->vq[i]; + const uintptr_t desc = (uintptr_t)vq->vring.desc; + const uintptr_t used = (uintptr_t)vq->vring.used; + const uintptr_t avail = (uintptr_t)vq->vring.avail; + + if (desc < r->mmap_addr || desc >= r->mmap_addr + r->size) { + continue; + } + if (used < r->mmap_addr || used >= r->mmap_addr + r->size) { + continue; + } + if (avail < r->mmap_addr || avail >= r->mmap_addr + r->size) { + continue; + } + + DPRINT("Unmapping rings of queue %d\n", i); + vq->vring.desc = NULL; + vq->vring.used = NULL; + vq->vring.avail = NULL; + } +} + +static size_t +get_fd_hugepagesize(int fd) +{ +#if defined(__linux__) + struct statfs fs; + int ret; + + do { + ret = fstatfs(fd, &fs); + } while (ret != 0 && errno == EINTR); + + if (!ret && (unsigned int)fs.f_type == HUGETLBFS_MAGIC) { + return fs.f_bsize; + } +#endif + return 0; +} + +static void +_vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd) +{ + const uint64_t start_gpa = msg_region->guest_phys_addr; + const uint64_t end_gpa = start_gpa + msg_region->memory_size; + int prot = PROT_READ | PROT_WRITE; + uint64_t mmap_offset, fd_offset; + size_t hugepagesize; + VuDevRegion *r; + void *mmap_addr; + int low = 0; + int high = dev->nregions - 1; + unsigned int idx; + + DPRINT("Adding region %d\n", dev->nregions); + DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n", + msg_region->guest_phys_addr); + DPRINT(" memory_size: 0x%016"PRIx64"\n", + msg_region->memory_size); + DPRINT(" userspace_addr: 0x%016"PRIx64"\n", + msg_region->userspace_addr); + DPRINT(" old mmap_offset: 0x%016"PRIx64"\n", + msg_region->mmap_offset); + + if (dev->postcopy_listening) { + /* + * In postcopy we're using PROT_NONE here to catch anyone + * accessing it before we userfault + */ + prot = PROT_NONE; + } + + /* + * We will add memory regions into the array sorted by GPA. Perform a + * binary search to locate the insertion point: it will be at the low + * index. + */ + while (low <= high) { + unsigned int mid = low + (high - low) / 2; + VuDevRegion *cur = &dev->regions[mid]; + + /* Overlap of GPA addresses. */ + if (start_gpa < cur->gpa + cur->size && cur->gpa < end_gpa) { + vu_panic(dev, "regions with overlapping guest physical addresses"); + return; + } + if (start_gpa >= cur->gpa + cur->size) { + low = mid + 1; + } + if (start_gpa < cur->gpa) { + high = mid - 1; + } + } + idx = low; + + /* + * Convert most of msg_region->mmap_offset to fd_offset. In almost all + * cases, this will leave us with mmap_offset == 0, mmap()'ing only + * what we really need. Only if a memory region would partially cover + * hugetlb pages, we'd get mmap_offset != 0, which usually doesn't happen + * anymore (i.e., modern QEMU). + * + * Note that mmap() with hugetlb would fail if the offset into the file + * is not aligned to the huge page size. 
+ */ + hugepagesize = get_fd_hugepagesize(fd); + if (hugepagesize) { + fd_offset = ALIGN_DOWN(msg_region->mmap_offset, hugepagesize); + mmap_offset = msg_region->mmap_offset - fd_offset; + } else { + fd_offset = msg_region->mmap_offset; + mmap_offset = 0; + } + + DPRINT(" fd_offset: 0x%016"PRIx64"\n", + fd_offset); + DPRINT(" new mmap_offset: 0x%016"PRIx64"\n", + mmap_offset); + + mmap_addr = mmap(0, msg_region->memory_size + mmap_offset, + prot, MAP_SHARED | MAP_NORESERVE, fd, fd_offset); + if (mmap_addr == MAP_FAILED) { + vu_panic(dev, "region mmap error: %s", strerror(errno)); + return; + } + DPRINT(" mmap_addr: 0x%016"PRIx64"\n", + (uint64_t)(uintptr_t)mmap_addr); + +#if defined(__linux__) + /* Don't include all guest memory in a coredump. */ + madvise(mmap_addr, msg_region->memory_size + mmap_offset, + MADV_DONTDUMP); +#endif + + /* Shift all affected entries by 1 to open a hole at idx. */ + r = &dev->regions[idx]; + memmove(r + 1, r, sizeof(VuDevRegion) * (dev->nregions - idx)); + r->gpa = msg_region->guest_phys_addr; + r->size = msg_region->memory_size; + r->qva = msg_region->userspace_addr; + r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr; + r->mmap_offset = mmap_offset; + dev->nregions++; + + if (dev->postcopy_listening) { + /* + * Return the address to QEMU so that it can translate the ufd + * fault addresses back. + */ + msg_region->userspace_addr = r->mmap_addr + r->mmap_offset; + } +} + static void vmsg_close_fds(VhostUserMsg *vmsg) { @@ -612,21 +857,6 @@ vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg) return false; } -static bool -map_ring(VuDev *dev, VuVirtq *vq) -{ - vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr); - vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr); - vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr); - - DPRINT("Setting virtq addresses:\n"); - DPRINT(" vring_desc at %p\n", vq->vring.desc); - DPRINT(" vring_used at %p\n", vq->vring.used); - DPRINT(" vring_avail at %p\n", vq->vring.avail); - - return !(vq->vring.desc && vq->vring.used && vq->vring.avail); -} - static bool generate_faults(VuDev *dev) { unsigned int i; @@ -684,7 +914,7 @@ generate_faults(VuDev *dev) { dev->postcopy_ufd, strerror(errno)); return false; } - if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) { + if (!(reg_struct.ioctls & (1ULL << _UFFDIO_COPY))) { vu_panic(dev, "%s Region (%d) doesn't support COPY", __func__, i); return false; @@ -710,11 +940,7 @@ generate_faults(VuDev *dev) { static bool vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { - int i; - bool track_ramblocks = dev->postcopy_listening; VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m; - VuDevRegion *dev_region = &dev->regions[dev->nregions]; - void *mmap_addr; if (vmsg->fd_num != 1) { vmsg_close_fds(vmsg); @@ -744,84 +970,24 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { * we know all the postcopy client bases have been received, and we * should start generating faults. 
*/ - if (track_ramblocks && + if (dev->postcopy_listening && vmsg->size == sizeof(vmsg->payload.u64) && vmsg->payload.u64 == 0) { (void)generate_faults(dev); return false; } - DPRINT("Adding region: %u\n", dev->nregions); - DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n", - msg_region->guest_phys_addr); - DPRINT(" memory_size: 0x%016"PRIx64"\n", - msg_region->memory_size); - DPRINT(" userspace_addr 0x%016"PRIx64"\n", - msg_region->userspace_addr); - DPRINT(" mmap_offset 0x%016"PRIx64"\n", - msg_region->mmap_offset); - - dev_region->gpa = msg_region->guest_phys_addr; - dev_region->size = msg_region->memory_size; - dev_region->qva = msg_region->userspace_addr; - dev_region->mmap_offset = msg_region->mmap_offset; - - /* - * We don't use offset argument of mmap() since the - * mapped address has to be page aligned, and we use huge - * pages. - */ - if (track_ramblocks) { - /* - * In postcopy we're using PROT_NONE here to catch anyone - * accessing it before we userfault. - */ - mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, - PROT_NONE, MAP_SHARED | MAP_NORESERVE, - vmsg->fds[0], 0); - } else { - mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, - PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, - vmsg->fds[0], 0); - } - - if (mmap_addr == MAP_FAILED) { - vu_panic(dev, "region mmap error: %s", strerror(errno)); - } else { - dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr; - DPRINT(" mmap_addr: 0x%016"PRIx64"\n", - dev_region->mmap_addr); - } - + _vu_add_mem_reg(dev, msg_region, vmsg->fds[0]); close(vmsg->fds[0]); - if (track_ramblocks) { - /* - * Return the address to QEMU so that it can translate the ufd - * fault addresses back. - */ - msg_region->userspace_addr = (uintptr_t)(mmap_addr + - dev_region->mmap_offset); - + if (dev->postcopy_listening) { /* Send the message back to qemu with the addresses filled in. */ vmsg->fd_num = 0; DPRINT("Successfully added new region in postcopy\n"); - dev->nregions++; return true; - } else { - for (i = 0; i < dev->max_queues; i++) { - if (dev->vq[i].vring.desc) { - if (map_ring(dev, &dev->vq[i])) { - vu_panic(dev, "remapping queue %d for new memory region", - i); - } - } - } - - DPRINT("Successfully added new region\n"); - dev->nregions++; - return false; } + DPRINT("Successfully added new region\n"); + return false; } static inline bool reg_equal(VuDevRegion *vudev_reg, @@ -839,8 +1005,8 @@ static inline bool reg_equal(VuDevRegion *vudev_reg, static bool vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m; - unsigned int i; - bool found = false; + unsigned int idx; + VuDevRegion *r; if (vmsg->fd_num > 1) { vmsg_close_fds(vmsg); @@ -867,35 +1033,31 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { DPRINT(" mmap_offset 0x%016"PRIx64"\n", msg_region->mmap_offset); - for (i = 0; i < dev->nregions; i++) { - if (reg_equal(&dev->regions[i], msg_region)) { - VuDevRegion *r = &dev->regions[i]; - void *ma = (void *) (uintptr_t) r->mmap_addr; - - if (ma) { - munmap(ma, r->size + r->mmap_offset); - } - - /* - * Shift all affected entries by 1 to close the hole at index i and - * zero out the last entry. 
- */ - memmove(dev->regions + i, dev->regions + i + 1, - sizeof(VuDevRegion) * (dev->nregions - i - 1)); - memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion)); - DPRINT("Successfully removed a region\n"); - dev->nregions--; - i--; + r = vu_gpa_to_mem_region(dev, msg_region->guest_phys_addr); + if (!r || !reg_equal(r, msg_region)) { + vmsg_close_fds(vmsg); + vu_panic(dev, "Specified region not found\n"); + return false; + } - found = true; + /* + * There might be valid cases where we temporarily remove memory regions + * to readd them again, or remove memory regions and don't use the rings + * anymore before we set the ring addresses and restart the device. + * + * Unmap all affected rings, remapping them on demand later. This should + * be a corner case. + */ + unmap_rings(dev, r); - /* Continue the search for eventual duplicates. */ - } - } + munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset); - if (!found) { - vu_panic(dev, "Specified region not found\n"); - } + idx = r - dev->regions; + assert(idx < dev->nregions); + /* Shift all affected entries by 1 to close the hole. */ + memmove(r, r + 1, sizeof(VuDevRegion) * (dev->nregions - idx - 1)); + DPRINT("Successfully removed a region\n"); + dev->nregions--; vmsg_close_fds(vmsg); @@ -920,140 +1082,42 @@ vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg) return true; } -static bool -vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg) -{ - unsigned int i; - VhostUserMemory m = vmsg->payload.memory, *memory = &m; - dev->nregions = memory->nregions; - - DPRINT("Nregions: %u\n", memory->nregions); - for (i = 0; i < dev->nregions; i++) { - void *mmap_addr; - VhostUserMemoryRegion *msg_region = &memory->regions[i]; - VuDevRegion *dev_region = &dev->regions[i]; - - DPRINT("Region %d\n", i); - DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n", - msg_region->guest_phys_addr); - DPRINT(" memory_size: 0x%016"PRIx64"\n", - msg_region->memory_size); - DPRINT(" userspace_addr 0x%016"PRIx64"\n", - msg_region->userspace_addr); - DPRINT(" mmap_offset 0x%016"PRIx64"\n", - msg_region->mmap_offset); - - dev_region->gpa = msg_region->guest_phys_addr; - dev_region->size = msg_region->memory_size; - dev_region->qva = msg_region->userspace_addr; - dev_region->mmap_offset = msg_region->mmap_offset; - - /* We don't use offset argument of mmap() since the - * mapped address has to be page aligned, and we use huge - * pages. - * In postcopy we're using PROT_NONE here to catch anyone - * accessing it before we userfault - */ - mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, - PROT_NONE, MAP_SHARED | MAP_NORESERVE, - vmsg->fds[i], 0); - - if (mmap_addr == MAP_FAILED) { - vu_panic(dev, "region mmap error: %s", strerror(errno)); - } else { - dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr; - DPRINT(" mmap_addr: 0x%016"PRIx64"\n", - dev_region->mmap_addr); - } - - /* Return the address to QEMU so that it can translate the ufd - * fault addresses back. - */ - msg_region->userspace_addr = (uintptr_t)(mmap_addr + - dev_region->mmap_offset); - close(vmsg->fds[i]); - } - - /* Send the message back to qemu with the addresses filled in */ - vmsg->fd_num = 0; - if (!vu_send_reply(dev, dev->sock, vmsg)) { - vu_panic(dev, "failed to respond to set-mem-table for postcopy"); - return false; - } - - /* Wait for QEMU to confirm that it's registered the handler for the - * faults. 
- */ - if (!dev->read_msg(dev, dev->sock, vmsg) || - vmsg->size != sizeof(vmsg->payload.u64) || - vmsg->payload.u64 != 0) { - vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table"); - return false; - } - - /* OK, now we can go and register the memory and generate faults */ - (void)generate_faults(dev); - - return false; -} - static bool vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg) { - unsigned int i; VhostUserMemory m = vmsg->payload.memory, *memory = &m; + unsigned int i; - for (i = 0; i < dev->nregions; i++) { - VuDevRegion *r = &dev->regions[i]; - void *ma = (void *) (uintptr_t) r->mmap_addr; + vu_remove_all_mem_regs(dev); - if (ma) { - munmap(ma, r->size + r->mmap_offset); - } + DPRINT("Nregions: %u\n", memory->nregions); + for (i = 0; i < memory->nregions; i++) { + _vu_add_mem_reg(dev, &memory->regions[i], vmsg->fds[i]); + close(vmsg->fds[i]); } - dev->nregions = memory->nregions; if (dev->postcopy_listening) { - return vu_set_mem_table_exec_postcopy(dev, vmsg); - } - - DPRINT("Nregions: %u\n", memory->nregions); - for (i = 0; i < dev->nregions; i++) { - void *mmap_addr; - VhostUserMemoryRegion *msg_region = &memory->regions[i]; - VuDevRegion *dev_region = &dev->regions[i]; + /* Send the message back to qemu with the addresses filled in */ + vmsg->fd_num = 0; + if (!vu_send_reply(dev, dev->sock, vmsg)) { + vu_panic(dev, "failed to respond to set-mem-table for postcopy"); + return false; + } - DPRINT("Region %d\n", i); - DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n", - msg_region->guest_phys_addr); - DPRINT(" memory_size: 0x%016"PRIx64"\n", - msg_region->memory_size); - DPRINT(" userspace_addr 0x%016"PRIx64"\n", - msg_region->userspace_addr); - DPRINT(" mmap_offset 0x%016"PRIx64"\n", - msg_region->mmap_offset); - - dev_region->gpa = msg_region->guest_phys_addr; - dev_region->size = msg_region->memory_size; - dev_region->qva = msg_region->userspace_addr; - dev_region->mmap_offset = msg_region->mmap_offset; - - /* We don't use offset argument of mmap() since the - * mapped address has to be page aligned, and we use huge - * pages. */ - mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, - PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, - vmsg->fds[i], 0); - - if (mmap_addr == MAP_FAILED) { - vu_panic(dev, "region mmap error: %s", strerror(errno)); - } else { - dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr; - DPRINT(" mmap_addr: 0x%016"PRIx64"\n", - dev_region->mmap_addr); + /* + * Wait for QEMU to confirm that it's registered the handler for the + * faults. 
+ */ + if (!dev->read_msg(dev, dev->sock, vmsg) || + vmsg->size != sizeof(vmsg->payload.u64) || + vmsg->payload.u64 != 0) { + vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table"); + return false; } - close(vmsg->fds[i]); + /* OK, now we can go and register the memory and generate faults */ + (void)generate_faults(dev); + return false; } for (i = 0; i < dev->max_queues; i++) { @@ -2112,14 +2176,7 @@ vu_deinit(VuDev *dev) { unsigned int i; - for (i = 0; i < dev->nregions; i++) { - VuDevRegion *r = &dev->regions[i]; - void *m = (void *) (uintptr_t) r->mmap_addr; - if (m != MAP_FAILED) { - munmap(m, r->size + r->mmap_offset); - } - } - dev->nregions = 0; + vu_remove_all_mem_regs(dev); for (i = 0; i < dev->max_queues; i++) { VuVirtq *vq = &dev->vq[i]; @@ -2171,6 +2228,8 @@ vu_deinit(VuDev *dev) free(dev->vq); dev->vq = NULL; + free(dev->regions); + dev->regions = NULL; } bool @@ -2205,9 +2264,17 @@ vu_init(VuDev *dev, dev->backend_fd = -1; dev->max_queues = max_queues; + dev->regions = malloc(VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0])); + if (!dev->regions) { + DPRINT("%s: failed to malloc mem regions\n", __func__); + return false; + } + dev->vq = malloc(max_queues * sizeof(dev->vq[0])); if (!dev->vq) { DPRINT("%s: failed to malloc virtqueues\n", __func__); + free(dev->regions); + dev->regions = NULL; return false; } @@ -2374,8 +2441,7 @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, idx = vq->last_avail_idx; total_bufs = in_total = out_total = 0; - if (unlikely(dev->broken) || - unlikely(!vq->vring.avail)) { + if (!vu_is_vq_usable(dev, vq)) { goto done; } @@ -2490,8 +2556,7 @@ vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes, bool vu_queue_empty(VuDev *dev, VuVirtq *vq) { - if (unlikely(dev->broken) || - unlikely(!vq->vring.avail)) { + if (!vu_is_vq_usable(dev, vq)) { return true; } @@ -2530,8 +2595,7 @@ vring_notify(VuDev *dev, VuVirtq *vq) static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync) { - if (unlikely(dev->broken) || - unlikely(!vq->vring.avail)) { + if (!vu_is_vq_usable(dev, vq)) { return; } @@ -2856,8 +2920,7 @@ vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz) unsigned int head; VuVirtqElement *elem; - if (unlikely(dev->broken) || - unlikely(!vq->vring.avail)) { + if (!vu_is_vq_usable(dev, vq)) { return NULL; } @@ -3014,8 +3077,7 @@ vu_queue_fill(VuDev *dev, VuVirtq *vq, { struct vring_used_elem uelem; - if (unlikely(dev->broken) || - unlikely(!vq->vring.avail)) { + if (!vu_is_vq_usable(dev, vq)) { return; } @@ -3044,8 +3106,7 @@ vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count) { uint16_t old, new; - if (unlikely(dev->broken) || - unlikely(!vq->vring.avail)) { + if (!vu_is_vq_usable(dev, vq)) { return; } diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h index c2352904f07..deb40e77b3f 100644 --- a/subprojects/libvhost-user/libvhost-user.h +++ b/subprojects/libvhost-user/libvhost-user.h @@ -31,10 +31,12 @@ #define VHOST_MEMORY_BASELINE_NREGIONS 8 /* - * Set a reasonable maximum number of ram slots, which will be supported by - * any architecture. + * vhost in the kernel usually supports 509 mem slots. 509 used to be the + * KVM limit, it supported 512, but 3 were used for internal purposes. This + * limit is sufficient to support many DIMMs and virtio-mem in + * "dynamic-memslots" mode. 
*/ -#define VHOST_USER_MAX_RAM_SLOTS 32 +#define VHOST_USER_MAX_RAM_SLOTS 509 #define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64) @@ -398,7 +400,7 @@ typedef struct VuDevInflightInfo { struct VuDev { int sock; uint32_t nregions; - VuDevRegion regions[VHOST_USER_MAX_RAM_SLOTS]; + VuDevRegion *regions; VuVirtq *vq; VuDevInflightInfo inflight_info; int log_call_fd; diff --git a/system/bootdevice.c b/system/bootdevice.c index 2106f1026ff..2579b26dc8b 100644 --- a/system/bootdevice.c +++ b/system/bootdevice.c @@ -101,20 +101,23 @@ void validate_bootdevices(const char *devices, Error **errp) void restore_boot_order(void *opaque) { char *normal_boot_order = opaque; - static int first = 1; + static int bootcount; - /* Restore boot order and remove ourselves after the first boot */ - if (first) { - first = 0; + switch (bootcount++) { + case 0: + /* First boot: use the one-time config */ + return; + case 1: + /* Second boot: restore normal boot order */ + if (boot_set_handler) { + qemu_boot_set(normal_boot_order, &error_abort); + } + g_free(normal_boot_order); + return; + default: + /* Subsequent boots: keep using normal boot order */ return; } - - if (boot_set_handler) { - qemu_boot_set(normal_boot_order, &error_abort); - } - - qemu_unregister_reset(restore_boot_order, normal_boot_order); - g_free(normal_boot_order); } void check_boot_index(int32_t bootindex, Error **errp) diff --git a/system/cpu-throttle.c b/system/cpu-throttle.c index d9bb30a223d..c951a6c65e1 100644 --- a/system/cpu-throttle.c +++ b/system/cpu-throttle.c @@ -54,12 +54,12 @@ static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque) endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns; while (sleeptime_ns > 0 && !cpu->stop) { if (sleeptime_ns > SCALE_MS) { - qemu_cond_timedwait_iothread(cpu->halt_cond, + qemu_cond_timedwait_bql(cpu->halt_cond, sleeptime_ns / SCALE_MS); } else { - qemu_mutex_unlock_iothread(); + bql_unlock(); g_usleep(sleeptime_ns / SCALE_US); - qemu_mutex_lock_iothread(); + bql_lock(); } sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME); } diff --git a/system/cpu-timers.c b/system/cpu-timers.c index 7452d97b673..0b31c9a1b6a 100644 --- a/system/cpu-timers.c +++ b/system/cpu-timers.c @@ -154,7 +154,7 @@ static bool adjust_timers_state_needed(void *opaque) static bool icount_shift_state_needed(void *opaque) { - return icount_enabled() == 2; + return icount_enabled() == ICOUNT_ADAPTATIVE; } /* @@ -165,7 +165,7 @@ static const VMStateDescription icount_vmstate_warp_timer = { .version_id = 1, .minimum_version_id = 1, .needed = warp_timer_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(vm_clock_warp_start, TimersState), VMSTATE_TIMER_PTR(icount_warp_timer, TimersState), VMSTATE_END_OF_LIST() @@ -177,7 +177,7 @@ static const VMStateDescription icount_vmstate_adjust_timers = { .version_id = 1, .minimum_version_id = 1, .needed = adjust_timers_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(icount_rt_timer, TimersState), VMSTATE_TIMER_PTR(icount_vm_timer, TimersState), VMSTATE_END_OF_LIST() @@ -189,7 +189,7 @@ static const VMStateDescription icount_vmstate_shift = { .version_id = 2, .minimum_version_id = 2, .needed = icount_shift_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT16(icount_time_shift, TimersState), VMSTATE_INT64(last_delta, TimersState), VMSTATE_END_OF_LIST() @@ -204,12 +204,12 @@ static const VMStateDescription 
icount_vmstate_timers = { .version_id = 1, .minimum_version_id = 1, .needed = icount_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(qemu_icount_bias, TimersState), VMSTATE_INT64(qemu_icount, TimersState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &icount_vmstate_warp_timer, &icount_vmstate_adjust_timers, &icount_vmstate_shift, @@ -221,13 +221,13 @@ static const VMStateDescription vmstate_timers = { .name = "timer", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(cpu_ticks_offset, TimersState), VMSTATE_UNUSED(8), VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &icount_vmstate_timers, NULL } diff --git a/system/cpus.c b/system/cpus.c index a444a747f01..68d161d96b7 100644 --- a/system/cpus.c +++ b/system/cpus.c @@ -65,7 +65,8 @@ #endif /* CONFIG_LINUX */ -static QemuMutex qemu_global_mutex; +/* The Big QEMU Lock (BQL) */ +static QemuMutex bql; /* * The chosen accelerator is supposed to register this. @@ -259,14 +260,33 @@ void cpu_interrupt(CPUState *cpu, int mask) } } +/* + * True if the vm was previously suspended, and has not been woken or reset. + */ +static int vm_was_suspended; + +void vm_set_suspended(bool suspended) +{ + vm_was_suspended = suspended; +} + +bool vm_get_suspended(void) +{ + return vm_was_suspended; +} + static int do_vm_stop(RunState state, bool send_stop) { int ret = 0; + RunState oldstate = runstate_get(); - if (runstate_is_running()) { + if (runstate_is_live(oldstate)) { + vm_was_suspended = (oldstate == RUN_STATE_SUSPENDED); runstate_set(state); cpu_disable_ticks(); - pause_all_vcpus(); + if (oldstate == RUN_STATE_RUNNING) { + pause_all_vcpus(); + } vm_state_notify(0, state); if (send_stop) { qapi_event_send_stop(); @@ -389,14 +409,14 @@ void qemu_init_cpu_loop(void) qemu_init_sigbus(); qemu_cond_init(&qemu_cpu_cond); qemu_cond_init(&qemu_pause_cond); - qemu_mutex_init(&qemu_global_mutex); + qemu_mutex_init(&bql); qemu_thread_get_self(&io_thread); } void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data) { - do_run_on_cpu(cpu, func, data, &qemu_global_mutex); + do_run_on_cpu(cpu, func, data, &bql); } static void qemu_cpu_stop(CPUState *cpu, bool exit) @@ -428,7 +448,7 @@ void qemu_wait_io_event(CPUState *cpu) slept = true; qemu_plugin_vcpu_idle_cb(cpu); } - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex); + qemu_cond_wait(cpu->halt_cond, &bql); } if (slept) { qemu_plugin_vcpu_resume_cb(cpu); @@ -481,46 +501,46 @@ bool qemu_in_vcpu_thread(void) return current_cpu && qemu_cpu_is_self(current_cpu); } -QEMU_DEFINE_STATIC_CO_TLS(bool, iothread_locked) +QEMU_DEFINE_STATIC_CO_TLS(bool, bql_locked) -bool qemu_mutex_iothread_locked(void) +bool bql_locked(void) { - return get_iothread_locked(); + return get_bql_locked(); } bool qemu_in_main_thread(void) { - return qemu_mutex_iothread_locked(); + return bql_locked(); } /* * The BQL is taken from so many places that it is worth profiling the * callers directly, instead of funneling them all through a single function. 
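 * As an illustration of the intended pattern (hedged: the macro lives in
 * a header, not in this file): callers use the bql_lock() macro, which
 * expands to
 *
 *     bql_lock_impl(__FILE__, __LINE__);
 *
 * so a profiling lock implementation can attribute contention to each
 * call site instead of to one common wrapper.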
*/ -void qemu_mutex_lock_iothread_impl(const char *file, int line) +void bql_lock_impl(const char *file, int line) { - QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func); + QemuMutexLockFunc bql_lock_fn = qatomic_read(&bql_mutex_lock_func); - g_assert(!qemu_mutex_iothread_locked()); - bql_lock(&qemu_global_mutex, file, line); - set_iothread_locked(true); + g_assert(!bql_locked()); + bql_lock_fn(&bql, file, line); + set_bql_locked(true); } -void qemu_mutex_unlock_iothread(void) +void bql_unlock(void) { - g_assert(qemu_mutex_iothread_locked()); - set_iothread_locked(false); - qemu_mutex_unlock(&qemu_global_mutex); + g_assert(bql_locked()); + set_bql_locked(false); + qemu_mutex_unlock(&bql); } -void qemu_cond_wait_iothread(QemuCond *cond) +void qemu_cond_wait_bql(QemuCond *cond) { - qemu_cond_wait(cond, &qemu_global_mutex); + qemu_cond_wait(cond, &bql); } -void qemu_cond_timedwait_iothread(QemuCond *cond, int ms) +void qemu_cond_timedwait_bql(QemuCond *cond, int ms) { - qemu_cond_timedwait(cond, &qemu_global_mutex, ms); + qemu_cond_timedwait(cond, &bql, ms); } /* signal CPU creation */ @@ -571,15 +591,15 @@ void pause_all_vcpus(void) replay_mutex_unlock(); while (!all_vcpus_paused()) { - qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex); + qemu_cond_wait(&qemu_pause_cond, &bql); CPU_FOREACH(cpu) { qemu_cpu_kick(cpu); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); replay_mutex_lock(); - qemu_mutex_lock_iothread(); + bql_lock(); } void cpu_resume(CPUState *cpu) @@ -608,9 +628,9 @@ void cpu_remove_sync(CPUState *cpu) cpu->stop = true; cpu->unplug = true; qemu_cpu_kick(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_thread_join(cpu->thread); - qemu_mutex_lock_iothread(); + bql_lock(); } void cpus_register_accel(const AccelOpsClass *ops) @@ -649,7 +669,7 @@ void qemu_init_vcpu(CPUState *cpu) cpus_accel->create_vcpu_thread(cpu); while (!cpu->created) { - qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); + qemu_cond_wait(&qemu_cpu_cond, &bql); } } @@ -679,11 +699,13 @@ int vm_stop(RunState state) /** * Prepare for (re)starting the VM. - * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already - * running or in case of an error condition), 0 otherwise. + * Returns 0 if the vCPUs should be restarted, -1 on an error condition, + * and 1 otherwise. */ int vm_prepare_start(bool step_pending) { + int ret = vm_was_suspended ? 1 : 0; + RunState state = vm_was_suspended ? 
RUN_STATE_SUSPENDED : RUN_STATE_RUNNING; RunState requested; qemu_vmstop_requested(&requested); @@ -714,9 +736,10 @@ int vm_prepare_start(bool step_pending) qapi_event_send_resume(); cpu_enable_ticks(); - runstate_set(RUN_STATE_RUNNING); - vm_state_notify(1, RUN_STATE_RUNNING); - return 0; + runstate_set(state); + vm_state_notify(1, state); + vm_was_suspended = false; + return ret; } void vm_start(void) @@ -726,11 +749,20 @@ void vm_start(void) } } +void vm_resume(RunState state) +{ + if (runstate_is_live(state)) { + vm_start(); + } else { + runstate_set(state); + } +} + /* does a state transition even if the VM is already stopped, current state is forgotten forever */ int vm_stop_force_state(RunState state) { - if (runstate_is_running()) { + if (runstate_is_live(runstate_get())) { return vm_stop(state); } else { int ret; diff --git a/system/dirtylimit.c b/system/dirtylimit.c index 495c7a7082f..ab20da34bb9 100644 --- a/system/dirtylimit.c +++ b/system/dirtylimit.c @@ -25,8 +25,6 @@ #include "sysemu/kvm.h" #include "trace.h" #include "migration/misc.h" -#include "migration/migration.h" -#include "migration/options.h" /* * Dirtylimit stop working if dirty page rate error @@ -78,14 +76,13 @@ static bool dirtylimit_quit; static void vcpu_dirty_rate_stat_collect(void) { - MigrationState *s = migrate_get_current(); VcpuStat stat; int i = 0; int64_t period = DIRTYLIMIT_CALC_TIME_MS; if (migrate_dirty_limit() && - migration_is_active(s)) { - period = s->parameters.x_vcpu_dirty_limit_period; + migration_is_active()) { + period = migrate_vcpu_dirty_limit_period(); } /* calculate vcpu dirtyrate */ @@ -148,9 +145,9 @@ void vcpu_dirty_rate_stat_stop(void) { qatomic_set(&vcpu_dirty_rate_stat->running, 0); dirtylimit_state_unlock(); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_thread_join(&vcpu_dirty_rate_stat->thread); - qemu_mutex_lock_iothread(); + bql_lock(); dirtylimit_state_lock(); } @@ -450,10 +447,8 @@ static void dirtylimit_cleanup(void) */ static bool dirtylimit_is_allowed(void) { - MigrationState *ms = migrate_get_current(); - - if (migration_is_running(ms->state) && - (!qemu_thread_is_self(&ms->thread)) && + if (migration_is_running() && + !migration_thread_is_self() && migrate_dirty_limit() && dirtylimit_in_service()) { return false; diff --git a/system/dma-helpers.c b/system/dma-helpers.c index 36211acc7ea..9b221cf94e2 100644 --- a/system/dma-helpers.c +++ b/system/dma-helpers.c @@ -119,13 +119,15 @@ static void dma_blk_cb(void *opaque, int ret) trace_dma_blk_cb(dbs, ret); - aio_context_acquire(ctx); + /* DMAAIOCB is not thread-safe and must be accessed only from dbs->ctx */ + assert(ctx == qemu_get_current_aio_context()); + dbs->acb = NULL; dbs->offset += dbs->iov.size; if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) { dma_complete(dbs, ret); - goto out; + return; } dma_blk_unmap(dbs); @@ -168,7 +170,7 @@ static void dma_blk_cb(void *opaque, int ret) trace_dma_map_wait(dbs); dbs->bh = aio_bh_new(ctx, reschedule_dma, dbs); cpu_register_map_client(dbs->bh); - goto out; + return; } if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) { @@ -179,8 +181,6 @@ static void dma_blk_cb(void *opaque, int ret) dbs->acb = dbs->io_func(dbs->offset, &dbs->iov, dma_blk_cb, dbs, dbs->io_func_opaque); assert(dbs->acb); -out: - aio_context_release(ctx); } static void dma_aio_cancel(BlockAIOCB *acb) diff --git a/system/globals.c b/system/globals.c index e83b5428d12..e3535842010 100644 --- a/system/globals.c +++ b/system/globals.c @@ -36,14 +36,10 @@ int display_opengl; const char* keyboard_layout; bool 
enable_mlock; bool enable_cpu_pm; -int nb_nics; -NICInfo nd_table[MAX_NICS]; int autostart = 1; int vga_interface_type = VGA_NONE; bool vga_interface_created; Chardev *parallel_hds[MAX_PARALLEL_PORTS]; -int win2k_install_hack; -int fd_bootchk = 1; int graphic_rotate; QEMUOptionRom option_rom[MAX_OPTION_ROMS]; int nb_option_roms; diff --git a/system/ioport.c b/system/ioport.c index 1824aa808c7..fd551d0375e 100644 --- a/system/ioport.c +++ b/system/ioport.c @@ -133,6 +133,7 @@ void portio_list_init(PortioList *piolist, piolist->nr = 0; piolist->regions = g_new0(MemoryRegion *, n); piolist->address_space = NULL; + piolist->addr = 0; piolist->opaque = opaque; piolist->owner = owner; piolist->name = name; @@ -181,13 +182,13 @@ static uint64_t portio_read(void *opaque, hwaddr addr, unsigned size) data = ((uint64_t)1 << (size * 8)) - 1; if (mrp) { - data = mrp->read(mrpio->portio_opaque, mrp->base + addr); + data = mrp->read(mrpio->portio_opaque, mrpio->mr.addr + addr); } else if (size == 2) { mrp = find_portio(mrpio, addr, 1, false); if (mrp) { - data = mrp->read(mrpio->portio_opaque, mrp->base + addr); + data = mrp->read(mrpio->portio_opaque, mrpio->mr.addr + addr); if (addr + 1 < mrp->offset + mrp->len) { - data |= mrp->read(mrpio->portio_opaque, mrp->base + addr + 1) << 8; + data |= mrp->read(mrpio->portio_opaque, mrpio->mr.addr + addr + 1) << 8; } else { data |= 0xff00; } @@ -203,13 +204,13 @@ static void portio_write(void *opaque, hwaddr addr, uint64_t data, const MemoryRegionPortio *mrp = find_portio(mrpio, addr, size, true); if (mrp) { - mrp->write(mrpio->portio_opaque, mrp->base + addr, data); + mrp->write(mrpio->portio_opaque, mrpio->mr.addr + addr, data); } else if (size == 2) { mrp = find_portio(mrpio, addr, 1, true); if (mrp) { - mrp->write(mrpio->portio_opaque, mrp->base + addr, data & 0xff); + mrp->write(mrpio->portio_opaque, mrpio->mr.addr + addr, data & 0xff); if (addr + 1 < mrp->offset + mrp->len) { - mrp->write(mrpio->portio_opaque, mrp->base + addr + 1, data >> 8); + mrp->write(mrpio->portio_opaque, mrpio->mr.addr + addr + 1, data >> 8); } } } @@ -244,7 +245,6 @@ static void portio_list_add_1(PortioList *piolist, /* Adjust the offsets to all be zero-based for the region. */ for (i = 0; i < count; ++i) { mrpio->ports[i].offset -= off_low; - mrpio->ports[i].base = start + off_low; } /* @@ -283,6 +283,7 @@ void portio_list_add(PortioList *piolist, unsigned int off_low, off_high, off_last, count; piolist->address_space = address_space; + piolist->addr = start; /* Handle the first entry specially. 
*/ off_last = off_low = pio_start->offset; @@ -323,6 +324,32 @@ void portio_list_del(PortioList *piolist) } } +void portio_list_set_enabled(PortioList *piolist, bool enabled) +{ + unsigned i; + + for (i = 0; i < piolist->nr; ++i) { + memory_region_set_enabled(piolist->regions[i], enabled); + } +} + +void portio_list_set_address(PortioList *piolist, uint32_t addr) +{ + MemoryRegionPortioList *mrpio; + unsigned i, j; + + for (i = 0; i < piolist->nr; ++i) { + mrpio = container_of(piolist->regions[i], MemoryRegionPortioList, mr); + memory_region_set_address(&mrpio->mr, + mrpio->mr.addr - piolist->addr + addr); + for (j = 0; mrpio->ports[j].size; ++j) { + mrpio->ports[j].offset += addr - piolist->addr; + } + } + + piolist->addr = addr; +} + static void memory_region_portio_list_finalize(Object *obj) { MemoryRegionPortioList *mrpio = MEMORY_REGION_PORTIO_LIST(obj); diff --git a/system/memory.c b/system/memory.c index 798b6c0a171..a229a79988f 100644 --- a/system/memory.c +++ b/system/memory.c @@ -1119,7 +1119,7 @@ void memory_region_transaction_commit(void) AddressSpace *as; assert(memory_region_transaction_depth); - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); --memory_region_transaction_depth; if (!memory_region_transaction_depth) { @@ -1546,16 +1546,17 @@ void memory_region_init_io(MemoryRegion *mr, mr->terminates = true; } -void memory_region_init_ram_nomigrate(MemoryRegion *mr, +bool memory_region_init_ram_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, Error **errp) { - memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp); + return memory_region_init_ram_flags_nomigrate(mr, owner, name, + size, 0, errp); } -void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, +bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1572,10 +1573,12 @@ void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } -void memory_region_init_resizeable_ram(MemoryRegion *mr, +bool memory_region_init_resizeable_ram(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1596,11 +1599,13 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } #ifdef CONFIG_POSIX -void memory_region_init_ram_from_file(MemoryRegion *mr, +bool memory_region_init_ram_from_file(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1623,10 +1628,12 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } -void memory_region_init_ram_from_fd(MemoryRegion *mr, +bool memory_region_init_ram_from_fd(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1647,7 +1654,9 @@ void memory_region_init_ram_from_fd(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } #endif @@ -1698,17 +1707,22 @@ void memory_region_init_alias(MemoryRegion *mr, mr->alias_offset = offset; } -void memory_region_init_rom_nomigrate(MemoryRegion *mr, +bool memory_region_init_rom_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, Error **errp) { - memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp); + if 
(!memory_region_init_ram_flags_nomigrate(mr, owner, name, + size, 0, errp)) { + return false; + } mr->readonly = true; + + return true; } -void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, +bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr, Object *owner, const MemoryRegionOps *ops, void *opaque, @@ -1729,7 +1743,9 @@ void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } void memory_region_init_iommu(void *_iommu_mr, @@ -3562,19 +3578,16 @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled) } } -void memory_region_init_ram(MemoryRegion *mr, +bool memory_region_init_ram(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, Error **errp) { DeviceState *owner_dev; - Error *err = NULL; - memory_region_init_ram_nomigrate(mr, owner, name, size, &err); - if (err) { - error_propagate(errp, err); - return; + if (!memory_region_init_ram_nomigrate(mr, owner, name, size, errp)) { + return false; } /* This will assert if owner is neither NULL nor a DeviceState. * We only want the owner here for the purposes of defining a @@ -3584,21 +3597,20 @@ void memory_region_init_ram(MemoryRegion *mr, */ owner_dev = DEVICE(owner); vmstate_register_ram(mr, owner_dev); + + return true; } -void memory_region_init_rom(MemoryRegion *mr, +bool memory_region_init_rom(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, Error **errp) { DeviceState *owner_dev; - Error *err = NULL; - memory_region_init_rom_nomigrate(mr, owner, name, size, &err); - if (err) { - error_propagate(errp, err); - return; + if (!memory_region_init_rom_nomigrate(mr, owner, name, size, errp)) { + return false; } /* This will assert if owner is neither NULL nor a DeviceState. * We only want the owner here for the purposes of defining a @@ -3608,9 +3620,11 @@ void memory_region_init_rom(MemoryRegion *mr, */ owner_dev = DEVICE(owner); vmstate_register_ram(mr, owner_dev); + + return true; } -void memory_region_init_rom_device(MemoryRegion *mr, +bool memory_region_init_rom_device(MemoryRegion *mr, Object *owner, const MemoryRegionOps *ops, void *opaque, @@ -3619,13 +3633,10 @@ void memory_region_init_rom_device(MemoryRegion *mr, Error **errp) { DeviceState *owner_dev; - Error *err = NULL; - memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque, - name, size, &err); - if (err) { - error_propagate(errp, err); - return; + if (!memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque, + name, size, errp)) { + return false; } /* This will assert if owner is neither NULL nor a DeviceState. 
* We only want the owner here for the purposes of defining a @@ -3635,6 +3646,8 @@ void memory_region_init_rom_device(MemoryRegion *mr, */ owner_dev = DEVICE(owner); vmstate_register_ram(mr, owner_dev); + + return true; } /* diff --git a/memory_ldst.c.inc b/system/memory_ldst.c.inc similarity index 97% rename from memory_ldst.c.inc rename to system/memory_ldst.c.inc index 84b868f2946..0e6f3940a9a 100644 --- a/memory_ldst.c.inc +++ b/system/memory_ldst.c.inc @@ -61,7 +61,7 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -130,7 +130,7 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -186,7 +186,7 @@ uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -234,7 +234,7 @@ static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -295,7 +295,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -339,7 +339,7 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -391,7 +391,7 @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -435,7 +435,7 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -499,7 +499,7 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } diff --git a/system/meson.build b/system/meson.build index 3a64dd89de1..25e21172505 100644 --- a/system/meson.build +++ b/system/meson.build @@ -33,4 +33,6 @@ endif system_ss.add(when: seccomp, if_true: files('qemu-seccomp.c')) system_ss.add(when: fdt, if_true: files('device_tree.c')) -system_ss.add(when: 'CONFIG_LINUX', if_true: files('async-teardown.c')) +if host_os == 'linux' + system_ss.add(files('async-teardown.c')) +endif diff --git a/system/physmem.c b/system/physmem.c index a63853a7bc9..a4fe3d2bf89 100644 --- a/system/physmem.c +++ b/system/physmem.c @@ -35,7 +35,7 @@ #include "hw/qdev-core.h" #include "hw/qdev-properties.h" #include "hw/boards.h" -#include "hw/xen/xen.h" +#include "sysemu/xen.h" #include "sysemu/kvm.h" #include "sysemu/tcg.h" #include "sysemu/qtest.h" @@ -799,7 +799,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr) abort(); found: - /* It is safe to write mru_block outside the iothread lock. This + /* It is safe to write mru_block outside the BQL. 
This * is what happens: * * mru_block = xxx @@ -819,7 +819,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr) return block; } -static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length) +void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length) { CPUState *cpu; ram_addr_t start1; @@ -881,8 +881,8 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size); } - if (dirty && tcg_enabled()) { - tlb_reset_dirty_range_all(start, length); + if (dirty) { + cpu_physical_memory_dirty_bits_cleared(start, length); } return dirty; @@ -929,9 +929,7 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty } } - if (tcg_enabled()) { - tlb_reset_dirty_range_all(start, length); - } + cpu_physical_memory_dirty_bits_cleared(start, length); memory_region_clear_dirty_bitmap(mr, offset, length); @@ -1597,7 +1595,7 @@ int qemu_ram_get_fd(RAMBlock *rb) return rb->fd; } -/* Called with iothread lock held. */ +/* Called with the BQL held. */ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) { RAMBlock *block; @@ -1625,7 +1623,7 @@ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) } } -/* Called with iothread lock held. */ +/* Called with the BQL held. */ void qemu_ram_unset_idstr(RAMBlock *block) { /* FIXME: arch_init.c assumes that this is not called throughout @@ -1680,7 +1678,8 @@ int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp) assert(block); - newsize = HOST_PAGE_ALIGN(newsize); + newsize = TARGET_PAGE_ALIGN(newsize); + newsize = REAL_HOST_PAGE_ALIGN(newsize); if (block->used_length == newsize) { /* @@ -1916,7 +1915,9 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, return NULL; } - size = HOST_PAGE_ALIGN(size); + size = TARGET_PAGE_ALIGN(size); + size = REAL_HOST_PAGE_ALIGN(size); + file_size = get_file_size(fd); if (file_size > offset && file_size < (offset + size)) { error_setg(errp, "backing store size 0x%" PRIx64 @@ -2014,13 +2015,17 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, { RAMBlock *new_block; Error *local_err = NULL; + int align; assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC | RAM_NORESERVE)) == 0); assert(!host ^ (ram_flags & RAM_PREALLOC)); - size = HOST_PAGE_ALIGN(size); - max_size = HOST_PAGE_ALIGN(max_size); + align = qemu_real_host_page_size(); + align = MAX(align, TARGET_PAGE_SIZE); + size = ROUND_UP(size, align); + max_size = ROUND_UP(max_size, align); + new_block = g_malloc0(sizeof(*new_block)); new_block->mr = mr; new_block->resized = resized; @@ -2154,10 +2159,8 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) * * Called within RCU critical section. */ -void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr) +void *qemu_map_ram_ptr(RAMBlock *block, ram_addr_t addr) { - RAMBlock *block = ram_block; - if (block == NULL) { block = qemu_get_ram_block(addr); addr -= block->offset; @@ -2182,10 +2185,9 @@ void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr) * * Called within RCU critical section. 
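 * An illustrative calling pattern (hypothetical values, for clarity only):
 *
 *     RCU_READ_LOCK_GUARD();
 *     ptr = qemu_ram_ptr_length(block, addr, &len, false);
 *
 * i.e. the caller, not this helper, holds the RCU read lock for as long
 * as the returned pointer is in use.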
*/ -static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr, +static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr, hwaddr *size, bool lock) { - RAMBlock *block = ram_block; if (*size == 0) { return NULL; } @@ -2639,8 +2641,8 @@ bool prepare_mmio_access(MemoryRegion *mr) { bool release_lock = false; - if (!qemu_mutex_iothread_locked()) { - qemu_mutex_lock_iothread(); + if (!bql_locked()) { + bql_lock(); release_lock = true; } if (mr->flush_coalesced_mmio) { @@ -2677,53 +2679,69 @@ static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs, return false; } +static MemTxResult flatview_write_continue_step(MemTxAttrs attrs, + const uint8_t *buf, + hwaddr len, hwaddr mr_addr, + hwaddr *l, MemoryRegion *mr) +{ + if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { + return MEMTX_ACCESS_ERROR; + } + + if (!memory_access_is_direct(mr, true)) { + uint64_t val; + MemTxResult result; + bool release_lock = prepare_mmio_access(mr); + + *l = memory_access_size(mr, *l, mr_addr); + /* + * XXX: could force current_cpu to NULL to avoid + * potential bugs + */ + + /* + * Assure Coverity (and ourselves) that we are not going to OVERRUN + * the buffer by following ldn_he_p(). + */ +#ifdef QEMU_STATIC_ANALYSIS + assert((*l == 1 && len >= 1) || + (*l == 2 && len >= 2) || + (*l == 4 && len >= 4) || + (*l == 8 && len >= 8)); +#endif + val = ldn_he_p(buf, *l); + result = memory_region_dispatch_write(mr, mr_addr, val, + size_memop(*l), attrs); + if (release_lock) { + bql_unlock(); + } + + return result; + } else { + /* RAM case */ + uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, + false); + + memmove(ram_ptr, buf, *l); + invalidate_and_set_dirty(mr, mr_addr, *l); + + return MEMTX_OK; + } +} + /* Called within RCU critical section. */ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, MemTxAttrs attrs, const void *ptr, - hwaddr len, hwaddr addr1, + hwaddr len, hwaddr mr_addr, hwaddr l, MemoryRegion *mr) { - uint8_t *ram_ptr; - uint64_t val; MemTxResult result = MEMTX_OK; - bool release_lock = false; const uint8_t *buf = ptr; for (;;) { - if (!flatview_access_allowed(mr, attrs, addr1, l)) { - result |= MEMTX_ACCESS_ERROR; - /* Keep going. */ - } else if (!memory_access_is_direct(mr, true)) { - release_lock |= prepare_mmio_access(mr); - l = memory_access_size(mr, l, addr1); - /* XXX: could force current_cpu to NULL to avoid - potential bugs */ - - /* - * Assure Coverity (and ourselves) that we are not going to OVERRUN - * the buffer by following ldn_he_p(). 
- */ -#ifdef QEMU_STATIC_ANALYSIS - assert((l == 1 && len >= 1) || - (l == 2 && len >= 2) || - (l == 4 && len >= 4) || - (l == 8 && len >= 8)); -#endif - val = ldn_he_p(buf, l); - result |= memory_region_dispatch_write(mr, addr1, val, - size_memop(l), attrs); - } else { - /* RAM case */ - ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); - memmove(ram_ptr, buf, l); - invalidate_and_set_dirty(mr, addr1, l); - } - - if (release_lock) { - qemu_mutex_unlock_iothread(); - release_lock = false; - } + result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, + mr); len -= l; buf += l; @@ -2734,7 +2752,7 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, } l = len; - mr = flatview_translate(fv, addr, &addr1, &l, true, attrs); + mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); } return result; @@ -2745,63 +2763,76 @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, const void *buf, hwaddr len) { hwaddr l; - hwaddr addr1; + hwaddr mr_addr; MemoryRegion *mr; l = len; - mr = flatview_translate(fv, addr, &addr1, &l, true, attrs); + mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); if (!flatview_access_allowed(mr, attrs, addr, len)) { return MEMTX_ACCESS_ERROR; } return flatview_write_continue(fv, addr, attrs, buf, len, - addr1, l, mr); + mr_addr, l, mr); +} + +static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf, + hwaddr len, hwaddr mr_addr, + hwaddr *l, + MemoryRegion *mr) +{ + if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { + return MEMTX_ACCESS_ERROR; + } + + if (!memory_access_is_direct(mr, false)) { + /* I/O case */ + uint64_t val; + MemTxResult result; + bool release_lock = prepare_mmio_access(mr); + + *l = memory_access_size(mr, *l, mr_addr); + result = memory_region_dispatch_read(mr, mr_addr, &val, size_memop(*l), + attrs); + + /* + * Assure Coverity (and ourselves) that we are not going to OVERRUN + * the buffer by following stn_he_p(). + */ +#ifdef QEMU_STATIC_ANALYSIS + assert((*l == 1 && len >= 1) || + (*l == 2 && len >= 2) || + (*l == 4 && len >= 4) || + (*l == 8 && len >= 8)); +#endif + stn_he_p(buf, *l, val); + + if (release_lock) { + bql_unlock(); + } + return result; + } else { + /* RAM case */ + uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, + false); + + memcpy(buf, ram_ptr, *l); + + return MEMTX_OK; + } } /* Called within RCU critical section. */ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, MemTxAttrs attrs, void *ptr, - hwaddr len, hwaddr addr1, hwaddr l, + hwaddr len, hwaddr mr_addr, hwaddr l, MemoryRegion *mr) { - uint8_t *ram_ptr; - uint64_t val; MemTxResult result = MEMTX_OK; - bool release_lock = false; uint8_t *buf = ptr; fuzz_dma_read_cb(addr, len, mr); for (;;) { - if (!flatview_access_allowed(mr, attrs, addr1, l)) { - result |= MEMTX_ACCESS_ERROR; - /* Keep going. */ - } else if (!memory_access_is_direct(mr, false)) { - /* I/O case */ - release_lock |= prepare_mmio_access(mr); - l = memory_access_size(mr, l, addr1); - result |= memory_region_dispatch_read(mr, addr1, &val, - size_memop(l), attrs); - - /* - * Assure Coverity (and ourselves) that we are not going to OVERRUN - * the buffer by following stn_he_p(). 
- */ -#ifdef QEMU_STATIC_ANALYSIS - assert((l == 1 && len >= 1) || - (l == 2 && len >= 2) || - (l == 4 && len >= 4) || - (l == 8 && len >= 8)); -#endif - stn_he_p(buf, l, val); - } else { - /* RAM case */ - ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); - memcpy(buf, ram_ptr, l); - } - - if (release_lock) { - qemu_mutex_unlock_iothread(); - release_lock = false; - } + result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); len -= l; buf += l; @@ -2812,7 +2843,7 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, } l = len; - mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); + mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); } return result; @@ -2823,16 +2854,16 @@ static MemTxResult flatview_read(FlatView *fv, hwaddr addr, MemTxAttrs attrs, void *buf, hwaddr len) { hwaddr l; - hwaddr addr1; + hwaddr mr_addr; MemoryRegion *mr; l = len; - mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); + mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); if (!flatview_access_allowed(mr, attrs, addr, len)) { return MEMTX_ACCESS_ERROR; } return flatview_read_continue(fv, addr, attrs, buf, len, - addr1, l, mr); + mr_addr, l, mr); } MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, @@ -3337,6 +3368,59 @@ static inline MemoryRegion *address_space_translate_cached( return section.mr; } +/* Called within RCU critical section. */ +static MemTxResult address_space_write_continue_cached(MemTxAttrs attrs, + const void *ptr, + hwaddr len, + hwaddr mr_addr, + hwaddr l, + MemoryRegion *mr) +{ + MemTxResult result = MEMTX_OK; + const uint8_t *buf = ptr; + + for (;;) { + result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, + mr); + + len -= l; + buf += l; + mr_addr += l; + + if (!len) { + break; + } + + l = len; + } + + return result; +} + +/* Called within RCU critical section. */ +static MemTxResult address_space_read_continue_cached(MemTxAttrs attrs, + void *ptr, hwaddr len, + hwaddr mr_addr, hwaddr l, + MemoryRegion *mr) +{ + MemTxResult result = MEMTX_OK; + uint8_t *buf = ptr; + + for (;;) { + result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); + len -= l; + buf += l; + mr_addr += l; + + if (!len) { + break; + } + l = len; + } + + return result; +} + /* Called from RCU critical section. address_space_read_cached uses this * out of line function when the target is an MMIO or IOMMU region. */ @@ -3344,15 +3428,14 @@ MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, void *buf, hwaddr len) { - hwaddr addr1, l; + hwaddr mr_addr, l; MemoryRegion *mr; l = len; - mr = address_space_translate_cached(cache, addr, &addr1, &l, false, + mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false, MEMTXATTRS_UNSPECIFIED); - return flatview_read_continue(cache->fv, - addr, MEMTXATTRS_UNSPECIFIED, buf, len, - addr1, l, mr); + return address_space_read_continue_cached(MEMTXATTRS_UNSPECIFIED, + buf, len, mr_addr, l, mr); } /* Called from RCU critical section. 
address_space_write_cached uses this @@ -3362,15 +3445,14 @@ MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, const void *buf, hwaddr len) { - hwaddr addr1, l; + hwaddr mr_addr, l; MemoryRegion *mr; l = len; - mr = address_space_translate_cached(cache, addr, &addr1, &l, true, + mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true, MEMTXATTRS_UNSPECIFIED); - return flatview_write_continue(cache->fv, - addr, MEMTXATTRS_UNSPECIFIED, buf, len, - addr1, l, mr); + return address_space_write_continue_cached(MEMTXATTRS_UNSPECIFIED, + buf, len, mr_addr, l, mr); } #define ARG1_DECL MemoryRegionCache *cache @@ -3431,11 +3513,6 @@ size_t qemu_target_page_size(void) return TARGET_PAGE_SIZE; } -int qemu_target_page_mask(void) -{ - return TARGET_PAGE_MASK; -} - int qemu_target_page_bits(void) { return TARGET_PAGE_BITS; @@ -3500,16 +3577,15 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) uint8_t *host_startaddr = rb->host + start; if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { - error_report("ram_block_discard_range: Unaligned start address: %p", - host_startaddr); + error_report("%s: Unaligned start address: %p", + __func__, host_startaddr); goto err; } if ((start + length) <= rb->max_length) { bool need_madvise, need_fallocate; if (!QEMU_IS_ALIGNED(length, rb->page_size)) { - error_report("ram_block_discard_range: Unaligned length: %zx", - length); + error_report("%s: Unaligned length: %zx", __func__, length); goto err; } @@ -3520,7 +3596,7 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) * fallocate works on hugepages and shmem * shared anonymous memory requires madvise REMOVE */ - need_madvise = (rb->page_size == qemu_host_page_size); + need_madvise = (rb->page_size == qemu_real_host_page_size()); need_fallocate = rb->fd != -1; if (need_fallocate) { /* For a file, this causes the area of the file to be zero'd @@ -3533,8 +3609,8 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) * proper error message. */ if (rb->flags & RAM_READONLY_FD) { - error_report("ram_block_discard_range: Discarding RAM" - " with readonly files is not supported"); + error_report("%s: Discarding RAM with readonly files is not" + " supported", __func__); goto err; } @@ -3549,27 +3625,26 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) * file. 
*/ if (!qemu_ram_is_shared(rb)) { - warn_report_once("ram_block_discard_range: Discarding RAM" + warn_report_once("%s: Discarding RAM" " in private file mappings is possibly" " dangerous, because it will modify the" " underlying file and will affect other" - " users of the file"); + " users of the file", __func__); } ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, start, length); if (ret) { ret = -errno; - error_report("ram_block_discard_range: Failed to fallocate " - "%s:%" PRIx64 " +%zx (%d)", - rb->idstr, start, length, ret); + error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)", + __func__, rb->idstr, start, length, ret); goto err; } #else ret = -ENOSYS; - error_report("ram_block_discard_range: fallocate not available/file" + error_report("%s: fallocate not available/file" "%s:%" PRIx64 " +%zx (%d)", - rb->idstr, start, length, ret); + __func__, rb->idstr, start, length, ret); goto err; #endif } @@ -3587,25 +3662,23 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) } if (ret) { ret = -errno; - error_report("ram_block_discard_range: Failed to discard range " + error_report("%s: Failed to discard range " "%s:%" PRIx64 " +%zx (%d)", - rb->idstr, start, length, ret); + __func__, rb->idstr, start, length, ret); goto err; } #else ret = -ENOSYS; - error_report("ram_block_discard_range: MADVISE not available" - "%s:%" PRIx64 " +%zx (%d)", - rb->idstr, start, length, ret); + error_report("%s: MADVISE not available %s:%" PRIx64 " +%zx (%d)", + __func__, rb->idstr, start, length, ret); goto err; #endif } trace_ram_block_discard_range(rb->idstr, host_startaddr, length, need_madvise, need_fallocate, ret); } else { - error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64 - "/%zx/" RAM_ADDR_FMT")", - rb->idstr, start, length, rb->max_length); + error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")", + __func__, rb->idstr, start, length, rb->max_length); } err: diff --git a/system/qdev-monitor.c b/system/qdev-monitor.c index a13db763e5d..840177d19f0 100644 --- a/system/qdev-monitor.c +++ b/system/qdev-monitor.c @@ -38,7 +38,6 @@ #include "qemu/option_int.h" #include "sysemu/block-backend.h" #include "migration/misc.h" -#include "migration/migration.h" #include "qemu/cutils.h" #include "hw/qdev-properties.h" #include "hw/clock.h" @@ -744,7 +743,6 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) } #define qdev_printf(fmt, ...) monitor_printf(mon, "%*s" fmt, indent, "", ## __VA_ARGS__) -static void qbus_print(Monitor *mon, BusState *bus, int indent); static void qdev_print_props(Monitor *mon, DeviceState *dev, Property *props, int indent) @@ -784,13 +782,9 @@ static void bus_print_dev(BusState *bus, Monitor *mon, DeviceState *dev, int ind static void qdev_print(Monitor *mon, DeviceState *dev, int indent) { ObjectClass *class; - BusState *child; NamedGPIOList *ngl; NamedClockList *ncl; - qdev_printf("dev: %s, id \"%s\"\n", object_get_typename(OBJECT(dev)), - dev->id ? dev->id : ""); - indent += 2; QLIST_FOREACH(ngl, &dev->gpios, node) { if (ngl->num_in) { qdev_printf("gpio-in \"%s\" %d\n", ngl->name ? 
ngl->name : "", @@ -814,12 +808,9 @@ static void qdev_print(Monitor *mon, DeviceState *dev, int indent) class = object_class_get_parent(class); } while (class != object_class_by_name(TYPE_DEVICE)); bus_print_dev(dev->parent_bus, mon, dev, indent); - QLIST_FOREACH(child, &dev->child_bus, sibling) { - qbus_print(mon, child, indent); - } } -static void qbus_print(Monitor *mon, BusState *bus, int indent) +static void qbus_print(Monitor *mon, BusState *bus, int indent, bool details) { BusChild *kid; @@ -827,16 +818,27 @@ static void qbus_print(Monitor *mon, BusState *bus, int indent) indent += 2; qdev_printf("type %s\n", object_get_typename(OBJECT(bus))); QTAILQ_FOREACH(kid, &bus->children, sibling) { + BusState *child_bus; DeviceState *dev = kid->child; - qdev_print(mon, dev, indent); + qdev_printf("dev: %s, id \"%s\"\n", object_get_typename(OBJECT(dev)), + dev->id ? dev->id : ""); + if (details) { + qdev_print(mon, dev, indent + 2); + } + QLIST_FOREACH(child_bus, &dev->child_bus, sibling) { + qbus_print(mon, child_bus, indent + 2, details); + } } } #undef qdev_printf void hmp_info_qtree(Monitor *mon, const QDict *qdict) { - if (sysbus_get_default()) - qbus_print(mon, sysbus_get_default(), 0); + bool details = !qdict_get_try_bool(qdict, "brief", false); + + if (sysbus_get_default()) { + qbus_print(mon, sysbus_get_default(), 0, details); + } } void hmp_info_qdm(Monitor *mon, const QDict *qdict) @@ -858,19 +860,18 @@ void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp) return; } dev = qdev_device_add(opts, errp); - - /* - * Drain all pending RCU callbacks. This is done because - * some bus related operations can delay a device removal - * (in this case this can happen if device is added and then - * removed due to a configuration error) - * to a RCU callback, but user might expect that this interface - * will finish its job completely once qmp command returns result - * to the user - */ - drain_call_rcu(); - if (!dev) { + /* + * Drain all pending RCU callbacks. 
This is done because + * some bus related operations can delay a device removal + * (in this case this can happen if device is added and then + * removed due to a configuration error) + * to a RCU callback, but user might expect that this interface + * will finish its job completely once qmp command returns result + * to the user + */ + drain_call_rcu(); + qemu_opts_del(opts); return; } @@ -890,7 +891,7 @@ static DeviceState *find_device_state(const char *id, Error **errp) dev = (DeviceState *)object_dynamic_cast(obj, TYPE_DEVICE); if (!dev) { - error_setg(errp, "%s is not a hotpluggable device", id); + error_setg(errp, "%s is not a device", id); return NULL; } diff --git a/system/qemu-seccomp.c b/system/qemu-seccomp.c index 4d7439e7f71..98ffce075c3 100644 --- a/system/qemu-seccomp.c +++ b/system/qemu-seccomp.c @@ -74,7 +74,7 @@ const struct scmp_arg_cmp sched_setscheduler_arg[] = { #define RULE_CLONE_FLAG(flag) \ { SCMP_SYS(clone), QEMU_SECCOMP_SET_SPAWN, \ - ARRAY_SIZE(clone_arg ## flag), clone_arg ## flag, SCMP_ACT_TRAP } + ARRAY_SIZE(clone_arg ## flag), clone_arg ## flag, SCMP_ACT_ERRNO(EPERM) } /* If no CLONE_* flags are set, except CSIGNAL, deny */ const struct scmp_arg_cmp clone_arg_none[] = { @@ -214,13 +214,13 @@ static const struct QemuSeccompSyscall denylist[] = { 0, NULL, SCMP_ACT_TRAP }, /* spawn */ { SCMP_SYS(fork), QEMU_SECCOMP_SET_SPAWN, - 0, NULL, SCMP_ACT_TRAP }, + 0, NULL, SCMP_ACT_ERRNO(EPERM) }, { SCMP_SYS(vfork), QEMU_SECCOMP_SET_SPAWN, - 0, NULL, SCMP_ACT_TRAP }, + 0, NULL, SCMP_ACT_ERRNO(EPERM) }, { SCMP_SYS(execve), QEMU_SECCOMP_SET_SPAWN, - 0, NULL, SCMP_ACT_TRAP }, + 0, NULL, SCMP_ACT_ERRNO(EPERM) }, { SCMP_SYS(clone), QEMU_SECCOMP_SET_SPAWN, - ARRAY_SIZE(clone_arg_none), clone_arg_none, SCMP_ACT_TRAP }, + ARRAY_SIZE(clone_arg_none), clone_arg_none, SCMP_ACT_ERRNO(EPERM) }, RULE_CLONE_FLAG(CLONE_VM), RULE_CLONE_FLAG(CLONE_FS), RULE_CLONE_FLAG(CLONE_FILES), diff --git a/system/qtest.c b/system/qtest.c index 7964f0b2488..6da58b3874e 100644 --- a/system/qtest.c +++ b/system/qtest.c @@ -21,6 +21,7 @@ #include "exec/tswap.h" #include "hw/qdev-core.h" #include "hw/irq.h" +#include "hw/core/cpu.h" #include "qemu/accel.h" #include "sysemu/cpu-timers.h" #include "qemu/config-file.h" diff --git a/system/runstate.c b/system/runstate.c index ea9d6c2a32a..d6ab860ecaa 100644 --- a/system/runstate.c +++ b/system/runstate.c @@ -77,6 +77,7 @@ typedef struct { static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_PRELAUNCH, RUN_STATE_INMIGRATE }, + { RUN_STATE_PRELAUNCH, RUN_STATE_SUSPENDED }, { RUN_STATE_DEBUG, RUN_STATE_RUNNING }, { RUN_STATE_DEBUG, RUN_STATE_FINISH_MIGRATE }, @@ -108,6 +109,7 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_PAUSED, RUN_STATE_POSTMIGRATE }, { RUN_STATE_PAUSED, RUN_STATE_PRELAUNCH }, { RUN_STATE_PAUSED, RUN_STATE_COLO}, + { RUN_STATE_PAUSED, RUN_STATE_SUSPENDED}, { RUN_STATE_POSTMIGRATE, RUN_STATE_RUNNING }, { RUN_STATE_POSTMIGRATE, RUN_STATE_FINISH_MIGRATE }, @@ -131,6 +133,7 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_RESTORE_VM, RUN_STATE_RUNNING }, { RUN_STATE_RESTORE_VM, RUN_STATE_PRELAUNCH }, + { RUN_STATE_RESTORE_VM, RUN_STATE_SUSPENDED }, { RUN_STATE_COLO, RUN_STATE_RUNNING }, { RUN_STATE_COLO, RUN_STATE_PRELAUNCH }, @@ -149,6 +152,7 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_RUNNING, RUN_STATE_COLO}, { RUN_STATE_SAVE_VM, RUN_STATE_RUNNING }, + { RUN_STATE_SAVE_VM, RUN_STATE_SUSPENDED }, { RUN_STATE_SHUTDOWN, 
RUN_STATE_PAUSED }, { RUN_STATE_SHUTDOWN, RUN_STATE_FINISH_MIGRATE }, @@ -161,6 +165,10 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_SUSPENDED, RUN_STATE_FINISH_MIGRATE }, { RUN_STATE_SUSPENDED, RUN_STATE_PRELAUNCH }, { RUN_STATE_SUSPENDED, RUN_STATE_COLO}, + { RUN_STATE_SUSPENDED, RUN_STATE_PAUSED}, + { RUN_STATE_SUSPENDED, RUN_STATE_SAVE_VM }, + { RUN_STATE_SUSPENDED, RUN_STATE_RESTORE_VM }, + { RUN_STATE_SUSPENDED, RUN_STATE_SHUTDOWN }, { RUN_STATE_WATCHDOG, RUN_STATE_RUNNING }, { RUN_STATE_WATCHDOG, RUN_STATE_FINISH_MIGRATE }, @@ -234,15 +242,7 @@ bool runstate_needs_reset(void) StatusInfo *qmp_query_status(Error **errp) { StatusInfo *info = g_malloc0(sizeof(*info)); - AccelState *accel = current_accel(); - /* - * We ignore errors, which will happen if the accelerator - * is not TCG. "singlestep" is meaningless for other accelerators, - * so we will set the StatusInfo field to false for those. - */ - info->singlestep = object_property_get_bool(OBJECT(accel), - "one-insn-per-tb", NULL); info->running = runstate_is_running(); info->status = current_run_state; @@ -502,6 +502,7 @@ void qemu_system_reset(ShutdownCause reason) qapi_event_send_reset(shutdown_caused_by_guest(reason), reason); } cpu_synchronize_all_post_reset(); + vm_set_suspended(false); } /* @@ -810,7 +811,7 @@ void qemu_init_subsystems(void) qemu_init_cpu_list(); qemu_init_cpu_loop(); - qemu_mutex_lock_iothread(); + bql_lock(); atexit(qemu_run_exit_notifiers); diff --git a/system/vl.c b/system/vl.c index e18fa3ce465..c6442229824 100644 --- a/system/vl.c +++ b/system/vl.c @@ -96,7 +96,7 @@ #endif #include "sysemu/qtest.h" #ifdef CONFIG_TCG -#include "accel/tcg/perf.h" +#include "tcg/perf.h" #endif #include "disas/disas.h" @@ -181,7 +181,6 @@ static const char *log_file; static bool list_data_dirs; static const char *qtest_chrdev; static const char *qtest_log; -static bool opt_one_insn_per_tb; static int has_defaults = 1; static int default_audio = 1; @@ -1915,7 +1914,6 @@ static bool object_create_early(const char *type) * Allocation of large amounts of memory may delay * chardev initialization for too long, and trigger timeouts * on software that waits for a monitor socket to be created - * (e.g. libvirt). */ if (g_str_has_prefix(type, "memory-backend-")) { return false; @@ -1934,7 +1932,7 @@ static void qemu_apply_machine_options(QDict *qdict) } if (current_machine->smp.cpus > 1) { - replay_add_blocker("smp"); + replay_add_blocker("multiple CPUs"); } } @@ -2014,6 +2012,14 @@ static void qemu_create_late_backends(void) object_option_foreach_add(object_create_late); + /* + * Wait for any outstanding memory prealloc from created memory + * backends to complete. 
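+ * For example (hypothetical command line), a backend such as
+ *
+ *     -object memory-backend-ram,id=m0,size=16G,prealloc=on
+ *
+ * may have kicked off its preallocation threads asynchronously during
+ * object creation; qemu_finish_async_prealloc_mem() waits for them so
+ * that later setup sees fully populated backend memory.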
+ */ + if (!qemu_finish_async_prealloc_mem(&error_fatal)) { + exit(1); + } + if (tpm_init() < 0) { exit(1); } @@ -2112,7 +2118,6 @@ static void qemu_create_machine(QDict *qdict) } cpu_exec_init_all(); - page_size_init(); if (machine_class->hw_version) { qemu_set_hw_version(machine_class->hw_version); @@ -2274,8 +2279,7 @@ static void user_register_global_props(void) static int do_configure_icount(void *opaque, QemuOpts *opts, Error **errp) { - icount_configure(opts, errp); - return 0; + return !icount_configure(opts, errp); } static int accelerator_set_property(void *opaque, @@ -2312,19 +2316,7 @@ static int do_configure_accelerator(void *opaque, QemuOpts *opts, Error **errp) qemu_opt_foreach(opts, accelerator_set_property, accel, &error_fatal); - /* - * If legacy -singlestep option is set, honour it for TCG and - * silently ignore for any other accelerator (which is how this - * option has always behaved). - */ - if (opt_one_insn_per_tb) { - /* - * This will always succeed for TCG, and we want to ignore - * the error from trying to set a nonexistent property - * on any other accelerator. - */ - object_property_set_bool(OBJECT(accel), "one-insn-per-tb", true, NULL); - } + ret = accel_init_machine(accel, current_machine); if (ret < 0) { if (!qtest_with_kvm || ret != -ENOENT) { @@ -2661,7 +2653,7 @@ static void qemu_create_cli_devices(void) rom_reset_order_override(); } -static void qemu_machine_creation_done(void) +static bool qemu_machine_creation_done(Error **errp) { MachineState *machine = MACHINE(qdev_get_machine()); @@ -2684,15 +2676,15 @@ static void qemu_machine_creation_done(void) qdev_machine_creation_done(); - if (machine->cgs) { - /* - * Verify that Confidential Guest Support has actually been initialized - */ - assert(machine->cgs->ready); + if (machine->cgs && !machine->cgs->ready) { + error_setg(errp, "accelerator does not support confidential guest %s", + object_get_typename(OBJECT(machine->cgs))); + exit(1); } if (foreach_device_config(DEV_GDB, gdbserver_start) < 0) { - exit(1); + error_setg(errp, "could not start gdbserver"); + return false; } if (!vga_interface_created && !default_vga && vga_interface_type != VGA_NONE) { @@ -2700,6 +2692,7 @@ static void qemu_machine_creation_done(void) "type does not use that option; " "No VGA device has been created"); } + return true; } void qmp_x_exit_preconfig(Error **errp) @@ -2711,10 +2704,14 @@ void qmp_x_exit_preconfig(Error **errp) qemu_init_board(); qemu_create_cli_devices(); - qemu_machine_creation_done(); + if (!qemu_machine_creation_done(errp)) { + return; + } if (loadvm) { + RunState state = autostart ? 
RUN_STATE_RUNNING : runstate_get(); load_snapshot(loadvm, NULL, false, NULL, &error_fatal); + load_snapshot_resume(state); } if (replay_mode != REPLAY_MODE_NONE) { replay_vmstate_init(); @@ -2782,6 +2779,8 @@ void qemu_init(int argc, char **argv) error_init(argv[0]); qemu_init_exec_dir(argv[0]); + os_setup_limits(); + qemu_init_arch_modules(); qemu_init_subsystems(); @@ -2930,7 +2929,7 @@ void qemu_init(int argc, char **argv) optarg, FD_OPTS); break; case QEMU_OPTION_no_fd_bootchk: - fd_bootchk = 0; + qdict_put_str(machine_opts_dict, "fd-bootchk", "off"); break; case QEMU_OPTION_netdev: default_net = 0; @@ -3059,9 +3058,6 @@ void qemu_init(int argc, char **argv) case QEMU_OPTION_bios: qdict_put_str(machine_opts_dict, "firmware", optarg); break; - case QEMU_OPTION_singlestep: - opt_one_insn_per_tb = true; - break; case QEMU_OPTION_S: autostart = 0; break; @@ -3271,7 +3267,7 @@ void qemu_init(int argc, char **argv) pid_file = optarg; break; case QEMU_OPTION_win2k_hack: - win2k_install_hack = 1; + object_register_sugar_prop("ide-device", "win2k-install-hack", "true", true); break; case QEMU_OPTION_acpitable: opts = qemu_opts_parse_noisily(qemu_find_opts("acpi"), @@ -3371,14 +3367,6 @@ void qemu_init(int argc, char **argv) display_remote++; break; #endif - case QEMU_OPTION_no_acpi: - warn_report("-no-acpi is deprecated, use '-machine acpi=off' instead"); - qdict_put_str(machine_opts_dict, "acpi", "off"); - break; - case QEMU_OPTION_no_hpet: - warn_report("-no-hpet is deprecated, use '-machine hpet=off' instead"); - qdict_put_str(machine_opts_dict, "hpet", "off"); - break; case QEMU_OPTION_no_reboot: olist = qemu_find_opts("action"); qemu_opts_parse_noisily(olist, "reboot=shutdown", false); @@ -3602,20 +3590,9 @@ void qemu_init(int argc, char **argv) exit(1); } break; - case QEMU_OPTION_chroot: - warn_report("option is deprecated," - " use '-run-with chroot=...' instead"); - os_set_chroot(optarg); - break; case QEMU_OPTION_daemonize: os_set_daemonize(true); break; -#if defined(CONFIG_LINUX) - /* deprecated */ - case QEMU_OPTION_asyncteardown: - init_async_teardown(); - break; -#endif case QEMU_OPTION_run_with: { const char *str; opts = qemu_opts_parse_noisily(qemu_find_opts("run-with"), @@ -3733,6 +3710,7 @@ void qemu_init(int argc, char **argv) * over memory-backend-file objects). */ qemu_create_late_backends(); + phase_advance(PHASE_LATE_BACKENDS_CREATED); /* * Note: creates a QOM object, must run only after global and @@ -3741,7 +3719,7 @@ void qemu_init(int argc, char **argv) migration_object_init(); /* parse features once if machine provides default cpu_type */ - current_machine->cpu_type = machine_class->default_cpu_type; + current_machine->cpu_type = machine_class_default_cpu_type(machine_class); if (cpu_option) { current_machine->cpu_type = parse_cpu_option(cpu_option); } diff --git a/system/watchpoint.c b/system/watchpoint.c index ba5ad13352c..2aa2a9ea63f 100644 --- a/system/watchpoint.c +++ b/system/watchpoint.c @@ -18,13 +18,8 @@ */ #include "qemu/osdep.h" -#include "qemu/main-loop.h" #include "qemu/error-report.h" #include "exec/exec-all.h" -#include "exec/translate-all.h" -#include "sysemu/tcg.h" -#include "sysemu/replay.h" -#include "hw/core/tcg-cpu-ops.h" #include "hw/core/cpu.h" /* Add a watchpoint. 
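 * Illustrative use (hypothetical values): watch 4 bytes at addr for guest
 * writes on behalf of the gdbstub:
 *
 *     cpu_watchpoint_insert(cpu, addr, 4, BP_GDB | BP_MEM_WRITE, NULL);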
*/ @@ -103,122 +98,3 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask) } } } - -#ifdef CONFIG_TCG - -/* - * Return true if this watchpoint address matches the specified - * access (ie the address range covered by the watchpoint overlaps - * partially or completely with the address range covered by the - * access). - */ -static inline bool watchpoint_address_matches(CPUWatchpoint *wp, - vaddr addr, vaddr len) -{ - /* - * We know the lengths are non-zero, but a little caution is - * required to avoid errors in the case where the range ends - * exactly at the top of the address space and so addr + len - * wraps round to zero. - */ - vaddr wpend = wp->vaddr + wp->len - 1; - vaddr addrend = addr + len - 1; - - return !(addr > wpend || wp->vaddr > addrend); -} - -/* Return flags for watchpoints that match addr + prot. */ -int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) -{ - CPUWatchpoint *wp; - int ret = 0; - - QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - if (watchpoint_address_matches(wp, addr, len)) { - ret |= wp->flags; - } - } - return ret; -} - -/* Generate a debug exception if a watchpoint has been hit. */ -void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, - MemTxAttrs attrs, int flags, uintptr_t ra) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - CPUWatchpoint *wp; - - assert(tcg_enabled()); - if (cpu->watchpoint_hit) { - /* - * We re-entered the check after replacing the TB. - * Now raise the debug interrupt so that it will - * trigger after the current instruction. - */ - qemu_mutex_lock_iothread(); - cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); - qemu_mutex_unlock_iothread(); - return; - } - - if (cc->tcg_ops->adjust_watchpoint_address) { - /* this is currently used only by ARM BE32 */ - addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len); - } - - assert((flags & ~BP_MEM_ACCESS) == 0); - QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - int hit_flags = wp->flags & flags; - - if (hit_flags && watchpoint_address_matches(wp, addr, len)) { - if (replay_running_debug()) { - /* - * replay_breakpoint reads icount. - * Force recompile to succeed, because icount may - * be read only at the end of the block. - */ - if (!cpu->neg.can_do_io) { - /* Force execution of one insn next time. */ - cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); - cpu_loop_exit_restore(cpu, ra); - } - /* - * Don't process the watchpoints when we are - * in a reverse debugging operation. - */ - replay_breakpoint(); - return; - } - - wp->flags |= hit_flags << BP_HIT_SHIFT; - wp->hitaddr = MAX(addr, wp->vaddr); - wp->hitattrs = attrs; - - if (wp->flags & BP_CPU - && cc->tcg_ops->debug_check_watchpoint - && !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { - wp->flags &= ~BP_WATCHPOINT_HIT; - continue; - } - cpu->watchpoint_hit = wp; - - mmap_lock(); - /* This call also restores vCPU state */ - tb_check_watchpoint(cpu, ra); - if (wp->flags & BP_STOP_BEFORE_ACCESS) { - cpu->exception_index = EXCP_DEBUG; - mmap_unlock(); - cpu_loop_exit(cpu); - } else { - /* Force execution of one insn next time. */ - cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); - mmap_unlock(); - cpu_loop_exit_noexc(cpu); - } - } else { - wp->flags &= ~BP_WATCHPOINT_HIT; - } - } -} - -#endif /* CONFIG_TCG */ diff --git a/target/alpha/clk_helper.c b/target/alpha/clk_helper.c new file mode 100644 index 00000000000..26ffc231cda --- /dev/null +++ b/target/alpha/clk_helper.c @@ -0,0 +1,32 @@ +/* + * QEMU Alpha clock helpers. 
+ * + * Copyright (c) 2007 Jocelyn Mayer + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "exec/helper-proto.h" +#include "cpu.h" + +uint64_t helper_load_pcc(CPUAlphaState *env) +{ +#ifndef CONFIG_USER_ONLY + /* + * In system mode we have access to a decent high-resolution clock. + * In order to make OS-level time accounting work with the RPCC, + * present it with a well-timed clock fixed at 250MHz. + */ + return (((uint64_t)env->pcc_ofs << 32) + | (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2)); +#else + /* + * In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist. Just pass through + * the host cpu clock ticks. Also, don't bother taking PCC_OFS into + * account. + */ + return (uint32_t)cpu_get_host_ticks(); +#endif +} diff --git a/target/alpha/cpu-param.h b/target/alpha/cpu-param.h index 68c46f7998a..c969cb016bf 100644 --- a/target/alpha/cpu-param.h +++ b/target/alpha/cpu-param.h @@ -9,10 +9,22 @@ #define ALPHA_CPU_PARAM_H #define TARGET_LONG_BITS 64 -#define TARGET_PAGE_BITS 13 /* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44. */ #define TARGET_PHYS_ADDR_SPACE_BITS 44 -#define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS) + +#ifdef CONFIG_USER_ONLY +/* + * Allow user-only to vary page size. Real hardware allows only 8k and 64k, + * but since any variance means guests cannot assume a fixed value, allow + * a 4k minimum to match x86 host, which can minimize emulation issues. + */ +# define TARGET_PAGE_BITS_VARY +# define TARGET_PAGE_BITS_MIN 12 +# define TARGET_VIRT_ADDR_SPACE_BITS 63 +#else +# define TARGET_PAGE_BITS 13 +# define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS) +#endif #endif diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index 39cf841b3ee..05f9ee41e9f 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -64,6 +64,11 @@ static bool alpha_cpu_has_work(CPUState *cs) | CPU_INTERRUPT_MCHK); } +static int alpha_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return alpha_env_mmu_index(cpu_env(cs)); +} + static void alpha_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) { info->mach = bfd_mach_alpha_ev6; @@ -87,23 +92,6 @@ static void alpha_cpu_realizefn(DeviceState *dev, Error **errp) acc->parent_realize(dev, errp); } -static void alpha_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - - qemu_printf(" %s\n", object_class_get_name(oc)); -} - -void alpha_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list_sorted(TYPE_ALPHA_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, alpha_cpu_list_entry, NULL); - g_slist_free(list); -} - /* Models */ typedef struct AlphaCPUAlias { const char *alias; @@ -142,51 +130,32 @@ static ObjectClass *alpha_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(typename); g_free(typename); - /* TODO: remove match everything nonsense */ - if (!oc || object_class_is_abstract(oc)) { - /* Default to ev67; no reason not to emulate insns by default. 
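The 250MHz figure in the new clk_helper.c falls out of QEMU_CLOCK_VIRTUAL being a nanosecond-resolution (1GHz) clock, so shifting right by two divides the rate by four. A self-contained sketch of just that low-word computation (pcc_low_word is a name invented here for illustration, not part of the patch):

    #include <stdint.h>

    /* 10^9 ticks/s shifted right by 2 gives 2.5 * 10^8 ticks/s, i.e. 250MHz */
    static uint32_t pcc_low_word(int64_t virtual_clock_ns)
    {
        return (uint32_t)(virtual_clock_ns >> 2);
    }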
*/ - oc = object_class_by_name(ALPHA_CPU_TYPE_NAME("ev67")); - } - return oc; } static void ev4_cpu_initfn(Object *obj) { - AlphaCPU *cpu = ALPHA_CPU(obj); - CPUAlphaState *env = &cpu->env; - - env->implver = IMPLVER_2106x; + cpu_env(CPU(obj))->implver = IMPLVER_2106x; } static void ev5_cpu_initfn(Object *obj) { - AlphaCPU *cpu = ALPHA_CPU(obj); - CPUAlphaState *env = &cpu->env; - - env->implver = IMPLVER_21164; + cpu_env(CPU(obj))->implver = IMPLVER_21164; } static void ev56_cpu_initfn(Object *obj) { - AlphaCPU *cpu = ALPHA_CPU(obj); - CPUAlphaState *env = &cpu->env; - - env->amask |= AMASK_BWX; + cpu_env(CPU(obj))->amask |= AMASK_BWX; } static void pca56_cpu_initfn(Object *obj) { - AlphaCPU *cpu = ALPHA_CPU(obj); - CPUAlphaState *env = &cpu->env; - - env->amask |= AMASK_MVI; + cpu_env(CPU(obj))->amask |= AMASK_MVI; } static void ev6_cpu_initfn(Object *obj) { - AlphaCPU *cpu = ALPHA_CPU(obj); - CPUAlphaState *env = &cpu->env; + CPUAlphaState *env = cpu_env(CPU(obj)); env->implver = IMPLVER_21264; env->amask = AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP; @@ -194,16 +163,12 @@ static void ev6_cpu_initfn(Object *obj) static void ev67_cpu_initfn(Object *obj) { - AlphaCPU *cpu = ALPHA_CPU(obj); - CPUAlphaState *env = &cpu->env; - - env->amask |= AMASK_CIX | AMASK_PREFETCH; + cpu_env(CPU(obj))->amask |= AMASK_CIX | AMASK_PREFETCH; } static void alpha_cpu_initfn(Object *obj) { - AlphaCPU *cpu = ALPHA_CPU(obj); - CPUAlphaState *env = &cpu->env; + CPUAlphaState *env = cpu_env(CPU(obj)); env->lock_addr = -1; #if defined(CONFIG_USER_ONLY) @@ -226,7 +191,7 @@ static const struct SysemuCPUOps alpha_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps alpha_tcg_ops = { +static const TCGCPUOps alpha_tcg_ops = { .initialize = alpha_translate_init, .restore_state_to_opc = alpha_restore_state_to_opc, @@ -253,6 +218,7 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = alpha_cpu_class_by_name; cc->has_work = alpha_cpu_has_work; + cc->mmu_index = alpha_cpu_mmu_index; cc->dump_state = alpha_cpu_dump_state; cc->set_pc = alpha_cpu_set_pc; cc->get_pc = alpha_cpu_get_pc; diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index d672e911ddd..7188a409a04 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -292,8 +292,6 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags); int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -#define cpu_list alpha_cpu_list - #include "exec/cpu-all.h" enum { @@ -391,7 +389,7 @@ enum { #define TB_FLAG_UNALIGN (1u << 1) -static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch) +static inline int alpha_env_mmu_index(CPUAlphaState *env) { int ret = env->flags & ENV_FLAG_PS_USER ? 
MMU_USER_IDX : MMU_KERNEL_IDX; if (env->flags & ENV_FLAG_PAL_MODE) { @@ -441,7 +439,6 @@ void alpha_translate_init(void); #define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU -void alpha_cpu_list(void); G_NORETURN void dynamic_excp(CPUAlphaState *, uintptr_t, int, int); G_NORETURN void arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t); diff --git a/target/alpha/gdbstub.c b/target/alpha/gdbstub.c index 0f8fa150f89..13694fd321e 100644 --- a/target/alpha/gdbstub.c +++ b/target/alpha/gdbstub.c @@ -23,8 +23,7 @@ int alpha_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - AlphaCPU *cpu = ALPHA_CPU(cs); - CPUAlphaState *env = &cpu->env; + CPUAlphaState *env = cpu_env(cs); uint64_t val; CPU_DoubleU d; @@ -59,8 +58,7 @@ int alpha_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int alpha_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - AlphaCPU *cpu = ALPHA_CPU(cs); - CPUAlphaState *env = &cpu->env; + CPUAlphaState *env = cpu_env(cs); target_ulong tmp = ldtul_p(mem_buf); CPU_DoubleU d; diff --git a/target/alpha/helper.c b/target/alpha/helper.c index 970c8697715..d6d4353edde 100644 --- a/target/alpha/helper.c +++ b/target/alpha/helper.c @@ -286,11 +286,10 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr, hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { - AlphaCPU *cpu = ALPHA_CPU(cs); target_ulong phys; int prot, fail; - fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot); + fail = get_physical_address(cpu_env(cs), addr, 0, 0, &phys, &prot); return (fail >= 0 ? -1 : phys); } @@ -298,8 +297,7 @@ bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - AlphaCPU *cpu = ALPHA_CPU(cs); - CPUAlphaState *env = &cpu->env; + CPUAlphaState *env = cpu_env(cs); target_ulong phys; int prot, fail; @@ -325,8 +323,7 @@ bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, void alpha_cpu_do_interrupt(CPUState *cs) { - AlphaCPU *cpu = ALPHA_CPU(cs); - CPUAlphaState *env = &cpu->env; + CPUAlphaState *env = cpu_env(cs); int i = cs->exception_index; if (qemu_loglevel_mask(CPU_LOG_INT)) { @@ -435,8 +432,7 @@ void alpha_cpu_do_interrupt(CPUState *cs) bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - AlphaCPU *cpu = ALPHA_CPU(cs); - CPUAlphaState *env = &cpu->env; + CPUAlphaState *env = cpu_env(cs); int idx = -1; /* We never take interrupts while in PALmode. 
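The cpu.h/cpu.c hunks above follow a pattern that recurs for Arm later in this series: the per-target cpu_mmu_index() inline is renamed to an env-level helper, and generic code now reaches it through a CPUClass hook instead. Condensed from the hunks above, not additional patch content:

    /* the policy stays in the target-specific helper ... */
    static int alpha_cpu_mmu_index(CPUState *cs, bool ifetch)
    {
        return alpha_env_mmu_index(cpu_env(cs));
    }

    /* ... and the class hook, wired up in alpha_cpu_class_init(),
     * simply forwards to it: */
    cc->mmu_index = alpha_cpu_mmu_index;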
*/ @@ -487,8 +483,7 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags) "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", "t10", "t11", "ra", "t12", "at", "gp", "sp" }; - AlphaCPU *cpu = ALPHA_CPU(cs); - CPUAlphaState *env = &cpu->env; + CPUAlphaState *env = cpu_env(cs); int i; qemu_fprintf(f, "PC " TARGET_FMT_lx " PS %02x\n", diff --git a/target/alpha/machine.c b/target/alpha/machine.c index 2b7c8148ff5..f09834f635d 100644 --- a/target/alpha/machine.c +++ b/target/alpha/machine.c @@ -24,7 +24,7 @@ static const VMStateInfo vmstate_fpcr = { .put = put_fpcr, }; -static VMStateField vmstate_env_fields[] = { +static const VMStateField vmstate_env_fields[] = { VMSTATE_UINTTL_ARRAY(ir, CPUAlphaState, 31), VMSTATE_UINTTL_ARRAY(fir, CPUAlphaState, 31), /* Save the architecture value of the fpcr, not the internally @@ -73,7 +73,7 @@ static const VMStateDescription vmstate_env = { .fields = vmstate_env_fields, }; -static VMStateField vmstate_cpu_fields[] = { +static const VMStateField vmstate_cpu_fields[] = { VMSTATE_CPU(), VMSTATE_STRUCT(env, AlphaCPU, 1, vmstate_env, CPUAlphaState), VMSTATE_END_OF_LIST() diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c index a39b52c5dd6..872955f5e74 100644 --- a/target/alpha/mem_helper.c +++ b/target/alpha/mem_helper.c @@ -42,18 +42,14 @@ static void do_unaligned_access(CPUAlphaState *env, vaddr addr, uintptr_t retadd void alpha_cpu_record_sigbus(CPUState *cs, vaddr addr, MMUAccessType access_type, uintptr_t retaddr) { - AlphaCPU *cpu = ALPHA_CPU(cs); - CPUAlphaState *env = &cpu->env; - - do_unaligned_access(env, addr, retaddr); + do_unaligned_access(cpu_env(cs), addr, retaddr); } #else void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { - AlphaCPU *cpu = ALPHA_CPU(cs); - CPUAlphaState *env = &cpu->env; + CPUAlphaState *env = cpu_env(cs); do_unaligned_access(env, addr, retaddr); cs->exception_index = EXCP_UNALIGN; @@ -67,8 +63,7 @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr) { - AlphaCPU *cpu = ALPHA_CPU(cs); - CPUAlphaState *env = &cpu->env; + CPUAlphaState *env = cpu_env(cs); env->trap_arg0 = addr; env->trap_arg1 = access_type == MMU_DATA_STORE ? 1 : 0; diff --git a/target/alpha/meson.build b/target/alpha/meson.build index d3502dd823f..7dbbd557171 100644 --- a/target/alpha/meson.build +++ b/target/alpha/meson.build @@ -4,15 +4,18 @@ alpha_ss.add(files( 'fpu_helper.c', 'gdbstub.c', 'helper.c', + 'clk_helper.c', 'int_helper.c', 'mem_helper.c', - 'sys_helper.c', 'translate.c', 'vax_helper.c', )) alpha_system_ss = ss.source_set() -alpha_system_ss.add(files('machine.c')) +alpha_system_ss.add(files( + 'machine.c', + 'sys_helper.c', +)) target_arch += {'alpha': alpha_ss} target_system_arch += {'alpha': alpha_system_ss} diff --git a/target/alpha/sys_helper.c b/target/alpha/sys_helper.c index c83c92dd4ce..768116ef32b 100644 --- a/target/alpha/sys_helper.c +++ b/target/alpha/sys_helper.c @@ -27,23 +27,7 @@ #include "qemu/timer.h" -uint64_t helper_load_pcc(CPUAlphaState *env) -{ -#ifndef CONFIG_USER_ONLY - /* In system mode we have access to a decent high-resolution clock. - In order to make OS-level time accounting work with the RPCC, - present it with a well-timed clock fixed at 250MHz. */ - return (((uint64_t)env->pcc_ofs << 32) - | (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2)); -#else - /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist. 
Just pass through the host cpu - clock ticks. Also, don't bother taking PCC_OFS into account. */ - return (uint32_t)cpu_get_host_ticks(); -#endif -} - /* PALcode support special instructions */ -#ifndef CONFIG_USER_ONLY void helper_tbia(CPUAlphaState *env) { tlb_flush(env_cpu(env)); @@ -89,5 +73,3 @@ void helper_set_alarm(CPUAlphaState *env, uint64_t expire) timer_del(cpu->alarm_timer); } } - -#endif /* CONFIG_USER_ONLY */ diff --git a/target/alpha/translate.c b/target/alpha/translate.c index 32333081d8a..a97cd54f0c1 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -453,13 +453,13 @@ static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp) } static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond, - TCGv cmp, int32_t disp) + TCGv cmp, uint64_t imm, int32_t disp) { uint64_t dest = ctx->base.pc_next + (disp << 2); TCGLabel *lab_true = gen_new_label(); if (use_goto_tb(ctx, dest)) { - tcg_gen_brcondi_i64(cond, cmp, 0, lab_true); + tcg_gen_brcondi_i64(cond, cmp, imm, lab_true); tcg_gen_goto_tb(0); tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); @@ -472,81 +472,71 @@ static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond, return DISAS_NORETURN; } else { - TCGv_i64 z = load_zero(ctx); + TCGv_i64 i = tcg_constant_i64(imm); TCGv_i64 d = tcg_constant_i64(dest); TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next); - tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p); + tcg_gen_movcond_i64(cond, cpu_pc, cmp, i, d, p); return DISAS_PC_UPDATED; } } static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra, - int32_t disp, int mask) + int32_t disp) { - if (mask) { - TCGv tmp = tcg_temp_new(); - DisasJumpType ret; - - tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1); - ret = gen_bcond_internal(ctx, cond, tmp, disp); - return ret; - } - return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp); + return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), + is_tst_cond(cond), disp); } /* Fold -0.0 for comparison with COND. */ -static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src) +static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src) { - uint64_t mzero = 1ull << 63; + TCGv_i64 tmp; - switch (cond) { + *pimm = 0; + switch (*pcond) { case TCG_COND_LE: case TCG_COND_GT: /* For <= or >, the -0.0 value directly compares the way we want. */ - tcg_gen_mov_i64(dest, src); - break; + return src; case TCG_COND_EQ: case TCG_COND_NE: - /* For == or !=, we can simply mask off the sign bit and compare. */ - tcg_gen_andi_i64(dest, src, mzero - 1); - break; + /* For == or !=, we can compare without the sign bit. */ + *pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE; + *pimm = INT64_MAX; + return src; case TCG_COND_GE: case TCG_COND_LT: /* For >= or <, map -0.0 to +0.0. 
*/ - tcg_gen_movcond_i64(TCG_COND_NE, dest, src, tcg_constant_i64(mzero), - src, tcg_constant_i64(0)); - break; + tmp = tcg_temp_new_i64(); + tcg_gen_movcond_i64(TCG_COND_EQ, tmp, + src, tcg_constant_i64(INT64_MIN), + tcg_constant_i64(0), src); + return tmp; default: - abort(); + g_assert_not_reached(); } } static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp) { - TCGv cmp_tmp = tcg_temp_new(); - DisasJumpType ret; - - gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra)); - ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp); - return ret; + uint64_t imm; + TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra)); + return gen_bcond_internal(ctx, cond, tmp, imm, disp); } static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc) { - TCGv_i64 va, vb, z; - - z = load_zero(ctx); - vb = load_fpr(ctx, rb); - va = tcg_temp_new(); - gen_fold_mzero(cond, va, load_fpr(ctx, ra)); - - tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc)); + uint64_t imm; + TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra)); + tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), + tmp, tcg_constant_i64(imm), + load_fpr(ctx, rb), load_fpr(ctx, rc)); } #define QUAL_RM_N 0x080 /* Round mode nearest even */ @@ -1683,16 +1673,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) break; case 0x14: /* CMOVLBS */ - tmp = tcg_temp_new(); - tcg_gen_andi_i64(tmp, va, 1); - tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx), + tcg_gen_movcond_i64(TCG_COND_TSTNE, vc, va, tcg_constant_i64(1), vb, load_gpr(ctx, rc)); break; case 0x16: /* CMOVLBC */ - tmp = tcg_temp_new(); - tcg_gen_andi_i64(tmp, va, 1); - tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx), + tcg_gen_movcond_i64(TCG_COND_TSTEQ, vc, va, tcg_constant_i64(1), vb, load_gpr(ctx, rc)); break; case 0x20: @@ -2827,35 +2813,35 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) break; case 0x38: /* BLBC */ - ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1); + ret = gen_bcond(ctx, TCG_COND_TSTEQ, ra, disp21); break; case 0x39: /* BEQ */ - ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0); + ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21); break; case 0x3A: /* BLT */ - ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0); + ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21); break; case 0x3B: /* BLE */ - ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0); + ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21); break; case 0x3C: /* BLBS */ - ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1); + ret = gen_bcond(ctx, TCG_COND_TSTNE, ra, disp21); break; case 0x3D: /* BNE */ - ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0); + ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21); break; case 0x3E: /* BGE */ - ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0); + ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21); break; case 0x3F: /* BGT */ - ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0); + ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21); break; invalid_opc: ret = gen_invalid(ctx); @@ -2875,7 +2861,7 @@ static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) int64_t bound; ctx->tbflags = ctx->base.tb->flags; - ctx->mem_idx = cpu_mmu_index(env, false); + ctx->mem_idx = alpha_env_mmu_index(env); ctx->implver = env->implver; ctx->amask = env->amask; @@ -2917,8 +2903,8 @@ static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, 
DisasContext, base); - CPUAlphaState *env = cpu_env(cpu); - uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next); + uint32_t insn = translator_ldl(cpu_env(cpu), &ctx->base, + ctx->base.pc_next); ctx->base.pc_next += 4; ctx->base.is_jmp = translate_one(ctx, insn); @@ -2971,7 +2957,7 @@ static const TranslatorOps alpha_tr_ops = { }; void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc; translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base); diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c index c078849403c..2b2055c6acc 100644 --- a/target/arm/arm-powerctl.c +++ b/target/arm/arm-powerctl.c @@ -16,6 +16,7 @@ #include "qemu/log.h" #include "qemu/main-loop.h" #include "sysemu/tcg.h" +#include "target/arm/multiprocessing.h" #ifndef DEBUG_ARM_POWERCTL #define DEBUG_ARM_POWERCTL 0 @@ -37,7 +38,7 @@ CPUState *arm_get_cpu_by_id(uint64_t id) CPU_FOREACH(cpu) { ARMCPU *armcpu = ARM_CPU(cpu); - if (armcpu->mp_affinity == id) { + if (arm_cpu_mp_affinity(armcpu) == id) { return cpu; } } @@ -88,7 +89,7 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state, g_free(info); /* Finally set the power status */ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); target_cpu->power_state = PSCI_ON; } @@ -99,7 +100,7 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, ARMCPU *target_cpu; struct CpuOnInfo *info; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64 "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry, @@ -196,7 +197,7 @@ static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state, target_cpu_state->halted = 0; /* Finally set the power status */ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); target_cpu->power_state = PSCI_ON; } @@ -205,7 +206,7 @@ int arm_set_cpu_on_and_reset(uint64_t cpuid) CPUState *target_cpu_state; ARMCPU *target_cpu; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); /* Retrieve the cpu we are powering up */ target_cpu_state = arm_get_cpu_by_id(cpuid); @@ -247,7 +248,7 @@ static void arm_set_cpu_off_async_work(CPUState *target_cpu_state, { ARMCPU *target_cpu = ARM_CPU(target_cpu_state); - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); target_cpu->power_state = PSCI_OFF; target_cpu_state->halted = 1; target_cpu_state->exception_index = EXCP_HLT; @@ -258,7 +259,7 @@ int arm_set_cpu_off(uint64_t cpuid) CPUState *target_cpu_state; ARMCPU *target_cpu; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); DPRINTF("cpu %" PRId64 "\n", cpuid); @@ -294,7 +295,7 @@ int arm_reset_cpu(uint64_t cpuid) CPUState *target_cpu_state; ARMCPU *target_cpu; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); DPRINTF("cpu %" PRId64 "\n", cpuid); diff --git a/target/arm/arm-qmp-cmds.c b/target/arm/arm-qmp-cmds.c index b53d5efe13d..3cc8cc738bb 100644 --- a/target/arm/arm-qmp-cmds.c +++ b/target/arm/arm-qmp-cmds.c @@ -28,7 +28,6 @@ #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-commands-machine-target.h" #include "qapi/qapi-commands-misc-target.h" -#include "qapi/qmp/qerror.h" #include "qapi/qmp/qdict.h" #include "qom/qom-qobject.h" @@ -104,7 +103,7 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, Error **errp) { CpuModelExpansionInfo *expansion_info; - const QDict *qdict_in = 
NULL; + const QDict *qdict_in; QDict *qdict_out; ObjectClass *oc; Object *obj; @@ -151,27 +150,20 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, } } - if (model->props) { - qdict_in = qobject_to(QDict, model->props); - if (!qdict_in) { - error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict"); - return NULL; - } - } - obj = object_new(object_class_get_name(oc)); - if (qdict_in) { + if (model->props) { Visitor *visitor; Error *err = NULL; visitor = qobject_input_visitor_new(model->props); - if (!visit_start_struct(visitor, NULL, NULL, 0, errp)) { + if (!visit_start_struct(visitor, "model.props", NULL, 0, errp)) { visit_free(visitor); object_unref(obj); return NULL; } + qdict_in = qobject_to(QDict, model->props); i = 0; while ((name = cpu_model_advertised_features[i++]) != NULL) { if (qdict_get(qdict_in, name)) { @@ -237,8 +229,7 @@ static void arm_cpu_add_definition(gpointer data, gpointer user_data) typename = object_class_get_name(oc); info = g_malloc0(sizeof(*info)); - info->name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_ARM_CPU)); + info->name = cpu_model_from_type(typename); info->q_typename = g_strdup(typename); QAPI_LIST_PREPEND(*cpu_list, info); diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h index f1293d16c07..cc7c54378f4 100644 --- a/target/arm/cpregs.h +++ b/target/arm/cpregs.h @@ -21,6 +21,9 @@ #ifndef TARGET_ARM_CPREGS_H #define TARGET_ARM_CPREGS_H +#include "hw/registerfields.h" +#include "target/arm/kvm-consts.h" + /* * ARMCPRegInfo type field bits: */ @@ -118,6 +121,11 @@ enum { * ARM pseudocode function CheckSMEAccess(). */ ARM_CP_SME = 1 << 19, + /* + * Flag: one of the four EL2 registers which redirect to the + * equivalent EL1 register when FEAT_NV2 is enabled. + */ + ARM_CP_NV2_REDIRECT = 1 << 20, }; /* @@ -821,6 +829,11 @@ typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque); #define CP_ANY 0xff +/* Flags in the high bits of nv2_redirect_offset */ +#define NV2_REDIR_NV1 0x4000 /* Only redirect when HCR_EL2.NV1 == 1 */ +#define NV2_REDIR_NO_NV1 0x8000 /* Only redirect when HCR_EL2.NV1 == 0 */ +#define NV2_REDIR_FLAG_MASK 0xc000 + /* Definition of an ARM coprocessor register */ struct ARMCPRegInfo { /* Name of register (useful mainly for debugging, need not be unique) */ @@ -862,6 +875,13 @@ struct ARMCPRegInfo { * value encodes both the trap register and bit within it. */ FGTBit fgt; + + /* + * Offset from VNCR_EL2 when FEAT_NV2 redirects access to memory; + * may include an NV2_REDIR_* flag. + */ + uint32_t nv2_redirect_offset; + /* * The opaque pointer passed to define_arm_cp_regs_with_opaque() when * this register was defined: can be used to hand data through to the @@ -937,7 +957,7 @@ struct ARMCPRegInfo { CPResetFn *resetfn; /* - * "Original" writefn and readfn. + * "Original" readfn, writefn, accessfn. 
* For ARMv8.1-VHE register aliases, we overwrite the read/write * accessor functions of various EL1/EL0 to perform the runtime * check for which sysreg should actually be modified, and then @@ -948,6 +968,7 @@ struct ARMCPRegInfo { */ CPReadFn *orig_readfn; CPWriteFn *orig_writefn; + CPAccessFn *orig_accessfn; }; /* @@ -1079,4 +1100,38 @@ void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu); CPAccessResult access_tvm_trvm(CPUARMState *, const ARMCPRegInfo *, bool); +/** + * arm_cpreg_trap_in_nv: Return true if cpreg traps in nested virtualization + * + * Return true if this cpreg is one which should be trapped to EL2 if + * it is executed at EL1 when nested virtualization is enabled via HCR_EL2.NV. + */ +static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri) +{ + /* + * The Arm ARM defines the registers to be trapped in terms of + * their names (I_TZTZL). However the underlying principle is "if + * it would UNDEF at EL1 but work at EL2 then it should trap", and + * the way the encoding of sysregs and system instructions is done + * means that the right set of registers is exactly those where + * the opc1 field is 4 or 5. (You can see this also in the assert + * we do that the opc1 field and the permissions mask line up in + * define_one_arm_cp_reg_with_opaque().) + * Checking the opc1 field is easier for us and avoids the problem + * that we do not consistently use the right architectural names + * for all sysregs, since we treat the name field as largely for debug. + * + * However we do this check, it is going to be at least potentially + * fragile to future new sysregs, but this seems the least likely + * to break. + * + * In particular, note that the released sysreg XML defines that + * the FEAT_MEC sysregs and instructions do not follow this FEAT_NV + * trapping rule, so we will need to add an ARM_CP_* flag to indicate + * "register does not trap on NV" to handle those if/when we implement + * FEAT_MEC. 
+ */ + return ri->opc1 == 4 || ri->opc1 == 5; +} + #endif /* TARGET_ARM_CPREGS_H */ diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h index 165a497f7b9..e5758d9fbc8 100644 --- a/target/arm/cpu-features.h +++ b/target/arm/cpu-features.h @@ -20,6 +20,8 @@ #ifndef TARGET_ARM_FEATURES_H #define TARGET_ARM_FEATURES_H +#include "hw/registerfields.h" + /* * Naming convention for isar_feature functions: * Functions which test 32-bit ID registers should have _aa32_ in @@ -739,6 +741,16 @@ static inline bool isar_feature_aa64_fgt(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, FGT) != 0; } +static inline bool isar_feature_aa64_ecv_traps(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, ECV) > 0; +} + +static inline bool isar_feature_aa64_ecv(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, ECV) > 1; +} + static inline bool isar_feature_aa64_vh(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0; @@ -839,6 +851,16 @@ static inline bool isar_feature_aa64_e0pd(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, E0PD) != 0; } +static inline bool isar_feature_aa64_nv(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, NV) != 0; +} + +static inline bool isar_feature_aa64_nv2(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, NV) >= 2; +} + static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 && diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h index f9b462a98fa..da3243ab217 100644 --- a/target/arm/cpu-param.h +++ b/target/arm/cpu-param.h @@ -19,9 +19,13 @@ #endif #ifdef CONFIG_USER_ONLY -#define TARGET_PAGE_BITS 12 # ifdef TARGET_AARCH64 # define TARGET_TAGGED_ADDRESSES +/* Allow user-only to vary page size from 4k */ +# define TARGET_PAGE_BITS_VARY +# define TARGET_PAGE_BITS_MIN 12 +# else +# define TARGET_PAGE_BITS 12 # endif #else /* diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h index 02b914c8767..8e032691dbf 100644 --- a/target/arm/cpu-qom.h +++ b/target/arm/cpu-qom.h @@ -33,4 +33,28 @@ typedef struct AArch64CPUClass AArch64CPUClass; DECLARE_CLASS_CHECKERS(AArch64CPUClass, AARCH64_CPU, TYPE_AARCH64_CPU) +#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU +#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX) + +/* Meanings of the ARMCPU object's four inbound GPIO lines */ +#define ARM_CPU_IRQ 0 +#define ARM_CPU_FIQ 1 +#define ARM_CPU_VIRQ 2 +#define ARM_CPU_VFIQ 3 + +/* For M profile, some registers are banked secure vs non-secure; + * these are represented as a 2-element array where the first element + * is the non-secure copy and the second is the secure copy. + * When the CPU does not have implement the security extension then + * only the first element is used. + * This means that the copy for the current security state can be + * accessed via env->registerfield[env->v7m.secure] (whether the security + * extension is implemented or not). 
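The new ID-register predicates above are tiered: a field value of 1 enables the base feature and 2 additionally enables the extension, so isar_feature_aa64_nv2()/isar_feature_aa64_ecv() imply isar_feature_aa64_nv()/isar_feature_aa64_ecv_traps(). A compressed restatement in the same FIELD_EX64 idiom used by the hunk (not additional patch content):

    unsigned nv  = FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, NV);
    unsigned ecv = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, ECV);

    bool feat_nv        = nv  >= 1;   /* isar_feature_aa64_nv()        */
    bool feat_nv2       = nv  >= 2;   /* isar_feature_aa64_nv2()       */
    bool feat_ecv_traps = ecv >= 1;   /* isar_feature_aa64_ecv_traps() */
    bool feat_ecv       = ecv >= 2;   /* isar_feature_aa64_ecv()       */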
+ */ +enum { + M_REG_NS = 0, + M_REG_S = 1, + M_REG_NUM_BANKS = 2, +}; + #endif diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 5d9bca5b8db..ab8d007a86c 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -48,6 +48,8 @@ #include "disas/capstone.h" #include "fpu/softfloat.h" #include "cpregs.h" +#include "target/arm/cpu-qom.h" +#include "target/arm/gtimer.h" static void arm_cpu_set_pc(CPUState *cs, vaddr value) { @@ -131,6 +133,11 @@ static bool arm_cpu_has_work(CPUState *cs) | CPU_INTERRUPT_EXITTB); } +static int arm_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return arm_env_mmu_index(cpu_env(cs)); +} + void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, void *opaque) { @@ -207,9 +214,9 @@ static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque) static void arm_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(obj); - ARMCPU *cpu = ARM_CPU(s); - ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu); + CPUState *cs = CPU(obj); + ARMCPU *cpu = ARM_CPU(cs); + ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj); CPUARMState *env = &cpu->env; if (acc->parent_phases.hold) { @@ -226,7 +233,7 @@ static void arm_cpu_reset_hold(Object *obj) env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1; env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2; - cpu->power_state = s->start_powered_off ? PSCI_OFF : PSCI_ON; + cpu->power_state = cs->start_powered_off ? PSCI_OFF : PSCI_ON; if (arm_feature(env, ARM_FEATURE_IWMMXT)) { env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q'; @@ -431,7 +438,7 @@ static void arm_cpu_reset_hold(Object *obj) /* Load the initial SP and PC from offset 0 and 4 in the vector table */ vecbase = env->v7m.vecbase[env->v7m.secure]; - rom = rom_ptr_for_as(s->as, vecbase, 8); + rom = rom_ptr_for_as(cs->as, vecbase, 8); if (rom) { /* Address zero is covered by ROM which hasn't yet been * copied into physical memory. @@ -444,8 +451,8 @@ static void arm_cpu_reset_hold(Object *obj) * it got copied into memory. In the latter case, rom_ptr * will return a NULL pointer and we should use ldl_phys instead. */ - initial_msp = ldl_phys(s->as, vecbase); - initial_pc = ldl_phys(s->as, vecbase + 4); + initial_msp = ldl_phys(cs->as, vecbase); + initial_pc = ldl_phys(cs->as, vecbase + 4); } qemu_log_mask(CPU_LOG_INT, @@ -1059,6 +1066,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) uint32_t psr = pstate_read(env); int i, j; int el = arm_current_el(env); + uint64_t hcr = arm_hcr_el2_eff(env); const char *ns_status; bool sve; @@ -1096,6 +1104,10 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) if (cpu_isar_feature(aa64_bti, cpu)) { qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10); } + qemu_fprintf(f, "%s%s%s", + (hcr & HCR_NV) ? " NV" : "", + (hcr & HCR_NV1) ? " NV1" : "", + (hcr & HCR_NV2) ? 
" NV2" : ""); if (!(flags & CPU_DUMP_FPU)) { qemu_fprintf(f, "\n"); return; @@ -1302,13 +1314,18 @@ static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags) } } -uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz) +uint64_t arm_build_mp_affinity(int idx, uint8_t clustersz) { uint32_t Aff1 = idx / clustersz; uint32_t Aff0 = idx % clustersz; return (Aff1 << ARM_AFF1_SHIFT) | Aff0; } +uint64_t arm_cpu_mp_affinity(ARMCPU *cpu) +{ + return cpu->mp_affinity; +} + static void arm_cpu_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -1690,7 +1707,7 @@ void arm_cpu_post_init(Object *obj) } if (kvm_enabled()) { - kvm_arm_add_vcpu_properties(obj); + kvm_arm_add_vcpu_properties(cpu); } #ifndef CONFIG_USER_ONLY @@ -1792,11 +1809,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) ARMCPU *cpu = ARM_CPU(dev); ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev); CPUARMState *env = &cpu->env; - int pagebits; Error *local_err = NULL; +#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) /* Use pc-relative instructions in system-mode */ -#ifndef CONFIG_USER_ONLY cs->tcg_cflags |= CF_PCREL; #endif @@ -2079,32 +2095,40 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * We rely on no XScale CPU having VFP so we can use the same bits in the * TB flags field for VECSTRIDE and XSCALE_CPAR. */ - assert(arm_feature(&cpu->env, ARM_FEATURE_AARCH64) || + assert(arm_feature(env, ARM_FEATURE_AARCH64) || !cpu_isar_feature(aa32_vfp_simd, cpu) || !arm_feature(env, ARM_FEATURE_XSCALE)); - if (arm_feature(env, ARM_FEATURE_V7) && - !arm_feature(env, ARM_FEATURE_M) && - !arm_feature(env, ARM_FEATURE_PMSA)) { - /* v7VMSA drops support for the old ARMv5 tiny pages, so we - * can use 4K pages. - */ - pagebits = 12; - } else { - /* For CPUs which might have tiny 1K pages, or which have an - * MPU and might have small region sizes, stick with 1K pages. - */ - pagebits = 10; - } - if (!set_preferred_target_page_bits(pagebits)) { - /* This can only ever happen for hotplugging a CPU, or if - * the board code incorrectly creates a CPU which it has - * promised via minimum_page_size that it will not. - */ - error_setg(errp, "This CPU requires a smaller page size than the " - "system is using"); - return; +#ifndef CONFIG_USER_ONLY + { + int pagebits; + if (arm_feature(env, ARM_FEATURE_V7) && + !arm_feature(env, ARM_FEATURE_M) && + !arm_feature(env, ARM_FEATURE_PMSA)) { + /* + * v7VMSA drops support for the old ARMv5 tiny pages, + * so we can use 4K pages. + */ + pagebits = 12; + } else { + /* + * For CPUs which might have tiny 1K pages, or which have an + * MPU and might have small region sizes, stick with 1K pages. + */ + pagebits = 10; + } + if (!set_preferred_target_page_bits(pagebits)) { + /* + * This can only ever happen for hotplugging a CPU, or if + * the board code incorrectly creates a CPU which it has + * promised via minimum_page_size that it will not. + */ + error_setg(errp, "This CPU requires a smaller page size " + "than the system is using"); + return; + } } +#endif /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it. * We don't support setting cluster ID ([16..23]) (known as Aff2 @@ -2112,8 +2136,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * so these bits always RAZ. 
*/ if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) { - cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index, - ARM_DEFAULT_CPUS_PER_CLUSTER); + cpu->mp_affinity = arm_build_mp_affinity(cs->cpu_index, + ARM_DEFAULT_CPUS_PER_CLUSTER); } if (cpu->reset_hivecs) { @@ -2121,7 +2145,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) } if (cpu->cfgend) { - if (arm_feature(&cpu->env, ARM_FEATURE_V7)) { + if (arm_feature(env, ARM_FEATURE_V7)) { cpu->reset_sctlr |= SCTLR_EE; } else { cpu->reset_sctlr |= SCTLR_B; @@ -2242,9 +2266,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) /* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */ cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0); - /* FEAT_NV (Nested Virtualization) */ - cpu->isar.id_aa64mmfr2 = - FIELD_DP64(cpu->isar.id_aa64mmfr2, ID_AA64MMFR2, NV, 0); } /* MPU can be configured out of a PMSA CPU either by setting has-mpu @@ -2415,9 +2436,7 @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(typename); g_strfreev(cpuname); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU)) { - return NULL; - } + return oc; } @@ -2455,7 +2474,7 @@ static const struct SysemuCPUOps arm_sysemu_ops = { #endif #ifdef CONFIG_TCG -static const struct TCGCPUOps arm_tcg_ops = { +static const TCGCPUOps arm_tcg_ops = { .initialize = arm_translate_init, .synchronize_from_tb = arm_cpu_synchronize_from_tb, .debug_excp_handler = arm_debug_excp_handler, @@ -2494,6 +2513,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = arm_cpu_class_by_name; cc->has_work = arm_cpu_has_work; + cc->mmu_index = arm_cpu_mmu_index; cc->dump_state = arm_cpu_dump_state; cc->set_pc = arm_cpu_set_pc; cc->get_pc = arm_cpu_get_pc; @@ -2502,9 +2522,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) #ifndef CONFIG_USER_ONLY cc->sysemu_ops = &arm_sysemu_ops; #endif - cc->gdb_num_core_regs = 26; cc->gdb_arch_name = arm_gdb_arch_name; - cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml; cc->gdb_stop_before_watchpoint = true; cc->disas_set_info = arm_disas_set_info; diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 8eaed86956f..b5834d5058e 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -25,7 +25,10 @@ #include "hw/registerfields.h" #include "cpu-qom.h" #include "exec/cpu-defs.h" +#include "exec/gdbstub.h" #include "qapi/qapi-types-common.h" +#include "target/arm/multiprocessing.h" +#include "target/arm/gtimer.h" /* ARM processors have a weak memory model */ #define TCG_GUEST_DEFAULT_MO (0) @@ -72,21 +75,6 @@ #define ARMV7M_EXCP_PENDSV 14 #define ARMV7M_EXCP_SYSTICK 15 -/* For M profile, some registers are banked secure vs non-secure; - * these are represented as a 2-element array where the first element - * is the non-secure copy and the second is the secure copy. - * When the CPU does not have implement the security extension then - * only the first element is used. - * This means that the copy for the current security state can be - * accessed via env->registerfield[env->v7m.secure] (whether the security - * extension is implemented or not). - */ -enum { - M_REG_NS = 0, - M_REG_S = 1, - M_REG_NUM_BANKS = 2, -}; - /* ARM-specific interrupt pending bits. 
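arm_build_mp_affinity() above packs a flat CPU index into MPIDR affinity fields. A self-contained model plus a worked example; the shift of 8 for Aff1 and the default cluster size of 8 are assumptions based on the usual MPIDR layout and ARM_DEFAULT_CPUS_PER_CLUSTER, not restated by this patch:

    #include <stdint.h>

    static uint64_t build_mp_affinity(int idx, uint8_t clustersz)
    {
        uint32_t aff1 = idx / clustersz;   /* cluster number       */
        uint32_t aff0 = idx % clustersz;   /* CPU within cluster   */
        return ((uint64_t)aff1 << 8) | aff0;
    }

    /* e.g. build_mp_affinity(10, 8) == 0x102: cluster 1, CPU 2 */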
*/ #define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1 #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2 @@ -107,12 +95,6 @@ enum { #define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t)) #endif -/* Meanings of the ARMCPU object's four inbound GPIO lines */ -#define ARM_CPU_IRQ 0 -#define ARM_CPU_FIQ 1 -#define ARM_CPU_VIRQ 2 -#define ARM_CPU_VFIQ 3 - /* ARM-specific extra insn start words: * 1: Conditional execution bits * 2: Partial exception syndrome for data aborts @@ -120,12 +102,12 @@ enum { #define TARGET_INSN_START_EXTRA_WORDS 2 /* The 2nd extra word holding syndrome info for data aborts does not use - * the upper 6 bits nor the lower 14 bits. We mask and shift it down to + * the upper 6 bits nor the lower 13 bits. We mask and shift it down to * help the sleb128 encoder do a better job. * When restoring the CPU state, we shift it back up. */ #define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1) -#define ARM_INSN_START_WORD2_SHIFT 14 +#define ARM_INSN_START_WORD2_SHIFT 13 /* We currently assume float and double are IEEE single and double precision respectively. @@ -136,23 +118,21 @@ enum { */ /** - * DynamicGDBXMLInfo: - * @desc: Contains the XML descriptions. - * @num: Number of the registers in this XML seen by GDB. + * DynamicGDBFeatureInfo: + * @desc: Contains the feature descriptions. * @data: A union with data specific to the set of registers * @cpregs_keys: Array that contains the corresponding Key of * a given cpreg with the same order of the cpreg * in the XML description. */ -typedef struct DynamicGDBXMLInfo { - char *desc; - int num; +typedef struct DynamicGDBFeatureInfo { + GDBFeature desc; union { struct { uint32_t *keys; } cpregs; } data; -} DynamicGDBXMLInfo; +} DynamicGDBFeatureInfo; /* CPU state for each instance of a generic timer (in cp15 c14) */ typedef struct ARMGenericTimer { @@ -160,18 +140,6 @@ typedef struct ARMGenericTimer { uint64_t ctl; /* Timer Control register */ } ARMGenericTimer; -#define GTIMER_PHYS 0 -#define GTIMER_VIRT 1 -#define GTIMER_HYP 2 -#define GTIMER_SEC 3 -#define GTIMER_HYPVIRT 4 -#define NUM_GTIMERS 5 - -#define VTCR_NSW (1u << 29) -#define VTCR_NSA (1u << 30) -#define VSTCR_SW VTCR_NSW -#define VSTCR_SA VTCR_NSA - /* Define a maximum sized vector register. * For 32-bit, this is a 128-bit NEON/AdvSIMD register. * For 64-bit, this is a 2048-bit SVE register. @@ -484,6 +452,7 @@ typedef struct CPUArchState { uint64_t c14_cntkctl; /* Timer Control register */ uint64_t cnthctl_el2; /* Counter/Timer Hyp Control register */ uint64_t cntvoff_el2; /* Counter Virtual Offset register */ + uint64_t cntpoff_el2; /* Counter Physical Offset register */ ARMGenericTimer c14_timer[NUM_GTIMERS]; uint32_t c15_cpar; /* XScale Coprocessor Access Register */ uint32_t c15_ticonfig; /* TI925T configuration byte. 
*/ @@ -547,6 +516,9 @@ typedef struct CPUArchState { uint64_t gpccr_el3; uint64_t gptbr_el3; uint64_t mfar_el3; + + /* NV2 register */ + uint64_t vncr_el2; } cp15; struct { @@ -880,10 +852,10 @@ struct ArchCPU { uint64_t *cpreg_vmstate_values; int32_t cpreg_vmstate_array_len; - DynamicGDBXMLInfo dyn_sysreg_xml; - DynamicGDBXMLInfo dyn_svereg_xml; - DynamicGDBXMLInfo dyn_m_systemreg_xml; - DynamicGDBXMLInfo dyn_m_secextreg_xml; + DynamicGDBFeatureInfo dyn_sysreg_feature; + DynamicGDBFeatureInfo dyn_svereg_feature; + DynamicGDBFeatureInfo dyn_m_systemreg_feature; + DynamicGDBFeatureInfo dyn_m_secextreg_feature; /* Timers used by the generic (architected) timer */ QEMUTimer *gt_timer[NUM_GTIMERS]; @@ -1170,7 +1142,7 @@ void arm_cpu_post_init(Object *obj); (ARM_AFF0_MASK | ARM_AFF1_MASK | ARM_AFF2_MASK | ARM_AFF3_MASK) #define ARM64_AFFINITY_INVALID (~ARM64_AFFINITY_MASK) -uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz); +uint64_t arm_build_mp_affinity(int idx, uint8_t clustersz); #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_arm_cpu; @@ -1185,12 +1157,6 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, int arm_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -/* Returns the dynamically generated XML for the gdb stub. - * Returns a pointer to the XML contents for the specified XML file or NULL - * if the XML name doesn't match the predefined one. - */ -const char *arm_gdb_get_dynamic_xml(CPUState *cpu, const char *xmlname); - int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, int cpuid, DumpState *s); int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, @@ -1407,73 +1373,6 @@ void pmu_init(ARMCPU *cpu); #define SCTLR_SPINTMASK (1ULL << 62) /* FEAT_NMI */ #define SCTLR_TIDCP (1ULL << 63) /* FEAT_TIDCP1 */ -/* Bit definitions for CPACR (AArch32 only) */ -FIELD(CPACR, CP10, 20, 2) -FIELD(CPACR, CP11, 22, 2) -FIELD(CPACR, TRCDIS, 28, 1) /* matches CPACR_EL1.TTA */ -FIELD(CPACR, D32DIS, 30, 1) /* up to v7; RAZ in v8 */ -FIELD(CPACR, ASEDIS, 31, 1) - -/* Bit definitions for CPACR_EL1 (AArch64 only) */ -FIELD(CPACR_EL1, ZEN, 16, 2) -FIELD(CPACR_EL1, FPEN, 20, 2) -FIELD(CPACR_EL1, SMEN, 24, 2) -FIELD(CPACR_EL1, TTA, 28, 1) /* matches CPACR.TRCDIS */ - -/* Bit definitions for HCPTR (AArch32 only) */ -FIELD(HCPTR, TCP10, 10, 1) -FIELD(HCPTR, TCP11, 11, 1) -FIELD(HCPTR, TASE, 15, 1) -FIELD(HCPTR, TTA, 20, 1) -FIELD(HCPTR, TAM, 30, 1) /* matches CPTR_EL2.TAM */ -FIELD(HCPTR, TCPAC, 31, 1) /* matches CPTR_EL2.TCPAC */ - -/* Bit definitions for CPTR_EL2 (AArch64 only) */ -FIELD(CPTR_EL2, TZ, 8, 1) /* !E2H */ -FIELD(CPTR_EL2, TFP, 10, 1) /* !E2H, matches HCPTR.TCP10 */ -FIELD(CPTR_EL2, TSM, 12, 1) /* !E2H */ -FIELD(CPTR_EL2, ZEN, 16, 2) /* E2H */ -FIELD(CPTR_EL2, FPEN, 20, 2) /* E2H */ -FIELD(CPTR_EL2, SMEN, 24, 2) /* E2H */ -FIELD(CPTR_EL2, TTA, 28, 1) -FIELD(CPTR_EL2, TAM, 30, 1) /* matches HCPTR.TAM */ -FIELD(CPTR_EL2, TCPAC, 31, 1) /* matches HCPTR.TCPAC */ - -/* Bit definitions for CPTR_EL3 (AArch64 only) */ -FIELD(CPTR_EL3, EZ, 8, 1) -FIELD(CPTR_EL3, TFP, 10, 1) -FIELD(CPTR_EL3, ESM, 12, 1) -FIELD(CPTR_EL3, TTA, 20, 1) -FIELD(CPTR_EL3, TAM, 30, 1) -FIELD(CPTR_EL3, TCPAC, 31, 1) - -#define MDCR_MTPME (1U << 28) -#define MDCR_TDCC (1U << 27) -#define MDCR_HLP (1U << 26) /* MDCR_EL2 */ -#define MDCR_SCCD (1U << 23) /* MDCR_EL3 */ -#define MDCR_HCCD (1U << 23) /* MDCR_EL2 */ -#define MDCR_EPMAD (1U << 21) -#define MDCR_EDAD (1U << 20) -#define 
MDCR_TTRF (1U << 19) -#define MDCR_STE (1U << 18) /* MDCR_EL3 */ -#define MDCR_SPME (1U << 17) /* MDCR_EL3 */ -#define MDCR_HPMD (1U << 17) /* MDCR_EL2 */ -#define MDCR_SDD (1U << 16) -#define MDCR_SPD (3U << 14) -#define MDCR_TDRA (1U << 11) -#define MDCR_TDOSA (1U << 10) -#define MDCR_TDA (1U << 9) -#define MDCR_TDE (1U << 8) -#define MDCR_HPME (1U << 7) -#define MDCR_TPM (1U << 6) -#define MDCR_TPMCR (1U << 5) -#define MDCR_HPMN (0x1fU) - -/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */ -#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \ - MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \ - MDCR_STE | MDCR_SPME | MDCR_SPD) - #define CPSR_M (0x1fU) #define CPSR_T (1U << 5) #define CPSR_F (1U << 6) @@ -1520,41 +1419,6 @@ FIELD(CPTR_EL3, TCPAC, 31, 1) #define XPSR_NZCV CPSR_NZCV #define XPSR_IT CPSR_IT -#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */ -#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */ -#define TTBCR_PD0 (1U << 4) -#define TTBCR_PD1 (1U << 5) -#define TTBCR_EPD0 (1U << 7) -#define TTBCR_IRGN0 (3U << 8) -#define TTBCR_ORGN0 (3U << 10) -#define TTBCR_SH0 (3U << 12) -#define TTBCR_T1SZ (3U << 16) -#define TTBCR_A1 (1U << 22) -#define TTBCR_EPD1 (1U << 23) -#define TTBCR_IRGN1 (3U << 24) -#define TTBCR_ORGN1 (3U << 26) -#define TTBCR_SH1 (1U << 28) -#define TTBCR_EAE (1U << 31) - -FIELD(VTCR, T0SZ, 0, 6) -FIELD(VTCR, SL0, 6, 2) -FIELD(VTCR, IRGN0, 8, 2) -FIELD(VTCR, ORGN0, 10, 2) -FIELD(VTCR, SH0, 12, 2) -FIELD(VTCR, TG0, 14, 2) -FIELD(VTCR, PS, 16, 3) -FIELD(VTCR, VS, 19, 1) -FIELD(VTCR, HA, 21, 1) -FIELD(VTCR, HD, 22, 1) -FIELD(VTCR, HWU59, 25, 1) -FIELD(VTCR, HWU60, 26, 1) -FIELD(VTCR, HWU61, 27, 1) -FIELD(VTCR, HWU62, 28, 1) -FIELD(VTCR, NSW, 29, 1) -FIELD(VTCR, NSA, 30, 1) -FIELD(VTCR, DS, 32, 1) -FIELD(VTCR, SL2, 33, 1) - /* Bit definitions for ARMv8 SPSR (PSTATE) format. * Only these are valid when in AArch64 mode; in * AArch32 mode SPSRs are basically CPSR-format. @@ -1762,21 +1626,6 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) #define HCR_TWEDEN (1ULL << 59) #define HCR_TWEDEL MAKE_64BIT_MASK(60, 4) -#define HCRX_ENAS0 (1ULL << 0) -#define HCRX_ENALS (1ULL << 1) -#define HCRX_ENASR (1ULL << 2) -#define HCRX_FNXS (1ULL << 3) -#define HCRX_FGTNXS (1ULL << 4) -#define HCRX_SMPME (1ULL << 5) -#define HCRX_TALLINT (1ULL << 6) -#define HCRX_VINMI (1ULL << 7) -#define HCRX_VFNMI (1ULL << 8) -#define HCRX_CMOW (1ULL << 9) -#define HCRX_MCE2 (1ULL << 10) -#define HCRX_MSCEN (1ULL << 11) - -#define HPFAR_NS (1ULL << 63) - #define SCR_NS (1ULL << 0) #define SCR_IRQ (1ULL << 1) #define SCR_FIQ (1ULL << 2) @@ -1815,12 +1664,6 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) #define SCR_GPF (1ULL << 48) #define SCR_NSE (1ULL << 62) -#define HSTR_TTEE (1 << 16) -#define HSTR_TJDBX (1 << 17) - -#define CNTHCTL_CNTVMASK (1 << 18) -#define CNTHCTL_CNTPMASK (1 << 19) - /* Return the current FPSCR value. 
*/ uint32_t vfp_get_fpscr(CPUARMState *env); void vfp_set_fpscr(CPUARMState *env, uint32_t val); @@ -2739,7 +2582,6 @@ static inline bool access_secure_reg(CPUARMState *env) (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \ (_val)) -void arm_cpu_list(void); uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, uint32_t cur_el, bool secure); @@ -2836,14 +2678,10 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); #define ARM_CPUID_TI915T 0x54029152 #define ARM_CPUID_TI925T 0x54029252 -#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU -#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_ARM_CPU #define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU -#define cpu_list arm_cpu_list - /* ARM has the following "translation regimes" (as the ARM ARM calls them): * * If EL3 is 64-bit: @@ -3237,17 +3075,26 @@ FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1) FIELD(TBFLAG_A64, SVL, 24, 4) /* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */ FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1) -FIELD(TBFLAG_A64, FGT_ERET, 29, 1) +FIELD(TBFLAG_A64, TRAP_ERET, 29, 1) FIELD(TBFLAG_A64, NAA, 30, 1) FIELD(TBFLAG_A64, ATA0, 31, 1) +FIELD(TBFLAG_A64, NV, 32, 1) +FIELD(TBFLAG_A64, NV1, 33, 1) +FIELD(TBFLAG_A64, NV2, 34, 1) +/* Set if FEAT_NV2 RAM accesses use the EL2&0 translation regime */ +FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1) +/* Set if FEAT_NV2 RAM accesses are big-endian */ +FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1) /* - * Helpers for using the above. + * Helpers for using the above. Note that only the A64 accessors use + * FIELD_DP64() and FIELD_EX64(), because in the other cases the flags + * word either is or might be 32 bits only. */ #define DP_TBFLAG_ANY(DST, WHICH, VAL) \ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL)) #define DP_TBFLAG_A64(DST, WHICH, VAL) \ - (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A64, WHICH, VAL)) + (DST.flags2 = FIELD_DP64(DST.flags2, TBFLAG_A64, WHICH, VAL)) #define DP_TBFLAG_A32(DST, WHICH, VAL) \ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A32, WHICH, VAL)) #define DP_TBFLAG_M32(DST, WHICH, VAL) \ @@ -3256,24 +3103,11 @@ FIELD(TBFLAG_A64, ATA0, 31, 1) (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_AM32, WHICH, VAL)) #define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH) -#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A64, WHICH) +#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX64(IN.flags2, TBFLAG_A64, WHICH) #define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A32, WHICH) #define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_M32, WHICH) #define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_AM32, WHICH) -/** - * cpu_mmu_index: - * @env: The cpu environment - * @ifetch: True for code access, false for data access. - * - * Return the core mmu index for the current translation regime. - * This function is used by generic TCG code paths. 
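The TBFLAG_A64 fields added above (NV through NV2_MEM_BE) sit at bit positions 32-36, beyond what a 32-bit flags word can hold, which is why the A64 accessors switch from FIELD_DP32/FIELD_EX32 to the 64-bit variants. A small illustration, assuming flags2 is a uint64_t as the updated macros imply:

    uint64_t flags2 = 0;

    /* NV2 is declared as FIELD(TBFLAG_A64, NV2, 34, 1), i.e. bit 34 */
    flags2 = FIELD_DP64(flags2, TBFLAG_A64, NV2, 1);

    /* the 32-bit extract helpers cannot address bit 34 at all, so only
     * FIELD_EX64() can read the flag back */
    assert(FIELD_EX64(flags2, TBFLAG_A64, NV2) == 1);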
- */ -static inline int cpu_mmu_index(CPUARMState *env, bool ifetch) -{ - return EX_TBFLAG_ANY(env->hflags, MMUIDX); -} - /** * sve_vq * @env: the cpu context diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 313c6939b13..d783bf54c62 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -66,7 +66,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) */ if (kvm_enabled()) { if (kvm_arm_sve_supported()) { - cpu->sve_vq.supported = kvm_arm_sve_get_vls(CPU(cpu)); + cpu->sve_vq.supported = kvm_arm_sve_get_vls(cpu); vq_supported = cpu->sve_vq.supported; } else { assert(!cpu_isar_feature(aa64_sve, cpu)); @@ -673,7 +673,7 @@ static void aarch64_a53_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_PMU); cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53; cpu->midr = 0x410fd034; - cpu->revidr = 0x00000000; + cpu->revidr = 0x00000100; cpu->reset_fpsid = 0x41034070; cpu->isar.mvfr0 = 0x10110222; cpu->isar.mvfr1 = 0x12111111; @@ -803,7 +803,6 @@ static void aarch64_cpu_class_init(ObjectClass *oc, void *data) cc->gdb_read_register = aarch64_cpu_gdb_read_register; cc->gdb_write_register = aarch64_cpu_gdb_write_register; - cc->gdb_num_core_regs = 34; cc->gdb_core_xml_file = "aarch64-core.xml"; cc->gdb_arch_name = aarch64_gdb_arch_name; diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index cbfba532f50..7d856acddf2 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -844,6 +844,16 @@ static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } +static CPAccessResult access_dbgvcr32(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* MCDR_EL3.TDMA doesn't apply for FEAT_NV traps */ + if (arm_current_el(env) == 2 && (env->cp15.mdcr_el3 & MDCR_TDA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + /* * Check for traps to Debug Comms Channel registers. If FEAT_FGT * is implemented then these are controlled by MDCR_EL2.TDCC for @@ -950,6 +960,7 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, .access = PL1_RW, .accessfn = access_tda, .fgt = FGT_MDSCR_EL1, + .nv2_redirect_offset = 0x158, .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), .resetvalue = 0 }, /* @@ -1026,14 +1037,6 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_RW, .accessfn = access_tda, .type = ARM_CP_NOP }, - /* - * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor - * to save and restore a 32-bit guest's DBGVCR) - */ - { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, - .access = PL2_RW, .accessfn = access_tda, - .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP }, /* * Dummy MDCCINT_EL1, since we don't implement the Debug Communications * Channel but Linux may try to access this register. 
The 32-bit @@ -1062,6 +1065,18 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) }, }; +/* These are present only when EL1 supports AArch32 */ +static const ARMCPRegInfo debug_aa32_el1_reginfo[] = { + /* + * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor + * to save and restore a 32-bit guest's DBGVCR) + */ + { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, + .access = PL2_RW, .accessfn = access_dbgvcr32, + .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP }, +}; + static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { /* 64 bit access versions of the (dummy) debug registers */ { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, @@ -1207,6 +1222,9 @@ void define_debug_regs(ARMCPU *cpu) assert(ctx_cmps <= brps); define_arm_cp_regs(cpu, debug_cp_reginfo); + if (cpu_isar_feature(aa64_aa32_el1, cpu)) { + define_arm_cp_regs(cpu, debug_aa32_el1_reginfo); + } if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c index 28f546a5ff9..a3bb73cfa7c 100644 --- a/target/arm/gdbstub.c +++ b/target/arm/gdbstub.c @@ -26,11 +26,11 @@ #include "cpu-features.h" #include "cpregs.h" -typedef struct RegisterSysregXmlParam { +typedef struct RegisterSysregFeatureParam { CPUState *cs; - GString *s; + GDBFeatureBuilder builder; int n; -} RegisterSysregXmlParam; +} RegisterSysregFeatureParam; /* Old gdb always expect FPA registers. Newer (xml-aware) gdb only expect whatever the target description contains. Due to a historical mishap @@ -106,9 +106,10 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return 0; } -static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) +static int vfp_gdb_get_reg(CPUState *cs, GByteArray *buf, int reg) { - ARMCPU *cpu = env_archcpu(env); + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16; /* VFP data registers are always little-endian. */ @@ -130,9 +131,10 @@ static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) return 0; } -static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) +static int vfp_gdb_set_reg(CPUState *cs, uint8_t *buf, int reg) { - ARMCPU *cpu = env_archcpu(env); + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 
32 : 16; if (reg < nregs) { @@ -156,8 +158,11 @@ static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) return 0; } -static int vfp_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg) +static int vfp_gdb_get_sysreg(CPUState *cs, GByteArray *buf, int reg) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + switch (reg) { case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); @@ -167,8 +172,11 @@ static int vfp_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg) return 0; } -static int vfp_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) +static int vfp_gdb_set_sysreg(CPUState *cs, uint8_t *buf, int reg) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + switch (reg) { case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); @@ -180,8 +188,11 @@ static int vfp_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) return 0; } -static int mve_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) +static int mve_gdb_get_reg(CPUState *cs, GByteArray *buf, int reg) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + switch (reg) { case 0: return gdb_get_reg32(buf, env->v7m.vpr); @@ -190,8 +201,11 @@ static int mve_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) } } -static int mve_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) +static int mve_gdb_set_reg(CPUState *cs, uint8_t *buf, int reg) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + switch (reg) { case 0: env->v7m.vpr = ldl_p(buf); @@ -210,13 +224,14 @@ static int mve_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) * We return the number of bytes copied */ -static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg) +static int arm_gdb_get_sysreg(CPUState *cs, GByteArray *buf, int reg) { - ARMCPU *cpu = env_archcpu(env); + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; const ARMCPRegInfo *ri; uint32_t key; - key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg]; + key = cpu->dyn_sysreg_feature.data.cpregs.keys[reg]; ri = get_arm_cp_reginfo(cpu->cp_regs, key); if (ri) { if (cpreg_field_is_64bit(ri)) { @@ -228,39 +243,37 @@ static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg) return 0; } -static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) +static int arm_gdb_set_sysreg(CPUState *cs, uint8_t *buf, int reg) { return 0; } -static void arm_gen_one_xml_sysreg_tag(GString *s, DynamicGDBXMLInfo *dyn_xml, +static void arm_gen_one_feature_sysreg(GDBFeatureBuilder *builder, + DynamicGDBFeatureInfo *dyn_feature, ARMCPRegInfo *ri, uint32_t ri_key, - int bitsize, int regnum) + int bitsize, int n) { - g_string_append_printf(s, "name); - g_string_append_printf(s, " bitsize=\"%d\"", bitsize); - g_string_append_printf(s, " regnum=\"%d\"", regnum); - g_string_append_printf(s, " group=\"cp_regs\"/>"); - dyn_xml->data.cpregs.keys[dyn_xml->num] = ri_key; - dyn_xml->num++; + gdb_feature_builder_append_reg(builder, ri->name, bitsize, n, + "int", "cp_regs"); + + dyn_feature->data.cpregs.keys[n] = ri_key; } -static void arm_register_sysreg_for_xml(gpointer key, gpointer value, - gpointer p) +static void arm_register_sysreg_for_feature(gpointer key, gpointer value, + gpointer p) { uint32_t ri_key = (uintptr_t)key; ARMCPRegInfo *ri = value; - RegisterSysregXmlParam *param = (RegisterSysregXmlParam *)p; - GString *s = param->s; + RegisterSysregFeatureParam *param = p; ARMCPU *cpu = ARM_CPU(param->cs); CPUARMState *env = &cpu->env; - DynamicGDBXMLInfo *dyn_xml = &cpu->dyn_sysreg_xml; + 
DynamicGDBFeatureInfo *dyn_feature = &cpu->dyn_sysreg_feature; if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_NO_GDB))) { if (arm_feature(env, ARM_FEATURE_AARCH64)) { if (ri->state == ARM_CP_STATE_AA64) { - arm_gen_one_xml_sysreg_tag(s , dyn_xml, ri, ri_key, 64, - param->n++); + arm_gen_one_feature_sysreg(&param->builder, dyn_feature, + ri, ri_key, 64, param->n++); } } else { if (ri->state == ARM_CP_STATE_AA32) { @@ -269,32 +282,32 @@ static void arm_register_sysreg_for_xml(gpointer key, gpointer value, return; } if (ri->type & ARM_CP_64BIT) { - arm_gen_one_xml_sysreg_tag(s , dyn_xml, ri, ri_key, 64, - param->n++); + arm_gen_one_feature_sysreg(&param->builder, dyn_feature, + ri, ri_key, 64, param->n++); } else { - arm_gen_one_xml_sysreg_tag(s , dyn_xml, ri, ri_key, 32, - param->n++); + arm_gen_one_feature_sysreg(&param->builder, dyn_feature, + ri, ri_key, 32, param->n++); } } } } } -static int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg) +static GDBFeature *arm_gen_dynamic_sysreg_feature(CPUState *cs, int base_reg) { ARMCPU *cpu = ARM_CPU(cs); - GString *s = g_string_new(NULL); - RegisterSysregXmlParam param = {cs, s, base_reg}; - - cpu->dyn_sysreg_xml.num = 0; - cpu->dyn_sysreg_xml.data.cpregs.keys = g_new(uint32_t, g_hash_table_size(cpu->cp_regs)); - g_string_printf(s, ""); - g_string_append_printf(s, ""); - g_string_append_printf(s, ""); - g_hash_table_foreach(cpu->cp_regs, arm_register_sysreg_for_xml, &param); - g_string_append_printf(s, ""); - cpu->dyn_sysreg_xml.desc = g_string_free(s, false); - return cpu->dyn_sysreg_xml.num; + RegisterSysregFeatureParam param = {cs}; + gsize num_regs = g_hash_table_size(cpu->cp_regs); + + gdb_feature_builder_init(&param.builder, + &cpu->dyn_sysreg_feature.desc, + "org.qemu.gdb.arm.sys.regs", + "system-registers.xml", + base_reg); + cpu->dyn_sysreg_feature.data.cpregs.keys = g_new(uint32_t, num_regs); + g_hash_table_foreach(cpu->cp_regs, arm_register_sysreg_for_feature, &param); + gdb_feature_builder_end(&param.builder); + return &cpu->dyn_sysreg_feature.desc; } #ifdef CONFIG_TCG @@ -369,8 +382,11 @@ static int m_sysreg_get(CPUARMState *env, GByteArray *buf, return gdb_get_reg32(buf, *ptr); } -static int arm_gdb_get_m_systemreg(CPUARMState *env, GByteArray *buf, int reg) +static int arm_gdb_get_m_systemreg(CPUState *cs, GByteArray *buf, int reg) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + /* * Here, we emulate MRS instruction, where CONTROL has a mix of * banked and non-banked bits.
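Editor's sketch (not part of the patch): the gdbstub hunks above replace hand-built XML strings with the GDBFeatureBuilder helpers. The fragment below condenses that pattern into one place, using only the calls that appear in this diff (gdb_feature_builder_init, gdb_feature_builder_append_reg, gdb_feature_builder_end, and returning the built GDBFeature for gdb_register_coprocessor). The function name, feature name, and register name are illustrative, and the dyn_sysreg_feature slot is reused purely to keep the sketch self-contained.

/*
 * Illustrative only: the dynamic-feature pattern used by the hunks above,
 * assuming the builder signatures shown in this diff.
 */
static GDBFeature *example_gen_dynamic_feature(CPUState *cs, int base_reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GDBFeatureBuilder builder;
    int reg = 0;

    /* Bind the builder to the CPU's cached feature descriptor. */
    gdb_feature_builder_init(&builder, &cpu->dyn_sysreg_feature.desc,
                             "org.example.feature", "example.xml", base_reg);
    /* One call per register: name, bitsize, regnum, type, group. */
    gdb_feature_builder_append_reg(&builder, "example_reg", 64, reg++,
                                   "int", NULL);
    gdb_feature_builder_end(&builder);

    /* The caller hands the result to gdb_register_coprocessor(). */
    return &cpu->dyn_sysreg_feature.desc;
}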
@@ -381,36 +397,34 @@ static int arm_gdb_get_m_systemreg(CPUARMState *env, GByteArray *buf, int reg) return m_sysreg_get(env, buf, reg, env->v7m.secure); } -static int arm_gdb_set_m_systemreg(CPUARMState *env, uint8_t *buf, int reg) +static int arm_gdb_set_m_systemreg(CPUState *cs, uint8_t *buf, int reg) { return 0; /* TODO */ } -static int arm_gen_dynamic_m_systemreg_xml(CPUState *cs, int orig_base_reg) +static GDBFeature *arm_gen_dynamic_m_systemreg_feature(CPUState *cs, + int base_reg) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; - GString *s = g_string_new(NULL); - int base_reg = orig_base_reg; + GDBFeatureBuilder builder; + int reg = 0; int i; - g_string_printf(s, ""); - g_string_append_printf(s, ""); - g_string_append_printf(s, "\n"); + gdb_feature_builder_init(&builder, &cpu->dyn_m_systemreg_feature.desc, + "org.gnu.gdb.arm.m-system", "arm-m-system.xml", + base_reg); for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) { if (arm_feature(env, m_sysreg_def[i].feature)) { - g_string_append_printf(s, - "\n", - m_sysreg_def[i].name, base_reg++); + gdb_feature_builder_append_reg(&builder, m_sysreg_def[i].name, 32, + reg++, "int", NULL); } } - g_string_append_printf(s, ""); - cpu->dyn_m_systemreg_xml.desc = g_string_free(s, false); - cpu->dyn_m_systemreg_xml.num = base_reg - orig_base_reg; + gdb_feature_builder_end(&builder); - return cpu->dyn_m_systemreg_xml.num; + return &cpu->dyn_m_systemreg_feature.desc; } #ifndef CONFIG_USER_ONLY @@ -418,63 +432,48 @@ static int arm_gen_dynamic_m_systemreg_xml(CPUState *cs, int orig_base_reg) * For user-only, we see the non-secure registers via m_systemreg above. * For secext, encode the non-secure view as even and secure view as odd. */ -static int arm_gdb_get_m_secextreg(CPUARMState *env, GByteArray *buf, int reg) +static int arm_gdb_get_m_secextreg(CPUState *cs, GByteArray *buf, int reg) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + return m_sysreg_get(env, buf, reg >> 1, reg & 1); } -static int arm_gdb_set_m_secextreg(CPUARMState *env, uint8_t *buf, int reg) +static int arm_gdb_set_m_secextreg(CPUState *cs, uint8_t *buf, int reg) { return 0; /* TODO */ } -static int arm_gen_dynamic_m_secextreg_xml(CPUState *cs, int orig_base_reg) +static GDBFeature *arm_gen_dynamic_m_secextreg_feature(CPUState *cs, + int base_reg) { ARMCPU *cpu = ARM_CPU(cs); - GString *s = g_string_new(NULL); - int base_reg = orig_base_reg; + GDBFeatureBuilder builder; + char *name; + int reg = 0; int i; - g_string_printf(s, ""); - g_string_append_printf(s, ""); - g_string_append_printf(s, "\n"); + gdb_feature_builder_init(&builder, &cpu->dyn_m_secextreg_feature.desc, + "org.gnu.gdb.arm.secext", "arm-m-secext.xml", + base_reg); for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) { - g_string_append_printf(s, - "\n", - m_sysreg_def[i].name, base_reg++); - g_string_append_printf(s, - "\n", - m_sysreg_def[i].name, base_reg++); + name = g_strconcat(m_sysreg_def[i].name, "_ns", NULL); + gdb_feature_builder_append_reg(&builder, name, 32, reg++, + "int", NULL); + name = g_strconcat(m_sysreg_def[i].name, "_s", NULL); + gdb_feature_builder_append_reg(&builder, name, 32, reg++, + "int", NULL); } - g_string_append_printf(s, ""); - cpu->dyn_m_secextreg_xml.desc = g_string_free(s, false); - cpu->dyn_m_secextreg_xml.num = base_reg - orig_base_reg; + gdb_feature_builder_end(&builder); - return cpu->dyn_m_secextreg_xml.num; + return &cpu->dyn_m_secextreg_feature.desc; } #endif #endif /* CONFIG_TCG */ -const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) -{ 
- ARMCPU *cpu = ARM_CPU(cs); - - if (strcmp(xmlname, "system-registers.xml") == 0) { - return cpu->dyn_sysreg_xml.desc; - } else if (strcmp(xmlname, "sve-registers.xml") == 0) { - return cpu->dyn_svereg_xml.desc; - } else if (strcmp(xmlname, "arm-m-system.xml") == 0) { - return cpu->dyn_m_systemreg_xml.desc; -#ifndef CONFIG_USER_ONLY - } else if (strcmp(xmlname, "arm-m-secext.xml") == 0) { - return cpu->dyn_m_secextreg_xml.desc; -#endif - } - return NULL; -} - void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) { CPUState *cs = CPU(cpu); @@ -487,14 +486,14 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) */ #ifdef TARGET_AARCH64 if (isar_feature_aa64_sve(&cpu->isar)) { - int nreg = arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs); + GDBFeature *feature = arm_gen_dynamic_svereg_feature(cs, cs->gdb_num_regs); gdb_register_coprocessor(cs, aarch64_gdb_get_sve_reg, - aarch64_gdb_set_sve_reg, nreg, - "sve-registers.xml", 0); + aarch64_gdb_set_sve_reg, feature, 0); } else { gdb_register_coprocessor(cs, aarch64_gdb_get_fpu_reg, aarch64_gdb_set_fpu_reg, - 34, "aarch64-fpu.xml", 0); + gdb_find_static_feature("aarch64-fpu.xml"), + 0); } /* * Note that we report pauth information via the feature name @@ -505,19 +504,22 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) if (isar_feature_aa64_pauth(&cpu->isar)) { gdb_register_coprocessor(cs, aarch64_gdb_get_pauth_reg, aarch64_gdb_set_pauth_reg, - 4, "aarch64-pauth.xml", 0); + gdb_find_static_feature("aarch64-pauth.xml"), + 0); } #endif } else { if (arm_feature(env, ARM_FEATURE_NEON)) { gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 49, "arm-neon.xml", 0); + gdb_find_static_feature("arm-neon.xml"), + 0); } else if (cpu_isar_feature(aa32_simd_r32, cpu)) { gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 33, "arm-vfp3.xml", 0); + gdb_find_static_feature("arm-vfp3.xml"), + 0); } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) { gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 17, "arm-vfp.xml", 0); + gdb_find_static_feature("arm-vfp.xml"), 0); } if (!arm_feature(env, ARM_FEATURE_M)) { /* @@ -525,29 +527,29 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) * expose to gdb. 
*/ gdb_register_coprocessor(cs, vfp_gdb_get_sysreg, vfp_gdb_set_sysreg, - 2, "arm-vfp-sysregs.xml", 0); + gdb_find_static_feature("arm-vfp-sysregs.xml"), + 0); } } if (cpu_isar_feature(aa32_mve, cpu) && tcg_enabled()) { gdb_register_coprocessor(cs, mve_gdb_get_reg, mve_gdb_set_reg, - 1, "arm-m-profile-mve.xml", 0); + gdb_find_static_feature("arm-m-profile-mve.xml"), + 0); } gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, - arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs), - "system-registers.xml", 0); + arm_gen_dynamic_sysreg_feature(cs, cs->gdb_num_regs), + 0); #ifdef CONFIG_TCG if (arm_feature(env, ARM_FEATURE_M) && tcg_enabled()) { gdb_register_coprocessor(cs, arm_gdb_get_m_systemreg, arm_gdb_set_m_systemreg, - arm_gen_dynamic_m_systemreg_xml(cs, cs->gdb_num_regs), - "arm-m-system.xml", 0); + arm_gen_dynamic_m_systemreg_feature(cs, cs->gdb_num_regs), 0); #ifndef CONFIG_USER_ONLY if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { gdb_register_coprocessor(cs, arm_gdb_get_m_secextreg, arm_gdb_set_m_secextreg, - arm_gen_dynamic_m_secextreg_xml(cs, cs->gdb_num_regs), - "arm-m-secext.xml", 0); + arm_gen_dynamic_m_secextreg_feature(cs, cs->gdb_num_regs), 0); } #endif } diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c index d7b79a6589b..caa31ff3fa1 100644 --- a/target/arm/gdbstub64.c +++ b/target/arm/gdbstub64.c @@ -72,8 +72,11 @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return 0; } -int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg) +int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + switch (reg) { case 0 ... 31: { @@ -92,8 +95,11 @@ int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg) } } -int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg) +int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + switch (reg) { case 0 ... 31: /* 128 bit FP register */ @@ -116,9 +122,10 @@ int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg) } } -int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg) +int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg) { - ARMCPU *cpu = env_archcpu(env); + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; switch (reg) { /* The first 32 registers are the zregs */ @@ -164,9 +171,10 @@ int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg) return 0; } -int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg) +int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg) { - ARMCPU *cpu = env_archcpu(env); + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; /* The first 32 registers are the zregs */ switch (reg) { @@ -210,8 +218,11 @@ int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg) return 0; } -int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg) +int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + switch (reg) { case 0: /* pauth_dmask */ case 1: /* pauth_cmask */ @@ -241,13 +252,13 @@ int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg) } } -int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg) +int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg) { /* All pseudo registers are read-only. 
*/ return 0; } -static void output_vector_union_type(GString *s, int reg_width, +static void output_vector_union_type(GDBFeatureBuilder *builder, int reg_width, const char *name) { struct TypeSize { @@ -282,10 +293,10 @@ static void output_vector_union_type(GString *s, int reg_width, /* First define types and totals in a whole VL */ for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) { - g_string_append_printf(s, - "", - name, vec_lanes[i].sz, vec_lanes[i].suffix, - vec_lanes[i].gdb_type, reg_width / vec_lanes[i].size); + gdb_feature_builder_append_tag( + builder, "", + name, vec_lanes[i].sz, vec_lanes[i].suffix, + vec_lanes[i].gdb_type, reg_width / vec_lanes[i].size); } /* @@ -296,86 +307,77 @@ static void output_vector_union_type(GString *s, int reg_width, for (i = 0; i < ARRAY_SIZE(suf); i++) { int bits = 8 << i; - g_string_append_printf(s, "", name, suf[i]); + gdb_feature_builder_append_tag(builder, "", + name, suf[i]); for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) { if (vec_lanes[j].size == bits) { - g_string_append_printf(s, "", - vec_lanes[j].suffix, name, - vec_lanes[j].sz, vec_lanes[j].suffix); + gdb_feature_builder_append_tag( + builder, "", + vec_lanes[j].suffix, name, + vec_lanes[j].sz, vec_lanes[j].suffix); } } - g_string_append(s, ""); + gdb_feature_builder_append_tag(builder, ""); } /* And now the final union of unions */ - g_string_append_printf(s, "", name); + gdb_feature_builder_append_tag(builder, "", name); for (i = ARRAY_SIZE(suf) - 1; i >= 0; i--) { - g_string_append_printf(s, "", - suf[i], name, suf[i]); + gdb_feature_builder_append_tag(builder, + "", + suf[i], name, suf[i]); } - g_string_append(s, ""); + gdb_feature_builder_append_tag(builder, ""); } -int arm_gen_dynamic_svereg_xml(CPUState *cs, int orig_base_reg) +GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cs, int base_reg) { ARMCPU *cpu = ARM_CPU(cs); - GString *s = g_string_new(NULL); - DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml; int reg_width = cpu->sve_max_vq * 128; int pred_width = cpu->sve_max_vq * 16; - int base_reg = orig_base_reg; + GDBFeatureBuilder builder; + char *name; + int reg = 0; int i; - g_string_printf(s, ""); - g_string_append_printf(s, ""); - g_string_append_printf(s, ""); + gdb_feature_builder_init(&builder, &cpu->dyn_svereg_feature.desc, + "org.gnu.gdb.aarch64.sve", "sve-registers.xml", + base_reg); /* Create the vector union type. */ - output_vector_union_type(s, reg_width, "svev"); + output_vector_union_type(&builder, reg_width, "svev"); /* Create the predicate vector type. */ - g_string_append_printf(s, - "", - pred_width / 8); + gdb_feature_builder_append_tag( + &builder, "", + pred_width / 8); /* Define the vector registers. */ for (i = 0; i < 32; i++) { - g_string_append_printf(s, - "", - i, reg_width, base_reg++); + name = g_strdup_printf("z%d", i); + gdb_feature_builder_append_reg(&builder, name, reg_width, reg++, + "svev", NULL); } /* fpscr & status registers */ - g_string_append_printf(s, "", base_reg++); - g_string_append_printf(s, "", base_reg++); + gdb_feature_builder_append_reg(&builder, "fpsr", 32, reg++, + "int", "float"); + gdb_feature_builder_append_reg(&builder, "fpcr", 32, reg++, + "int", "float"); /* Define the predicate registers. 
*/ for (i = 0; i < 16; i++) { - g_string_append_printf(s, - "", - i, pred_width, base_reg++); + name = g_strdup_printf("p%d", i); + gdb_feature_builder_append_reg(&builder, name, pred_width, reg++, + "svep", NULL); } - g_string_append_printf(s, - "", - pred_width, base_reg++); + gdb_feature_builder_append_reg(&builder, "ffr", pred_width, reg++, + "svep", "vector"); /* Define the vector length pseudo-register. */ - g_string_append_printf(s, - "", - base_reg++); + gdb_feature_builder_append_reg(&builder, "vg", 64, reg++, "int", NULL); - g_string_append_printf(s, ""); + gdb_feature_builder_end(&builder); - info->desc = g_string_free(s, false); - info->num = base_reg - orig_base_reg; - return info->num; + return &cpu->dyn_svereg_feature.desc; } diff --git a/target/arm/gtimer.h b/target/arm/gtimer.h new file mode 100644 index 00000000000..b992941bef1 --- /dev/null +++ b/target/arm/gtimer.h @@ -0,0 +1,21 @@ +/* + * ARM generic timer definitions for Arm A-class CPU + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef TARGET_ARM_GTIMER_H +#define TARGET_ARM_GTIMER_H + +enum { + GTIMER_PHYS = 0, + GTIMER_VIRT = 1, + GTIMER_HYP = 2, + GTIMER_SEC = 3, + GTIMER_HYPVIRT = 4, +#define NUM_GTIMERS 5 +}; + +#endif diff --git a/target/arm/helper.c b/target/arm/helper.c index df1646de3ae..a620481d7cf 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -30,6 +30,7 @@ #include "semihosting/common-semi.h" #endif #include "cpregs.h" +#include "target/arm/gtimer.h" #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ @@ -263,6 +264,18 @@ void init_cpreg_list(ARMCPU *cpu) g_list_free(keys); } +static bool arm_pan_enabled(CPUARMState *env) +{ + if (is_a64(env)) { + if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) { + return false; + } + return env->pstate & PSTATE_PAN; + } else { + return env->uncached_cpsr & CPSR_PAN; + } +} + /* * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0. 
*/ @@ -635,6 +648,7 @@ static const ARMCPRegInfo cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_CONTEXTIDR_EL1, + .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1, .secure = ARM_CP_SECSTATE_NS, .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, @@ -871,6 +885,7 @@ static const ARMCPRegInfo v6_cp_reginfo[] = { { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, .fgt = FGT_CPACR_EL1, + .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, }; @@ -934,16 +949,19 @@ static int64_t cycles_ns_per(uint64_t cycles) static bool instructions_supported(CPUARMState *env) { - return icount_enabled() == 1; /* Precise instruction counting */ + /* Precise instruction counting */ + return icount_enabled() == ICOUNT_PRECISE; } static uint64_t instructions_get_count(CPUARMState *env) { + assert(icount_enabled() == ICOUNT_PRECISE); return (uint64_t)icount_get_raw(); } static int64_t instructions_ns_per(uint64_t icount) { + assert(icount_enabled() == ICOUNT_PRECISE); return icount_to_ns((int64_t)icount); } #endif @@ -1483,6 +1501,22 @@ static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, pmu_op_finish(env); } +static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint64_t pmcr = env->cp15.c9_pmcr; + + /* + * If EL2 is implemented and enabled for the current security state, reads + * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN. + */ + if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) { + pmcr &= ~PMCRN_MASK; + pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT; + } + + return pmcr; +} + static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -1889,6 +1923,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) if (cpu_isar_feature(aa64_rme, cpu)) { valid_mask |= SCR_NSE | SCR_GPF; } + if (cpu_isar_feature(aa64_ecv, cpu)) { + valid_mask |= SCR_ECVEN; + } } else { valid_mask &= ~(SCR_RW | SCR_ST); if (cpu_isar_feature(aa32_ras, cpu)) { @@ -2230,11 +2267,13 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_AFSR0_EL1, + .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_AFSR1_EL1, + .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1, .type = ARM_CP_CONST, .resetvalue = 0 }, /* * MAIR can just read-as-written because we don't implement caches @@ -2244,6 +2283,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_MAIR_EL1, + .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), .resetvalue = 0 }, { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, @@ -2493,6 +2533,11 @@ static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, : !extract32(env->cp15.cnthctl_el2, 0, 1))) { return CP_ACCESS_TRAP_EL2; } + if (has_el2 && timeridx == GTIMER_VIRT) { + if 
(FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) { + return CP_ACCESS_TRAP_EL2; + } + } break; } return CP_ACCESS_OK; @@ -2536,6 +2581,11 @@ static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, } } } + if (has_el2 && timeridx == GTIMER_VIRT) { + if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) { + return CP_ACCESS_TRAP_EL2; + } + } break; } return CP_ACCESS_OK; @@ -2615,8 +2665,8 @@ static void gt_update_irq(ARMCPU *cpu, int timeridx) * It is RES0 in Secure and NonSecure state. */ if ((ss == ARMSS_Root || ss == ARMSS_Realm) && - ((timeridx == GTIMER_VIRT && (cnthctl & CNTHCTL_CNTVMASK)) || - (timeridx == GTIMER_PHYS && (cnthctl & CNTHCTL_CNTPMASK)))) { + ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) || + (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) { irqstate = 0; } @@ -2635,6 +2685,25 @@ void gt_rme_post_el_change(ARMCPU *cpu, void *ignored) gt_update_irq(cpu, GTIMER_PHYS); } +static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env) +{ + if ((env->cp15.scr_el3 & SCR_ECVEN) && + FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) && + arm_is_el2_enabled(env) && + (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { + return env->cp15.cntpoff_el2; + } + return 0; +} + +static uint64_t gt_phys_cnt_offset(CPUARMState *env) +{ + if (arm_current_el(env) >= 2) { + return 0; + } + return gt_phys_raw_cnt_offset(env); +} + static void gt_recalc_timer(ARMCPU *cpu, int timeridx) { ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; @@ -2645,7 +2714,7 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx) * reset timer to when ISTATUS next has to change */ uint64_t offset = timeridx == GTIMER_VIRT ? - cpu->env.cp15.cntvoff_el2 : 0; + cpu->env.cp15.cntvoff_el2 : gt_phys_raw_cnt_offset(&cpu->env); uint64_t count = gt_get_countervalue(&cpu->env); /* Note that this must be unsigned 64 bit arithmetic: */ int istatus = count - offset >= gt->cval; @@ -2708,7 +2777,7 @@ static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) { - return gt_get_countervalue(env); + return gt_get_countervalue(env) - gt_phys_cnt_offset(env); } static uint64_t gt_virt_cnt_offset(CPUARMState *env) @@ -2757,6 +2826,9 @@ static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, case GTIMER_HYPVIRT: offset = gt_virt_cnt_offset(env); break; + case GTIMER_PHYS: + offset = gt_phys_cnt_offset(env); + break; } return (uint32_t)(env->cp15.c14_timer[timeridx].cval - @@ -2774,6 +2846,9 @@ static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, case GTIMER_HYPVIRT: offset = gt_virt_cnt_offset(env); break; + case GTIMER_PHYS: + offset = gt_phys_cnt_offset(env); + break; } trace_arm_gt_tval_write(timeridx, value); @@ -2931,12 +3006,40 @@ static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri, { ARMCPU *cpu = env_archcpu(env); uint32_t oldval = env->cp15.cnthctl_el2; + uint32_t valid_mask = + R_CNTHCTL_EL0PCTEN_E2H1_MASK | + R_CNTHCTL_EL0VCTEN_E2H1_MASK | + R_CNTHCTL_EVNTEN_MASK | + R_CNTHCTL_EVNTDIR_MASK | + R_CNTHCTL_EVNTI_MASK | + R_CNTHCTL_EL0VTEN_MASK | + R_CNTHCTL_EL0PTEN_MASK | + R_CNTHCTL_EL1PCTEN_E2H1_MASK | + R_CNTHCTL_EL1PTEN_MASK; + + if (cpu_isar_feature(aa64_rme, cpu)) { + valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK; + } + if (cpu_isar_feature(aa64_ecv_traps, cpu)) { + valid_mask |= + R_CNTHCTL_EL1TVT_MASK | + R_CNTHCTL_EL1TVCT_MASK | + R_CNTHCTL_EL1NVPCT_MASK | + R_CNTHCTL_EL1NVVCT_MASK | + 
R_CNTHCTL_EVNTIS_MASK; + } + if (cpu_isar_feature(aa64_ecv, cpu)) { + valid_mask |= R_CNTHCTL_ECV_MASK; + } + + /* Clear RES0 bits */ + value &= valid_mask; raw_write(env, ri, value); - if ((oldval ^ value) & CNTHCTL_CNTVMASK) { + if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) { gt_update_irq(cpu, GTIMER_VIRT); - } else if ((oldval ^ value) & CNTHCTL_CNTPMASK) { + } else if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) { gt_update_irq(cpu, GTIMER_PHYS); } } @@ -3166,6 +3269,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, .type = ARM_CP_IO, .access = PL0_RW, .accessfn = gt_ptimer_access, + .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), .resetvalue = 0, .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, @@ -3183,6 +3287,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, .type = ARM_CP_IO, .access = PL0_RW, .accessfn = gt_vtimer_access, + .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), .resetvalue = 0, .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, @@ -3262,6 +3367,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, .access = PL0_RW, .type = ARM_CP_IO, + .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), .resetvalue = 0, .accessfn = gt_ptimer_access, .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, @@ -3279,6 +3385,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, .access = PL0_RW, .type = ARM_CP_IO, + .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), .resetvalue = 0, .accessfn = gt_vtimer_access, .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, @@ -3313,15 +3420,63 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { }, }; -static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +/* + * FEAT_ECV adds extra views of CNTVCT_EL0 and CNTPCT_EL0 which + * are "self-synchronizing". For QEMU all sysregs are self-synchronizing, + * so our implementations here are identical to the normal registers. 
+ */ +static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = { + { .name = "CNTVCTSS", .cp = 15, .crm = 14, .opc1 = 9, + .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = gt_vct_access, + .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, + }, + { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6, + .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, + }, + { .name = "CNTPCTSS", .cp = 15, .crm = 14, .opc1 = 8, + .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = gt_pct_access, + .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, + }, + { .name = "CNTPCTSS_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 5, + .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = gt_pct_access, .readfn = gt_cnt_read, + }, +}; + +static CPAccessResult gt_cntpoff_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) { - if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { - return CP_ACCESS_TRAP; + if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) && + !(env->cp15.scr_el3 & SCR_ECVEN)) { + return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; } +static void gt_cntpoff_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + trace_arm_gt_cntpoff_write(value); + raw_write(env, ri, value); + gt_recalc_timer(cpu, GTIMER_PHYS); +} + +static const ARMCPRegInfo gen_timer_cntpoff_reginfo = { + .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6, + .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, + .accessfn = gt_cntpoff_access, .writefn = gt_cntpoff_write, + .nv2_redirect_offset = 0x1a8, + .fieldoffset = offsetof(CPUARMState, cp15.cntpoff_el2), +}; #else /* @@ -3355,6 +3510,18 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { }, }; +/* + * CNTVCTSS_EL0 has the same trap conditions as CNTVCT_EL0, so it also + * is exposed to userspace by Linux. 
+ */ +static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = { + { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6, + .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .readfn = gt_virt_cnt_read, + }, +}; + #endif static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) @@ -3601,7 +3768,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ /* fall through */ case 1: - if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { + if (ri->crm == 9 && arm_pan_enabled(env)) { mmu_idx = ARMMMUIdx_Stage1_E1_PAN; } else { mmu_idx = ARMMMUIdx_Stage1_E1; @@ -3695,6 +3862,15 @@ static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, return at_e012_access(env, ri, isread); } +static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) { + return CP_ACCESS_TRAP_EL2; + } + return at_e012_access(env, ri, isread); +} + static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -3703,12 +3879,14 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, ARMMMUIdx mmu_idx; uint64_t hcr_el2 = arm_hcr_el2_eff(env); bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE); + bool for_el3 = false; + ARMSecuritySpace ss; switch (ri->opc2 & 6) { case 0: switch (ri->opc1) { case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ - if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { + if (ri->crm == 9 && arm_pan_enabled(env)) { mmu_idx = regime_e20 ? ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN; } else { @@ -3720,6 +3898,7 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, break; case 6: /* AT S1E3R, AT S1E3W */ mmu_idx = ARMMMUIdx_E3; + for_el3 = true; break; default: g_assert_not_reached(); @@ -3738,8 +3917,8 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, g_assert_not_reached(); } - env->cp15.par_el[1] = do_ats_write(env, value, access_type, - mmu_idx, arm_security_space(env)); + ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env); + env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss); #else /* Handled by hardware accelerator. 
*/ g_assert_not_reached(); @@ -4244,6 +4423,7 @@ static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_FAR_EL1, + .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), .resetvalue = 0, }, }; @@ -4253,11 +4433,13 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_ESR_EL1, + .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_TTBR0_EL1, + .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1, .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), offsetof(CPUARMState, cp15.ttbr0_ns) } }, @@ -4265,6 +4447,7 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_TTBR1_EL1, + .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1, .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), offsetof(CPUARMState, cp15.ttbr1_ns) } }, @@ -4272,6 +4455,7 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_TCR_EL1, + .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1, .writefn = vmsa_tcr_el12_write, .raw_writefn = raw_write, .resetvalue = 0, @@ -4511,6 +4695,7 @@ static const ARMCPRegInfo lpae_cp_reginfo[] = { .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_AMAIR_EL1, + .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1, .type = ARM_CP_CONST, .resetvalue = 0 }, /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, @@ -5333,6 +5518,19 @@ static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, } } +static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1) { + uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2); + + if (hcr_nv == (HCR_NV | HCR_NV1)) { + return CP_ACCESS_TRAP_EL2; + } + } + return CP_ACCESS_OK; +} + #ifdef CONFIG_USER_ONLY /* * `IC IVAU` is handled to improve compatibility with JITs that dual-map their @@ -5560,22 +5758,22 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E1R, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E1W, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E0R, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = 
at_s1e01_access, .writefn = ats_write64 }, { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E0W, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, @@ -5681,12 +5879,14 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, - .access = PL1_RW, + .access = PL1_RW, .accessfn = access_nv1, + .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, - .access = PL1_RW, + .access = PL1_RW, .accessfn = access_nv1, + .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, /* * We rely on the access checks not allowing the guest to write to the @@ -5700,26 +5900,13 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, + .nv2_redirect_offset = 0x240, .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP, .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, { .name = "SPSel", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, .type = ARM_CP_NO_RAW, .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, - { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, - .access = PL2_RW, - .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP, - .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) }, - { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, - .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, - .writefn = dacr_write, .raw_writefn = raw_write, - .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, - { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, - .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, - .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, @@ -5754,6 +5941,24 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, }; +/* These are present only when EL1 supports AArch32 */ +static const ARMCPRegInfo v8_aa32_el1_reginfo[] = { + { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, + .access = PL2_RW, + .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP, + .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) }, + { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, + .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, + .writefn = dacr_write, .raw_writefn = raw_write, + .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, + { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 
3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, + .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, + .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, +}; + static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) { ARMCPU *cpu = env_archcpu(env); @@ -5803,6 +6008,12 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) if (cpu_isar_feature(aa64_rme, cpu)) { valid_mask |= HCR_GPF; } + if (cpu_isar_feature(aa64_nv, cpu)) { + valid_mask |= HCR_NV | HCR_NV1 | HCR_AT; + } + if (cpu_isar_feature(aa64_nv2, cpu)) { + valid_mask |= HCR_NV2; + } } if (cpu_isar_feature(any_evt, cpu)) { @@ -5821,9 +6032,10 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) * HCR_DC disables stage1 and enables stage2 translation * HCR_DCT enables tagging on (disabled) stage1 translation * HCR_FWB changes the interpretation of stage2 descriptor bits + * HCR_NV and HCR_NV1 affect interpretation of descriptor bits */ if ((env->cp15.hcr_el2 ^ value) & - (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) { + (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) { tlb_flush(CPU(cpu)); } env->cp15.hcr_el2 = value; @@ -5832,14 +6044,14 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) * Updates to VI and VF require us to update the status of * virtual interrupts, which are the logical OR of these bits * and the state of the input lines from the GIC. (This requires - * that we have the iothread lock, which is done by marking the + * that we have the BQL, which is done by marking the * reginfo structs as ARM_CP_IO.) * Note that if a write to HCR pends a VIRQ or VFIQ it is never * possible for it to be taken immediately, because VIRQ and * VFIQ are masked unless running at EL0 or EL1, and HCR * can only be written at EL2. 
*/ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); arm_cpu_update_virq(cpu); arm_cpu_update_vfiq(cpu); arm_cpu_update_vserr(cpu); @@ -5989,7 +6201,7 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri, static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { - if (arm_current_el(env) < 3 + if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) { return CP_ACCESS_TRAP_EL3; @@ -6001,6 +6213,7 @@ static const ARMCPRegInfo hcrx_el2_reginfo = { .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2, .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen, + .nv2_redirect_offset = 0xa0, .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2), }; @@ -6067,6 +6280,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .type = ARM_CP_IO, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), + .nv2_redirect_offset = 0x78, .writefn = hcr_write, .raw_writefn = raw_write }, { .name = "HCR", .state = ARM_CP_STATE_AA32, .type = ARM_CP_ALIAS | ARM_CP_IO, @@ -6077,14 +6291,16 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, - .type = ARM_CP_ALIAS, + .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, + .type = ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, + .type = ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, { .name = "HIFAR", .state = ARM_CP_STATE_AA32, @@ -6093,7 +6309,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .access = PL2_RW, .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, - .type = ARM_CP_ALIAS, + .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, @@ -6149,6 +6365,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, .access = PL2_RW, + .nv2_redirect_offset = 0x40, /* no .writefn needed as this can't cause an ASID change */ .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, { .name = "VTTBR", .state = ARM_CP_STATE_AA32, @@ -6160,6 +6377,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write, + .nv2_redirect_offset = 0x20, .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, @@ -6168,6 +6386,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, .access = PL2_RW, .resetvalue = 0, + .nv2_redirect_offset = 0x90, .fieldoffset = 
offsetof(CPUARMState, cp15.tpidr_el[2]) }, { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, @@ -6263,6 +6482,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, .writefn = gt_cntvoff_write, + .nv2_redirect_offset = 0x60, .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, @@ -6301,6 +6521,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, .access = PL2_RW, + .nv2_redirect_offset = 0x80, .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, }; @@ -6326,10 +6547,12 @@ static const ARMCPRegInfo el2_sec_cp_reginfo[] = { { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0, .access = PL2_RW, .accessfn = sel2_access, + .nv2_redirect_offset = 0x30, .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) }, { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2, .access = PL2_RW, .accessfn = sel2_access, + .nv2_redirect_offset = 0x48, .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) }, }; @@ -6456,6 +6679,44 @@ static const ARMCPRegInfo el3_cp_reginfo[] = { }; #ifndef CONFIG_USER_ONLY + +static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1) { + /* This must be a FEAT_NV access */ + return CP_ACCESS_OK; + } + if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { + return CP_ACCESS_TRAP_UNCATEGORIZED; + } + return CP_ACCESS_OK; +} + +static CPAccessResult access_el1nvpct(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1) { + /* This must be a FEAT_NV access with NVx == 101 */ + if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) { + return CP_ACCESS_TRAP_EL2; + } + } + return e2h_access(env, ri, isread); +} + +static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1) { + /* This must be a FEAT_NV access with NVx == 101 */ + if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) { + return CP_ACCESS_TRAP_EL2; + } + } + return e2h_access(env, ri, isread); +} + /* Test if system register redirection is to occur in the current state. */ static bool redirect_for_e2h(CPUARMState *env) { @@ -6497,6 +6758,42 @@ static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri, writefn(env, ri, value); } +static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* Pass the EL1 register accessor its ri, not the EL12 alias ri */ + return ri->orig_readfn(env, ri->opaque); +} + +static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Pass the EL1 register accessor its ri, not the EL12 alias ri */ + return ri->orig_writefn(env, ri->opaque, value); +} + +static CPAccessResult el2_e2h_e12_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1) { + /* + * This must be a FEAT_NV access (will either trap or redirect + * to memory). None of the registers with _EL12 aliases want to + * apply their trap controls for this kind of access, so don't + * call the orig_accessfn or do the "UNDEF when E2H is 0" check. 
+ */ + return CP_ACCESS_OK; + } + /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */ + if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { + return CP_ACCESS_TRAP_UNCATEGORIZED; + } + if (ri->orig_accessfn) { + return ri->orig_accessfn(env, ri->opaque, isread); + } + return CP_ACCESS_OK; +} + static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) { struct E2HAlias { @@ -6596,6 +6893,41 @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) new_reg->type |= ARM_CP_ALIAS; /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ new_reg->access &= PL2_RW | PL3_RW; + /* The new_reg op fields are as per new_key, not the target reg */ + new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK) + >> CP_REG_ARM64_SYSREG_CRN_SHIFT; + new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK) + >> CP_REG_ARM64_SYSREG_CRM_SHIFT; + new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK) + >> CP_REG_ARM64_SYSREG_OP0_SHIFT; + new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK) + >> CP_REG_ARM64_SYSREG_OP1_SHIFT; + new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK) + >> CP_REG_ARM64_SYSREG_OP2_SHIFT; + new_reg->opaque = src_reg; + new_reg->orig_readfn = src_reg->readfn ?: raw_read; + new_reg->orig_writefn = src_reg->writefn ?: raw_write; + new_reg->orig_accessfn = src_reg->accessfn; + if (!new_reg->raw_readfn) { + new_reg->raw_readfn = raw_read; + } + if (!new_reg->raw_writefn) { + new_reg->raw_writefn = raw_write; + } + new_reg->readfn = el2_e2h_e12_read; + new_reg->writefn = el2_e2h_e12_write; + new_reg->accessfn = el2_e2h_e12_access; + + /* + * If the _EL1 register is redirected to memory by FEAT_NV2, + * then it shares the offset with the _EL12 register, + * and which one is redirected depends on HCR_EL2.NV1. + */ + if (new_reg->nv2_redirect_offset) { + assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1); + new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1; + new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1; + } ok = g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)a->new_key, new_reg); @@ -6729,9 +7061,11 @@ static const ARMCPRegInfo minimal_ras_reginfo[] = { .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1, + .nv2_redirect_offset = 0x500, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) }, { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3, + .nv2_redirect_offset = 0x508, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) }, }; @@ -6903,6 +7237,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, static const ARMCPRegInfo zcr_reginfo[] = { { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, + .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1, .access = PL1_RW, .type = ARM_CP_SVE, .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), .writefn = zcr_write, .raw_writefn = raw_write }, @@ -6939,10 +7274,21 @@ static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } -static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* If EL1 this is a FEAT_NV access and CPTR_EL3.ESM doesn't apply */ + if (arm_current_el(env) == 2 + && arm_feature(env, ARM_FEATURE_EL3) + && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { + return 
CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - /* TODO: FEAT_FGT for SMPRI_EL1 but not SMPRIMAP_EL2 */ if (arm_current_el(env) < 3 && arm_feature(env, ARM_FEATURE_EL3) && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { @@ -7033,6 +7379,7 @@ static const ARMCPRegInfo sme_reginfo[] = { .writefn = svcr_write, .raw_writefn = raw_write }, { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6, + .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1, .access = PL1_RW, .type = ARM_CP_SME, .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]), .writefn = smcr_write, .raw_writefn = raw_write }, @@ -7061,12 +7408,13 @@ static const ARMCPRegInfo sme_reginfo[] = { */ { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4, - .access = PL1_RW, .accessfn = access_esm, + .access = PL1_RW, .accessfn = access_smpri, .fgt = FGT_NSMPRI_EL1, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5, - .access = PL2_RW, .accessfn = access_esm, + .nv2_redirect_offset = 0x1f8, + .access = PL2_RW, .accessfn = access_smprimap, .type = ARM_CP_CONST, .resetvalue = 0 }, }; @@ -7162,8 +7510,9 @@ static void define_pmu_regs(ARMCPU *cpu) .fgt = FGT_PMCR_EL0, .type = ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), - .accessfn = pmreg_access, .writefn = pmcr_write, - .raw_writefn = raw_write, + .accessfn = pmreg_access, + .readfn = pmcr_read, .raw_readfn = raw_read, + .writefn = pmcr_write, .raw_writefn = raw_write, }; ARMCPRegInfo pmcr64 = { .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, @@ -7173,6 +7522,7 @@ static void define_pmu_regs(ARMCPU *cpu) .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), .resetvalue = cpu->isar.reset_pmcr_el0, + .readfn = pmcr_read, .raw_readfn = raw_read, .writefn = pmcr_write, .raw_writefn = raw_write, }; @@ -7653,13 +8003,14 @@ static const ARMCPRegInfo rndr_reginfo[] = { static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, uint64_t value) { +#ifdef CONFIG_TCG ARMCPU *cpu = env_archcpu(env); /* CTR_EL0 System register -> DminLine, bits [19:16] */ uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); uint64_t vaddr_in = (uint64_t) value; uint64_t vaddr = vaddr_in & ~(dline_size - 1); void *haddr; - int mem_idx = cpu_mmu_index(env, false); + int mem_idx = arm_env_mmu_index(env); /* This won't be crossing page boundaries */ haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC()); @@ -7677,6 +8028,10 @@ static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, } #endif /*CONFIG_USER_ONLY*/ } +#else + /* Handled by hardware accelerator. 
*/ + g_assert_not_reached(); +#endif /* CONFIG_TCG */ } static const ARMCPRegInfo dcpop_reg[] = { @@ -7709,7 +8064,46 @@ static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { int el = arm_current_el(env); + if (el < 2 && arm_is_el2_enabled(env)) { + uint64_t hcr = arm_hcr_el2_eff(env); + if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) { + return CP_ACCESS_TRAP_EL2; + } + } + if (el < 3 && + arm_feature(env, ARM_FEATURE_EL3) && + !(env->cp15.scr_el3 & SCR_ATA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + CPAccessResult nv1 = access_nv1(env, ri, isread); + + if (nv1 != CP_ACCESS_OK) { + return nv1; + } + return access_mte(env, ri, isread); +} + +static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* + * TFSR_EL2: similar to generic access_mte(), but we need to + * account for FEAT_NV. At EL1 this must be a FEAT_NV access; + * if NV2 is enabled then we will redirect this to TFSR_EL1 + * after doing the HCR and SCR ATA traps; otherwise this will + * be a trap to EL2 and the HCR/SCR traps do not apply. + */ + int el = arm_current_el(env); + if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) { + return CP_ACCESS_OK; + } if (el < 2 && arm_is_el2_enabled(env)) { uint64_t hcr = arm_hcr_el2_eff(env); if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) { @@ -7741,11 +8135,13 @@ static const ARMCPRegInfo mte_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) }, { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0, - .access = PL1_RW, .accessfn = access_mte, + .access = PL1_RW, .accessfn = access_tfsr_el1, + .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) }, { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0, - .access = PL2_RW, .accessfn = access_mte, + .access = PL2_RW, .accessfn = access_tfsr_el2, .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) }, { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0, @@ -7893,6 +8289,18 @@ static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } +static CPAccessResult access_scxtnum_el1(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + CPAccessResult nv1 = access_nv1(env, ri, isread); + + if (nv1 != CP_ACCESS_OK) { + return nv1; + } + return access_scxtnum(env, ri, isread); +} + static const ARMCPRegInfo scxtnum_reginfo[] = { { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7, @@ -7901,8 +8309,9 @@ static const ARMCPRegInfo scxtnum_reginfo[] = { .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) }, { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7, - .access = PL1_RW, .accessfn = access_scxtnum, + .access = PL1_RW, .accessfn = access_scxtnum_el1, .fgt = FGT_SCXTNUM_EL1, + .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) }, { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7, @@ -7927,25 +8336,53 @@ static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri, static const ARMCPRegInfo fgt_reginfo[] = { { 
.name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, + .nv2_redirect_offset = 0x1b8, .access = PL2_RW, .accessfn = access_fgt, .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) }, { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5, + .nv2_redirect_offset = 0x1c0, .access = PL2_RW, .accessfn = access_fgt, .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) }, { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4, + .nv2_redirect_offset = 0x1d0, .access = PL2_RW, .accessfn = access_fgt, .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) }, { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5, + .nv2_redirect_offset = 0x1d8, .access = PL2_RW, .accessfn = access_fgt, .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) }, { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6, + .nv2_redirect_offset = 0x1c8, .access = PL2_RW, .accessfn = access_fgt, .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) }, }; + +static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee + * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything + * about the RESS bits at the top -- we choose the "generate an EL2 + * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let + * the ptw.c code detect the resulting invalid address). + */ + env->cp15.vncr_el2 = value & ~0xfffULL; +} + +static const ARMCPRegInfo nv2_reginfo[] = { + { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0, + .access = PL2_RW, + .writefn = vncr_write, + .nv2_redirect_offset = 0xb0, + .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) }, +}; + #endif /* TARGET_AARCH64 */ static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, @@ -8105,13 +8542,15 @@ static const ARMCPRegInfo vhe_reginfo[] = { { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_ALIAS, - .access = PL2_RW, .accessfn = e2h_access, + .access = PL2_RW, .accessfn = access_el1nvpct, + .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_ALIAS, - .access = PL2_RW, .accessfn = e2h_access, + .access = PL2_RW, .accessfn = access_el1nvvct, + .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, @@ -8128,13 +8567,15 @@ static const ARMCPRegInfo vhe_reginfo[] = { .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, .type = ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), - .access = PL2_RW, .accessfn = e2h_access, + .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1, + .access = PL2_RW, .accessfn = access_el1nvpct, .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, { .name = 
"CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, .type = ARM_CP_IO | ARM_CP_ALIAS, + .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), - .access = PL2_RW, .accessfn = e2h_access, + .access = PL2_RW, .accessfn = access_el1nvvct, .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, #endif }; @@ -8145,12 +8586,12 @@ static const ARMCPRegInfo ats1e1_reginfo[] = { .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E1RP, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E1WP, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, }; static const ARMCPRegInfo ats1cp_reginfo[] = { @@ -8625,6 +9066,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) R_ID_AA64ZFR0_AES_MASK | R_ID_AA64ZFR0_BITPERM_MASK | R_ID_AA64ZFR0_BFLOAT16_MASK | + R_ID_AA64ZFR0_B16B16_MASK | R_ID_AA64ZFR0_SHA3_MASK | R_ID_AA64ZFR0_SM4_MASK | R_ID_AA64ZFR0_I8MM_MASK | @@ -8724,6 +9166,9 @@ void register_cp_regs_for_features(ARMCPU *cpu) } define_arm_cp_regs(cpu, v8_idregs); define_arm_cp_regs(cpu, v8_cp_reginfo); + if (cpu_isar_feature(aa64_aa32_el1, cpu)) { + define_arm_cp_regs(cpu, v8_aa32_el1_reginfo); + } for (i = 4; i < 16; i++) { /* @@ -8771,6 +9216,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, .access = PL2_RW, .resetvalue = cpu->midr, .type = ARM_CP_EL3_NO_EL2_C_NZ, + .nv2_redirect_offset = 0x88, .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, @@ -8782,6 +9228,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, .access = PL2_RW, .resetvalue = vmpidr_def, .type = ARM_CP_EL3_NO_EL2_C_NZ, + .nv2_redirect_offset = 0x50, .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, }; /* @@ -8914,6 +9361,14 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { define_arm_cp_regs(cpu, generic_timer_cp_reginfo); } + if (cpu_isar_feature(aa64_ecv_traps, cpu)) { + define_arm_cp_regs(cpu, gen_timer_ecv_cp_reginfo); + } +#ifndef CONFIG_USER_ONLY + if (cpu_isar_feature(aa64_ecv, cpu)) { + define_one_arm_cp_reg(cpu, &gen_timer_cntpoff_reginfo); + } +#endif if (arm_feature(env, ARM_FEATURE_VAPA)) { ARMCPRegInfo vapa_cp_reginfo[] = { { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, @@ -9211,6 +9666,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tacr, + .nv2_redirect_offset = 0x118, .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, @@ -9241,7 +9697,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) * AArch64 cores we might need to add a specific feature flag * to indicate cores with "flavour 2" CBAR. 
*/ - if (arm_feature(env, ARM_FEATURE_AARCH64)) { + if (arm_feature(env, ARM_FEATURE_V8)) { /* 32 bit view is [31:18] 0...0 [43:32]. */ uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) | extract64(cpu->reset_cbar, 32, 12); @@ -9280,7 +9736,9 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "VBAR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .writefn = vbar_write, + .accessfn = access_nv1, .fgt = FGT_VBAR_EL1, + .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), offsetof(CPUARMState, cp15.vbar_ns) }, .resetvalue = 0 }, @@ -9295,6 +9753,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_SCTLR_EL1, + .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), offsetof(CPUARMState, cp15.sctlr_ns) }, .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, @@ -9425,6 +9884,10 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, rme_mte_reginfo); } } + + if (cpu_isar_feature(aa64_nv2, cpu)) { + define_arm_cp_regs(cpu, nv2_reginfo); + } #endif if (cpu_isar_feature(any_predinv, cpu)) { @@ -9446,52 +9909,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) #endif } -/* Sort alphabetically by type name, except for "any". */ -static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) -{ - ObjectClass *class_a = (ObjectClass *)a; - ObjectClass *class_b = (ObjectClass *)b; - const char *name_a, *name_b; - - name_a = object_class_get_name(class_a); - name_b = object_class_get_name(class_b); - if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { - return 1; - } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { - return -1; - } else { - return strcmp(name_a, name_b); - } -} - -static void arm_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - CPUClass *cc = CPU_CLASS(oc); - const char *typename; - char *name; - - typename = object_class_get_name(oc); - name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); - if (cc->deprecation_note) { - qemu_printf(" %s (deprecated)\n", name); - } else { - qemu_printf(" %s\n", name); - } - g_free(name); -} - -void arm_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list(TYPE_ARM_CPU, false); - list = g_slist_sort(list, arm_cpu_list_compare); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, arm_cpu_list_entry, NULL); - g_slist_free(list); -} - /* * Private utility function for define_one_arm_cp_reg_with_opaque(): * add a single reginfo struct to the hash table. 
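The nv2_redirect_offset values attached to the registers above are byte offsets into the 4KiB page addressed by VNCR_EL2, in some cases OR'd with the NV2_REDIR_NV1 / NV2_REDIR_NO_NV1 applicability flags. A minimal sketch of how such an access could be turned into a memory address follows; it assumes the flag bits sit above the 12-bit in-page offset, the helper name nv2_redirect_addr_sketch() is invented for illustration, and the real redirection is performed elsewhere in the target code rather than by this patch hunk:

    /*
     * Illustrative sketch only -- not part of this patch. Relies on the
     * target/arm types (CPUARMState, ARMCPRegInfo) used throughout this
     * file and assumes the NV2_REDIR_* flags occupy bits above the
     * 12-bit in-page offset.
     */
    static uint64_t nv2_redirect_addr_sketch(CPUARMState *env,
                                             const ARMCPRegInfo *ri)
    {
        /* Keep only the byte offset within the VNCR_EL2 page. */
        uint64_t offset = ri->nv2_redirect_offset & 0xfff;

        /*
         * vncr_write() clears the RES0 low 12 bits of VNCR_EL2, so the
         * sum below stays 64-bit aligned within that page.
         */
        return env->cp15.vncr_el2 + offset;
    }

Every offset used by the registers above is 8-byte aligned and below 0x1000, so a redirected 64-bit load or store never crosses the VNCR_EL2 page boundary.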
@@ -10143,61 +10560,6 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, } } -/* Sign/zero extend */ -uint32_t HELPER(sxtb16)(uint32_t x) -{ - uint32_t res; - res = (uint16_t)(int8_t)x; - res |= (uint32_t)(int8_t)(x >> 16) << 16; - return res; -} - -static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra) -{ - /* - * Take a division-by-zero exception if necessary; otherwise return - * to get the usual non-trapping division behaviour (result of 0) - */ - if (arm_feature(env, ARM_FEATURE_M) - && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) { - raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra); - } -} - -uint32_t HELPER(uxtb16)(uint32_t x) -{ - uint32_t res; - res = (uint16_t)(uint8_t)x; - res |= (uint32_t)(uint8_t)(x >> 16) << 16; - return res; -} - -int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den) -{ - if (den == 0) { - handle_possible_div0_trap(env, GETPC()); - return 0; - } - if (num == INT_MIN && den == -1) { - return INT_MIN; - } - return num / den; -} - -uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den) -{ - if (den == 0) { - handle_possible_div0_trap(env, GETPC()); - return 0; - } - return num / den; -} - -uint32_t HELPER(rbit)(uint32_t x) -{ - return revbit32(x); -} - #ifdef CONFIG_USER_ONLY static void switch_mode(CPUARMState *env, int mode) @@ -11231,6 +11593,20 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) old_mode = pstate_read(env); aarch64_save_sp(env, arm_current_el(env)); env->elr_el[new_el] = env->pc; + + if (cur_el == 1 && new_el == 1) { + uint64_t hcr = arm_hcr_el2_eff(env); + if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV || + (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) { + /* + * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR + * by setting M[3:2] to 0b10. + * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN) + * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM) + */ + old_mode = deposit32(old_mode, 2, 2, 2); + } + } } else { old_mode = cpsr_read_for_spsr_elx(env); env->elr_el[new_el] = env->regs[15]; @@ -11241,6 +11617,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) } env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; + qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode); qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", env->elr_el[new_el]); @@ -11370,7 +11747,7 @@ void arm_cpu_do_interrupt(CPUState *cs) * BQL needs to be held for any modification of * cs->interrupt_request. 
*/ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); arm_call_pre_el_change_hook(cpu); @@ -12084,15 +12461,6 @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) } #endif -static bool arm_pan_enabled(CPUARMState *env) -{ - if (is_a64(env)) { - return env->pstate & PSTATE_PAN; - } else { - return env->uncached_cpsr & CPSR_PAN; - } -} - ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) { ARMMMUIdx idx; diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c index 757e13b0f90..65a56018041 100644 --- a/target/arm/hvf/hvf.c +++ b/target/arm/hvf/hvf.c @@ -28,6 +28,8 @@ #include "arm-powerctl.h" #include "target/arm/cpu.h" #include "target/arm/internals.h" +#include "target/arm/multiprocessing.h" +#include "target/arm/gtimer.h" #include "trace/trace-target_arm_hvf.h" #include "migration/vmstate.h" @@ -36,7 +38,7 @@ #define MDSCR_EL1_SS_SHIFT 0 #define MDSCR_EL1_MDE_SHIFT 15 -static uint16_t dbgbcr_regs[] = { +static const uint16_t dbgbcr_regs[] = { HV_SYS_REG_DBGBCR0_EL1, HV_SYS_REG_DBGBCR1_EL1, HV_SYS_REG_DBGBCR2_EL1, @@ -54,7 +56,8 @@ static uint16_t dbgbcr_regs[] = { HV_SYS_REG_DBGBCR14_EL1, HV_SYS_REG_DBGBCR15_EL1, }; -static uint16_t dbgbvr_regs[] = { + +static const uint16_t dbgbvr_regs[] = { HV_SYS_REG_DBGBVR0_EL1, HV_SYS_REG_DBGBVR1_EL1, HV_SYS_REG_DBGBVR2_EL1, @@ -72,7 +75,8 @@ static uint16_t dbgbvr_regs[] = { HV_SYS_REG_DBGBVR14_EL1, HV_SYS_REG_DBGBVR15_EL1, }; -static uint16_t dbgwcr_regs[] = { + +static const uint16_t dbgwcr_regs[] = { HV_SYS_REG_DBGWCR0_EL1, HV_SYS_REG_DBGWCR1_EL1, HV_SYS_REG_DBGWCR2_EL1, @@ -90,7 +94,8 @@ static uint16_t dbgwcr_regs[] = { HV_SYS_REG_DBGWCR14_EL1, HV_SYS_REG_DBGWCR15_EL1, }; -static uint16_t dbgwvr_regs[] = { + +static const uint16_t dbgwvr_regs[] = { HV_SYS_REG_DBGWVR0_EL1, HV_SYS_REG_DBGWVR1_EL1, HV_SYS_REG_DBGWVR2_EL1, @@ -1013,7 +1018,7 @@ static void hvf_raise_exception(CPUState *cpu, uint32_t excp, static void hvf_psci_cpu_off(ARMCPU *arm_cpu) { - int32_t ret = arm_set_cpu_off(arm_cpu->mp_affinity); + int32_t ret = arm_set_cpu_off(arm_cpu_mp_affinity(arm_cpu)); assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS); } @@ -1042,7 +1047,7 @@ static bool hvf_handle_psci_call(CPUState *cpu) int32_t ret = 0; trace_hvf_psci_call(param[0], param[1], param[2], param[3], - arm_cpu->mp_affinity); + arm_cpu_mp_affinity(arm_cpu)); switch (param[0]) { case QEMU_PSCI_0_2_FN_PSCI_VERSION: @@ -1718,9 +1723,9 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts) * sleeping. */ qatomic_set_mb(&cpu->thread_kicked, false); - qemu_mutex_unlock_iothread(); + bql_unlock(); pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask); - qemu_mutex_lock_iothread(); + bql_lock(); } static void hvf_wfi(CPUState *cpu) @@ -1821,7 +1826,7 @@ int hvf_vcpu_exec(CPUState *cpu) flush_cpu_state(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); assert_hvf_ok(hv_vcpu_run(cpu->accel->fd)); /* handle VMEXIT */ @@ -1830,7 +1835,7 @@ int hvf_vcpu_exec(CPUState *cpu) uint32_t ec = syn_get_ec(syndrome); ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); switch (exit_reason) { case HV_EXIT_REASON_EXCEPTION: /* This is the main one, handle below. 
*/ @@ -2010,7 +2015,7 @@ static const VMStateDescription vmstate_hvf_vtimer = { .name = "hvf-vtimer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(vtimer_val, HVFVTimer), VMSTATE_END_OF_LIST() }, @@ -2241,7 +2246,7 @@ void hvf_arch_update_guest_debug(CPUState *cpu) hvf_arch_set_traps(); } -inline bool hvf_arch_supports_guest_debug(void) +bool hvf_arch_supports_guest_debug(void) { return true; } diff --git a/target/arm/internals.h b/target/arm/internals.h index 8342f4651f6..dd3da211a3f 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -40,6 +40,11 @@ #define BANK_HYP 6 #define BANK_MON 7 +static inline int arm_env_mmu_index(CPUARMState *env) +{ + return EX_TBFLAG_ANY(env->hflags, MMUIDX); +} + static inline bool excp_is_internal(int excp) { /* Return true if this exception number represents a QEMU-internal @@ -94,6 +99,157 @@ FIELD(DBGWCR, WT, 20, 1) FIELD(DBGWCR, MASK, 24, 5) FIELD(DBGWCR, SSCE, 29, 1) +#define VTCR_NSW (1u << 29) +#define VTCR_NSA (1u << 30) +#define VSTCR_SW VTCR_NSW +#define VSTCR_SA VTCR_NSA + +/* Bit definitions for CPACR (AArch32 only) */ +FIELD(CPACR, CP10, 20, 2) +FIELD(CPACR, CP11, 22, 2) +FIELD(CPACR, TRCDIS, 28, 1) /* matches CPACR_EL1.TTA */ +FIELD(CPACR, D32DIS, 30, 1) /* up to v7; RAZ in v8 */ +FIELD(CPACR, ASEDIS, 31, 1) + +/* Bit definitions for CPACR_EL1 (AArch64 only) */ +FIELD(CPACR_EL1, ZEN, 16, 2) +FIELD(CPACR_EL1, FPEN, 20, 2) +FIELD(CPACR_EL1, SMEN, 24, 2) +FIELD(CPACR_EL1, TTA, 28, 1) /* matches CPACR.TRCDIS */ + +/* Bit definitions for HCPTR (AArch32 only) */ +FIELD(HCPTR, TCP10, 10, 1) +FIELD(HCPTR, TCP11, 11, 1) +FIELD(HCPTR, TASE, 15, 1) +FIELD(HCPTR, TTA, 20, 1) +FIELD(HCPTR, TAM, 30, 1) /* matches CPTR_EL2.TAM */ +FIELD(HCPTR, TCPAC, 31, 1) /* matches CPTR_EL2.TCPAC */ + +/* Bit definitions for CPTR_EL2 (AArch64 only) */ +FIELD(CPTR_EL2, TZ, 8, 1) /* !E2H */ +FIELD(CPTR_EL2, TFP, 10, 1) /* !E2H, matches HCPTR.TCP10 */ +FIELD(CPTR_EL2, TSM, 12, 1) /* !E2H */ +FIELD(CPTR_EL2, ZEN, 16, 2) /* E2H */ +FIELD(CPTR_EL2, FPEN, 20, 2) /* E2H */ +FIELD(CPTR_EL2, SMEN, 24, 2) /* E2H */ +FIELD(CPTR_EL2, TTA, 28, 1) +FIELD(CPTR_EL2, TAM, 30, 1) /* matches HCPTR.TAM */ +FIELD(CPTR_EL2, TCPAC, 31, 1) /* matches HCPTR.TCPAC */ + +/* Bit definitions for CPTR_EL3 (AArch64 only) */ +FIELD(CPTR_EL3, EZ, 8, 1) +FIELD(CPTR_EL3, TFP, 10, 1) +FIELD(CPTR_EL3, ESM, 12, 1) +FIELD(CPTR_EL3, TTA, 20, 1) +FIELD(CPTR_EL3, TAM, 30, 1) +FIELD(CPTR_EL3, TCPAC, 31, 1) + +#define MDCR_MTPME (1U << 28) +#define MDCR_TDCC (1U << 27) +#define MDCR_HLP (1U << 26) /* MDCR_EL2 */ +#define MDCR_SCCD (1U << 23) /* MDCR_EL3 */ +#define MDCR_HCCD (1U << 23) /* MDCR_EL2 */ +#define MDCR_EPMAD (1U << 21) +#define MDCR_EDAD (1U << 20) +#define MDCR_TTRF (1U << 19) +#define MDCR_STE (1U << 18) /* MDCR_EL3 */ +#define MDCR_SPME (1U << 17) /* MDCR_EL3 */ +#define MDCR_HPMD (1U << 17) /* MDCR_EL2 */ +#define MDCR_SDD (1U << 16) +#define MDCR_SPD (3U << 14) +#define MDCR_TDRA (1U << 11) +#define MDCR_TDOSA (1U << 10) +#define MDCR_TDA (1U << 9) +#define MDCR_TDE (1U << 8) +#define MDCR_HPME (1U << 7) +#define MDCR_TPM (1U << 6) +#define MDCR_TPMCR (1U << 5) +#define MDCR_HPMN (0x1fU) + +/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */ +#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \ + MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \ + MDCR_STE | MDCR_SPME | MDCR_SPD) + +#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */ +#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */ +#define 
TTBCR_PD0 (1U << 4) +#define TTBCR_PD1 (1U << 5) +#define TTBCR_EPD0 (1U << 7) +#define TTBCR_IRGN0 (3U << 8) +#define TTBCR_ORGN0 (3U << 10) +#define TTBCR_SH0 (3U << 12) +#define TTBCR_T1SZ (3U << 16) +#define TTBCR_A1 (1U << 22) +#define TTBCR_EPD1 (1U << 23) +#define TTBCR_IRGN1 (3U << 24) +#define TTBCR_ORGN1 (3U << 26) +#define TTBCR_SH1 (1U << 28) +#define TTBCR_EAE (1U << 31) + +FIELD(VTCR, T0SZ, 0, 6) +FIELD(VTCR, SL0, 6, 2) +FIELD(VTCR, IRGN0, 8, 2) +FIELD(VTCR, ORGN0, 10, 2) +FIELD(VTCR, SH0, 12, 2) +FIELD(VTCR, TG0, 14, 2) +FIELD(VTCR, PS, 16, 3) +FIELD(VTCR, VS, 19, 1) +FIELD(VTCR, HA, 21, 1) +FIELD(VTCR, HD, 22, 1) +FIELD(VTCR, HWU59, 25, 1) +FIELD(VTCR, HWU60, 26, 1) +FIELD(VTCR, HWU61, 27, 1) +FIELD(VTCR, HWU62, 28, 1) +FIELD(VTCR, NSW, 29, 1) +FIELD(VTCR, NSA, 30, 1) +FIELD(VTCR, DS, 32, 1) +FIELD(VTCR, SL2, 33, 1) + +#define HCRX_ENAS0 (1ULL << 0) +#define HCRX_ENALS (1ULL << 1) +#define HCRX_ENASR (1ULL << 2) +#define HCRX_FNXS (1ULL << 3) +#define HCRX_FGTNXS (1ULL << 4) +#define HCRX_SMPME (1ULL << 5) +#define HCRX_TALLINT (1ULL << 6) +#define HCRX_VINMI (1ULL << 7) +#define HCRX_VFNMI (1ULL << 8) +#define HCRX_CMOW (1ULL << 9) +#define HCRX_MCE2 (1ULL << 10) +#define HCRX_MSCEN (1ULL << 11) + +#define HPFAR_NS (1ULL << 63) + +#define HSTR_TTEE (1 << 16) +#define HSTR_TJDBX (1 << 17) + +/* + * Depending on the value of HCR_EL2.E2H, bits 0 and 1 + * have different bit definitions, and EL1PCTEN might be + * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to + * disambiguate if necessary. + */ +FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1) +FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1) +FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1) +FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1) +FIELD(CNTHCTL, EVNTEN, 2, 1) +FIELD(CNTHCTL, EVNTDIR, 3, 1) +FIELD(CNTHCTL, EVNTI, 4, 4) +FIELD(CNTHCTL, EL0VTEN, 8, 1) +FIELD(CNTHCTL, EL0PTEN, 9, 1) +FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1) +FIELD(CNTHCTL, EL1PTEN, 11, 1) +FIELD(CNTHCTL, ECV, 12, 1) +FIELD(CNTHCTL, EL1TVT, 13, 1) +FIELD(CNTHCTL, EL1TVCT, 14, 1) +FIELD(CNTHCTL, EL1NVPCT, 15, 1) +FIELD(CNTHCTL, EL1NVVCT, 16, 1) +FIELD(CNTHCTL, EVNTIS, 17, 1) +FIELD(CNTHCTL, CNTVMASK, 18, 1) +FIELD(CNTHCTL, CNTPMASK, 19, 1) + /* We use a few fake FSR values for internal purposes in M profile. * M profile cores don't have A/R format FSRs, but currently our * get_phys_addr() code assumes A/R profile and reports failures via @@ -940,7 +1096,7 @@ static inline const char *aarch32_mode_name(uint32_t psr) * * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit. - * Must be called with the iothread lock held. + * Must be called with the BQL held. */ void arm_cpu_update_virq(ARMCPU *cpu); @@ -949,7 +1105,7 @@ void arm_cpu_update_virq(ARMCPU *cpu); * * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit. - * Must be called with the iothread lock held. + * Must be called with the BQL held. 
*/ void arm_cpu_update_vfiq(ARMCPU *cpu); @@ -1446,13 +1602,13 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env) } #ifdef TARGET_AARCH64 -int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg); -int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg); -int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg); -int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg); -int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg); -int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg); -int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg); +GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg); +int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg); +int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg); +int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg); +int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg); +int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg); +int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg); void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp); void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp); void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp); diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 7903e2ddde1..ab85d628a8b 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -2,6 +2,8 @@ * ARM implementation of KVM hooks * * Copyright Christoffer Dall 2009-2010 + * Copyright Mian-M. Hamayun 2013, Virtual Open Systems + * Copyright Alex Bennée 2014, Linaro * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. @@ -19,6 +21,7 @@ #include "qom/object.h" #include "qapi/error.h" #include "sysemu/sysemu.h" +#include "sysemu/runstate.h" #include "sysemu/kvm.h" #include "sysemu/kvm_int.h" #include "kvm_arm.h" @@ -28,10 +31,14 @@ #include "hw/pci/pci.h" #include "exec/memattrs.h" #include "exec/address-spaces.h" +#include "exec/gdbstub.h" #include "hw/boards.h" #include "hw/irq.h" #include "qapi/visitor.h" #include "qemu/log.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/ghes.h" +#include "target/arm/gtimer.h" const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_LAST_INFO @@ -41,28 +48,54 @@ static bool cap_has_mp_state; static bool cap_has_inject_serror_esr; static bool cap_has_inject_ext_dabt; +/** + * ARMHostCPUFeatures: information about the host CPU (identified + * by asking the host kernel) + */ +typedef struct ARMHostCPUFeatures { + ARMISARegisters isar; + uint64_t features; + uint32_t target; + const char *dtb_compatible; +} ARMHostCPUFeatures; + static ARMHostCPUFeatures arm_host_cpu_features; -int kvm_arm_vcpu_init(CPUState *cs) +/** + * kvm_arm_vcpu_init: + * @cpu: ARMCPU + * + * Initialize (or reinitialize) the VCPU by invoking the + * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature + * bitmask specified in the CPUState. 
+ * + * Returns: 0 if success else < 0 error code + */ +static int kvm_arm_vcpu_init(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); struct kvm_vcpu_init init; init.target = cpu->kvm_target; memcpy(init.features, cpu->kvm_init_features, sizeof(init.features)); - return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init); + return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_INIT, &init); } -int kvm_arm_vcpu_finalize(CPUState *cs, int feature) -{ - return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature); -} - -void kvm_arm_init_serror_injection(CPUState *cs) +/** + * kvm_arm_vcpu_finalize: + * @cpu: ARMCPU + * @feature: feature to finalize + * + * Finalizes the configuration of the specified VCPU feature by + * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring + * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of + * KVM's API documentation. + * + * Returns: 0 if success else < 0 error code + */ +static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature) { - cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state, - KVM_CAP_ARM_INJECT_SERROR_ESR); + return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature); } bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, @@ -167,6 +200,260 @@ void kvm_arm_destroy_scratch_host_vcpu(int *fdarray) } } +static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id) +{ + uint64_t ret; + struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret }; + int err; + + assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); + err = ioctl(fd, KVM_GET_ONE_REG, &idreg); + if (err < 0) { + return -1; + } + *pret = ret; + return 0; +} + +static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id) +{ + struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret }; + + assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); + return ioctl(fd, KVM_GET_ONE_REG, &idreg); +} + +static bool kvm_arm_pauth_supported(void) +{ + return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) && + kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC)); +} + +static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) +{ + /* Identify the feature bits corresponding to the host CPU, and + * fill out the ARMHostCPUClass fields accordingly. To do this + * we have to create a scratch VM, create a single CPU inside it, + * and then query that CPU for the relevant ID registers. + */ + int fdarray[3]; + bool sve_supported; + bool pmu_supported = false; + uint64_t features = 0; + int err; + + /* Old kernels may not know about the PREFERRED_TARGET ioctl: however + * we know these will only support creating one kind of guest CPU, + * which is its preferred CPU type. Fortunately these old kernels + * support only a very limited number of CPUs. + */ + static const uint32_t cpus_to_try[] = { + KVM_ARM_TARGET_AEM_V8, + KVM_ARM_TARGET_FOUNDATION_V8, + KVM_ARM_TARGET_CORTEX_A57, + QEMU_KVM_ARM_TARGET_NONE + }; + /* + * target = -1 informs kvm_arm_create_scratch_host_vcpu() + * to use the preferred target + */ + struct kvm_vcpu_init init = { .target = -1, }; + + /* + * Ask for SVE if supported, so that we can query ID_AA64ZFR0, + * which is otherwise RAZ. + */ + sve_supported = kvm_arm_sve_supported(); + if (sve_supported) { + init.features[0] |= 1 << KVM_ARM_VCPU_SVE; + } + + /* + * Ask for Pointer Authentication if supported, so that we get + * the unsanitized field values for AA64ISAR1_EL1. 
+ */ + if (kvm_arm_pauth_supported()) { + init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | + 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); + } + + if (kvm_arm_pmu_supported()) { + init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; + pmu_supported = true; + } + + if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) { + return false; + } + + ahcf->target = init.target; + ahcf->dtb_compatible = "arm,arm-v8"; + + err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0, + ARM64_SYS_REG(3, 0, 0, 4, 0)); + if (unlikely(err < 0)) { + /* + * Before v4.15, the kernel only exposed a limited number of system + * registers, not including any of the interesting AArch64 ID regs. + * For the most part we could leave these fields as zero with minimal + * effect, since this does not affect the values seen by the guest. + * + * However, it could cause problems down the line for QEMU, + * so provide a minimal v8.0 default. + * + * ??? Could read MIDR and use knowledge from cpu64.c. + * ??? Could map a page of memory into our temp guest and + * run the tiniest of hand-crafted kernels to extract + * the values seen by the guest. + * ??? Either of these sounds like too much effort just + * to work around running a modern host kernel. + */ + ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */ + err = 0; + } else { + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, + ARM64_SYS_REG(3, 0, 0, 4, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0, + ARM64_SYS_REG(3, 0, 0, 4, 5)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, + ARM64_SYS_REG(3, 0, 0, 5, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, + ARM64_SYS_REG(3, 0, 0, 5, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, + ARM64_SYS_REG(3, 0, 0, 6, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, + ARM64_SYS_REG(3, 0, 0, 6, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2, + ARM64_SYS_REG(3, 0, 0, 6, 2)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0, + ARM64_SYS_REG(3, 0, 0, 7, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1, + ARM64_SYS_REG(3, 0, 0, 7, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2, + ARM64_SYS_REG(3, 0, 0, 7, 2)); + + /* + * Note that if AArch32 support is not present in the host, + * the AArch32 sysregs are present to be read, but will + * return UNKNOWN values. This is neither better nor worse + * than skipping the reads and leaving 0, as we must avoid + * considering the values in every case. 
+ */ + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0, + ARM64_SYS_REG(3, 0, 0, 1, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, + ARM64_SYS_REG(3, 0, 0, 1, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, + ARM64_SYS_REG(3, 0, 0, 1, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, + ARM64_SYS_REG(3, 0, 0, 1, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, + ARM64_SYS_REG(3, 0, 0, 1, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, + ARM64_SYS_REG(3, 0, 0, 1, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, + ARM64_SYS_REG(3, 0, 0, 1, 7)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, + ARM64_SYS_REG(3, 0, 0, 2, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, + ARM64_SYS_REG(3, 0, 0, 2, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2, + ARM64_SYS_REG(3, 0, 0, 2, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3, + ARM64_SYS_REG(3, 0, 0, 2, 3)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4, + ARM64_SYS_REG(3, 0, 0, 2, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, + ARM64_SYS_REG(3, 0, 0, 2, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, + ARM64_SYS_REG(3, 0, 0, 2, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, + ARM64_SYS_REG(3, 0, 0, 2, 7)); + + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, + ARM64_SYS_REG(3, 0, 0, 3, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1, + ARM64_SYS_REG(3, 0, 0, 3, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, + ARM64_SYS_REG(3, 0, 0, 3, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, + ARM64_SYS_REG(3, 0, 0, 3, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1, + ARM64_SYS_REG(3, 0, 0, 3, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, + ARM64_SYS_REG(3, 0, 0, 3, 6)); + + /* + * DBGDIDR is a bit complicated because the kernel doesn't + * provide an accessor for it in 64-bit mode, which is what this + * scratch VM is in, and there's no architected "64-bit sysreg + * which reads the same as the 32-bit register" the way there is + * for other ID registers. Instead we synthesize a value from the + * AArch64 ID_AA64DFR0, the same way the kernel code in + * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does. + * We only do this if the CPU supports AArch32 at EL1. 
+ */ + if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) { + int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS); + int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS); + int ctx_cmps = + FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS); + int version = 6; /* ARMv8 debug architecture */ + bool has_el3 = + !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3); + uint32_t dbgdidr = 0; + + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3); + dbgdidr |= (1 << 15); /* RES1 bit */ + ahcf->isar.dbgdidr = dbgdidr; + } + + if (pmu_supported) { + /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */ + err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0, + ARM64_SYS_REG(3, 3, 9, 12, 0)); + } + + if (sve_supported) { + /* + * There is a range of kernels between kernel commit 73433762fcae + * and f81cb2c3ad41 which have a bug where the kernel doesn't + * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has + * enabled SVE support, which resulted in an error rather than RAZ. + * So only read the register if we set KVM_ARM_VCPU_SVE above. + */ + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, + ARM64_SYS_REG(3, 0, 0, 4, 4)); + } + } + + kvm_arm_destroy_scratch_host_vcpu(fdarray); + + if (err < 0) { + return false; + } + + /* + * We can assume any KVM supporting CPU is at least a v8 + * with VFPv4+Neon; this in turn implies most of the other + * feature bits. + */ + features |= 1ULL << ARM_FEATURE_V8; + features |= 1ULL << ARM_FEATURE_NEON; + features |= 1ULL << ARM_FEATURE_AARCH64; + features |= 1ULL << ARM_FEATURE_PMU; + features |= 1ULL << ARM_FEATURE_GENERIC_TIMER; + + ahcf->features = features; + + return true; +} + void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu) { CPUARMState *env = &cpu->env; @@ -210,10 +497,10 @@ static void kvm_steal_time_set(Object *obj, bool value, Error **errp) } /* KVM VCPU properties should be prefixed with "kvm-". 
*/ -void kvm_arm_add_vcpu_properties(Object *obj) +void kvm_arm_add_vcpu_properties(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(obj); CPUARMState *env = &cpu->env; + Object *obj = OBJECT(cpu); if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { cpu->kvm_adjvtime = true; @@ -271,6 +558,10 @@ int kvm_arch_init(MachineState *ms, KVMState *s) cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE); + /* Check whether user space can specify guest syndrome value */ + cap_has_inject_serror_esr = + kvm_check_extension(s, KVM_CAP_ARM_INJECT_SERROR_ESR); + if (ms->smp.cpus > 256 && !kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) { error_report("Using more than 256 vcpus requires a host kernel " @@ -308,7 +599,13 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } - kvm_arm_init_debug(s); + max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS); + hw_watchpoints = g_array_sized_new(true, true, + sizeof(HWWatchpoint), max_hw_wps); + + max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS); + hw_breakpoints = g_array_sized_new(true, true, + sizeof(HWBreakpoint), max_hw_bps); return ret; } @@ -469,11 +766,36 @@ static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx) return &cpu->cpreg_values[res - cpu->cpreg_indexes]; } -/* Initialize the ARMCPU cpreg list according to the kernel's +/** + * kvm_arm_reg_syncs_via_cpreg_list: + * @regidx: KVM register index + * + * Return true if this KVM register should be synchronized via the + * cpreg list of arbitrary system registers, false if it is synchronized + * by hand using code in kvm_arch_get/put_registers(). + */ +static bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx) +{ + switch (regidx & KVM_REG_ARM_COPROC_MASK) { + case KVM_REG_ARM_CORE: + case KVM_REG_ARM64_SVE: + return false; + default: + return true; + } +} + +/** + * kvm_arm_init_cpreg_list: + * @cpu: ARMCPU + * + * Initialize the ARMCPU cpreg list according to the kernel's * definition of what CPU registers it knows about (and throw away * the previous TCG-created cpreg list). + * + * Returns: 0 if success, else < 0 error code */ -int kvm_arm_init_cpreg_list(ARMCPU *cpu) +static int kvm_arm_init_cpreg_list(ARMCPU *cpu) { struct kvm_reg_list rl; struct kvm_reg_list *rlp; @@ -546,6 +868,28 @@ int kvm_arm_init_cpreg_list(ARMCPU *cpu) return ret; } +/** + * kvm_arm_cpreg_level: + * @regidx: KVM register index + * + * Return the level of this coprocessor/system register. Return value is + * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE. + */ +static int kvm_arm_cpreg_level(uint64_t regidx) +{ + /* + * All system registers are assumed to be level KVM_PUT_RUNTIME_STATE. + * If a register should be written less often, you must add it here + * with a state of either KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE. + */ + switch (regidx) { + case KVM_REG_ARM_TIMER_CNT: + case KVM_REG_ARM_PTIMER_CNT: + return KVM_PUT_FULL_STATE; + } + return KVM_PUT_RUNTIME_STATE; +} + bool write_kvmstate_to_list(ARMCPU *cpu) { CPUState *cs = CPU(cpu); @@ -638,7 +982,7 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu) /* Re-init VCPU so that all registers are set to * their respective reset values. 
*/ - ret = kvm_arm_vcpu_init(CPU(cpu)); + ret = kvm_arm_vcpu_init(cpu); if (ret < 0) { fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret)); abort(); @@ -660,54 +1004,50 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu) /* * Update KVM's MP_STATE based on what QEMU thinks it is */ -int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu) +static int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu) { if (cap_has_mp_state) { struct kvm_mp_state mp_state = { .mp_state = (cpu->power_state == PSCI_OFF) ? KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE }; - int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); - if (ret) { - fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n", - __func__, ret, strerror(-ret)); - return -1; - } + return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); } - return 0; } /* * Sync the KVM MP_STATE into QEMU */ -int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) +static int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) { if (cap_has_mp_state) { struct kvm_mp_state mp_state; int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state); if (ret) { - fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n", - __func__, ret, strerror(-ret)); - abort(); + return ret; } cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ? PSCI_OFF : PSCI_ON; } - return 0; } -void kvm_arm_get_virtual_time(CPUState *cs) +/** + * kvm_arm_get_virtual_time: + * @cpu: ARMCPU + * + * Gets the VCPU's virtual counter and stores it in the KVM CPU state. + */ +static void kvm_arm_get_virtual_time(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); int ret; if (cpu->kvm_vtime_dirty) { return; } - ret = kvm_get_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); + ret = kvm_get_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); if (ret) { error_report("Failed to get KVM_REG_ARM_TIMER_CNT"); abort(); @@ -716,16 +1056,21 @@ void kvm_arm_get_virtual_time(CPUState *cs) cpu->kvm_vtime_dirty = true; } -void kvm_arm_put_virtual_time(CPUState *cs) +/** + * kvm_arm_put_virtual_time: + * @cpu: ARMCPU + * + * Sets the VCPU's virtual counter to the value stored in the KVM CPU state. + */ +static void kvm_arm_put_virtual_time(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); int ret; if (!cpu->kvm_vtime_dirty) { return; } - ret = kvm_set_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); + ret = kvm_set_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); if (ret) { error_report("Failed to set KVM_REG_ARM_TIMER_CNT"); abort(); @@ -734,7 +1079,15 @@ void kvm_arm_put_virtual_time(CPUState *cs) cpu->kvm_vtime_dirty = false; } -int kvm_put_vcpu_events(ARMCPU *cpu) +/** + * kvm_put_vcpu_events: + * @cpu: ARMCPU + * + * Put VCPU related state to kvm. + * + * Returns: 0 if success else < 0 error code + */ +static int kvm_put_vcpu_events(ARMCPU *cpu) { CPUARMState *env = &cpu->env; struct kvm_vcpu_events events; @@ -763,7 +1116,15 @@ int kvm_put_vcpu_events(ARMCPU *cpu) return ret; } -int kvm_get_vcpu_events(ARMCPU *cpu) +/** + * kvm_get_vcpu_events: + * @cpu: ARMCPU + * + * Get VCPU related state from kvm. 
+ * + * Returns: 0 if success else < 0 error code + */ +static int kvm_get_vcpu_events(ARMCPU *cpu) { CPUARMState *env = &cpu->env; struct kvm_vcpu_events events; @@ -787,6 +1148,63 @@ int kvm_get_vcpu_events(ARMCPU *cpu) return 0; } +#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0) +#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2) + +/* + * ESR_EL1 + * ISS encoding + * AARCH64: DFSC, bits [5:0] + * AARCH32: + * TTBCR.EAE == 0 + * FS[4] - DFSR[10] + * FS[3:0] - DFSR[3:0] + * TTBCR.EAE == 1 + * FS, bits [5:0] + */ +#define ESR_DFSC(aarch64, lpae, v) \ + ((aarch64 || (lpae)) ? ((v) & 0x3F) \ + : (((v) >> 6) | ((v) & 0x1F))) + +#define ESR_DFSC_EXTABT(aarch64, lpae) \ + ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8) + +/** + * kvm_arm_verify_ext_dabt_pending: + * @cpu: ARMCPU + * + * Verify the fault status code wrt the Ext DABT injection + * + * Returns: true if the fault status code is as expected, false otherwise + */ +static bool kvm_arm_verify_ext_dabt_pending(ARMCPU *cpu) +{ + CPUState *cs = CPU(cpu); + uint64_t dfsr_val; + + if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) { + CPUARMState *env = &cpu->env; + int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64); + int lpae = 0; + + if (!aarch64_mode) { + uint64_t ttbcr; + + if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) { + lpae = arm_feature(env, ARM_FEATURE_LPAE) + && (ttbcr & TTBCR_EAE); + } + } + /* + * The verification here is based on the DFSC bits + * of the ESR_EL1 reg only + */ + return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) == + ESR_DFSC_EXTABT(aarch64_mode, lpae)); + } + return false; +} + void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) { ARMCPU *cpu = ARM_CPU(cs); @@ -801,7 +1219,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) * an IMPLEMENTATION DEFINED exception (for 32-bit EL1) */ if (!arm_feature(env, ARM_FEATURE_AARCH64) && - unlikely(!kvm_arm_verify_ext_dabt_pending(cs))) { + unlikely(!kvm_arm_verify_ext_dabt_pending(cpu))) { error_report("Data abort exception with no valid ISS generated by " "guest memory access. 
KVM unable to emulate faulting " @@ -833,7 +1251,7 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) if (run->s.regs.device_irq_level != cpu->device_irq_level) { switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level; - qemu_mutex_lock_iothread(); + bql_lock(); if (switched_level & KVM_ARM_DEV_EL1_VTIMER) { qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT], @@ -862,41 +1280,39 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) /* We also mark unknown levels as processed to not waste cycles */ cpu->device_irq_level = run->s.regs.device_irq_level; - qemu_mutex_unlock_iothread(); + bql_unlock(); } return MEMTXATTRS_UNSPECIFIED; } -void kvm_arm_vm_state_change(void *opaque, bool running, RunState state) +static void kvm_arm_vm_state_change(void *opaque, bool running, RunState state) { - CPUState *cs = opaque; - ARMCPU *cpu = ARM_CPU(cs); + ARMCPU *cpu = opaque; if (running) { if (cpu->kvm_adjvtime) { - kvm_arm_put_virtual_time(cs); + kvm_arm_put_virtual_time(cpu); } } else { if (cpu->kvm_adjvtime) { - kvm_arm_get_virtual_time(cs); + kvm_arm_get_virtual_time(cpu); } } } /** * kvm_arm_handle_dabt_nisv: - * @cs: CPUState + * @cpu: ARMCPU * @esr_iss: ISS encoding (limited) for the exception from Data Abort * ISV bit set to '0b0' -> no valid instruction syndrome * @fault_ipa: faulting address for the synchronous data abort * * Returns: 0 if the exception has been handled, < 0 otherwise */ -static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, +static int kvm_arm_handle_dabt_nisv(ARMCPU *cpu, uint64_t esr_iss, uint64_t fault_ipa) { - ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; /* * Request KVM to inject the external data abort into the guest @@ -912,7 +1328,7 @@ static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, */ events.exception.ext_dabt_pending = 1; /* KVM_CAP_ARM_INJECT_EXT_DABT implies KVM_CAP_VCPU_EVENTS */ - if (!kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events)) { + if (!kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events)) { env->ext_dabt_raised = 1; return 0; } @@ -925,19 +1341,97 @@ static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, return -1; } +/** + * kvm_arm_handle_debug: + * @cpu: ARMCPU + * @debug_exit: debug part of the KVM exit structure + * + * Returns: TRUE if the debug exception was handled. + * + * See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register + * + * To minimise translating between kernel and user-space the kernel + * ABI just provides user-space with the full exception syndrome + * register value to be decoded in QEMU. + */ +static bool kvm_arm_handle_debug(ARMCPU *cpu, + struct kvm_debug_exit_arch *debug_exit) +{ + int hsr_ec = syn_get_ec(debug_exit->hsr); + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + + /* Ensure PC is synchronised */ + kvm_cpu_synchronize_state(cs); + + switch (hsr_ec) { + case EC_SOFTWARESTEP: + if (cs->singlestep_enabled) { + return true; + } else { + /* + * The kernel should have suppressed the guest's ability to + * single step at this point so something has gone wrong. 
+ */ + error_report("%s: guest single-step while debugging unsupported" + " (%"PRIx64", %"PRIx32")", + __func__, env->pc, debug_exit->hsr); + return false; + } + break; + case EC_AA64_BKPT: + if (kvm_find_sw_breakpoint(cs, env->pc)) { + return true; + } + break; + case EC_BREAKPOINT: + if (find_hw_breakpoint(cs, env->pc)) { + return true; + } + break; + case EC_WATCHPOINT: + { + CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far); + if (wp) { + cs->watchpoint_hit = wp; + return true; + } + break; + } + default: + error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")", + __func__, debug_exit->hsr, env->pc); + } + + /* If we are not handling the debug exception it must belong to + * the guest. Let's re-use the existing TCG interrupt code to set + * everything up properly. + */ + cs->exception_index = EXCP_BKPT; + env->exception.syndrome = debug_exit->hsr; + env->exception.vaddress = debug_exit->far; + env->exception.target_el = 1; + bql_lock(); + arm_cpu_do_interrupt(cs); + bql_unlock(); + + return false; +} + int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) { + ARMCPU *cpu = ARM_CPU(cs); int ret = 0; switch (run->exit_reason) { case KVM_EXIT_DEBUG: - if (kvm_arm_handle_debug(cs, &run->debug.arch)) { + if (kvm_arm_handle_debug(cpu, &run->debug.arch)) { ret = EXCP_DEBUG; } /* otherwise return to guest */ break; case KVM_EXIT_ARM_NISV: /* External DABT with no valid iss to decode */ - ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss, + ret = kvm_arm_handle_dabt_nisv(cpu, run->arm_nisv.esr_iss, run->arm_nisv.fault_ipa); break; default: @@ -958,12 +1452,47 @@ int kvm_arch_process_async_events(CPUState *cs) return 0; } +/** + * kvm_arm_hw_debug_active: + * @cpu: ARMCPU + * + * Return: TRUE if any hardware breakpoints in use. + */ +static bool kvm_arm_hw_debug_active(ARMCPU *cpu) +{ + return ((cur_hw_wps > 0) || (cur_hw_bps > 0)); +} + +/** + * kvm_arm_copy_hw_debug_data: + * @ptr: kvm_guest_debug_arch structure + * + * Copy the architecture specific debug registers into the + * kvm_guest_debug ioctl structure. + */ +static void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr) +{ + int i; + memset(ptr, 0, sizeof(struct kvm_guest_debug_arch)); + + for (i = 0; i < max_hw_wps; i++) { + HWWatchpoint *wp = get_hw_wp(i); + ptr->dbg_wcr[i] = wp->wcr; + ptr->dbg_wvr[i] = wp->wvr; + } + for (i = 0; i < max_hw_bps; i++) { + HWBreakpoint *bp = get_hw_bp(i); + ptr->dbg_bcr[i] = bp->bcr; + ptr->dbg_bvr[i] = bp->bvr; + } +} + void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg) { if (kvm_sw_breakpoints_active(cs)) { dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; } - if (kvm_arm_hw_debug_active(cs)) { + if (kvm_arm_hw_debug_active(ARM_CPU(cs))) { dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW; kvm_arm_copy_hw_debug_data(&dbg->arch); } @@ -1117,3 +1646,782 @@ void kvm_arch_accel_class_init(ObjectClass *oc) object_class_property_set_description(oc, "eager-split-size", "Eager Page Split chunk size for hugepages. 
(default: 0, disabled)"); } + +int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type) +{ + switch (type) { + case GDB_BREAKPOINT_HW: + return insert_hw_breakpoint(addr); + break; + case GDB_WATCHPOINT_READ: + case GDB_WATCHPOINT_WRITE: + case GDB_WATCHPOINT_ACCESS: + return insert_hw_watchpoint(addr, len, type); + default: + return -ENOSYS; + } +} + +int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type) +{ + switch (type) { + case GDB_BREAKPOINT_HW: + return delete_hw_breakpoint(addr); + case GDB_WATCHPOINT_READ: + case GDB_WATCHPOINT_WRITE: + case GDB_WATCHPOINT_ACCESS: + return delete_hw_watchpoint(addr, len, type); + default: + return -ENOSYS; + } +} + +void kvm_arch_remove_all_hw_breakpoints(void) +{ + if (cur_hw_wps > 0) { + g_array_remove_range(hw_watchpoints, 0, cur_hw_wps); + } + if (cur_hw_bps > 0) { + g_array_remove_range(hw_breakpoints, 0, cur_hw_bps); + } +} + +static bool kvm_arm_set_device_attr(ARMCPU *cpu, struct kvm_device_attr *attr, + const char *name) +{ + int err; + + err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, attr); + if (err != 0) { + error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err)); + return false; + } + + err = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEVICE_ATTR, attr); + if (err != 0) { + error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err)); + return false; + } + + return true; +} + +void kvm_arm_pmu_init(ARMCPU *cpu) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_INIT, + }; + + if (!cpu->has_pmu) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) { + error_report("failed to init PMU"); + abort(); + } +} + +void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .addr = (intptr_t)&irq, + .attr = KVM_ARM_VCPU_PMU_V3_IRQ, + }; + + if (!cpu->has_pmu) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) { + error_report("failed to set irq for PMU"); + abort(); + } +} + +void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PVTIME_CTRL, + .attr = KVM_ARM_VCPU_PVTIME_IPA, + .addr = (uint64_t)&ipa, + }; + + if (cpu->kvm_steal_time == ON_OFF_AUTO_OFF) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PVTIME IPA")) { + error_report("failed to init PVTIME IPA"); + abort(); + } +} + +void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) +{ + bool has_steal_time = kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME); + + if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) { + if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + cpu->kvm_steal_time = ON_OFF_AUTO_OFF; + } else { + cpu->kvm_steal_time = ON_OFF_AUTO_ON; + } + } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) { + if (!has_steal_time) { + error_setg(errp, "'kvm-steal-time' cannot be enabled " + "on this host"); + return; + } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + /* + * DEN0057A chapter 2 says "This specification only covers + * systems in which the Execution state of the hypervisor + * as well as EL1 of virtual machines is AArch64.". And, + * to ensure that, the smc/hvc calls are only specified as + * smc64/hvc64. 
+ */ + error_setg(errp, "'kvm-steal-time' cannot be enabled " + "for AArch32 guests"); + return; + } + } +} + +bool kvm_arm_aarch32_supported(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT); +} + +bool kvm_arm_sve_supported(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE); +} + +QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1); + +uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) +{ + /* Only call this function if kvm_arm_sve_supported() returns true. */ + static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS]; + static bool probed; + uint32_t vq = 0; + int i; + + /* + * KVM ensures all host CPUs support the same set of vector lengths. + * So we only need to create the scratch VCPUs once and then cache + * the results. + */ + if (!probed) { + struct kvm_vcpu_init init = { + .target = -1, + .features[0] = (1 << KVM_ARM_VCPU_SVE), + }; + struct kvm_one_reg reg = { + .id = KVM_REG_ARM64_SVE_VLS, + .addr = (uint64_t)&vls[0], + }; + int fdarray[3], ret; + + probed = true; + + if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) { + error_report("failed to create scratch VCPU with SVE enabled"); + abort(); + } + ret = ioctl(fdarray[2], KVM_GET_ONE_REG, ®); + kvm_arm_destroy_scratch_host_vcpu(fdarray); + if (ret) { + error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s", + strerror(errno)); + abort(); + } + + for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) { + if (vls[i]) { + vq = 64 - clz64(vls[i]) + i * 64; + break; + } + } + if (vq > ARM_MAX_VQ) { + warn_report("KVM supports vector lengths larger than " + "QEMU can enable"); + vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ); + } + } + + return vls[0]; +} + +static int kvm_arm_sve_set_vls(ARMCPU *cpu) +{ + uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map }; + + assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); + + return kvm_set_one_reg(CPU(cpu), KVM_REG_ARM64_SVE_VLS, &vls[0]); +} + +#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5 + +int kvm_arch_init_vcpu(CPUState *cs) +{ + int ret; + uint64_t mpidr; + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t psciver; + + if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE || + !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) { + error_report("KVM is not supported for this guest CPU type"); + return -EINVAL; + } + + qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cpu); + + /* Determine init features for this CPU */ + memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features)); + if (cs->start_powered_off) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF; + } + if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) { + cpu->psci_version = QEMU_PSCI_VERSION_0_2; + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2; + } + if (!arm_feature(env, ARM_FEATURE_AARCH64)) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT; + } + if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) { + cpu->has_pmu = false; + } + if (cpu->has_pmu) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; + } else { + env->features &= ~(1ULL << ARM_FEATURE_PMU); + } + if (cpu_isar_feature(aa64_sve, cpu)) { + assert(kvm_arm_sve_supported()); + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE; + } + if (cpu_isar_feature(aa64_pauth, cpu)) { + cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | + 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); + } + + /* Do KVM_ARM_VCPU_INIT ioctl */ + ret = kvm_arm_vcpu_init(cpu); + if (ret) { + return ret; + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arm_sve_set_vls(cpu); + if 
(ret) { + return ret; + } + ret = kvm_arm_vcpu_finalize(cpu, KVM_ARM_VCPU_SVE); + if (ret) { + return ret; + } + } + + /* + * KVM reports the exact PSCI version it is implementing via a + * special sysreg. If it is present, use its contents to determine + * what to report to the guest in the dtb (it is the PSCI version, + * in the same 15-bits major 16-bits minor format that PSCI_VERSION + * returns). + */ + if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) { + cpu->psci_version = psciver; + } + + /* + * When KVM is in use, PSCI is emulated in-kernel and not by qemu. + * Currently KVM has its own idea about MPIDR assignment, so we + * override our defaults with what we get from KVM. + */ + ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr); + if (ret) { + return ret; + } + cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK; + + return kvm_arm_init_cpreg_list(cpu); +} + +int kvm_arch_destroy_vcpu(CPUState *cs) +{ + return 0; +} + +/* Callers must hold the iothread mutex lock */ +static void kvm_inject_arm_sea(CPUState *c) +{ + ARMCPU *cpu = ARM_CPU(c); + CPUARMState *env = &cpu->env; + uint32_t esr; + bool same_el; + + c->exception_index = EXCP_DATA_ABORT; + env->exception.target_el = 1; + + /* + * Set the DFSC to synchronous external abort and set FnV to not valid, + * this will tell guest the FAR_ELx is UNKNOWN for this abort. + */ + same_el = arm_current_el(env) == env->exception.target_el; + esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10); + + env->exception.syndrome = esr; + + arm_cpu_do_interrupt(c); +} + +#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +static int kvm_arch_put_fpsimd(CPUState *cs) +{ + CPUARMState *env = &ARM_CPU(cs)->env; + int i, ret; + + for (i = 0; i < 32; i++) { + uint64_t *q = aa64_vfp_qreg(env, i); +#if HOST_BIG_ENDIAN + uint64_t fp_val[2] = { q[1], q[0] }; + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), + fp_val); +#else + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); +#endif + if (ret) { + return ret; + } + } + + return 0; +} + +/* + * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits + * and PREGS and the FFR have a slice size of 256 bits. However we simply hard + * code the slice index to zero for now as it's unlikely we'll need more than + * one slice for quite some time. 
+ */ +static int kvm_arch_put_sve(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t tmp[ARM_MAX_VQ * 2]; + uint64_t *r; + int n, ret; + + for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { + r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); + if (ret) { + return ret; + } + } + + for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { + r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0], + DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); + if (ret) { + return ret; + } + } + + r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0], + DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); + if (ret) { + return ret; + } + + return 0; +} + +int kvm_arch_put_registers(CPUState *cs, int level) +{ + uint64_t val; + uint32_t fpr; + int i, ret; + unsigned int el; + + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + /* If we are in AArch32 mode then we need to copy the AArch32 regs to the + * AArch64 registers before pushing them out to 64-bit KVM. + */ + if (!is_a64(env)) { + aarch64_sync_32_to_64(env); + } + + for (i = 0; i < 31; i++) { + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), + &env->xregs[i]); + if (ret) { + return ret; + } + } + + /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the + * QEMU side we keep the current SP in xregs[31] as well. + */ + aarch64_save_sp(env, 1); + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); + if (ret) { + return ret; + } + + /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */ + if (is_a64(env)) { + val = pstate_read(env); + } else { + val = cpsr_read(env); + } + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); + if (ret) { + return ret; + } + + /* Saved Program State Registers + * + * Before we restore from the banked_spsr[] array we need to + * ensure that any modifications to env->spsr are correctly + * reflected in the banks. 
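(Concrete instance of the bank mapping used below, for orientation: KVM's spsr[0] slot — SPSR_EL1, which is also the AArch32 SPSR_svc — lands in QEMU's banked_spsr[1], which is why the loop indexes banked_spsr[i + 1].)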
+ */ + el = arm_current_el(env); + if (el > 0 && !is_a64(env)) { + i = bank_number(env->uncached_cpsr & CPSR_M); + env->banked_spsr[i] = env->spsr; + } + + /* KVM 0-4 map to QEMU banks 1-5 */ + for (i = 0; i < KVM_NR_SPSR; i++) { + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]), + &env->banked_spsr[i + 1]); + if (ret) { + return ret; + } + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arch_put_sve(cs); + } else { + ret = kvm_arch_put_fpsimd(cs); + } + if (ret) { + return ret; + } + + fpr = vfp_get_fpsr(env); + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); + if (ret) { + return ret; + } + + fpr = vfp_get_fpcr(env); + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); + if (ret) { + return ret; + } + + write_cpustate_to_list(cpu, true); + + if (!write_list_to_kvmstate(cpu, level)) { + return -EINVAL; + } + + /* + * Setting VCPU events should be triggered after syncing the registers + * to avoid overwriting potential changes made by KVM upon calling + * KVM_SET_VCPU_EVENTS ioctl + */ + ret = kvm_put_vcpu_events(cpu); + if (ret) { + return ret; + } + + return kvm_arm_sync_mpstate_to_kvm(cpu); +} + +static int kvm_arch_get_fpsimd(CPUState *cs) +{ + CPUARMState *env = &ARM_CPU(cs)->env; + int i, ret; + + for (i = 0; i < 32; i++) { + uint64_t *q = aa64_vfp_qreg(env, i); + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); + if (ret) { + return ret; + } else { +#if HOST_BIG_ENDIAN + uint64_t t; + t = q[0], q[0] = q[1], q[1] = t; +#endif + } + } + + return 0; +} + +/* + * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits + * and PREGS and the FFR have a slice size of 256 bits. However we simply hard + * code the slice index to zero for now as it's unlikely we'll need more than + * one slice for quite some time. + */ +static int kvm_arch_get_sve(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t *r; + int n, ret; + + for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { + r = &env->vfp.zregs[n].d[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, cpu->sve_max_vq * 2); + } + + for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { + r = &env->vfp.pregs[n].p[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + } + + r = &env->vfp.pregs[FFR_PRED_NUM].p[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + + return 0; +} + +int kvm_arch_get_registers(CPUState *cs) +{ + uint64_t val; + unsigned int el; + uint32_t fpr; + int i, ret; + + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + for (i = 0; i < 31; i++) { + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), + &env->xregs[i]); + if (ret) { + return ret; + } + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); + if (ret) { + return ret; + } + + env->aarch64 = ((val & PSTATE_nRW) == 0); + if (is_a64(env)) { + pstate_write(env, val); + } else { + cpsr_write(env, val, 0xffffffff, CPSRWriteRaw); + } + + /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. 
On the + * QEMU side we keep the current SP in xregs[31] as well. + */ + aarch64_restore_sp(env, 1); + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); + if (ret) { + return ret; + } + + /* If we are in AArch32 mode then we need to sync the AArch32 regs with the + * incoming AArch64 regs received from 64-bit KVM. + * We must perform this after all of the registers have been acquired from + * the kernel. + */ + if (!is_a64(env)) { + aarch64_sync_64_to_32(env); + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); + if (ret) { + return ret; + } + + /* Fetch the SPSR registers + * + * KVM SPSRs 0-4 map to QEMU banks 1-5 + */ + for (i = 0; i < KVM_NR_SPSR; i++) { + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]), + &env->banked_spsr[i + 1]); + if (ret) { + return ret; + } + } + + el = arm_current_el(env); + if (el > 0 && !is_a64(env)) { + i = bank_number(env->uncached_cpsr & CPSR_M); + env->spsr = env->banked_spsr[i]; + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arch_get_sve(cs); + } else { + ret = kvm_arch_get_fpsimd(cs); + } + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); + if (ret) { + return ret; + } + vfp_set_fpsr(env, fpr); + + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); + if (ret) { + return ret; + } + vfp_set_fpcr(env, fpr); + + ret = kvm_get_vcpu_events(cpu); + if (ret) { + return ret; + } + + if (!write_kvmstate_to_list(cpu)) { + return -EINVAL; + } + /* Note that it's OK to have registers which aren't in CPUState, + * so we can ignore a failure return here. + */ + write_list_to_cpustate(cpu); + + ret = kvm_arm_sync_mpstate_to_qemu(cpu); + + /* TODO: other registers */ + return ret; +} + +void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) +{ + ram_addr_t ram_addr; + hwaddr paddr; + + assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO); + + if (acpi_ghes_present() && addr) { + ram_addr = qemu_ram_addr_from_host(addr); + if (ram_addr != RAM_ADDR_INVALID && + kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) { + kvm_hwpoison_page_add(ram_addr); + /* + * If this is a BUS_MCEERR_AR, we know we have been called + * synchronously from the vCPU thread, so we can easily + * synchronize the state and inject an error. + * + * TODO: we currently don't tell the guest at all about + * BUS_MCEERR_AO. In that case we might either be being + * called synchronously from the vCPU thread, or a bit + * later from the main thread, so doing the injection of + * the error would be more complicated. 
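(Minimal sketch, not QEMU code, of how the two SIGBUS flavours discussed above are distinguished; si_code and si_addr are the standard siginfo_t fields the kernel fills in for memory poisoning:)

#include <signal.h>

static void example_sigbus_handler(int sig, siginfo_t *si, void *ctx)
{
    if (si->si_code == BUS_MCEERR_AR) {
        /* poisoned memory was consumed on this thread: must act before returning */
    } else if (si->si_code == BUS_MCEERR_AO) {
        /* poisoning merely detected: the notification may be handled later */
    }
}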
+ */ + if (code == BUS_MCEERR_AR) { + kvm_cpu_synchronize_state(c); + if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) { + kvm_inject_arm_sea(c); + } else { + error_report("failed to record the error"); + abort(); + } + } + return; + } + if (code == BUS_MCEERR_AO) { + error_report("Hardware memory error at addr %p for memory used by " + "QEMU itself instead of guest system!", addr); + } + } + + if (code == BUS_MCEERR_AR) { + error_report("Hardware memory error!"); + exit(1); + } +} + +/* C6.6.29 BRK instruction */ +static const uint32_t brk_insn = 0xd4200000; + +int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) || + cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) { + return -EINVAL; + } + return 0; +} + +int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + static uint32_t brk; + + if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) || + brk != brk_insn || + cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) { + return -EINVAL; + } + return 0; +} diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c deleted file mode 100644 index 3c175c93a7a..00000000000 --- a/target/arm/kvm64.c +++ /dev/null @@ -1,1290 +0,0 @@ -/* - * ARM implementation of KVM hooks, 64 bit specific code - * - * Copyright Mian-M. Hamayun 2013, Virtual Open Systems - * Copyright Alex Bennée 2014, Linaro - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#include "qemu/osdep.h" -#include -#include - -#include -#include - -#include "qapi/error.h" -#include "cpu.h" -#include "qemu/timer.h" -#include "qemu/error-report.h" -#include "qemu/host-utils.h" -#include "qemu/main-loop.h" -#include "exec/gdbstub.h" -#include "sysemu/runstate.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_int.h" -#include "kvm_arm.h" -#include "internals.h" -#include "cpu-features.h" -#include "hw/acpi/acpi.h" -#include "hw/acpi/ghes.h" - -static bool have_guest_debug; - -void kvm_arm_init_debug(KVMState *s) -{ - have_guest_debug = kvm_check_extension(s, - KVM_CAP_SET_GUEST_DEBUG); - - max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS); - hw_watchpoints = g_array_sized_new(true, true, - sizeof(HWWatchpoint), max_hw_wps); - - max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS); - hw_breakpoints = g_array_sized_new(true, true, - sizeof(HWBreakpoint), max_hw_bps); - return; -} - -int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type) -{ - switch (type) { - case GDB_BREAKPOINT_HW: - return insert_hw_breakpoint(addr); - break; - case GDB_WATCHPOINT_READ: - case GDB_WATCHPOINT_WRITE: - case GDB_WATCHPOINT_ACCESS: - return insert_hw_watchpoint(addr, len, type); - default: - return -ENOSYS; - } -} - -int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type) -{ - switch (type) { - case GDB_BREAKPOINT_HW: - return delete_hw_breakpoint(addr); - case GDB_WATCHPOINT_READ: - case GDB_WATCHPOINT_WRITE: - case GDB_WATCHPOINT_ACCESS: - return delete_hw_watchpoint(addr, len, type); - default: - return -ENOSYS; - } -} - - -void kvm_arch_remove_all_hw_breakpoints(void) -{ - if (cur_hw_wps > 0) { - g_array_remove_range(hw_watchpoints, 0, cur_hw_wps); - } - if (cur_hw_bps > 0) { - g_array_remove_range(hw_breakpoints, 0, cur_hw_bps); - } -} - -void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr) -{ - int i; - memset(ptr, 0, sizeof(struct 
kvm_guest_debug_arch)); - - for (i = 0; i < max_hw_wps; i++) { - HWWatchpoint *wp = get_hw_wp(i); - ptr->dbg_wcr[i] = wp->wcr; - ptr->dbg_wvr[i] = wp->wvr; - } - for (i = 0; i < max_hw_bps; i++) { - HWBreakpoint *bp = get_hw_bp(i); - ptr->dbg_bcr[i] = bp->bcr; - ptr->dbg_bvr[i] = bp->bvr; - } -} - -bool kvm_arm_hw_debug_active(CPUState *cs) -{ - return ((cur_hw_wps > 0) || (cur_hw_bps > 0)); -} - -static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr, - const char *name) -{ - int err; - - err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr); - if (err != 0) { - error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err)); - return false; - } - - err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr); - if (err != 0) { - error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err)); - return false; - } - - return true; -} - -void kvm_arm_pmu_init(CPUState *cs) -{ - struct kvm_device_attr attr = { - .group = KVM_ARM_VCPU_PMU_V3_CTRL, - .attr = KVM_ARM_VCPU_PMU_V3_INIT, - }; - - if (!ARM_CPU(cs)->has_pmu) { - return; - } - if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) { - error_report("failed to init PMU"); - abort(); - } -} - -void kvm_arm_pmu_set_irq(CPUState *cs, int irq) -{ - struct kvm_device_attr attr = { - .group = KVM_ARM_VCPU_PMU_V3_CTRL, - .addr = (intptr_t)&irq, - .attr = KVM_ARM_VCPU_PMU_V3_IRQ, - }; - - if (!ARM_CPU(cs)->has_pmu) { - return; - } - if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) { - error_report("failed to set irq for PMU"); - abort(); - } -} - -void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa) -{ - struct kvm_device_attr attr = { - .group = KVM_ARM_VCPU_PVTIME_CTRL, - .attr = KVM_ARM_VCPU_PVTIME_IPA, - .addr = (uint64_t)&ipa, - }; - - if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) { - return; - } - if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) { - error_report("failed to init PVTIME IPA"); - abort(); - } -} - -static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id) -{ - uint64_t ret; - struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret }; - int err; - - assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); - err = ioctl(fd, KVM_GET_ONE_REG, &idreg); - if (err < 0) { - return -1; - } - *pret = ret; - return 0; -} - -static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id) -{ - struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret }; - - assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); - return ioctl(fd, KVM_GET_ONE_REG, &idreg); -} - -static bool kvm_arm_pauth_supported(void) -{ - return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) && - kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC)); -} - -bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) -{ - /* Identify the feature bits corresponding to the host CPU, and - * fill out the ARMHostCPUClass fields accordingly. To do this - * we have to create a scratch VM, create a single CPU inside it, - * and then query that CPU for the relevant ID registers. - */ - int fdarray[3]; - bool sve_supported; - bool pmu_supported = false; - uint64_t features = 0; - int err; - - /* Old kernels may not know about the PREFERRED_TARGET ioctl: however - * we know these will only support creating one kind of guest CPU, - * which is its preferred CPU type. Fortunately these old kernels - * support only a very limited number of CPUs. 
- */ - static const uint32_t cpus_to_try[] = { - KVM_ARM_TARGET_AEM_V8, - KVM_ARM_TARGET_FOUNDATION_V8, - KVM_ARM_TARGET_CORTEX_A57, - QEMU_KVM_ARM_TARGET_NONE - }; - /* - * target = -1 informs kvm_arm_create_scratch_host_vcpu() - * to use the preferred target - */ - struct kvm_vcpu_init init = { .target = -1, }; - - /* - * Ask for SVE if supported, so that we can query ID_AA64ZFR0, - * which is otherwise RAZ. - */ - sve_supported = kvm_arm_sve_supported(); - if (sve_supported) { - init.features[0] |= 1 << KVM_ARM_VCPU_SVE; - } - - /* - * Ask for Pointer Authentication if supported, so that we get - * the unsanitized field values for AA64ISAR1_EL1. - */ - if (kvm_arm_pauth_supported()) { - init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | - 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); - } - - if (kvm_arm_pmu_supported()) { - init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; - pmu_supported = true; - } - - if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) { - return false; - } - - ahcf->target = init.target; - ahcf->dtb_compatible = "arm,arm-v8"; - - err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0, - ARM64_SYS_REG(3, 0, 0, 4, 0)); - if (unlikely(err < 0)) { - /* - * Before v4.15, the kernel only exposed a limited number of system - * registers, not including any of the interesting AArch64 ID regs. - * For the most part we could leave these fields as zero with minimal - * effect, since this does not affect the values seen by the guest. - * - * However, it could cause problems down the line for QEMU, - * so provide a minimal v8.0 default. - * - * ??? Could read MIDR and use knowledge from cpu64.c. - * ??? Could map a page of memory into our temp guest and - * run the tiniest of hand-crafted kernels to extract - * the values seen by the guest. - * ??? Either of these sounds like too much effort just - * to work around running a modern host kernel. - */ - ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */ - err = 0; - } else { - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, - ARM64_SYS_REG(3, 0, 0, 4, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0, - ARM64_SYS_REG(3, 0, 0, 4, 5)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, - ARM64_SYS_REG(3, 0, 0, 5, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, - ARM64_SYS_REG(3, 0, 0, 5, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, - ARM64_SYS_REG(3, 0, 0, 6, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, - ARM64_SYS_REG(3, 0, 0, 6, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2, - ARM64_SYS_REG(3, 0, 0, 6, 2)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0, - ARM64_SYS_REG(3, 0, 0, 7, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1, - ARM64_SYS_REG(3, 0, 0, 7, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2, - ARM64_SYS_REG(3, 0, 0, 7, 2)); - - /* - * Note that if AArch32 support is not present in the host, - * the AArch32 sysregs are present to be read, but will - * return UNKNOWN values. This is neither better nor worse - * than skipping the reads and leaving 0, as we must avoid - * considering the values in every case. 
- */ - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0, - ARM64_SYS_REG(3, 0, 0, 1, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, - ARM64_SYS_REG(3, 0, 0, 1, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, - ARM64_SYS_REG(3, 0, 0, 1, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, - ARM64_SYS_REG(3, 0, 0, 1, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, - ARM64_SYS_REG(3, 0, 0, 1, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, - ARM64_SYS_REG(3, 0, 0, 1, 6)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, - ARM64_SYS_REG(3, 0, 0, 1, 7)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, - ARM64_SYS_REG(3, 0, 0, 2, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, - ARM64_SYS_REG(3, 0, 0, 2, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2, - ARM64_SYS_REG(3, 0, 0, 2, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3, - ARM64_SYS_REG(3, 0, 0, 2, 3)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4, - ARM64_SYS_REG(3, 0, 0, 2, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, - ARM64_SYS_REG(3, 0, 0, 2, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, - ARM64_SYS_REG(3, 0, 0, 2, 6)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, - ARM64_SYS_REG(3, 0, 0, 2, 7)); - - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, - ARM64_SYS_REG(3, 0, 0, 3, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1, - ARM64_SYS_REG(3, 0, 0, 3, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, - ARM64_SYS_REG(3, 0, 0, 3, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, - ARM64_SYS_REG(3, 0, 0, 3, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1, - ARM64_SYS_REG(3, 0, 0, 3, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, - ARM64_SYS_REG(3, 0, 0, 3, 6)); - - /* - * DBGDIDR is a bit complicated because the kernel doesn't - * provide an accessor for it in 64-bit mode, which is what this - * scratch VM is in, and there's no architected "64-bit sysreg - * which reads the same as the 32-bit register" the way there is - * for other ID registers. Instead we synthesize a value from the - * AArch64 ID_AA64DFR0, the same way the kernel code in - * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does. - * We only do this if the CPU supports AArch32 at EL1. 
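(Decoding note: in ID_AA64PFR0_EL1 the EL1 field reads 1 when EL1 supports AArch64 only and 2 when it also supports AArch32, which is what the >= 2 test below checks.)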
- */ - if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) { - int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS); - int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS); - int ctx_cmps = - FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS); - int version = 6; /* ARMv8 debug architecture */ - bool has_el3 = - !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3); - uint32_t dbgdidr = 0; - - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3); - dbgdidr |= (1 << 15); /* RES1 bit */ - ahcf->isar.dbgdidr = dbgdidr; - } - - if (pmu_supported) { - /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */ - err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0, - ARM64_SYS_REG(3, 3, 9, 12, 0)); - } - - if (sve_supported) { - /* - * There is a range of kernels between kernel commit 73433762fcae - * and f81cb2c3ad41 which have a bug where the kernel doesn't - * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has - * enabled SVE support, which resulted in an error rather than RAZ. - * So only read the register if we set KVM_ARM_VCPU_SVE above. - */ - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, - ARM64_SYS_REG(3, 0, 0, 4, 4)); - } - } - - kvm_arm_destroy_scratch_host_vcpu(fdarray); - - if (err < 0) { - return false; - } - - /* - * We can assume any KVM supporting CPU is at least a v8 - * with VFPv4+Neon; this in turn implies most of the other - * feature bits. - */ - features |= 1ULL << ARM_FEATURE_V8; - features |= 1ULL << ARM_FEATURE_NEON; - features |= 1ULL << ARM_FEATURE_AARCH64; - features |= 1ULL << ARM_FEATURE_PMU; - features |= 1ULL << ARM_FEATURE_GENERIC_TIMER; - - ahcf->features = features; - - return true; -} - -void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) -{ - bool has_steal_time = kvm_arm_steal_time_supported(); - - if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) { - if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - cpu->kvm_steal_time = ON_OFF_AUTO_OFF; - } else { - cpu->kvm_steal_time = ON_OFF_AUTO_ON; - } - } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) { - if (!has_steal_time) { - error_setg(errp, "'kvm-steal-time' cannot be enabled " - "on this host"); - return; - } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - /* - * DEN0057A chapter 2 says "This specification only covers - * systems in which the Execution state of the hypervisor - * as well as EL1 of virtual machines is AArch64.". And, - * to ensure that, the smc/hvc calls are only specified as - * smc64/hvc64. - */ - error_setg(errp, "'kvm-steal-time' cannot be enabled " - "for AArch32 guests"); - return; - } - } -} - -bool kvm_arm_aarch32_supported(void) -{ - return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT); -} - -bool kvm_arm_sve_supported(void) -{ - return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE); -} - -bool kvm_arm_steal_time_supported(void) -{ - return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME); -} - -QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1); - -uint32_t kvm_arm_sve_get_vls(CPUState *cs) -{ - /* Only call this function if kvm_arm_sve_supported() returns true. 
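(Worked example, not from the patch: the loop below converts the KVM_REG_ARM64_SVE_VLS bitmap, in which bit vq-1 set means vq quadwords is supported, into the largest supported vq. If the host reported vls[0] == 0xb — bits 0, 1 and 3 set, i.e. 128-, 256- and 512-bit vectors — then clz64(0xb) is 60, so vq = 64 - 60 + 0 * 64 = 4, and the function returns the 0xb map unchanged.)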
*/ - static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS]; - static bool probed; - uint32_t vq = 0; - int i; - - /* - * KVM ensures all host CPUs support the same set of vector lengths. - * So we only need to create the scratch VCPUs once and then cache - * the results. - */ - if (!probed) { - struct kvm_vcpu_init init = { - .target = -1, - .features[0] = (1 << KVM_ARM_VCPU_SVE), - }; - struct kvm_one_reg reg = { - .id = KVM_REG_ARM64_SVE_VLS, - .addr = (uint64_t)&vls[0], - }; - int fdarray[3], ret; - - probed = true; - - if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) { - error_report("failed to create scratch VCPU with SVE enabled"); - abort(); - } - ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg); - kvm_arm_destroy_scratch_host_vcpu(fdarray); - if (ret) { - error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s", - strerror(errno)); - abort(); - } - - for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) { - if (vls[i]) { - vq = 64 - clz64(vls[i]) + i * 64; - break; - } - } - if (vq > ARM_MAX_VQ) { - warn_report("KVM supports vector lengths larger than " - "QEMU can enable"); - vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ); - } - } - - return vls[0]; -} - -static int kvm_arm_sve_set_vls(CPUState *cs) -{ - ARMCPU *cpu = ARM_CPU(cs); - uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map }; - - assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); - - return kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_VLS, &vls[0]); -} - -#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5 - -int kvm_arch_init_vcpu(CPUState *cs) -{ - int ret; - uint64_t mpidr; - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - uint64_t psciver; - - if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE || - !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) { - error_report("KVM is not supported for this guest CPU type"); - return -EINVAL; - } - - qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs); - - /* Determine init features for this CPU */ - memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features)); - if (cs->start_powered_off) { - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF; - } - if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) { - cpu->psci_version = QEMU_PSCI_VERSION_0_2; - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2; - } - if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT; - } - if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) { - cpu->has_pmu = false; - } - if (cpu->has_pmu) { - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; - } else { - env->features &= ~(1ULL << ARM_FEATURE_PMU); - } - if (cpu_isar_feature(aa64_sve, cpu)) { - assert(kvm_arm_sve_supported()); - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE; - } - if (cpu_isar_feature(aa64_pauth, cpu)) { - cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | - 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); - } - - /* Do KVM_ARM_VCPU_INIT ioctl */ - ret = kvm_arm_vcpu_init(cs); - if (ret) { - return ret; - } - - if (cpu_isar_feature(aa64_sve, cpu)) { - ret = kvm_arm_sve_set_vls(cs); - if (ret) { - return ret; - } - ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE); - if (ret) { - return ret; - } - } - - /* - * KVM reports the exact PSCI version it is implementing via a - * special sysreg. If it is present, use its contents to determine - * what to report to the guest in the dtb (it is the PSCI version, - * in the same 15-bits major 16-bits minor format that PSCI_VERSION - * returns). 
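(Illustrative sketch, not from the patch, splitting the packed value described above into its fields; the variable names are invented for the example:)

uint32_t psci_major = (psciver >> 16) & 0x7fff; /* 15-bit major */
uint32_t psci_minor = psciver & 0xffff;         /* 16-bit minor */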
- */ - if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) { - cpu->psci_version = psciver; - } - - /* - * When KVM is in use, PSCI is emulated in-kernel and not by qemu. - * Currently KVM has its own idea about MPIDR assignment, so we - * override our defaults with what we get from KVM. - */ - ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr); - if (ret) { - return ret; - } - cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK; - - /* Check whether user space can specify guest syndrome value */ - kvm_arm_init_serror_injection(cs); - - return kvm_arm_init_cpreg_list(cpu); -} - -int kvm_arch_destroy_vcpu(CPUState *cs) -{ - return 0; -} - -bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx) -{ - /* Return true if the regidx is a register we should synchronize - * via the cpreg_tuples array (ie is not a core or sve reg that - * we sync by hand in kvm_arch_get/put_registers()) - */ - switch (regidx & KVM_REG_ARM_COPROC_MASK) { - case KVM_REG_ARM_CORE: - case KVM_REG_ARM64_SVE: - return false; - default: - return true; - } -} - -typedef struct CPRegStateLevel { - uint64_t regidx; - int level; -} CPRegStateLevel; - -/* All system registers not listed in the following table are assumed to be - * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less - * often, you must add it to this table with a state of either - * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE. - */ -static const CPRegStateLevel non_runtime_cpregs[] = { - { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE }, - { KVM_REG_ARM_PTIMER_CNT, KVM_PUT_FULL_STATE }, -}; - -int kvm_arm_cpreg_level(uint64_t regidx) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) { - const CPRegStateLevel *l = &non_runtime_cpregs[i]; - if (l->regidx == regidx) { - return l->level; - } - } - - return KVM_PUT_RUNTIME_STATE; -} - -/* Callers must hold the iothread mutex lock */ -static void kvm_inject_arm_sea(CPUState *c) -{ - ARMCPU *cpu = ARM_CPU(c); - CPUARMState *env = &cpu->env; - uint32_t esr; - bool same_el; - - c->exception_index = EXCP_DATA_ABORT; - env->exception.target_el = 1; - - /* - * Set the DFSC to synchronous external abort and set FnV to not valid, - * this will tell guest the FAR_ELx is UNKNOWN for this abort. - */ - same_el = arm_current_el(env) == env->exception.target_el; - esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10); - - env->exception.syndrome = esr; - - arm_cpu_do_interrupt(c); -} - -#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ - KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) - -#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \ - KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) - -#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \ - KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) - -static int kvm_arch_put_fpsimd(CPUState *cs) -{ - CPUARMState *env = &ARM_CPU(cs)->env; - int i, ret; - - for (i = 0; i < 32; i++) { - uint64_t *q = aa64_vfp_qreg(env, i); -#if HOST_BIG_ENDIAN - uint64_t fp_val[2] = { q[1], q[0] }; - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), - fp_val); -#else - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); -#endif - if (ret) { - return ret; - } - } - - return 0; -} - -/* - * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits - * and PREGS and the FFR have a slice size of 256 bits. However we simply hard - * code the slice index to zero for now as it's unlikely we'll need more than - * one slice for quite some time. 
- */ -static int kvm_arch_put_sve(CPUState *cs) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - uint64_t tmp[ARM_MAX_VQ * 2]; - uint64_t *r; - int n, ret; - - for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { - r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2); - ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); - if (ret) { - return ret; - } - } - - for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { - r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0], - DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); - if (ret) { - return ret; - } - } - - r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0], - DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); - if (ret) { - return ret; - } - - return 0; -} - -int kvm_arch_put_registers(CPUState *cs, int level) -{ - uint64_t val; - uint32_t fpr; - int i, ret; - unsigned int el; - - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - - /* If we are in AArch32 mode then we need to copy the AArch32 regs to the - * AArch64 registers before pushing them out to 64-bit KVM. - */ - if (!is_a64(env)) { - aarch64_sync_32_to_64(env); - } - - for (i = 0; i < 31; i++) { - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), - &env->xregs[i]); - if (ret) { - return ret; - } - } - - /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the - * QEMU side we keep the current SP in xregs[31] as well. - */ - aarch64_save_sp(env, 1); - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); - if (ret) { - return ret; - } - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); - if (ret) { - return ret; - } - - /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */ - if (is_a64(env)) { - val = pstate_read(env); - } else { - val = cpsr_read(env); - } - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); - if (ret) { - return ret; - } - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); - if (ret) { - return ret; - } - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); - if (ret) { - return ret; - } - - /* Saved Program State Registers - * - * Before we restore from the banked_spsr[] array we need to - * ensure that any modifications to env->spsr are correctly - * reflected in the banks. 
- */ - el = arm_current_el(env); - if (el > 0 && !is_a64(env)) { - i = bank_number(env->uncached_cpsr & CPSR_M); - env->banked_spsr[i] = env->spsr; - } - - /* KVM 0-4 map to QEMU banks 1-5 */ - for (i = 0; i < KVM_NR_SPSR; i++) { - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]), - &env->banked_spsr[i + 1]); - if (ret) { - return ret; - } - } - - if (cpu_isar_feature(aa64_sve, cpu)) { - ret = kvm_arch_put_sve(cs); - } else { - ret = kvm_arch_put_fpsimd(cs); - } - if (ret) { - return ret; - } - - fpr = vfp_get_fpsr(env); - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); - if (ret) { - return ret; - } - - fpr = vfp_get_fpcr(env); - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); - if (ret) { - return ret; - } - - write_cpustate_to_list(cpu, true); - - if (!write_list_to_kvmstate(cpu, level)) { - return -EINVAL; - } - - /* - * Setting VCPU events should be triggered after syncing the registers - * to avoid overwriting potential changes made by KVM upon calling - * KVM_SET_VCPU_EVENTS ioctl - */ - ret = kvm_put_vcpu_events(cpu); - if (ret) { - return ret; - } - - kvm_arm_sync_mpstate_to_kvm(cpu); - - return ret; -} - -static int kvm_arch_get_fpsimd(CPUState *cs) -{ - CPUARMState *env = &ARM_CPU(cs)->env; - int i, ret; - - for (i = 0; i < 32; i++) { - uint64_t *q = aa64_vfp_qreg(env, i); - ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); - if (ret) { - return ret; - } else { -#if HOST_BIG_ENDIAN - uint64_t t; - t = q[0], q[0] = q[1], q[1] = t; -#endif - } - } - - return 0; -} - -/* - * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits - * and PREGS and the FFR have a slice size of 256 bits. However we simply hard - * code the slice index to zero for now as it's unlikely we'll need more than - * one slice for quite some time. - */ -static int kvm_arch_get_sve(CPUState *cs) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - uint64_t *r; - int n, ret; - - for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { - r = &env->vfp.zregs[n].d[0]; - ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); - if (ret) { - return ret; - } - sve_bswap64(r, r, cpu->sve_max_vq * 2); - } - - for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { - r = &env->vfp.pregs[n].p[0]; - ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); - if (ret) { - return ret; - } - sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - } - - r = &env->vfp.pregs[FFR_PRED_NUM].p[0]; - ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); - if (ret) { - return ret; - } - sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - - return 0; -} - -int kvm_arch_get_registers(CPUState *cs) -{ - uint64_t val; - unsigned int el; - uint32_t fpr; - int i, ret; - - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - - for (i = 0; i < 31; i++) { - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), - &env->xregs[i]); - if (ret) { - return ret; - } - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); - if (ret) { - return ret; - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); - if (ret) { - return ret; - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); - if (ret) { - return ret; - } - - env->aarch64 = ((val & PSTATE_nRW) == 0); - if (is_a64(env)) { - pstate_write(env, val); - } else { - cpsr_write(env, val, 0xffffffff, CPSRWriteRaw); - } - - /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. 
On the - * QEMU side we keep the current SP in xregs[31] as well. - */ - aarch64_restore_sp(env, 1); - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); - if (ret) { - return ret; - } - - /* If we are in AArch32 mode then we need to sync the AArch32 regs with the - * incoming AArch64 regs received from 64-bit KVM. - * We must perform this after all of the registers have been acquired from - * the kernel. - */ - if (!is_a64(env)) { - aarch64_sync_64_to_32(env); - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); - if (ret) { - return ret; - } - - /* Fetch the SPSR registers - * - * KVM SPSRs 0-4 map to QEMU banks 1-5 - */ - for (i = 0; i < KVM_NR_SPSR; i++) { - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]), - &env->banked_spsr[i + 1]); - if (ret) { - return ret; - } - } - - el = arm_current_el(env); - if (el > 0 && !is_a64(env)) { - i = bank_number(env->uncached_cpsr & CPSR_M); - env->spsr = env->banked_spsr[i]; - } - - if (cpu_isar_feature(aa64_sve, cpu)) { - ret = kvm_arch_get_sve(cs); - } else { - ret = kvm_arch_get_fpsimd(cs); - } - if (ret) { - return ret; - } - - ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); - if (ret) { - return ret; - } - vfp_set_fpsr(env, fpr); - - ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); - if (ret) { - return ret; - } - vfp_set_fpcr(env, fpr); - - ret = kvm_get_vcpu_events(cpu); - if (ret) { - return ret; - } - - if (!write_kvmstate_to_list(cpu)) { - return -EINVAL; - } - /* Note that it's OK to have registers which aren't in CPUState, - * so we can ignore a failure return here. - */ - write_list_to_cpustate(cpu); - - kvm_arm_sync_mpstate_to_qemu(cpu); - - /* TODO: other registers */ - return ret; -} - -void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) -{ - ram_addr_t ram_addr; - hwaddr paddr; - - assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO); - - if (acpi_ghes_present() && addr) { - ram_addr = qemu_ram_addr_from_host(addr); - if (ram_addr != RAM_ADDR_INVALID && - kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) { - kvm_hwpoison_page_add(ram_addr); - /* - * If this is a BUS_MCEERR_AR, we know we have been called - * synchronously from the vCPU thread, so we can easily - * synchronize the state and inject an error. - * - * TODO: we currently don't tell the guest at all about - * BUS_MCEERR_AO. In that case we might either be being - * called synchronously from the vCPU thread, or a bit - * later from the main thread, so doing the injection of - * the error would be more complicated. 
- */ - if (code == BUS_MCEERR_AR) { - kvm_cpu_synchronize_state(c); - if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) { - kvm_inject_arm_sea(c); - } else { - error_report("failed to record the error"); - abort(); - } - } - return; - } - if (code == BUS_MCEERR_AO) { - error_report("Hardware memory error at addr %p for memory used by " - "QEMU itself instead of guest system!", addr); - } - } - - if (code == BUS_MCEERR_AR) { - error_report("Hardware memory error!"); - exit(1); - } -} - -/* C6.6.29 BRK instruction */ -static const uint32_t brk_insn = 0xd4200000; - -int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) -{ - if (have_guest_debug) { - if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) || - cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) { - return -EINVAL; - } - return 0; - } else { - error_report("guest debug not supported on this kernel"); - return -EINVAL; - } -} - -int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) -{ - static uint32_t brk; - - if (have_guest_debug) { - if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) || - brk != brk_insn || - cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) { - return -EINVAL; - } - return 0; - } else { - error_report("guest debug not supported on this kernel"); - return -EINVAL; - } -} - -/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register - * - * To minimise translating between kernel and user-space the kernel - * ABI just provides user-space with the full exception syndrome - * register value to be decoded in QEMU. - */ - -bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit) -{ - int hsr_ec = syn_get_ec(debug_exit->hsr); - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - - /* Ensure PC is synchronised */ - kvm_cpu_synchronize_state(cs); - - switch (hsr_ec) { - case EC_SOFTWARESTEP: - if (cs->singlestep_enabled) { - return true; - } else { - /* - * The kernel should have suppressed the guest's ability to - * single step at this point so something has gone wrong. - */ - error_report("%s: guest single-step while debugging unsupported" - " (%"PRIx64", %"PRIx32")", - __func__, env->pc, debug_exit->hsr); - return false; - } - break; - case EC_AA64_BKPT: - if (kvm_find_sw_breakpoint(cs, env->pc)) { - return true; - } - break; - case EC_BREAKPOINT: - if (find_hw_breakpoint(cs, env->pc)) { - return true; - } - break; - case EC_WATCHPOINT: - { - CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far); - if (wp) { - cs->watchpoint_hit = wp; - return true; - } - break; - } - default: - error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")", - __func__, debug_exit->hsr, env->pc); - } - - /* If we are not handling the debug exception it must belong to - * the guest. Let's re-use the existing TCG interrupt code to set - * everything up properly. - */ - cs->exception_index = EXCP_BKPT; - env->exception.syndrome = debug_exit->hsr; - env->exception.vaddress = debug_exit->far; - env->exception.target_el = 1; - qemu_mutex_lock_iothread(); - arm_cpu_do_interrupt(cs); - qemu_mutex_unlock_iothread(); - - return false; -} - -#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0) -#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2) - -/* - * ESR_EL1 - * ISS encoding - * AARCH64: DFSC, bits [5:0] - * AARCH32: - * TTBCR.EAE == 0 - * FS[4] - DFSR[10] - * FS[3:0] - DFSR[3:0] - * TTBCR.EAE == 1 - * FS, bits [5:0] - */ -#define ESR_DFSC(aarch64, lpae, v) \ - ((aarch64 || (lpae)) ? 
((v) & 0x3F) \ - : (((v) >> 6) | ((v) & 0x1F))) - -#define ESR_DFSC_EXTABT(aarch64, lpae) \ - ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8) - -bool kvm_arm_verify_ext_dabt_pending(CPUState *cs) -{ - uint64_t dfsr_val; - - if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) { - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64); - int lpae = 0; - - if (!aarch64_mode) { - uint64_t ttbcr; - - if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) { - lpae = arm_feature(env, ARM_FEATURE_LPAE) - && (ttbcr & TTBCR_EAE); - } - } - /* - * The verification here is based on the DFSC bits - * of the ESR_EL1 reg only - */ - return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) == - ESR_DFSC_EXTABT(aarch64_mode, lpae)); - } - return false; -} diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 051a0da41c4..cfaa0d9bc71 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -12,46 +12,10 @@ #define QEMU_KVM_ARM_H #include "sysemu/kvm.h" -#include "exec/memory.h" -#include "qemu/error-report.h" #define KVM_ARM_VGIC_V2 (1 << 0) #define KVM_ARM_VGIC_V3 (1 << 1) -/** - * kvm_arm_init_debug() - initialize guest debug capabilities - * @s: KVMState - * - * Should be called only once before using guest debug capabilities. - */ -void kvm_arm_init_debug(KVMState *s); - -/** - * kvm_arm_vcpu_init: - * @cs: CPUState - * - * Initialize (or reinitialize) the VCPU by invoking the - * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature - * bitmask specified in the CPUState. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_arm_vcpu_init(CPUState *cs); - -/** - * kvm_arm_vcpu_finalize: - * @cs: CPUState - * @feature: feature to finalize - * - * Finalizes the configuration of the specified VCPU feature by - * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring - * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of - * KVM's API documentation. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_arm_vcpu_finalize(CPUState *cs, int feature); - /** * kvm_arm_register_device: * @mr: memory region for this device @@ -73,37 +37,6 @@ int kvm_arm_vcpu_finalize(CPUState *cs, int feature); void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group, uint64_t attr, int dev_fd, uint64_t addr_ormask); -/** - * kvm_arm_init_cpreg_list: - * @cpu: ARMCPU - * - * Initialize the ARMCPU cpreg list according to the kernel's - * definition of what CPU registers it knows about (and throw away - * the previous TCG-created cpreg list). - * - * Returns: 0 if success, else < 0 error code - */ -int kvm_arm_init_cpreg_list(ARMCPU *cpu); - -/** - * kvm_arm_reg_syncs_via_cpreg_list: - * @regidx: KVM register index - * - * Return true if this KVM register should be synchronized via the - * cpreg list of arbitrary system registers, false if it is synchronized - * by hand using code in kvm_arch_get/put_registers(). - */ -bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx); - -/** - * kvm_arm_cpreg_level: - * @regidx: KVM register index - * - * Return the level of this coprocessor/system register. Return value is - * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE. - */ -int kvm_arm_cpreg_level(uint64_t regidx); - /** * write_list_to_kvmstate: * @cpu: ARMCPU @@ -163,34 +96,6 @@ void kvm_arm_cpu_post_load(ARMCPU *cpu); */ void kvm_arm_reset_vcpu(ARMCPU *cpu); -/** - * kvm_arm_init_serror_injection: - * @cs: CPUState - * - * Check whether KVM can set guest SError syndrome. 
- */ -void kvm_arm_init_serror_injection(CPUState *cs); - -/** - * kvm_get_vcpu_events: - * @cpu: ARMCPU - * - * Get VCPU related state from kvm. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_get_vcpu_events(ARMCPU *cpu); - -/** - * kvm_put_vcpu_events: - * @cpu: ARMCPU - * - * Put VCPU related state to kvm. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_put_vcpu_events(ARMCPU *cpu); - #ifdef CONFIG_KVM /** * kvm_arm_create_scratch_host_vcpu: @@ -222,37 +127,15 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, */ void kvm_arm_destroy_scratch_host_vcpu(int *fdarray); -/** - * ARMHostCPUFeatures: information about the host CPU (identified - * by asking the host kernel) - */ -typedef struct ARMHostCPUFeatures { - ARMISARegisters isar; - uint64_t features; - uint32_t target; - const char *dtb_compatible; -} ARMHostCPUFeatures; - -/** - * kvm_arm_get_host_cpu_features: - * @ahcf: ARMHostCPUClass to fill in - * - * Probe the capabilities of the host kernel's preferred CPU and fill - * in the ARMHostCPUClass struct accordingly. - * - * Returns true on success and false otherwise. - */ -bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf); - /** * kvm_arm_sve_get_vls: - * @cs: CPUState + * @cpu: ARMCPU * * Get all the SVE vector lengths supported by the KVM host, setting * the bits corresponding to their length in quadwords minus one * (vq - 1) up to ARM_MAX_VQ. Return the resulting map. */ -uint32_t kvm_arm_sve_get_vls(CPUState *cs); +uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu); /** * kvm_arm_set_cpu_features_from_host: @@ -265,12 +148,12 @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu); /** * kvm_arm_add_vcpu_properties: - * @obj: The CPU object to add the properties to + * @cpu: The CPU object to add the properties to * * Add all KVM specific CPU properties to the CPU object. These * are the CPU properties with "kvm-" prefixed names. */ -void kvm_arm_add_vcpu_properties(Object *obj); +void kvm_arm_add_vcpu_properties(ARMCPU *cpu); /** * kvm_arm_steal_time_finalize: @@ -282,14 +165,6 @@ void kvm_arm_add_vcpu_properties(Object *obj); */ void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp); -/** - * kvm_arm_steal_time_supported: - * - * Returns: true if KVM can enable steal time reporting - * and false otherwise. - */ -bool kvm_arm_steal_time_supported(void); - /** * kvm_arm_aarch32_supported: * @@ -323,57 +198,19 @@ bool kvm_arm_sve_supported(void); */ int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa); -/** - * kvm_arm_sync_mpstate_to_kvm: - * @cpu: ARMCPU - * - * If supported set the KVM MP_STATE based on QEMU's model. - * - * Returns 0 on success and -1 on failure. - */ -int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu); - -/** - * kvm_arm_sync_mpstate_to_qemu: - * @cpu: ARMCPU - * - * If supported get the MP_STATE from KVM and store in QEMU's model. - * - * Returns 0 on success and aborts on failure. - */ -int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu); - -/** - * kvm_arm_get_virtual_time: - * @cs: CPUState - * - * Gets the VCPU's virtual counter and stores it in the KVM CPU state. - */ -void kvm_arm_get_virtual_time(CPUState *cs); - -/** - * kvm_arm_put_virtual_time: - * @cs: CPUState - * - * Sets the VCPU's virtual counter to the value stored in the KVM CPU state. 
- */ -void kvm_arm_put_virtual_time(CPUState *cs); - -void kvm_arm_vm_state_change(void *opaque, bool running, RunState state); - int kvm_arm_vgic_probe(void); -void kvm_arm_pmu_set_irq(CPUState *cs, int irq); -void kvm_arm_pmu_init(CPUState *cs); +void kvm_arm_pmu_init(ARMCPU *cpu); +void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq); /** * kvm_arm_pvtime_init: - * @cs: CPUState + * @cpu: ARMCPU * @ipa: Per-vcpu guest physical base address of the pvtime structures * * Initializes PVTIME for the VCPU, setting the PVTIME IPA to @ipa. */ -void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa); +void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa); int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level); @@ -398,11 +235,6 @@ static inline bool kvm_arm_sve_supported(void) return false; } -static inline bool kvm_arm_steal_time_supported(void) -{ - return false; -} - /* * These functions should never actually be called without KVM support. */ @@ -411,7 +243,7 @@ static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu) g_assert_not_reached(); } -static inline void kvm_arm_add_vcpu_properties(Object *obj) +static inline void kvm_arm_add_vcpu_properties(ARMCPU *cpu) { g_assert_not_reached(); } @@ -426,17 +258,17 @@ static inline int kvm_arm_vgic_probe(void) g_assert_not_reached(); } -static inline void kvm_arm_pmu_set_irq(CPUState *cs, int irq) +static inline void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq) { g_assert_not_reached(); } -static inline void kvm_arm_pmu_init(CPUState *cs) +static inline void kvm_arm_pmu_init(ARMCPU *cpu) { g_assert_not_reached(); } -static inline void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa) +static inline void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa) { g_assert_not_reached(); } @@ -446,48 +278,11 @@ static inline void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) g_assert_not_reached(); } -static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs) +static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) { g_assert_not_reached(); } #endif -/** - * kvm_arm_handle_debug: - * @cs: CPUState - * @debug_exit: debug part of the KVM exit structure - * - * Returns: TRUE if the debug exception was handled. - */ -bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit); - -/** - * kvm_arm_hw_debug_active: - * @cs: CPU State - * - * Return: TRUE if any hardware breakpoints in use. - */ -bool kvm_arm_hw_debug_active(CPUState *cs); - -/** - * kvm_arm_copy_hw_debug_data: - * @ptr: kvm_guest_debug_arch structure - * - * Copy the architecture specific debug registers into the - * kvm_guest_debug ioctl structure. - */ -struct kvm_guest_debug_arch; -void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr); - -/** - * kvm_arm_verify_ext_dabt_pending: - * @cs: CPUState - * - * Verify the fault status code wrt the Ext DABT injection - * - * Returns: true if the fault status code is as expected, false otherwise - */ -bool kvm_arm_verify_ext_dabt_pending(CPUState *cs); - #endif diff --git a/target/arm/machine.c b/target/arm/machine.c index 9e20b411895..b2b39b24755 100644 --- a/target/arm/machine.c +++ b/target/arm/machine.c @@ -7,6 +7,7 @@ #include "internals.h" #include "cpu-features.h" #include "migration/cpu.h" +#include "target/arm/gtimer.h" static bool vfp_needed(void *opaque) { @@ -49,7 +50,7 @@ static const VMStateDescription vmstate_vfp = { .version_id = 3, .minimum_version_id = 3, .needed = vfp_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* For compatibility, store Qn out of Zn here. 
*/ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2), VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2), @@ -115,7 +116,7 @@ static const VMStateDescription vmstate_iwmmxt = { .version_id = 1, .minimum_version_id = 1, .needed = iwmmxt_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16), VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16), VMSTATE_END_OF_LIST() @@ -140,7 +141,7 @@ static const VMStateDescription vmstate_zreg_hi_reg = { .name = "cpu/sve/zreg_hi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2), VMSTATE_END_OF_LIST() } @@ -150,7 +151,7 @@ static const VMStateDescription vmstate_preg_reg = { .name = "cpu/sve/preg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8), VMSTATE_END_OF_LIST() } @@ -161,7 +162,7 @@ static const VMStateDescription vmstate_sve = { .version_id = 1, .minimum_version_id = 1, .needed = sve_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0, vmstate_zreg_hi_reg, ARMVectorReg), VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0, @@ -174,7 +175,7 @@ static const VMStateDescription vmstate_vreg = { .name = "vreg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(d, ARMVectorReg, ARM_MAX_VQ * 2), VMSTATE_END_OF_LIST() } @@ -196,7 +197,7 @@ static const VMStateDescription vmstate_za = { .version_id = 1, .minimum_version_id = 1, .needed = za_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0, vmstate_vreg, ARMVectorReg), VMSTATE_END_OF_LIST() @@ -217,7 +218,7 @@ static const VMStateDescription vmstate_serror = { .version_id = 1, .minimum_version_id = 1, .needed = serror_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(env.serror.pending, ARMCPU), VMSTATE_UINT8(env.serror.has_esr, ARMCPU), VMSTATE_UINT64(env.serror.esr, ARMCPU), @@ -235,7 +236,7 @@ static const VMStateDescription vmstate_irq_line_state = { .version_id = 1, .minimum_version_id = 1, .needed = irq_line_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.irq_line_state, ARMCPU), VMSTATE_END_OF_LIST() } @@ -254,7 +255,7 @@ static const VMStateDescription vmstate_m_faultmask_primask = { .version_id = 1, .minimum_version_id = 1, .needed = m_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU), VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU), VMSTATE_END_OF_LIST() @@ -289,7 +290,7 @@ static const VMStateDescription vmstate_m_csselr = { .version_id = 1, .minimum_version_id = 1, .needed = m_csselr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS), VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate), VMSTATE_END_OF_LIST() @@ -301,7 +302,7 @@ static const VMStateDescription vmstate_m_scr = { .version_id = 1, .minimum_version_id = 1, .needed = m_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU), 
VMSTATE_END_OF_LIST() } @@ -312,7 +313,7 @@ static const VMStateDescription vmstate_m_other_sp = { .version_id = 1, .minimum_version_id = 1, .needed = m_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.other_sp, ARMCPU), VMSTATE_END_OF_LIST() } @@ -331,7 +332,7 @@ static const VMStateDescription vmstate_m_v8m = { .version_id = 1, .minimum_version_id = 1, .needed = m_v8m_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS), VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS), VMSTATE_END_OF_LIST() @@ -343,7 +344,7 @@ static const VMStateDescription vmstate_m_fp = { .version_id = 1, .minimum_version_id = 1, .needed = vfp_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS), VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS), VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS), @@ -365,7 +366,7 @@ static const VMStateDescription vmstate_m_mve = { .version_id = 1, .minimum_version_id = 1, .needed = mve_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.vpr, ARMCPU), VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU), VMSTATE_END_OF_LIST() @@ -377,7 +378,7 @@ static const VMStateDescription vmstate_m = { .version_id = 4, .minimum_version_id = 4, .needed = m_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU), VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU), VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU), @@ -391,7 +392,7 @@ static const VMStateDescription vmstate_m = { VMSTATE_INT32(env.v7m.exception, ARMCPU), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_m_faultmask_primask, &vmstate_m_csselr, &vmstate_m_scr, @@ -416,7 +417,7 @@ static const VMStateDescription vmstate_thumb2ee = { .version_id = 1, .minimum_version_id = 1, .needed = thumb2ee_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.teecr, ARMCPU), VMSTATE_UINT32(env.teehbr, ARMCPU), VMSTATE_END_OF_LIST() @@ -445,7 +446,7 @@ static const VMStateDescription vmstate_pmsav7 = { .version_id = 1, .minimum_version_id = 1, .needed = pmsav7_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0, vmstate_info_uint32, uint32_t), VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0, @@ -474,7 +475,7 @@ static const VMStateDescription vmstate_pmsav7_rnr = { .version_id = 1, .minimum_version_id = 1, .needed = pmsav7_rnr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU), VMSTATE_END_OF_LIST() } @@ -504,7 +505,7 @@ static const VMStateDescription vmstate_pmsav8r = { .version_id = 1, .minimum_version_id = 1, .needed = pmsav8r_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(env.pmsav8.hprbar, ARMCPU, pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t), VMSTATE_VARRAY_UINT32(env.pmsav8.hprlar, ARMCPU, @@ -518,7 +519,7 @@ static const VMStateDescription vmstate_pmsav8 = { .version_id = 1, .minimum_version_id = 1, .needed = pmsav8_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion, 0, vmstate_info_uint32, uint32_t), VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion, @@ -527,7 +528,7 @@ static const VMStateDescription vmstate_pmsav8 = { VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_pmsav8r, NULL } @@ -560,7 +561,7 @@ static const VMStateDescription vmstate_m_security = { .version_id = 1, .minimum_version_id = 1, .needed = m_security_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.secure, ARMCPU), VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU), VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU), @@ -772,7 +773,7 @@ static int cpu_pre_load(void *opaque) env->irq_line_state = UINT32_MAX; if (!kvm_enabled()) { - pmu_op_start(&cpu->env); + pmu_op_start(env); } return 0; @@ -870,11 +871,11 @@ static int cpu_post_load(void *opaque, int version_id) } if (!kvm_enabled()) { - pmu_op_finish(&cpu->env); + pmu_op_finish(env); } if (tcg_enabled()) { - arm_rebuild_hflags(&cpu->env); + arm_rebuild_hflags(env); } return 0; @@ -888,7 +889,7 @@ const VMStateDescription vmstate_arm_cpu = { .post_save = cpu_post_save, .pre_load = cpu_pre_load, .post_load = cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16), VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32), VMSTATE_UINT64(env.pc, ARMCPU), @@ -937,7 +938,7 @@ const VMStateDescription vmstate_arm_cpu = { }, VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_vfp, &vmstate_iwmmxt, &vmstate_m, diff --git a/target/arm/meson.build b/target/arm/meson.build index 5d04a8e94f2..2e10464dbb6 100644 --- a/target/arm/meson.build +++ b/target/arm/meson.build @@ -8,7 +8,7 @@ arm_ss.add(files( )) arm_ss.add(zlib) -arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c', 'kvm64.c'), if_false: files('kvm-stub.c')) +arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c'), if_false: files('kvm-stub.c')) arm_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c')) arm_ss.add(when: 'TARGET_AARCH64', if_true: files( @@ -26,9 +26,11 @@ arm_system_ss.add(files( 'ptw.c', )) +arm_user_ss = ss.source_set() + subdir('hvf') -if 'CONFIG_TCG' in config_all +if 'CONFIG_TCG' in config_all_accel subdir('tcg') else arm_ss.add(files('tcg-stubs.c')) @@ -36,3 +38,4 @@ endif target_arch += {'arm': arm_ss} target_system_arch += {'arm': arm_system_ss} +target_user_arch += {'arm': arm_user_ss} diff --git a/target/arm/multiprocessing.h b/target/arm/multiprocessing.h new file mode 100644 index 00000000000..81715d345c2 --- /dev/null +++ b/target/arm/multiprocessing.h @@ -0,0 +1,16 @@ +/* + * ARM multiprocessor CPU helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef TARGET_ARM_MULTIPROCESSING_H +#define TARGET_ARM_MULTIPROCESSING_H + +#include "target/arm/cpu-qom.h" + +uint64_t arm_cpu_mp_affinity(ARMCPU *cpu); + +#endif diff --git a/target/arm/ptw.c b/target/arm/ptw.c index 1762b058aec..31ae43f60ed 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -95,7 +95,10 @@ static const uint8_t pamax_map[] = { [6] = 52, }; -/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. 
*/ +/* + * The cpu-specific constant value of PAMax; also used by hw/arm/virt. + * Note that machvirt_init calls this on a CPU that is inited but not realized! + */ unsigned int arm_pamax(ARMCPU *cpu) { if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { @@ -110,13 +113,8 @@ unsigned int arm_pamax(ARMCPU *cpu) return pamax_map[parange]; } - /* - * In machvirt_init, we call arm_pamax on a cpu that is not fully - * initialized, so we can't rely on the propagation done in realize. - */ - if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) || - arm_feature(&cpu->env, ARM_FEATURE_V7VE)) { - /* v7 with LPAE */ + if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { + /* v7 or v8 with LPAE */ return 40; } /* Anything else */ @@ -473,6 +471,16 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress, return false; } +static bool S1_attrs_are_device(uint8_t attrs) +{ + /* + * This slightly under-decodes the MAIR_ELx field: + * 0b0000dd01 is Device with FEAT_XS, otherwise UNPREDICTABLE; + * 0b0000dd1x is UNPREDICTABLE. + */ + return (attrs & 0xf0) == 0; +} + static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs) { /* @@ -713,8 +721,68 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, void *host = ptw->out_host; if (unlikely(!host)) { - fi->type = ARMFault_UnsuppAtomicUpdate; - return 0; + /* Page table in MMIO Memory Region */ + CPUState *cs = env_cpu(env); + MemTxAttrs attrs = { + .space = ptw->out_space, + .secure = arm_space_is_secure(ptw->out_space), + }; + AddressSpace *as = arm_addressspace(cs, attrs); + MemTxResult result = MEMTX_OK; + bool need_lock = !bql_locked(); + + if (need_lock) { + bql_lock(); + } + if (ptw->out_be) { + cur_val = address_space_ldq_be(as, ptw->out_phys, attrs, &result); + if (unlikely(result != MEMTX_OK)) { + fi->type = ARMFault_SyncExternalOnWalk; + fi->ea = arm_extabort_type(result); + if (need_lock) { + bql_unlock(); + } + return old_val; + } + if (cur_val == old_val) { + address_space_stq_be(as, ptw->out_phys, new_val, attrs, &result); + if (unlikely(result != MEMTX_OK)) { + fi->type = ARMFault_SyncExternalOnWalk; + fi->ea = arm_extabort_type(result); + if (need_lock) { + bql_unlock(); + } + return old_val; + } + cur_val = new_val; + } + } else { + cur_val = address_space_ldq_le(as, ptw->out_phys, attrs, &result); + if (unlikely(result != MEMTX_OK)) { + fi->type = ARMFault_SyncExternalOnWalk; + fi->ea = arm_extabort_type(result); + if (need_lock) { + bql_unlock(); + } + return old_val; + } + if (cur_val == old_val) { + address_space_stq_le(as, ptw->out_phys, new_val, attrs, &result); + if (unlikely(result != MEMTX_OK)) { + fi->type = ARMFault_SyncExternalOnWalk; + fi->ea = arm_extabort_type(result); + if (need_lock) { + bql_unlock(); + } + return old_val; + } + cur_val = new_val; + } + } + if (need_lock) { + bql_unlock(); + } + return cur_val; } /* @@ -772,9 +840,9 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, #if !TCG_OVERSIZED_GUEST # error "Unexpected configuration" #endif - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); if (!locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } if (ptw->out_be) { cur_val = ldq_be_p(host); @@ -788,7 +856,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, } } if (!locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif @@ -1581,6 +1649,12 @@ static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds, } } +static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw) +{ + uint64_t hcr = arm_hcr_el2_eff_secstate(env, 
ptw->in_space); + return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1); +} + /** * get_phys_addr_lpae: perform one stage of page table walk, LPAE format * @@ -1620,6 +1694,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, bool aarch64 = arm_el_is_aa64(env, el); uint64_t descriptor, new_descriptor; ARMSecuritySpace out_space; + bool device; /* TODO: This code does not support shareability levels. */ if (aarch64) { @@ -1989,6 +2064,21 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, xn = extract64(attrs, 54, 1); pxn = extract64(attrs, 53, 1); + if (el == 1 && nv_nv1_enabled(env, ptw)) { + /* + * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page + * descriptor bit 54 holds PXN, 53 is RES0, and the effective value + * of UXN is 0. Similarly for bits 59 and 60 in table descriptors + * (which we have already folded into bits 53 and 54 of attrs). + * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0. + * Similarly, APTable[0] from the table descriptor is treated as 0; + * we already folded this into AP[1] and squashing that to 0 does + * the right thing. + */ + pxn = xn; + xn = 0; + ap &= ~1; + } /* * Note that we modified ptw->in_space earlier for NSTable, but * result->f.attrs retains a copy of the original security space. @@ -2027,6 +2117,12 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, if (regime_is_stage2(mmu_idx)) { result->cacheattrs.is_s2_format = true; result->cacheattrs.attrs = extract32(attrs, 2, 4); + /* + * Security state does not really affect HCR_EL2.FWB; + * we only need to filter FWB for aa32 or other FEAT. + */ + device = S2_attrs_are_device(arm_hcr_el2_eff(env), + result->cacheattrs.attrs); } else { /* Index into MAIR registers for cache attributes */ uint8_t attrindx = extract32(attrs, 2, 3); @@ -2039,6 +2135,28 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) { result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */ } + device = S1_attrs_are_device(result->cacheattrs.attrs); + } + + /* + * Enable alignment checks on Device memory. + * + * Per R_XCHFJ, this check is mis-ordered. The correct ordering + * for alignment, permission, and stage 2 faults should be: + * - Alignment fault caused by the memory type + * - Permission fault + * - A stage 2 fault on the memory access + * but due to the way the TCG softmmu TLB operates, we will have + * implicitly done the permission check and the stage2 lookup in + * finding the TLB entry, so the alignment check cannot be done sooner. + * + * In v7, for a CPU without the Virtualization Extensions this + * access is UNPREDICTABLE; we choose to make it take the alignment + * fault as is required for a v7VE CPU. (QEMU doesn't emulate any + * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.) 
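For the stage-1 case the classification keys off the MAIR attribute byte: attrs[7:4] == 0 selects the Device memory types, and only mappings classified as Device get the extra TLB_CHECK_ALIGNED flag. A minimal standalone sketch of that decision, with helper names invented for illustration rather than taken from the patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: does a stage-1 MAIR attribute byte describe Device memory?
     * 0b0000dddd is treated as Device here; as the patch notes, this
     * slightly under-decodes the field. */
    static bool attr_is_device(uint8_t mair_attr)
    {
        return (mair_attr & 0xf0) == 0;
    }

    /* Sketch: would an access of 'size' bytes at 'addr' take an
     * alignment fault under this policy? */
    static bool device_alignment_fault(uint8_t mair_attr, uint64_t addr,
                                       unsigned size)
    {
        return attr_is_device(mair_attr) && (addr & (size - 1)) != 0;
    }

    int main(void)
    {
        /* Unaligned word load from Device-nGnRnE (attr 0x00) faults... */
        printf("%d\n", device_alignment_fault(0x00, 0x1002, 4));  /* 1 */
        /* ...while the same access to Normal WB memory (attr 0xff) does not. */
        printf("%d\n", device_alignment_fault(0xff, 0x1002, 4));  /* 0 */
        return 0;
    }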
+ */ + if (device) { + result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED; } /* diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h index eccb759da6b..3244e0740dd 100644 --- a/target/arm/syndrome.h +++ b/target/arm/syndrome.h @@ -89,6 +89,9 @@ typedef enum { #define ARM_EL_IL (1 << ARM_EL_IL_SHIFT) #define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT) +/* In the Data Abort syndrome */ +#define ARM_EL_VNCR (1 << 13) + static inline uint32_t syn_get_ec(uint32_t syn) { return syn >> ARM_EL_EC_SHIFT; @@ -264,13 +267,12 @@ static inline uint32_t syn_bxjtrap(int cv, int cond, int rm) (cv << 24) | (cond << 20) | rm; } -static inline uint32_t syn_gpc(int s2ptw, int ind, int gpcsc, +static inline uint32_t syn_gpc(int s2ptw, int ind, int gpcsc, int vncr, int cm, int s1ptw, int wnr, int fsc) { - /* TODO: FEAT_NV2 adds VNCR */ return (EC_GPC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (s2ptw << 21) - | (ind << 20) | (gpcsc << 14) | (cm << 8) | (s1ptw << 7) - | (wnr << 6) | fsc; + | (ind << 20) | (gpcsc << 14) | (vncr << 13) | (cm << 8) + | (s1ptw << 7) | (wnr << 6) | fsc; } static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc) @@ -303,6 +305,16 @@ static inline uint32_t syn_data_abort_with_iss(int same_el, | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc; } +/* + * Faults due to FEAT_NV2 VNCR_EL2-based accesses report as same-EL + * Data Aborts with the VNCR bit set. + */ +static inline uint32_t syn_data_abort_vncr(int ea, int wnr, int fsc) +{ + return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (1 << ARM_EL_EC_SHIFT) + | ARM_EL_IL | ARM_EL_VNCR | (wnr << 6) | fsc; +} + static inline uint32_t syn_swstep(int same_el, int isv, int ex) { return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c new file mode 100644 index 00000000000..c059c681e94 --- /dev/null +++ b/target/arm/tcg/cpu-v7m.c @@ -0,0 +1,290 @@ +/* + * QEMU ARMv7-M TCG-only CPUs. + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This code is licensed under the GNU GPL v2 or later. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/core/tcg-cpu-ops.h" +#include "internals.h" + +#if !defined(CONFIG_USER_ONLY) + +#include "hw/intc/armv7m_nvic.h" + +static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + CPUClass *cc = CPU_GET_CLASS(cs); + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + bool ret = false; + + /* + * ARMv7-M interrupt masking works differently than -A or -R. + * There is no FIQ/IRQ distinction. Instead of I and F bits + * masking FIQ and IRQ interrupts, an exception is taken only + * if it is higher priority than the current execution priority + * (which depends on state like BASEPRI, FAULTMASK and the + * currently active exception). + */ + if (interrupt_request & CPU_INTERRUPT_HARD + && (armv7m_nvic_can_take_pending_exception(env->nvic))) { + cs->exception_index = EXCP_IRQ; + cc->tcg_ops->do_interrupt(cs); + ret = true; + } + return ret; +} + +#endif /* !CONFIG_USER_ONLY */ + +static void cortex_m0_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + set_feature(&cpu->env, ARM_FEATURE_V6); + set_feature(&cpu->env, ARM_FEATURE_M); + + cpu->midr = 0x410cc200; + + /* + * These ID register values are not guest visible, because + * we do not implement the Main Extension. 
They must be set + * to values corresponding to the Cortex-M0's implemented + * features, because QEMU generally controls its emulation + * by looking at ID register fields. We use the same values as + * for the M3. + */ + cpu->isar.id_pfr0 = 0x00000030; + cpu->isar.id_pfr1 = 0x00000200; + cpu->isar.id_dfr0 = 0x00100000; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00000030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x00000000; + cpu->isar.id_mmfr3 = 0x00000000; + cpu->isar.id_isar0 = 0x01141110; + cpu->isar.id_isar1 = 0x02111000; + cpu->isar.id_isar2 = 0x21112231; + cpu->isar.id_isar3 = 0x01111110; + cpu->isar.id_isar4 = 0x01310102; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; +} + +static void cortex_m3_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_M); + set_feature(&cpu->env, ARM_FEATURE_M_MAIN); + cpu->midr = 0x410fc231; + cpu->pmsav7_dregion = 8; + cpu->isar.id_pfr0 = 0x00000030; + cpu->isar.id_pfr1 = 0x00000200; + cpu->isar.id_dfr0 = 0x00100000; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00000030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x00000000; + cpu->isar.id_mmfr3 = 0x00000000; + cpu->isar.id_isar0 = 0x01141110; + cpu->isar.id_isar1 = 0x02111000; + cpu->isar.id_isar2 = 0x21112231; + cpu->isar.id_isar3 = 0x01111110; + cpu->isar.id_isar4 = 0x01310102; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; +} + +static void cortex_m4_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_M); + set_feature(&cpu->env, ARM_FEATURE_M_MAIN); + set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); + cpu->midr = 0x410fc240; /* r0p0 */ + cpu->pmsav7_dregion = 8; + cpu->isar.mvfr0 = 0x10110021; + cpu->isar.mvfr1 = 0x11000011; + cpu->isar.mvfr2 = 0x00000000; + cpu->isar.id_pfr0 = 0x00000030; + cpu->isar.id_pfr1 = 0x00000200; + cpu->isar.id_dfr0 = 0x00100000; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00000030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x00000000; + cpu->isar.id_mmfr3 = 0x00000000; + cpu->isar.id_isar0 = 0x01141110; + cpu->isar.id_isar1 = 0x02111000; + cpu->isar.id_isar2 = 0x21112231; + cpu->isar.id_isar3 = 0x01111110; + cpu->isar.id_isar4 = 0x01310102; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; +} + +static void cortex_m7_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_M); + set_feature(&cpu->env, ARM_FEATURE_M_MAIN); + set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); + cpu->midr = 0x411fc272; /* r1p2 */ + cpu->pmsav7_dregion = 8; + cpu->isar.mvfr0 = 0x10110221; + cpu->isar.mvfr1 = 0x12000011; + cpu->isar.mvfr2 = 0x00000040; + cpu->isar.id_pfr0 = 0x00000030; + cpu->isar.id_pfr1 = 0x00000200; + cpu->isar.id_dfr0 = 0x00100000; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00100030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x01000000; + cpu->isar.id_mmfr3 = 0x00000000; + cpu->isar.id_isar0 = 0x01101110; + cpu->isar.id_isar1 = 0x02112000; + cpu->isar.id_isar2 = 0x20232231; + cpu->isar.id_isar3 = 0x01111131; + cpu->isar.id_isar4 = 0x01310132; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; +} + +static void cortex_m33_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, 
ARM_FEATURE_M); + set_feature(&cpu->env, ARM_FEATURE_M_MAIN); + set_feature(&cpu->env, ARM_FEATURE_M_SECURITY); + set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); + cpu->midr = 0x410fd213; /* r0p3 */ + cpu->pmsav7_dregion = 16; + cpu->sau_sregion = 8; + cpu->isar.mvfr0 = 0x10110021; + cpu->isar.mvfr1 = 0x11000011; + cpu->isar.mvfr2 = 0x00000040; + cpu->isar.id_pfr0 = 0x00000030; + cpu->isar.id_pfr1 = 0x00000210; + cpu->isar.id_dfr0 = 0x00200000; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00101F40; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x01000000; + cpu->isar.id_mmfr3 = 0x00000000; + cpu->isar.id_isar0 = 0x01101110; + cpu->isar.id_isar1 = 0x02212000; + cpu->isar.id_isar2 = 0x20232232; + cpu->isar.id_isar3 = 0x01111131; + cpu->isar.id_isar4 = 0x01310132; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; + cpu->clidr = 0x00000000; + cpu->ctr = 0x8000c000; +} + +static void cortex_m55_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_V8_1M); + set_feature(&cpu->env, ARM_FEATURE_M); + set_feature(&cpu->env, ARM_FEATURE_M_MAIN); + set_feature(&cpu->env, ARM_FEATURE_M_SECURITY); + set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); + cpu->midr = 0x410fd221; /* r0p1 */ + cpu->revidr = 0; + cpu->pmsav7_dregion = 16; + cpu->sau_sregion = 8; + /* These are the MVFR* values for the FPU + full MVE configuration */ + cpu->isar.mvfr0 = 0x10110221; + cpu->isar.mvfr1 = 0x12100211; + cpu->isar.mvfr2 = 0x00000040; + cpu->isar.id_pfr0 = 0x20000030; + cpu->isar.id_pfr1 = 0x00000230; + cpu->isar.id_dfr0 = 0x10200000; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00111040; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x01000000; + cpu->isar.id_mmfr3 = 0x00000011; + cpu->isar.id_isar0 = 0x01103110; + cpu->isar.id_isar1 = 0x02212000; + cpu->isar.id_isar2 = 0x20232232; + cpu->isar.id_isar3 = 0x01111131; + cpu->isar.id_isar4 = 0x01310132; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; + cpu->clidr = 0x00000000; /* caches not implemented */ + cpu->ctr = 0x8303c003; +} + +static const TCGCPUOps arm_v7m_tcg_ops = { + .initialize = arm_translate_init, + .synchronize_from_tb = arm_cpu_synchronize_from_tb, + .debug_excp_handler = arm_debug_excp_handler, + .restore_state_to_opc = arm_restore_state_to_opc, + +#ifdef CONFIG_USER_ONLY + .record_sigsegv = arm_cpu_record_sigsegv, + .record_sigbus = arm_cpu_record_sigbus, +#else + .tlb_fill = arm_cpu_tlb_fill, + .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt, + .do_interrupt = arm_v7m_cpu_do_interrupt, + .do_transaction_failed = arm_cpu_do_transaction_failed, + .do_unaligned_access = arm_cpu_do_unaligned_access, + .adjust_watchpoint_address = arm_adjust_watchpoint_address, + .debug_check_watchpoint = arm_debug_check_watchpoint, + .debug_check_breakpoint = arm_debug_check_breakpoint, +#endif /* !CONFIG_USER_ONLY */ +}; + +static void arm_v7m_class_init(ObjectClass *oc, void *data) +{ + ARMCPUClass *acc = ARM_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); + + acc->info = data; + cc->tcg_ops = &arm_v7m_tcg_ops; + cc->gdb_core_xml_file = "arm-m-profile.xml"; +} + +static const ARMCPUInfo arm_v7m_cpus[] = { + { .name = "cortex-m0", .initfn = cortex_m0_initfn, + .class_init = arm_v7m_class_init }, + { .name = "cortex-m3", .initfn = cortex_m3_initfn, + .class_init = arm_v7m_class_init }, + { .name = "cortex-m4", .initfn = cortex_m4_initfn, + .class_init = arm_v7m_class_init }, + { .name = "cortex-m7", .initfn = 
cortex_m7_initfn, + .class_init = arm_v7m_class_init }, + { .name = "cortex-m33", .initfn = cortex_m33_initfn, + .class_init = arm_v7m_class_init }, + { .name = "cortex-m55", .initfn = cortex_m55_initfn, + .class_init = arm_v7m_class_init }, +}; + +static void arm_v7m_cpu_register_types(void) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(arm_v7m_cpus); ++i) { + arm_cpu_register(&arm_v7m_cpus[i]); + } +} + +type_init(arm_v7m_cpu_register_types) diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c index d9e0e2a4ddf..de8f2be9416 100644 --- a/target/arm/tcg/cpu32.c +++ b/target/arm/tcg/cpu32.c @@ -17,9 +17,6 @@ #include "hw/boards.h" #endif #include "cpregs.h" -#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG) -#include "hw/intc/armv7m_nvic.h" -#endif /* Share AArch32 -cpu max features with AArch64. */ @@ -98,32 +95,6 @@ void aa32_max_features(ARMCPU *cpu) /* CPU models. These are not needed for the AArch64 linux-user build. */ #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) -#if !defined(CONFIG_USER_ONLY) -static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - CPUClass *cc = CPU_GET_CLASS(cs); - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - bool ret = false; - - /* - * ARMv7-M interrupt masking works differently than -A or -R. - * There is no FIQ/IRQ distinction. Instead of I and F bits - * masking FIQ and IRQ interrupts, an exception is taken only - * if it is higher priority than the current execution priority - * (which depends on state like BASEPRI, FAULTMASK and the - * currently active exception). - */ - if (interrupt_request & CPU_INTERRUPT_HARD - && (armv7m_nvic_can_take_pending_exception(env->nvic))) { - cs->exception_index = EXCP_IRQ; - cc->tcg_ops->do_interrupt(cs); - ret = true; - } - return ret; -} -#endif /* !CONFIG_USER_ONLY */ - static void arm926_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -571,195 +542,6 @@ static void cortex_a15_initfn(Object *obj) define_arm_cp_regs(cpu, cortexa15_cp_reginfo); } -static void cortex_m0_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - set_feature(&cpu->env, ARM_FEATURE_V6); - set_feature(&cpu->env, ARM_FEATURE_M); - - cpu->midr = 0x410cc200; - - /* - * These ID register values are not guest visible, because - * we do not implement the Main Extension. They must be set - * to values corresponding to the Cortex-M0's implemented - * features, because QEMU generally controls its emulation - * by looking at ID register fields. We use the same values as - * for the M3. 
- */ - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00000030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x00000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01141110; - cpu->isar.id_isar1 = 0x02111000; - cpu->isar.id_isar2 = 0x21112231; - cpu->isar.id_isar3 = 0x01111110; - cpu->isar.id_isar4 = 0x01310102; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; -} - -static void cortex_m3_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - set_feature(&cpu->env, ARM_FEATURE_V7); - set_feature(&cpu->env, ARM_FEATURE_M); - set_feature(&cpu->env, ARM_FEATURE_M_MAIN); - cpu->midr = 0x410fc231; - cpu->pmsav7_dregion = 8; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00000030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x00000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01141110; - cpu->isar.id_isar1 = 0x02111000; - cpu->isar.id_isar2 = 0x21112231; - cpu->isar.id_isar3 = 0x01111110; - cpu->isar.id_isar4 = 0x01310102; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; -} - -static void cortex_m4_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - set_feature(&cpu->env, ARM_FEATURE_V7); - set_feature(&cpu->env, ARM_FEATURE_M); - set_feature(&cpu->env, ARM_FEATURE_M_MAIN); - set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); - cpu->midr = 0x410fc240; /* r0p0 */ - cpu->pmsav7_dregion = 8; - cpu->isar.mvfr0 = 0x10110021; - cpu->isar.mvfr1 = 0x11000011; - cpu->isar.mvfr2 = 0x00000000; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00000030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x00000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01141110; - cpu->isar.id_isar1 = 0x02111000; - cpu->isar.id_isar2 = 0x21112231; - cpu->isar.id_isar3 = 0x01111110; - cpu->isar.id_isar4 = 0x01310102; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; -} - -static void cortex_m7_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - set_feature(&cpu->env, ARM_FEATURE_V7); - set_feature(&cpu->env, ARM_FEATURE_M); - set_feature(&cpu->env, ARM_FEATURE_M_MAIN); - set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); - cpu->midr = 0x411fc272; /* r1p2 */ - cpu->pmsav7_dregion = 8; - cpu->isar.mvfr0 = 0x10110221; - cpu->isar.mvfr1 = 0x12000011; - cpu->isar.mvfr2 = 0x00000040; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00100030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x01000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01101110; - cpu->isar.id_isar1 = 0x02112000; - cpu->isar.id_isar2 = 0x20232231; - cpu->isar.id_isar3 = 0x01111131; - cpu->isar.id_isar4 = 0x01310132; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; -} - -static void cortex_m33_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_M); - set_feature(&cpu->env, ARM_FEATURE_M_MAIN); - set_feature(&cpu->env, ARM_FEATURE_M_SECURITY); - set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); - cpu->midr = 0x410fd213; /* r0p3 */ - cpu->pmsav7_dregion = 16; - 
cpu->sau_sregion = 8; - cpu->isar.mvfr0 = 0x10110021; - cpu->isar.mvfr1 = 0x11000011; - cpu->isar.mvfr2 = 0x00000040; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000210; - cpu->isar.id_dfr0 = 0x00200000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00101F40; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x01000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01101110; - cpu->isar.id_isar1 = 0x02212000; - cpu->isar.id_isar2 = 0x20232232; - cpu->isar.id_isar3 = 0x01111131; - cpu->isar.id_isar4 = 0x01310132; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; - cpu->clidr = 0x00000000; - cpu->ctr = 0x8000c000; -} - -static void cortex_m55_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_V8_1M); - set_feature(&cpu->env, ARM_FEATURE_M); - set_feature(&cpu->env, ARM_FEATURE_M_MAIN); - set_feature(&cpu->env, ARM_FEATURE_M_SECURITY); - set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); - cpu->midr = 0x410fd221; /* r0p1 */ - cpu->revidr = 0; - cpu->pmsav7_dregion = 16; - cpu->sau_sregion = 8; - /* These are the MVFR* values for the FPU + full MVE configuration */ - cpu->isar.mvfr0 = 0x10110221; - cpu->isar.mvfr1 = 0x12100211; - cpu->isar.mvfr2 = 0x00000040; - cpu->isar.id_pfr0 = 0x20000030; - cpu->isar.id_pfr1 = 0x00000230; - cpu->isar.id_dfr0 = 0x10200000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00111040; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x01000000; - cpu->isar.id_mmfr3 = 0x00000011; - cpu->isar.id_isar0 = 0x01103110; - cpu->isar.id_isar1 = 0x02212000; - cpu->isar.id_isar2 = 0x20232232; - cpu->isar.id_isar3 = 0x01111131; - cpu->isar.id_isar4 = 0x01310132; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; - cpu->clidr = 0x00000000; /* caches not implemented */ - cpu->ctr = 0x8303c003; -} - static const ARMCPRegInfo cortexr5_cp_reginfo[] = { /* Dummy the TCM region regs for the moment */ { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, @@ -800,6 +582,111 @@ static void cortex_r5_initfn(Object *obj) define_arm_cp_regs(cpu, cortexr5_cp_reginfo); } +static const ARMCPRegInfo cortex_r52_cp_reginfo[] = { + { .name = "CPUACTLR", .cp = 15, .opc1 = 0, .crm = 15, + .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, + { .name = "IMP_ATCMREGIONR", + .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_BTCMREGIONR", + .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_CTCMREGIONR", + .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 2, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_CSCTLR", + .cp = 15, .opc1 = 1, .crn = 9, .crm = 1, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_BPCTLR", + .cp = 15, .opc1 = 1, .crn = 9, .crm = 1, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_MEMPROTCLR", + .cp = 15, .opc1 = 1, .crn = 9, .crm = 1, .opc2 = 2, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_SLAVEPCTLR", + .cp = 15, .opc1 = 0, .crn = 11, .crm = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_PERIPHREGIONR", + .cp = 15, .opc1 = 0, .crn = 15, .crm = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { 
.name = "IMP_FLASHIFREGIONR", + .cp = 15, .opc1 = 0, .crn = 15, .crm = 0, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_BUILDOPTR", + .cp = 15, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_PINOPTR", + .cp = 15, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_QOSR", + .cp = 15, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_BUSTIMEOUTR", + .cp = 15, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 2, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_INTMONR", + .cp = 15, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 4, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_ICERR0", + .cp = 15, .opc1 = 2, .crn = 15, .crm = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_ICERR1", + .cp = 15, .opc1 = 2, .crn = 15, .crm = 0, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_DCERR0", + .cp = 15, .opc1 = 2, .crn = 15, .crm = 1, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_DCERR1", + .cp = 15, .opc1 = 2, .crn = 15, .crm = 1, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_TCMERR0", + .cp = 15, .opc1 = 2, .crn = 15, .crm = 2, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_TCMERR1", + .cp = 15, .opc1 = 2, .crn = 15, .crm = 2, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_TCMSYNDR0", + .cp = 15, .opc1 = 2, .crn = 15, .crm = 2, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_TCMSYNDR1", + .cp = 15, .opc1 = 2, .crn = 15, .crm = 2, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_FLASHERR0", + .cp = 15, .opc1 = 2, .crn = 15, .crm = 3, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_FLASHERR1", + .cp = 15, .opc1 = 2, .crn = 15, .crm = 3, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_CDBGDR0", + .cp = 15, .opc1 = 3, .crn = 15, .crm = 0, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_CBDGBR1", + .cp = 15, .opc1 = 3, .crn = 15, .crm = 0, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_TESTR0", + .cp = 15, .opc1 = 4, .crn = 15, .crm = 0, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "IMP_TESTR1", + .cp = 15, .opc1 = 4, .crn = 15, .crm = 0, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 0 }, + { .name = "IMP_CDBGDCI", + .cp = 15, .opc1 = 0, .crn = 15, .crm = 15, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 0 }, + { .name = "IMP_CDBGDCT", + .cp = 15, .opc1 = 3, .crn = 15, .crm = 2, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 0 }, + { .name = "IMP_CDBGICT", + .cp = 15, .opc1 = 3, .crn = 15, .crm = 2, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 0 }, + { .name = "IMP_CDBGDCD", + .cp = 15, .opc1 = 3, .crn = 15, .crm = 4, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 0 }, + { .name = "IMP_CDBGICD", + .cp = 15, .opc1 = 3, .crn = 15, .crm = 4, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 
0 }, +}; + + static void cortex_r52_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -809,6 +696,8 @@ static void cortex_r52_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_PMSA); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_AUXCR); cpu->midr = 0x411fd133; /* r1p3 */ cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034023; @@ -839,6 +728,8 @@ static void cortex_r52_initfn(Object *obj) cpu->pmsav7_dregion = 16; cpu->pmsav8r_hdregion = 16; + + define_arm_cp_regs(cpu, cortex_r52_cp_reginfo); } static void cortex_r5f_initfn(Object *obj) @@ -1018,37 +909,6 @@ static void pxa270c5_initfn(Object *obj) cpu->reset_sctlr = 0x00000078; } -static const struct TCGCPUOps arm_v7m_tcg_ops = { - .initialize = arm_translate_init, - .synchronize_from_tb = arm_cpu_synchronize_from_tb, - .debug_excp_handler = arm_debug_excp_handler, - .restore_state_to_opc = arm_restore_state_to_opc, - -#ifdef CONFIG_USER_ONLY - .record_sigsegv = arm_cpu_record_sigsegv, - .record_sigbus = arm_cpu_record_sigbus, -#else - .tlb_fill = arm_cpu_tlb_fill, - .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt, - .do_interrupt = arm_v7m_cpu_do_interrupt, - .do_transaction_failed = arm_cpu_do_transaction_failed, - .do_unaligned_access = arm_cpu_do_unaligned_access, - .adjust_watchpoint_address = arm_adjust_watchpoint_address, - .debug_check_watchpoint = arm_debug_check_watchpoint, - .debug_check_breakpoint = arm_debug_check_breakpoint, -#endif /* !CONFIG_USER_ONLY */ -}; - -static void arm_v7m_class_init(ObjectClass *oc, void *data) -{ - ARMCPUClass *acc = ARM_CPU_CLASS(oc); - CPUClass *cc = CPU_CLASS(oc); - - acc->info = data; - cc->tcg_ops = &arm_v7m_tcg_ops; - cc->gdb_core_xml_file = "arm-m-profile.xml"; -} - #ifndef TARGET_AARCH64 /* * -cpu max: a CPU with as many features enabled as our emulation supports. @@ -1131,18 +991,6 @@ static const ARMCPUInfo arm_tcg_cpus[] = { { .name = "cortex-a8", .initfn = cortex_a8_initfn }, { .name = "cortex-a9", .initfn = cortex_a9_initfn }, { .name = "cortex-a15", .initfn = cortex_a15_initfn }, - { .name = "cortex-m0", .initfn = cortex_m0_initfn, - .class_init = arm_v7m_class_init }, - { .name = "cortex-m3", .initfn = cortex_m3_initfn, - .class_init = arm_v7m_class_init }, - { .name = "cortex-m4", .initfn = cortex_m4_initfn, - .class_init = arm_v7m_class_init }, - { .name = "cortex-m7", .initfn = cortex_m7_initfn, - .class_init = arm_v7m_class_init }, - { .name = "cortex-m33", .initfn = cortex_m33_initfn, - .class_init = arm_v7m_class_init }, - { .name = "cortex-m55", .initfn = cortex_m55_initfn, - .class_init = arm_v7m_class_init }, { .name = "cortex-r5", .initfn = cortex_r5_initfn }, { .name = "cortex-r5f", .initfn = cortex_r5f_initfn }, { .name = "cortex-r52", .initfn = cortex_r52_initfn }, diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c index f5f50293132..dbf4163e955 100644 --- a/target/arm/tcg/cpu64.c +++ b/target/arm/tcg/cpu64.c @@ -1112,6 +1112,16 @@ void aarch64_max_tcg_initfn(Object *obj) u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0); cpu->clidr = u; + /* + * Set CTR_EL0.DIC and IDC to tell the guest it doesnt' need to + * do any cache maintenance for data-to-instruction or + * instruction-to-guest coherence. (Our cache ops are nops.) 
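The hunk that follows flips the two cache-coherence bits in CTR_EL0 with QEMU's FIELD_DP64(). As a rough freestanding illustration of that kind of read-modify-write (the helper name and the starting value below are invented for the sketch; CTR_EL0.IDC and CTR_EL0.DIC are bits 28 and 29):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of a bitfield deposit: replace 'len' bits at 'start' in 'reg'
     * with 'val', leaving the other bits untouched. */
    static uint64_t deposit64_sketch(uint64_t reg, unsigned start,
                                     unsigned len, uint64_t val)
    {
        uint64_t mask = ((len < 64 ? (1ULL << len) : 0) - 1) << start;
        return (reg & ~mask) | ((val << start) & mask);
    }

    int main(void)
    {
        uint64_t ctr = 0x8444c004;              /* an arbitrary example value */
        ctr = deposit64_sketch(ctr, 28, 1, 1);  /* IDC = 1 */
        ctr = deposit64_sketch(ctr, 29, 1, 1);  /* DIC = 1 */
        printf("CTR_EL0 = 0x%llx\n", (unsigned long long)ctr);
        return 0;
    }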
+ */ + t = cpu->ctr; + t = FIELD_DP64(t, CTR_EL0, IDC, 1); + t = FIELD_DP64(t, CTR_EL0, DIC, 1); + cpu->ctr = t; + t = cpu->isar.id_aa64isar0; t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */ t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */ @@ -1181,6 +1191,7 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN64_2, 2); /* 64k stage2 supported */ t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2); /* 4k stage2 supported */ t = FIELD_DP64(t, ID_AA64MMFR0, FGT, 1); /* FEAT_FGT */ + t = FIELD_DP64(t, ID_AA64MMFR0, ECV, 2); /* FEAT_ECV */ cpu->isar.id_aa64mmfr0 = t; t = cpu->isar.id_aa64mmfr1; @@ -1201,6 +1212,7 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */ t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */ t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */ + t = FIELD_DP64(t, ID_AA64MMFR2, NV, 2); /* FEAT_NV2 */ t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */ t = FIELD_DP64(t, ID_AA64MMFR2, AT, 1); /* FEAT_LSE2 */ t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1); /* FEAT_IDST */ diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index 8ad84623d37..ebaa7f00df3 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -809,9 +809,9 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) goto illegal_return; } - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_pre_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (!return_to_aa64) { env->aarch64 = false; @@ -856,7 +856,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) tbii = EX_TBFLAG_A64(env->hflags, TBII); if ((tbii >> extract64(new_pc, 55, 1)) & 1) { /* TBI is enabled. */ - int core_mmu_idx = cpu_mmu_index(env, false); + int core_mmu_idx = arm_env_mmu_index(env); if (regime_has_2_ranges(core_to_aa64_mmu_idx(core_mmu_idx))) { new_pc = sextract64(new_pc, 0, 56); } else { @@ -876,9 +876,9 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) */ aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64); - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); return; @@ -925,7 +925,7 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) */ int blocklen = 4 << env_archcpu(env)->dcz_blocksize; uint64_t vaddr = vaddr_in & ~(blocklen - 1); - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = arm_env_mmu_index(env); void *mem; /* diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c index a6ebd7571a3..5da1b0fc1d4 100644 --- a/target/arm/tcg/hflags.c +++ b/target/arm/tcg/hflags.c @@ -26,6 +26,35 @@ static inline bool fgt_svc(CPUARMState *env, int el) FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1); } +/* Return true if memory alignment should be enforced. */ +static bool aprofile_require_alignment(CPUARMState *env, int el, uint64_t sctlr) +{ +#ifdef CONFIG_USER_ONLY + return false; +#else + /* Check the alignment enable bit. */ + if (sctlr & SCTLR_A) { + return true; + } + + /* + * If translation is disabled, then the default memory type is + * Device(-nGnRnE) instead of Normal, which requires that alignment + * be enforced. Since this affects all ram, it is most efficient + * to handle this during translation. + */ + if (sctlr & SCTLR_M) { + /* Translation enabled: memory type in PTE via MAIR_ELx. 
*/ + return false; + } + if (el < 2 && (arm_hcr_el2_eff(env) & (HCR_DC | HCR_VM))) { + /* Stage 2 translation enabled: memory type in PTE. */ + return false; + } + return true; +#endif +} + static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el, ARMMMUIdx mmu_idx, CPUARMTBFlags flags) @@ -121,8 +150,9 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, { CPUARMTBFlags flags = {}; int el = arm_current_el(env); + uint64_t sctlr = arm_sctlr(env, el); - if (arm_sctlr(env, el) & SCTLR_A) { + if (aprofile_require_alignment(env, el, sctlr)) { DP_TBFLAG_ANY(flags, ALIGN_MEM, 1); } @@ -169,6 +199,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, CPUARMTBFlags flags = {}; ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); uint64_t tcr = regime_tcr(env, mmu_idx); + uint64_t hcr = arm_hcr_el2_eff(env); uint64_t sctlr; int tbii, tbid; @@ -222,7 +253,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, sctlr = regime_sctlr(env, stage1); - if (sctlr & SCTLR_A) { + if (aprofile_require_alignment(env, el, sctlr)) { DP_TBFLAG_ANY(flags, ALIGN_MEM, 1); } @@ -260,8 +291,10 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, switch (mmu_idx) { case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: - /* TODO: ARMv8.3-NV */ - DP_TBFLAG_A64(flags, UNPRIV, 1); + /* FEAT_NV: NV,NV1 == 1,1 means we don't do UNPRIV accesses */ + if ((hcr & (HCR_NV | HCR_NV1)) != (HCR_NV | HCR_NV1)) { + DP_TBFLAG_A64(flags, UNPRIV, 1); + } break; case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: @@ -285,13 +318,34 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, if (arm_fgt_active(env, el)) { DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1); if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) { - DP_TBFLAG_A64(flags, FGT_ERET, 1); + DP_TBFLAG_A64(flags, TRAP_ERET, 1); } if (fgt_svc(env, el)) { DP_TBFLAG_ANY(flags, FGT_SVC, 1); } } + /* + * ERET can also be trapped for FEAT_NV. arm_hcr_el2_eff() takes care + * of "is EL2 enabled" and the NV bit can only be set if FEAT_NV is present. 
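Condensed, the rule added below is: for an EL1 regime with EL2 enabled, HCR_EL2.NV set means ERET traps, and NV1/NV2 select the further behaviours. A standalone sketch of that derivation; the struct and helper are invented for illustration, and the bit positions used are the architectural HCR_EL2.NV (42), NV1 (43) and NV2 (45) fields:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NV_BIT  (1ULL << 42)
    #define NV1_BIT (1ULL << 43)
    #define NV2_BIT (1ULL << 45)

    struct nv_flags {
        bool trap_eret;  /* ERET from EL1 must trap to EL2 */
        bool nv1;        /* NV1 variant: no unprivileged-access regime etc. */
        bool nv2;        /* register accesses redirected to memory */
    };

    /* Sketch: derive the nested-virt related flags for an EL1 regime. */
    static struct nv_flags derive_nv_flags(int el, uint64_t hcr_el2_eff)
    {
        struct nv_flags f = {0};

        if (el == 1 && (hcr_el2_eff & NV_BIT)) {
            f.trap_eret = true;
            f.nv1 = (hcr_el2_eff & NV1_BIT) != 0;
            f.nv2 = (hcr_el2_eff & NV2_BIT) != 0;
        }
        return f;
    }

    int main(void)
    {
        struct nv_flags f = derive_nv_flags(1, NV_BIT | NV2_BIT);
        printf("trap_eret=%d nv1=%d nv2=%d\n", f.trap_eret, f.nv1, f.nv2);
        return 0;
    }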
+ */ + if (el == 1 && (hcr & HCR_NV)) { + DP_TBFLAG_A64(flags, TRAP_ERET, 1); + DP_TBFLAG_A64(flags, NV, 1); + if (hcr & HCR_NV1) { + DP_TBFLAG_A64(flags, NV1, 1); + } + if (hcr & HCR_NV2) { + DP_TBFLAG_A64(flags, NV2, 1); + if (hcr & HCR_E2H) { + DP_TBFLAG_A64(flags, NV2_MEM_E20, 1); + } + if (env->cp15.sctlr_el[2] & SCTLR_EE) { + DP_TBFLAG_A64(flags, NV2_MEM_BE, 1); + } + } + } + if (cpu_isar_feature(aa64_mte, env_archcpu(env))) { /* * Set MTE_ACTIVE if any access may be Checked, and leave clear diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c index a26adb75aa2..d1f1e02acc1 100644 --- a/target/arm/tcg/m_helper.c +++ b/target/arm/tcg/m_helper.c @@ -373,8 +373,8 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env) bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK); bool take_exception; - /* Take the iothread lock as we are going to touch the NVIC */ - qemu_mutex_lock_iothread(); + /* Take the BQL as we are going to touch the NVIC */ + bql_lock(); /* Check the background context had access to the FPU */ if (!v7m_cpacr_pass(env, is_secure, is_priv)) { @@ -428,7 +428,7 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env) take_exception = !stacked_ok && armv7m_nvic_can_take_pending_exception(env->nvic); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (take_exception) { raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC()); diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build index 6fca38f2ccb..3b1a9f0fc5e 100644 --- a/target/arm/tcg/meson.build +++ b/target/arm/tcg/meson.build @@ -55,3 +55,6 @@ arm_ss.add(when: 'TARGET_AARCH64', if_true: files( arm_system_ss.add(files( 'psci.c', )) + +arm_system_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('cpu-v7m.c')) +arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files('cpu-v7m.c')) diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c index ffb8ea1c349..d971b813701 100644 --- a/target/arm/tcg/mte_helper.c +++ b/target/arm/tcg/mte_helper.c @@ -291,7 +291,7 @@ static int load_tag1(uint64_t ptr, uint8_t *mem) uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt) { - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = arm_env_mmu_index(env); uint8_t *mem; int rtag = 0; @@ -311,7 +311,7 @@ static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra) { if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) { arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE, - cpu_mmu_index(env, false), ra); + arm_env_mmu_index(env), ra); g_assert_not_reached(); } } @@ -344,7 +344,7 @@ typedef void stg_store1(uint64_t, uint8_t *, int); static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt, uintptr_t ra, stg_store1 store1) { - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = arm_env_mmu_index(env); uint8_t *mem; check_tag_aligned(env, ptr, ra); @@ -371,7 +371,7 @@ void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt) void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr) { - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = arm_env_mmu_index(env); uintptr_t ra = GETPC(); check_tag_aligned(env, ptr, ra); @@ -381,7 +381,7 @@ void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr) static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt, uintptr_t ra, stg_store1 store1) { - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = arm_env_mmu_index(env); int tag = allocation_tag_from_addr(xt); uint8_t *mem1, *mem2; @@ -429,7 +429,7 @@ void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt) 
void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr) { - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = arm_env_mmu_index(env); uintptr_t ra = GETPC(); int in_page = -(ptr | TARGET_PAGE_MASK); @@ -445,7 +445,7 @@ void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr) uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr) { - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = arm_env_mmu_index(env); uintptr_t ra = GETPC(); int gm_bs = env_archcpu(env)->gm_blocksize; int gm_bs_bytes = 4 << gm_bs; @@ -505,7 +505,7 @@ uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr) void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val) { - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = arm_env_mmu_index(env); uintptr_t ra = GETPC(); int gm_bs = env_archcpu(env)->gm_blocksize; int gm_bs_bytes = 4 << gm_bs; @@ -555,7 +555,7 @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val) void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val) { uintptr_t ra = GETPC(); - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = arm_env_mmu_index(env); int log2_dcz_bytes, log2_tag_bytes; intptr_t dcz_bytes, tag_bytes; uint8_t *mem; diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c index ea08936a852..c199b69fbff 100644 --- a/target/arm/tcg/op_helper.c +++ b/target/arm/tcg/op_helper.c @@ -121,6 +121,61 @@ void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue) } } +/* Sign/zero extend */ +uint32_t HELPER(sxtb16)(uint32_t x) +{ + uint32_t res; + res = (uint16_t)(int8_t)x; + res |= (uint32_t)(int8_t)(x >> 16) << 16; + return res; +} + +static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra) +{ + /* + * Take a division-by-zero exception if necessary; otherwise return + * to get the usual non-trapping division behaviour (result of 0) + */ + if (arm_feature(env, ARM_FEATURE_M) + && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) { + raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra); + } +} + +uint32_t HELPER(uxtb16)(uint32_t x) +{ + uint32_t res; + res = (uint16_t)(uint8_t)x; + res |= (uint32_t)(uint8_t)(x >> 16) << 16; + return res; +} + +int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den) +{ + if (den == 0) { + handle_possible_div0_trap(env, GETPC()); + return 0; + } + if (num == INT_MIN && den == -1) { + return INT_MIN; + } + return num / den; +} + +uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den) +{ + if (den == 0) { + handle_possible_div0_trap(env, GETPC()); + return 0; + } + return num / den; +} + +uint32_t HELPER(rbit)(uint32_t x) +{ + return revbit32(x); +} + uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a + b; @@ -427,9 +482,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) { uint32_t mask; - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_pre_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar); cpsr_write(env, val, mask, CPSRWriteExceptionReturn); @@ -442,9 +497,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) env->regs[15] &= (env->thumb ? ~1 : ~3); arm_rebuild_hflags(env); - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* Access to user mode registers from privileged modes. 
*/ @@ -515,10 +570,24 @@ static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode, */ int curmode = env->uncached_cpsr & CPSR_M; - if (regno == 17) { - /* ELR_Hyp: a special case because access from tgtmode is OK */ - if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) { - goto undef; + if (tgtmode == ARM_CPU_MODE_HYP) { + /* + * Handle Hyp target regs first because some are special cases + * which don't want the usual "not accessible from tgtmode" check. + */ + switch (regno) { + case 16 ... 17: /* ELR_Hyp, SPSR_Hyp */ + if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) { + goto undef; + } + break; + case 13: + if (curmode != ARM_CPU_MODE_MON) { + goto undef; + } + break; + default: + g_assert_not_reached(); } return; } @@ -549,13 +618,6 @@ static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode, } } - if (tgtmode == ARM_CPU_MODE_HYP) { - /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */ - if (curmode != ARM_CPU_MODE_MON) { - goto undef; - } - } - return; undef: @@ -570,7 +632,12 @@ void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode, switch (regno) { case 16: /* SPSRs */ - env->banked_spsr[bank_number(tgtmode)] = value; + if (tgtmode == (env->uncached_cpsr & CPSR_M)) { + /* Only happens for SPSR_Hyp access in Hyp mode */ + env->spsr = value; + } else { + env->banked_spsr[bank_number(tgtmode)] = value; + } break; case 17: /* ELR_Hyp */ env->elr_el[2] = value; @@ -604,7 +671,12 @@ uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno) switch (regno) { case 16: /* SPSRs */ - return env->banked_spsr[bank_number(tgtmode)]; + if (tgtmode == (env->uncached_cpsr & CPSR_M)) { + /* Only happens for SPSR_Hyp access in Hyp mode */ + return env->spsr; + } else { + return env->banked_spsr[bank_number(tgtmode)]; + } case 17: /* ELR_Hyp */ return env->elr_el[2]; case 13: @@ -803,9 +875,9 @@ void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value) const ARMCPRegInfo *ri = rip; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); ri->writefn(env, ri, value); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { ri->writefn(env, ri, value); } @@ -817,9 +889,9 @@ uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip) uint32_t res; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); res = ri->readfn(env, ri); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { res = ri->readfn(env, ri); } @@ -832,9 +904,9 @@ void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value) const ARMCPRegInfo *ri = rip; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); ri->writefn(env, ri, value); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { ri->writefn(env, ri, value); } @@ -846,9 +918,9 @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip) uint64_t res; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); res = ri->readfn(env, ri); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { res = ri->readfn(env, ri); } @@ -930,7 +1002,14 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) * * Conduit SMC, valid call Trap to EL2 PSCI Call * Conduit SMC, inval call Trap to EL2 Undef insn - * Conduit not SMC Undef insn Undef insn + * Conduit not SMC Undef or trap[1] Undef insn + * + * [1] In this case: + * - if HCR_EL2.NV == 1 we must trap to EL2 + * - if HCR_EL2.NV == 0 then newer architecture revisions permit + * AArch64 (but not AArch32) to trap to EL2 as 
an IMPDEF choice + * - otherwise we must UNDEF + * We take the IMPDEF choice to always UNDEF if HCR_EL2.NV == 0. */ /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state. @@ -944,9 +1023,12 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) : smd_flag && !secure; if (!arm_feature(env, ARM_FEATURE_EL3) && + !(arm_hcr_el2_eff(env) & HCR_NV) && cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { - /* If we have no EL3 then SMC always UNDEFs and can't be - * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3 + /* + * If we have no EL3 then traditionally SMC always UNDEFs and can't be + * trapped to EL2. For nested virtualization, SMC can be trapped to + * the outer hypervisor. PSCI-via-SMC is a sort of ersatz EL3 * firmware within QEMU, and we want an EL2 guest to be able * to forbid its EL1 from making PSCI calls into QEMU's * "firmware" via HCR.TSC, so for these purposes treat diff --git a/target/arm/tcg/psci.c b/target/arm/tcg/psci.c index 6c1239bb968..51d2ca3d30d 100644 --- a/target/arm/tcg/psci.c +++ b/target/arm/tcg/psci.c @@ -24,6 +24,7 @@ #include "sysemu/runstate.h" #include "internals.h" #include "arm-powerctl.h" +#include "target/arm/multiprocessing.h" bool arm_is_psci_call(ARMCPU *cpu, int excp_type) { @@ -107,7 +108,7 @@ void arm_handle_psci_call(ARMCPU *cpu) } target_cpu = ARM_CPU(target_cpu_state); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); ret = target_cpu->power_state; break; default: @@ -215,7 +216,7 @@ void arm_handle_psci_call(ARMCPU *cpu) return; cpu_off: - ret = arm_set_cpu_off(cpu->mp_affinity); + ret = arm_set_cpu_off(arm_cpu_mp_affinity(cpu)); /* notreached */ /* sanity check in case something failed */ assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS); diff --git a/target/arm/tcg/sme_helper.c b/target/arm/tcg/sme_helper.c index 904bfdac43e..e2e05750399 100644 --- a/target/arm/tcg/sme_helper.c +++ b/target/arm/tcg/sme_helper.c @@ -1083,11 +1083,32 @@ void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn, } } -typedef uint64_t IMOPFn(uint64_t, uint64_t, uint64_t, uint8_t, bool); +typedef uint32_t IMOPFn32(uint32_t, uint32_t, uint32_t, uint8_t, bool); +static inline void do_imopa_s(uint32_t *za, uint32_t *zn, uint32_t *zm, + uint8_t *pn, uint8_t *pm, + uint32_t desc, IMOPFn32 *fn) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 4; + bool neg = simd_data(desc); + + for (row = 0; row < oprsz; ++row) { + uint8_t pa = (pn[H1(row >> 1)] >> ((row & 1) * 4)) & 0xf; + uint32_t *za_row = &za[tile_vslice_index(row)]; + uint32_t n = zn[H4(row)]; -static inline void do_imopa(uint64_t *za, uint64_t *zn, uint64_t *zm, - uint8_t *pn, uint8_t *pm, - uint32_t desc, IMOPFn *fn) + for (col = 0; col < oprsz; ++col) { + uint8_t pb = pm[H1(col >> 1)] >> ((col & 1) * 4); + uint32_t *a = &za_row[H4(col)]; + + *a = fn(n, zm[H4(col)], *a, pa & pb, neg); + } + } +} + +typedef uint64_t IMOPFn64(uint64_t, uint64_t, uint64_t, uint8_t, bool); +static inline void do_imopa_d(uint64_t *za, uint64_t *zn, uint64_t *zm, + uint8_t *pn, uint8_t *pm, + uint32_t desc, IMOPFn64 *fn) { intptr_t row, col, oprsz = simd_oprsz(desc) / 8; bool neg = simd_data(desc); @@ -1107,25 +1128,16 @@ static inline void do_imopa(uint64_t *za, uint64_t *zn, uint64_t *zm, } #define DEF_IMOP_32(NAME, NTYPE, MTYPE) \ -static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \ +static uint32_t NAME(uint32_t n, uint32_t m, uint32_t a, uint8_t p, bool neg) \ { \ - uint32_t sum0 = 0, sum1 = 0; \ + uint32_t sum = 0; \ /* Apply P to N as a mask, making the inactive 
elements 0. */ \ n &= expand_pred_b(p); \ - sum0 += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \ - sum0 += (NTYPE)(n >> 8) * (MTYPE)(m >> 8); \ - sum0 += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \ - sum0 += (NTYPE)(n >> 24) * (MTYPE)(m >> 24); \ - sum1 += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \ - sum1 += (NTYPE)(n >> 40) * (MTYPE)(m >> 40); \ - sum1 += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \ - sum1 += (NTYPE)(n >> 56) * (MTYPE)(m >> 56); \ - if (neg) { \ - sum0 = (uint32_t)a - sum0, sum1 = (uint32_t)(a >> 32) - sum1; \ - } else { \ - sum0 = (uint32_t)a + sum0, sum1 = (uint32_t)(a >> 32) + sum1; \ - } \ - return ((uint64_t)sum1 << 32) | sum0; \ + sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \ + sum += (NTYPE)(n >> 8) * (MTYPE)(m >> 8); \ + sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \ + sum += (NTYPE)(n >> 24) * (MTYPE)(m >> 24); \ + return neg ? a - sum : a + sum; \ } #define DEF_IMOP_64(NAME, NTYPE, MTYPE) \ @@ -1151,16 +1163,17 @@ DEF_IMOP_64(umopa_d, uint16_t, uint16_t) DEF_IMOP_64(sumopa_d, int16_t, uint16_t) DEF_IMOP_64(usmopa_d, uint16_t, int16_t) -#define DEF_IMOPH(NAME) \ - void HELPER(sme_##NAME)(void *vza, void *vzn, void *vzm, void *vpn, \ - void *vpm, uint32_t desc) \ - { do_imopa(vza, vzn, vzm, vpn, vpm, desc, NAME); } - -DEF_IMOPH(smopa_s) -DEF_IMOPH(umopa_s) -DEF_IMOPH(sumopa_s) -DEF_IMOPH(usmopa_s) -DEF_IMOPH(smopa_d) -DEF_IMOPH(umopa_d) -DEF_IMOPH(sumopa_d) -DEF_IMOPH(usmopa_d) +#define DEF_IMOPH(NAME, S) \ + void HELPER(sme_##NAME##_##S)(void *vza, void *vzn, void *vzm, \ + void *vpn, void *vpm, uint32_t desc) \ + { do_imopa_##S(vza, vzn, vzm, vpn, vpm, desc, NAME##_##S); } + +DEF_IMOPH(smopa, s) +DEF_IMOPH(umopa, s) +DEF_IMOPH(sumopa, s) +DEF_IMOPH(usmopa, s) + +DEF_IMOPH(smopa, d) +DEF_IMOPH(umopa, d) +DEF_IMOPH(sumopa, d) +DEF_IMOPH(usmopa, d) diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 5699dfe6674..6853f58c194 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -5481,7 +5481,7 @@ bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault, CPUARMState *env, target_ulong addr, MMUAccessType access_type, uintptr_t retaddr) { - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = arm_env_mmu_index(env); int mem_off = info->mem_off_first[0]; bool nofault = fault == FAULT_NO; bool have_work = true; @@ -6529,7 +6529,7 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = arm_env_mmu_index(env); const intptr_t reg_max = simd_oprsz(desc); const int scale = simd_data(desc); ARMVectorReg scratch; @@ -6715,7 +6715,7 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = arm_env_mmu_index(env); const intptr_t reg_max = simd_oprsz(desc); const int scale = simd_data(desc); const int esize = 1 << esz; @@ -6920,7 +6920,7 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = arm_env_mmu_index(env); const intptr_t reg_max = simd_oprsz(desc); const int scale = simd_data(desc); void *host[ARM_MAX_VQ * 4]; diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c index 4fdd85359e1..885bf4ec142 100644 --- a/target/arm/tcg/tlb_helper.c +++ b/target/arm/tcg/tlb_helper.c @@ -50,7 +50,15 @@ static inline 
uint32_t merge_syn_data_abort(uint32_t template_syn, * ST64BV, or ST64BV0 insns report syndrome info even for stage-1 * faults and regardless of the target EL. */ - if (!(template_syn & ARM_EL_ISV) || target_el != 2 + if (template_syn & ARM_EL_VNCR) { + /* + * FEAT_NV2 faults on accesses via VNCR_EL2 are a special case: + * they are always reported as "same EL", even though we are going + * from EL1 to EL2. + */ + assert(!fi->stage2); + syn = syn_data_abort_vncr(fi->ea, is_write, fsc); + } else if (!(template_syn & ARM_EL_ISV) || target_el != 2 || fi->s1ptw || !fi->stage2) { syn = syn_data_abort_no_iss(same_el, 0, fi->ea, 0, fi->s1ptw, is_write, fsc); @@ -169,6 +177,20 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr, int current_el = arm_current_el(env); bool same_el; uint32_t syn, exc, fsr, fsc; + /* + * We know this must be a data or insn abort, and that + * env->exception.syndrome contains the template syndrome set + * up at translate time. So we can check only the VNCR bit + * (and indeed syndrome does not have the EC field in it, + * because we masked that out in disas_set_insn_syndrome()) + */ + bool is_vncr = (access_type != MMU_INST_FETCH) && + (env->exception.syndrome & ARM_EL_VNCR); + + if (is_vncr) { + /* FEAT_NV2 faults on accesses via VNCR_EL2 go to EL2 */ + target_el = 2; + } if (report_as_gpc_exception(cpu, current_el, fi)) { target_el = 3; @@ -177,7 +199,8 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr, syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk, access_type == MMU_INST_FETCH, - encode_gpcsc(fi), 0, fi->s1ptw, + encode_gpcsc(fi), is_vncr, + 0, fi->s1ptw, access_type == MMU_DATA_STORE, fsc); env->cp15.mfar_el3 = fi->paddr; @@ -258,7 +281,7 @@ void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc) { ARMMMUFaultInfo fi = { .type = ARMFault_Alignment }; int target_el = exception_target_el(env); - int mmu_idx = cpu_mmu_index(env, true); + int mmu_idx = arm_env_mmu_index(env); uint32_t fsc; env->exception.vaddress = pc; diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index f2d05c589c9..2666d527111 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "exec/exec-all.h" #include "translate.h" #include "translate-a64.h" #include "qemu/log.h" @@ -1605,7 +1606,7 @@ static bool trans_ERET(DisasContext *s, arg_ERET *a) if (s->current_el == 0) { return false; } - if (s->fgt_eret) { + if (s->trap_eret) { gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(0), 2); return true; } @@ -1632,7 +1633,7 @@ static bool trans_ERETA(DisasContext *s, arg_reta *a) return false; } /* The FGT trap takes precedence over an auth trap. */ - if (s->fgt_eret) { + if (s->trap_eret) { gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(a->m ? 3 : 2), 2); return true; } @@ -2131,16 +2132,19 @@ static void handle_sys(DisasContext *s, bool isread, crn, crm, op0, op1, op2); const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key); bool need_exit_tb = false; + bool nv_trap_to_el2 = false; + bool nv_redirect_reg = false; + bool skip_fp_access_checks = false; + bool nv2_mem_redirect = false; TCGv_ptr tcg_ri = NULL; TCGv_i64 tcg_rt; - uint32_t syndrome; + uint32_t syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); if (crn == 11 || crn == 15) { /* * Check for TIDCP trap, which must take precedence over * the UNDEF for "no such register" etc. 
*/ - syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); switch (s->current_el) { case 0: if (dc_isar_feature(aa64_tidcp1, s)) { @@ -2164,17 +2168,65 @@ static void handle_sys(DisasContext *s, bool isread, return; } + if (s->nv2 && ri->nv2_redirect_offset) { + /* + * Some registers always redirect to memory; some only do so if + * HCR_EL2.NV1 is 0, and some only if NV1 is 1 (these come in + * pairs which share an offset; see the table in R_CSRPQ). + */ + if (ri->nv2_redirect_offset & NV2_REDIR_NV1) { + nv2_mem_redirect = s->nv1; + } else if (ri->nv2_redirect_offset & NV2_REDIR_NO_NV1) { + nv2_mem_redirect = !s->nv1; + } else { + nv2_mem_redirect = true; + } + } + /* Check access permissions */ if (!cp_access_ok(s->current_el, ri, isread)) { - gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt); - return; + /* + * FEAT_NV/NV2 handling does not do the usual FP access checks + * for registers only accessible at EL2 (though it *does* do them + * for registers accessible at EL1). + */ + skip_fp_access_checks = true; + if (s->nv2 && (ri->type & ARM_CP_NV2_REDIRECT)) { + /* + * This is one of the few EL2 registers which should redirect + * to the equivalent EL1 register. We do that after running + * the EL2 register's accessfn. + */ + nv_redirect_reg = true; + assert(!nv2_mem_redirect); + } else if (nv2_mem_redirect) { + /* + * NV2 redirect-to-memory takes precedence over trap to EL2 or + * UNDEF to EL1. + */ + } else if (s->nv && arm_cpreg_traps_in_nv(ri)) { + /* + * This register / instruction exists and is an EL2 register, so + * we must trap to EL2 if accessed in nested virtualization EL1 + * instead of UNDEFing. We'll do that after the usual access checks. + * (This makes a difference only for a couple of registers like + * VSTTBR_EL2 where the "UNDEF if NonSecure" should take priority + * over the trap-to-EL2. Most trapped-by-FEAT_NV registers have + * an accessfn which does nothing when called from EL1, because + * the trap-to-EL3 controls which would apply to that register + * at EL2 don't take priority over the FEAT_NV trap-to-EL2.) + */ + nv_trap_to_el2 = true; + } else { + gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt); + return; + } } if (ri->accessfn || (ri->fgt && s->fgt_active)) { /* Emit code to perform further access permissions checks at * runtime; this may result in an exception. */ - syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); gen_a64_update_pc(s, 0); tcg_ri = tcg_temp_new_ptr(); gen_helper_access_check_cp_reg(tcg_ri, tcg_env, @@ -2189,6 +2241,78 @@ static void handle_sys(DisasContext *s, bool isread, gen_a64_update_pc(s, 0); } + if (!skip_fp_access_checks) { + if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) { + return; + } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { + return; + } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) { + return; + } + } + + if (nv_trap_to_el2) { + gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2); + return; + } + + if (nv_redirect_reg) { + /* + * FEAT_NV2 redirection of an EL2 register to an EL1 register. + * Conveniently in all cases the encoding of the EL1 register is + * identical to the EL2 register except that opc1 is 0. + * Get the reginfo for the EL1 register to use for the actual access. + * We don't use the EL1 register's access function, and + * fine-grained-traps on EL1 also do not apply here. 
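As a side note on the remap described just above: because the redirected EL1 encoding differs from the EL2 one only in op1, the lookup key can be rebuilt by forcing that field to 0. A standalone sketch of the idea; pack_sysreg() is a made-up packing helper, not QEMU's ENCODE_AA64_CP_REG, and TTBR0 is just one example register pair.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Made-up key packing, for illustration only. */
    static uint32_t pack_sysreg(int op0, int op1, int crn, int crm, int op2)
    {
        return ((uint32_t)op0 << 16) | (op1 << 12) | (crn << 8) | (crm << 4) | op2;
    }

    int main(void)
    {
        /* TTBR0_EL2 is S3_4_C2_C0_0; its redirected EL1 view is TTBR0_EL1, S3_0_C2_C0_0 */
        uint32_t el2_key = pack_sysreg(3, 4, 2, 0, 0);
        uint32_t el1_key = pack_sysreg(3, 0, 2, 0, 0);   /* same fields, op1 zeroed */

        printf("EL2 key 0x%" PRIx32 " -> EL1 key 0x%" PRIx32 "\n", el2_key, el1_key);
        return 0;
    }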
+ */ + key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, + crn, crm, op0, 0, op2); + ri = get_arm_cp_reginfo(s->cp_regs, key); + assert(ri); + assert(cp_access_ok(s->current_el, ri, isread)); + /* + * We might not have done an update_pc earlier, so check we don't + * need it. We could support this in future if necessary. + */ + assert(!(ri->type & ARM_CP_RAISES_EXC)); + } + + if (nv2_mem_redirect) { + /* + * This system register is being redirected into an EL2 memory access. + * This means it is not an IO operation, doesn't change hflags, + * and need not end the TB, because it has no side effects. + * + * The access is 64-bit single copy atomic, guaranteed aligned because + * of the definition of VCNR_EL2. Its endianness depends on + * SCTLR_EL2.EE, not on the data endianness of EL1. + * It is done under either the EL2 translation regime or the EL2&0 + * translation regime, depending on HCR_EL2.E2H. It behaves as if + * PSTATE.PAN is 0. + */ + TCGv_i64 ptr = tcg_temp_new_i64(); + MemOp mop = MO_64 | MO_ALIGN | MO_ATOM_IFALIGN; + ARMMMUIdx armmemidx = s->nv2_mem_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2; + int memidx = arm_to_core_mmu_idx(armmemidx); + uint32_t syn; + + mop |= (s->nv2_mem_be ? MO_BE : MO_LE); + + tcg_gen_ld_i64(ptr, tcg_env, offsetof(CPUARMState, cp15.vncr_el2)); + tcg_gen_addi_i64(ptr, ptr, + (ri->nv2_redirect_offset & ~NV2_REDIR_FLAG_MASK)); + tcg_rt = cpu_reg(s, rt); + + syn = syn_data_abort_vncr(0, !isread, 0); + disas_set_insn_syndrome(s, syn); + if (isread) { + tcg_gen_qemu_ld_i64(tcg_rt, ptr, memidx, mop); + } else { + tcg_gen_qemu_st_i64(tcg_rt, ptr, memidx, mop); + } + return; + } + /* Handle special cases first */ switch (ri->type & ARM_CP_SPECIAL_MASK) { case 0: @@ -2204,12 +2328,17 @@ static void handle_sys(DisasContext *s, bool isread, } return; case ARM_CP_CURRENTEL: - /* Reads as current EL value from pstate, which is + { + /* + * Reads as current EL value from pstate, which is * guaranteed to be constant by the tb flags. + * For nested virt we should report EL2. */ + int el = s->nv ? 2 : s->current_el; tcg_rt = cpu_reg(s, rt); - tcg_gen_movi_i64(tcg_rt, s->current_el << 2); + tcg_gen_movi_i64(tcg_rt, el << 2); return; + } case ARM_CP_DC_ZVA: /* Writes clear the aligned block of memory which rt points into. 
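Circling back to the nv2_mem_redirect block above: at run time the whole access boils down to a 64-bit load or store at VNCR_EL2 plus the register's fixed offset, performed under the EL2 (or EL2&0) regime. A rough standalone model of that data path; the offset constant and the flat guest_ram array are invented for illustration, and the real per-register offsets come from the Arm ARM's VNCR page layout.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Invented offset, illustration only. */
    #define TOY_OFFSET_SPSR_EL1 0x160

    static uint8_t guest_ram[0x1000];   /* stands in for the page VNCR_EL2 points at */

    static uint64_t nv2_redirected_read(uint32_t offset)
    {
        uint64_t val;
        /* 64-bit single-copy-atomic and aligned in the real thing; plain memcpy here */
        memcpy(&val, &guest_ram[offset], sizeof(val));
        return val;
    }

    static void nv2_redirected_write(uint32_t offset, uint64_t val)
    {
        memcpy(&guest_ram[offset], &val, sizeof(val));
    }

    int main(void)
    {
        /* An MSR SPSR_EL1 from a guest hypervisor running at EL1 under NV2 lands here */
        nv2_redirected_write(TOY_OFFSET_SPSR_EL1, 0x3c5);
        printf("0x%llx\n", (unsigned long long)nv2_redirected_read(TOY_OFFSET_SPSR_EL1));
        return 0;
    }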
*/ if (s->mte_active[0]) { @@ -2267,13 +2396,6 @@ static void handle_sys(DisasContext *s, bool isread, default: g_assert_not_reached(); } - if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) { - return; - } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { - return; - } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) { - return; - } if (ri->type & ARM_CP_IO) { /* I/O operations must end the TB here (whether read or write) */ @@ -13979,7 +14101,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL); dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE); dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC); - dc->fgt_eret = EX_TBFLAG_A64(tb_flags, FGT_ERET); + dc->trap_eret = EX_TBFLAG_A64(tb_flags, TRAP_ERET); dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL); dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL); dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16; @@ -13996,6 +14118,11 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA); dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING); dc->naa = EX_TBFLAG_A64(tb_flags, NAA); + dc->nv = EX_TBFLAG_A64(tb_flags, NV); + dc->nv1 = EX_TBFLAG_A64(tb_flags, NV1); + dc->nv2 = EX_TBFLAG_A64(tb_flags, NV2); + dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20); + dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE); dc->vec_len = 0; dc->vec_stride = 0; dc->cp_regs = arm_cpu->cp_regs; @@ -14052,7 +14179,7 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) pc_arg &= ~TARGET_PAGE_MASK; } tcg_gen_insn_start(pc_arg, 0, 0); - dc->insn_start = tcg_last_op(); + dc->insn_start_updated = false; } static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c index b3660173d1d..dc49a8d8069 100644 --- a/target/arm/tcg/translate.c +++ b/target/arm/tcg/translate.c @@ -900,13 +900,7 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var) MemOp pow2_align(unsigned i) { static const MemOp mop_align[] = { - 0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16, - /* - * FIXME: TARGET_PAGE_BITS_MIN affects TLB_FLAGS_MASK such - * that 256-bit alignment (MO_ALIGN_32) cannot be supported: - * see get_alignment_bits(). Enforce only 128-bit alignment for now. - */ - MO_ALIGN_16 + 0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16, MO_ALIGN_32 }; g_assert(i < ARRAY_SIZE(mop_align)); return mop_align[i]; @@ -2822,13 +2816,20 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn, break; case ARM_CPU_MODE_HYP: /* - * SPSR_hyp and r13_hyp can only be accessed from Monitor mode - * (and so we can forbid accesses from EL2 or below). elr_hyp - * can be accessed also from Hyp mode, so forbid accesses from - * EL0 or EL1. + * r13_hyp can only be accessed from Monitor mode, and so we + * can forbid accesses from EL2 or below. + * elr_hyp can be accessed also from Hyp mode, so forbid + * accesses from EL0 or EL1. + * SPSR_hyp is supposed to be in the same category as r13_hyp + * and UNPREDICTABLE if accessed from anything except Monitor + * mode. However there is some real-world code that will do + * it because at least some hardware happens to permit the + * access. (Notably a standard Cortex-R52 startup code fragment + * does this.) So we permit SPSR_hyp from Hyp mode also, to allow + * this (incorrect) guest code to run. 
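Tying this to the msr_banked()/mrs_banked() hunk earlier in the patch: when the access is made from Hyp mode itself, the banked copy of the SPSR may be stale, so the helpers use the live SPSR instead. A simplified standalone model of that choice; the struct fields are stand-ins, not CPUARMState, though the mode values are the architectural ones.

    #include <stdint.h>
    #include <stdio.h>

    enum { MODE_MON = 0x16, MODE_HYP = 0x1a };   /* AArch32 CPSR.M values */

    typedef struct {
        uint32_t current_mode;
        uint32_t live_spsr;        /* SPSR of the mode currently executing */
        uint32_t banked_spsr_hyp;  /* saved copy, only synced on mode switches */
    } ToyCPU;

    static uint32_t read_spsr_hyp(ToyCPU *cpu)
    {
        if (cpu->current_mode == MODE_HYP) {
            return cpu->live_spsr;        /* MRS <Rd>, SPSR_hyp executed in Hyp mode */
        }
        return cpu->banked_spsr_hyp;      /* the architecturally sanctioned Monitor-mode case */
    }

    int main(void)
    {
        ToyCPU cpu = { .current_mode = MODE_HYP, .live_spsr = 0x1d3, .banked_spsr_hyp = 0 };
        printf("SPSR_hyp reads as 0x%x\n", read_spsr_hyp(&cpu));
        return 0;
    }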
*/ - if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 || - (s->current_el < 3 && *regno != 17)) { + if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 + || (s->current_el < 3 && *regno != 16 && *regno != 17)) { goto undef; } break; @@ -4584,7 +4585,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, tcg_gen_andi_i32(t, t, 1u << maskbit); tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label); - gen_exception_insn(s, 0, EXCP_UDEF, syndrome); + gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2); /* * gen_exception_insn() will set is_jmp to DISAS_NORETURN, * but since we're conditionally branching over it, we want @@ -9272,7 +9273,7 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) condexec_bits = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1); } tcg_gen_insn_start(pc_arg, condexec_bits, 0); - dc->insn_start = tcg_last_op(); + dc->insn_start_updated = false; } static bool arm_check_kernelpage(DisasContext *dc) @@ -9691,7 +9692,7 @@ static const TranslatorOps thumb_translator_ops = { /* generate intermediate code for basic block 'tb'. */ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc = { }; const TranslatorOps *ops = &arm_translator_ops; diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index 3c3bb3431ad..dc66ff21908 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -138,12 +138,22 @@ typedef struct DisasContext { bool mve_no_pred; /* True if fine-grained traps are active */ bool fgt_active; - /* True if fine-grained trap on ERET is enabled */ - bool fgt_eret; /* True if fine-grained trap on SVC is enabled */ bool fgt_svc; + /* True if a trap on ERET is enabled (FGT or NV) */ + bool trap_eret; /* True if FEAT_LSE2 SCTLR_ELx.nAA is set */ bool naa; + /* True if FEAT_NV HCR_EL2.NV is enabled */ + bool nv; + /* True if NV enabled and HCR_EL2.NV1 is set */ + bool nv1; + /* True if NV enabled and HCR_EL2.NV2 is set */ + bool nv2; + /* True if NV2 enabled and NV2 RAM accesses use EL2&0 translation regime */ + bool nv2_mem_e20; + /* True if NV2 enabled and NV2 RAM accesses are big-endian */ + bool nv2_mem_be; /* * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. * < 0, set by the current instruction. @@ -155,10 +165,12 @@ typedef struct DisasContext { uint8_t gm_blocksize; /* True if this page is guarded. */ bool guarded_page; + /* True if the current insn_start has been updated. */ + bool insn_start_updated; /* Bottom two bits of XScale c15_cpar coprocessor access control reg */ int c15_cpar; - /* TCG op of the current insn_start. */ - TCGOp *insn_start; + /* Offset from VNCR_EL2 when FEAT_NV2 redirects this reg to memory */ + uint32_t nv2_redirect_offset; } DisasContext; typedef struct DisasCompare { @@ -264,10 +276,10 @@ static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn) syn &= ARM_INSN_START_WORD2_MASK; syn >>= ARM_INSN_START_WORD2_SHIFT; - /* We check and clear insn_start_idx to catch multiple updates. */ - assert(s->insn_start != NULL); - tcg_set_insn_start_param(s->insn_start, 2, syn); - s->insn_start = NULL; + /* Check for multiple updates. 
*/ + assert(!s->insn_start_updated); + s->insn_start_updated = true; + tcg_set_insn_start_param(s->base.insn_start, 2, syn); } static inline int curr_insn_len(DisasContext *s) diff --git a/target/arm/trace-events b/target/arm/trace-events index 48cc0512dbe..4438dce7bec 100644 --- a/target/arm/trace-events +++ b/target/arm/trace-events @@ -8,6 +8,7 @@ arm_gt_tval_write(int timer, uint64_t value) "gt_tval_write: timer %d value 0x%" arm_gt_ctl_write(int timer, uint64_t value) "gt_ctl_write: timer %d value 0x%" PRIx64 arm_gt_imask_toggle(int timer) "gt_ctl_write: timer %d IMASK toggle" arm_gt_cntvoff_write(uint64_t value) "gt_cntvoff_write: value 0x%" PRIx64 +arm_gt_cntpoff_write(uint64_t value) "gt_cntpoff_write: value 0x%" PRIx64 arm_gt_update_irq(int timer, int irqstate) "gt_update_irq: timer %d irqstate %d" # kvm.c diff --git a/target/avr/cpu.c b/target/avr/cpu.c index 999c010dedb..45ee1b5f89e 100644 --- a/target/avr/cpu.c +++ b/target/avr/cpu.c @@ -43,38 +43,34 @@ static vaddr avr_cpu_get_pc(CPUState *cs) static bool avr_cpu_has_work(CPUState *cs) { - AVRCPU *cpu = AVR_CPU(cs); - CPUAVRState *env = &cpu->env; - return (cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_RESET)) - && cpu_interrupts_enabled(env); + && cpu_interrupts_enabled(cpu_env(cs)); +} + +static int avr_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return ifetch ? MMU_CODE_IDX : MMU_DATA_IDX; } static void avr_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { - AVRCPU *cpu = AVR_CPU(cs); - CPUAVRState *env = &cpu->env; - tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); - env->pc_w = tb->pc / 2; /* internally PC points to words */ + cpu_env(cs)->pc_w = tb->pc / 2; /* internally PC points to words */ } static void avr_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) { - AVRCPU *cpu = AVR_CPU(cs); - CPUAVRState *env = &cpu->env; - - env->pc_w = data[0]; + cpu_env(cs)->pc_w = data[0]; } static void avr_cpu_reset_hold(Object *obj) { CPUState *cs = CPU(obj); AVRCPU *cpu = AVR_CPU(cs); - AVRCPUClass *mcc = AVR_CPU_GET_CLASS(cpu); + AVRCPUClass *mcc = AVR_CPU_GET_CLASS(obj); CPUAVRState *env = &cpu->env; if (mcc->parent_phases.hold) { @@ -160,19 +156,12 @@ static Property avr_cpu_properties[] = { static ObjectClass *avr_cpu_class_by_name(const char *cpu_model) { - ObjectClass *oc; - - oc = object_class_by_name(cpu_model); - if (object_class_dynamic_cast(oc, TYPE_AVR_CPU) == NULL) { - oc = NULL; - } - return oc; + return object_class_by_name(cpu_model); } static void avr_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - AVRCPU *cpu = AVR_CPU(cs); - CPUAVRState *env = &cpu->env; + CPUAVRState *env = cpu_env(cs); int i; qemu_fprintf(f, "\n"); @@ -216,7 +205,7 @@ static const struct SysemuCPUOps avr_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps avr_tcg_ops = { +static const TCGCPUOps avr_tcg_ops = { .initialize = avr_cpu_tcg_init, .synchronize_from_tb = avr_cpu_synchronize_from_tb, .restore_state_to_opc = avr_restore_state_to_opc, @@ -242,6 +231,7 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = avr_cpu_class_by_name; cc->has_work = avr_cpu_has_work; + cc->mmu_index = avr_cpu_mmu_index; cc->dump_state = avr_cpu_dump_state; cc->set_pc = avr_cpu_set_pc; cc->get_pc = avr_cpu_get_pc; @@ -251,7 +241,6 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data) cc->gdb_read_register = avr_cpu_gdb_read_register; cc->gdb_write_register = avr_cpu_gdb_write_register; cc->gdb_adjust_breakpoint = 
avr_cpu_gdb_adjust_breakpoint; - cc->gdb_num_core_regs = 35; cc->gdb_core_xml_file = "avr-cpu.xml"; cc->tcg_ops = &avr_tcg_ops; } @@ -282,8 +271,7 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data) */ static void avr_avr5_initfn(Object *obj) { - AVRCPU *cpu = AVR_CPU(obj); - CPUAVRState *env = &cpu->env; + CPUAVRState *env = cpu_env(CPU(obj)); set_avr_feature(env, AVR_FEATURE_LPM); set_avr_feature(env, AVR_FEATURE_IJMP_ICALL); @@ -311,8 +299,7 @@ static void avr_avr5_initfn(Object *obj) */ static void avr_avr51_initfn(Object *obj) { - AVRCPU *cpu = AVR_CPU(obj); - CPUAVRState *env = &cpu->env; + CPUAVRState *env = cpu_env(CPU(obj)); set_avr_feature(env, AVR_FEATURE_LPM); set_avr_feature(env, AVR_FEATURE_IJMP_ICALL); @@ -341,8 +328,7 @@ static void avr_avr51_initfn(Object *obj) */ static void avr_avr6_initfn(Object *obj) { - AVRCPU *cpu = AVR_CPU(obj); - CPUAVRState *env = &cpu->env; + CPUAVRState *env = cpu_env(CPU(obj)); set_avr_feature(env, AVR_FEATURE_LPM); set_avr_feature(env, AVR_FEATURE_IJMP_ICALL); @@ -368,21 +354,6 @@ typedef struct AVRCPUInfo { } AVRCPUInfo; -static void avr_cpu_list_entry(gpointer data, gpointer user_data) -{ - const char *typename = object_class_get_name(OBJECT_CLASS(data)); - - qemu_printf("%s\n", typename); -} - -void avr_cpu_list(void) -{ - GSList *list; - list = object_class_get_list_sorted(TYPE_AVR_CPU, false); - g_slist_foreach(list, avr_cpu_list_entry, NULL); - g_slist_free(list); -} - #define DEFINE_AVR_CPU_TYPE(model, initfn) \ { \ .parent = TYPE_AVR_CPU, \ diff --git a/target/avr/cpu.h b/target/avr/cpu.h index 7960c5c57a8..d185d20dcb7 100644 --- a/target/avr/cpu.h +++ b/target/avr/cpu.h @@ -184,17 +184,8 @@ static inline void set_avr_feature(CPUAVRState *env, int feature) env->features |= (1U << feature); } -#define cpu_list avr_cpu_list -#define cpu_mmu_index avr_cpu_mmu_index - -static inline int avr_cpu_mmu_index(CPUAVRState *env, bool ifetch) -{ - return ifetch ? 
MMU_CODE_IDX : MMU_DATA_IDX; -} - void avr_cpu_tcg_init(void); -void avr_cpu_list(void); int cpu_avr_exec(CPUState *cpu); enum { diff --git a/target/avr/gdbstub.c b/target/avr/gdbstub.c index 150344d8b94..2eeee2bf4e1 100644 --- a/target/avr/gdbstub.c +++ b/target/avr/gdbstub.c @@ -23,8 +23,7 @@ int avr_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - AVRCPU *cpu = AVR_CPU(cs); - CPUAVRState *env = &cpu->env; + CPUAVRState *env = cpu_env(cs); /* R */ if (n < 32) { @@ -53,8 +52,7 @@ int avr_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int avr_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - AVRCPU *cpu = AVR_CPU(cs); - CPUAVRState *env = &cpu->env; + CPUAVRState *env = cpu_env(cs); /* R */ if (n < 32) { diff --git a/target/avr/helper.c b/target/avr/helper.c index fdc9884ea0e..eeca415c43d 100644 --- a/target/avr/helper.c +++ b/target/avr/helper.c @@ -30,8 +30,7 @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - AVRCPU *cpu = AVR_CPU(cs); - CPUAVRState *env = &cpu->env; + CPUAVRState *env = cpu_env(cs); /* * We cannot separate a skip from the next instruction, @@ -69,8 +68,7 @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request) void avr_cpu_do_interrupt(CPUState *cs) { - AVRCPU *cpu = AVR_CPU(cs); - CPUAVRState *env = &cpu->env; + CPUAVRState *env = cpu_env(cs); uint32_t ret = env->pc_w; int vector = 0; @@ -144,9 +142,7 @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size, if (probe) { page_size = 1; } else { - AVRCPU *cpu = AVR_CPU(cs); - CPUAVRState *env = &cpu->env; - env->fullacc = 1; + cpu_env(cs)->fullacc = 1; cpu_loop_exit_restore(cs, retaddr); } } diff --git a/target/avr/machine.c b/target/avr/machine.c index 16f7a3e031d..4402862fb96 100644 --- a/target/avr/machine.c +++ b/target/avr/machine.c @@ -100,7 +100,7 @@ const VMStateDescription vms_avr_cpu = { .name = "cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.pc_w, AVRCPU), VMSTATE_UINT32(env.sp, AVRCPU), VMSTATE_UINT32(env.skip, AVRCPU), diff --git a/target/avr/translate.c b/target/avr/translate.c index cdffa045194..87e2bd5ef18 100644 --- a/target/avr/translate.c +++ b/target/avr/translate.c @@ -2657,11 +2657,10 @@ static bool canonicalize_skip(DisasContext *ctx) static void avr_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); - CPUAVRState *env = cpu_env(cs); uint32_t tb_flags = ctx->base.tb->flags; ctx->cs = cs; - ctx->env = env; + ctx->env = cpu_env(cs); ctx->npc = ctx->base.pc_first / 2; ctx->skip_cond = TCG_COND_NEVER; @@ -2805,7 +2804,7 @@ static const TranslatorOps avr_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc = { }; translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base); diff --git a/target/cris/cpu.c b/target/cris/cpu.c index 675b73ac04f..eb4bddcb7e7 100644 --- a/target/cris/cpu.c +++ b/target/cris/cpu.c @@ -56,12 +56,16 @@ static bool cris_cpu_has_work(CPUState *cs) return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); } +static int cris_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return !!(cpu_env(cs)->pregs[PR_CCS] & U_FLAG); +} + static void cris_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(obj); - CRISCPU *cpu = CRIS_CPU(s); - CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(cpu); - CPUCRISState *env = 
&cpu->env; + CPUState *cs = CPU(obj); + CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj); + CPUCRISState *env = cpu_env(cs); uint32_t vr; if (ccc->parent_phases.hold) { @@ -95,48 +99,8 @@ static ObjectClass *cris_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(CRIS_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc != NULL && !object_class_dynamic_cast(oc, TYPE_CRIS_CPU)) { - oc = NULL; - } - return oc; -} -/* Sort alphabetically by VR. */ -static gint cris_cpu_list_compare(gconstpointer a, gconstpointer b) -{ - CRISCPUClass *ccc_a = CRIS_CPU_CLASS(a); - CRISCPUClass *ccc_b = CRIS_CPU_CLASS(b); - - /* */ - if (ccc_a->vr > ccc_b->vr) { - return 1; - } else if (ccc_a->vr < ccc_b->vr) { - return -1; - } else { - return 0; - } -} - -static void cris_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - const char *typename = object_class_get_name(oc); - char *name; - - name = g_strndup(typename, strlen(typename) - strlen(CRIS_CPU_TYPE_SUFFIX)); - qemu_printf(" %s\n", name); - g_free(name); -} - -void cris_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list(TYPE_CRIS_CPU, false); - list = g_slist_sort(list, cris_cpu_list_compare); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, cris_cpu_list_entry, NULL); - g_slist_free(list); + return oc; } static void cris_cpu_realizefn(DeviceState *dev, Error **errp) @@ -182,10 +146,7 @@ static void cris_cpu_set_irq(void *opaque, int irq, int level) static void cris_disas_set_info(CPUState *cpu, disassemble_info *info) { - CRISCPU *cc = CRIS_CPU(cpu); - CPUCRISState *env = &cc->env; - - if (env->pregs[PR_VR] != 32) { + if (cpu_env(cpu)->pregs[PR_VR] != 32) { info->mach = bfd_mach_cris_v0_v10; info->print_insn = print_insn_crisv10; } else { @@ -218,7 +179,7 @@ static const struct SysemuCPUOps cris_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps crisv10_tcg_ops = { +static const TCGCPUOps crisv10_tcg_ops = { .initialize = cris_initialize_crisv10_tcg, .restore_state_to_opc = cris_restore_state_to_opc, @@ -229,7 +190,7 @@ static const struct TCGCPUOps crisv10_tcg_ops = { #endif /* !CONFIG_USER_ONLY */ }; -static const struct TCGCPUOps crisv32_tcg_ops = { +static const TCGCPUOps crisv32_tcg_ops = { .initialize = cris_initialize_tcg, .restore_state_to_opc = cris_restore_state_to_opc, @@ -314,6 +275,7 @@ static void cris_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = cris_cpu_class_by_name; cc->has_work = cris_cpu_has_work; + cc->mmu_index = cris_cpu_mmu_index; cc->dump_state = cris_cpu_dump_state; cc->set_pc = cris_cpu_set_pc; cc->get_pc = cris_cpu_get_pc; diff --git a/target/cris/cpu.h b/target/cris/cpu.h index 1be7f90319c..3904e5448c6 100644 --- a/target/cris/cpu.h +++ b/target/cris/cpu.h @@ -260,10 +260,6 @@ enum { /* MMU modes definitions */ #define MMU_USER_IDX 1 -static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch) -{ - return !!(env->pregs[PR_CCS] & U_FLAG); -} /* Support function regs. 
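The inline cpu_mmu_index() deleted here moves into the new CPUClass::mmu_index hook wired up in cris_cpu_class_init() above (cris_cpu_mmu_index). A schematic of that shape with toy types; ToyCPUState/ToyCPUClass and the flag bit position are placeholders, not QEMU's definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct ToyCPUState { uint32_t ccs; } ToyCPUState;
    typedef struct ToyCPUClass {
        int (*mmu_index)(ToyCPUState *cs, bool ifetch);
    } ToyCPUClass;

    #define TOY_U_FLAG (1u << 8)   /* placeholder bit, not CRIS's real U_FLAG */

    static int toy_cris_mmu_index(ToyCPUState *cs, bool ifetch)
    {
        return !!(cs->ccs & TOY_U_FLAG);   /* 1 = user index, 0 = kernel, as in the hunk above */
    }

    int main(void)
    {
        ToyCPUClass cc = { .mmu_index = toy_cris_mmu_index };
        ToyCPUState cs = { .ccs = TOY_U_FLAG };
        printf("mmu index: %d\n", cc.mmu_index(&cs, false));
        return 0;
    }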
*/ #define SFR_RW_GC_CFG 0][0 @@ -287,7 +283,4 @@ static inline void cpu_get_tb_cpu_state(CPUCRISState *env, vaddr *pc, | X_FLAG | PFIX_FLAG)); } -#define cpu_list cris_cpu_list -void cris_cpu_list(void); - #endif diff --git a/target/cris/gdbstub.c b/target/cris/gdbstub.c index 25c0ca33a5e..9e87069da89 100644 --- a/target/cris/gdbstub.c +++ b/target/cris/gdbstub.c @@ -23,8 +23,7 @@ int crisv10_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - CRISCPU *cpu = CRIS_CPU(cs); - CPUCRISState *env = &cpu->env; + CPUCRISState *env = cpu_env(cs); if (n < 15) { return gdb_get_reg32(mem_buf, env->regs[n]); @@ -55,8 +54,7 @@ int crisv10_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int cris_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - CRISCPU *cpu = CRIS_CPU(cs); - CPUCRISState *env = &cpu->env; + CPUCRISState *env = cpu_env(cs); uint8_t srs; srs = env->pregs[PR_SRS]; @@ -90,8 +88,7 @@ int cris_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int cris_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - CRISCPU *cpu = CRIS_CPU(cs); - CPUCRISState *env = &cpu->env; + CPUCRISState *env = cpu_env(cs); uint32_t tmp; if (n > 49) { diff --git a/target/cris/helper.c b/target/cris/helper.c index c0bf987e3e8..1c3f86876f6 100644 --- a/target/cris/helper.c +++ b/target/cris/helper.c @@ -53,8 +53,7 @@ bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - CRISCPU *cpu = CRIS_CPU(cs); - CPUCRISState *env = &cpu->env; + CPUCRISState *env = cpu_env(cs); struct cris_mmu_result res; int prot, miss; target_ulong phy; @@ -97,8 +96,7 @@ bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, void crisv10_cpu_do_interrupt(CPUState *cs) { - CRISCPU *cpu = CRIS_CPU(cs); - CPUCRISState *env = &cpu->env; + CPUCRISState *env = cpu_env(cs); int ex_vec = -1; D_LOG("exception index=%d interrupt_req=%d\n", @@ -159,8 +157,7 @@ void crisv10_cpu_do_interrupt(CPUState *cs) void cris_cpu_do_interrupt(CPUState *cs) { - CRISCPU *cpu = CRIS_CPU(cs); - CPUCRISState *env = &cpu->env; + CPUCRISState *env = cpu_env(cs); int ex_vec = -1; D_LOG("exception index=%d interrupt_req=%d\n", @@ -262,8 +259,7 @@ hwaddr cris_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { CPUClass *cc = CPU_GET_CLASS(cs); - CRISCPU *cpu = CRIS_CPU(cs); - CPUCRISState *env = &cpu->env; + CPUCRISState *env = cpu_env(cs); bool ret = false; if (interrupt_request & CPU_INTERRUPT_HARD diff --git a/target/cris/machine.c b/target/cris/machine.c index f370f33486d..7b9bde872aa 100644 --- a/target/cris/machine.c +++ b/target/cris/machine.c @@ -26,7 +26,7 @@ static const VMStateDescription vmstate_tlbset = { .name = "cpu/tlbset", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(lo, TLBSet), VMSTATE_UINT32(hi, TLBSet), VMSTATE_END_OF_LIST() @@ -37,7 +37,7 @@ static const VMStateDescription vmstate_cris_env = { .name = "env", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, CPUCRISState, 16), VMSTATE_UINT32_ARRAY(pregs, CPUCRISState, 16), VMSTATE_UINT32(pc, CPUCRISState), @@ -85,7 +85,7 @@ const VMStateDescription vmstate_cris_cpu = { .name = "cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CPU(), 
VMSTATE_STRUCT(env, CRISCPU, 1, vmstate_cris_env, CPUCRISState), VMSTATE_END_OF_LIST() diff --git a/target/cris/translate.c b/target/cris/translate.c index b3974ba0bbb..b3a4d61d0a2 100644 --- a/target/cris/translate.c +++ b/target/cris/translate.c @@ -94,6 +94,7 @@ typedef struct DisasContext { CRISCPU *cpu; target_ulong pc, ppc; + int mem_index; /* Decoder. */ unsigned int (*decoder)(CPUCRISState *env, struct DisasContext *dc); @@ -1008,37 +1009,31 @@ static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type) static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr) { - int mem_index = cpu_mmu_index(&dc->cpu->env, false); - /* If we get a fault on a delayslot we must keep the jmp state in the cpu-state to be able to re-execute the jmp. */ if (dc->delayed_branch == 1) { cris_store_direct_jmp(dc); } - tcg_gen_qemu_ld_i64(dst, addr, mem_index, MO_TEUQ); + tcg_gen_qemu_ld_i64(dst, addr, dc->mem_index, MO_TEUQ); } static void gen_load(DisasContext *dc, TCGv dst, TCGv addr, unsigned int size, int sign) { - int mem_index = cpu_mmu_index(&dc->cpu->env, false); - /* If we get a fault on a delayslot we must keep the jmp state in the cpu-state to be able to re-execute the jmp. */ if (dc->delayed_branch == 1) { cris_store_direct_jmp(dc); } - tcg_gen_qemu_ld_tl(dst, addr, mem_index, + tcg_gen_qemu_ld_tl(dst, addr, dc->mem_index, MO_TE + ctz32(size) + (sign ? MO_SIGN : 0)); } static void gen_store (DisasContext *dc, TCGv addr, TCGv val, unsigned int size) { - int mem_index = cpu_mmu_index(&dc->cpu->env, false); - /* If we get a fault on a delayslot we must keep the jmp state in the cpu-state to be able to re-execute the jmp. */ if (dc->delayed_branch == 1) { @@ -1055,7 +1050,7 @@ static void gen_store (DisasContext *dc, TCGv addr, TCGv val, return; } - tcg_gen_qemu_st_tl(val, addr, mem_index, MO_TE + ctz32(size)); + tcg_gen_qemu_st_tl(val, addr, dc->mem_index, MO_TE + ctz32(size)); if (dc->flags_x) { cris_evaluate_flags(dc); @@ -2971,6 +2966,7 @@ static void cris_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) dc->cpu = env_archcpu(env); dc->ppc = pc_start; dc->pc = pc_start; + dc->mem_index = cpu_mmu_index(cs, false); dc->flags_uptodate = 1; dc->flags_x = tb_flags & X_FLAG; dc->cc_x_uptodate = 0; @@ -3006,7 +3002,6 @@ static void cris_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); - CPUCRISState *env = cpu_env(cs); unsigned int insn_len; /* Pretty disas. 
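A small aside on the dc->mem_index change running through translate.c above: the MMU index is now computed once in cris_tr_init_disas_context() and the per-insn load/store emitters read the cached value instead of recomputing it. The shape of that pattern, with toy types standing in for DisasContext and the TCG emitters:

    #include <stdio.h>

    typedef struct ToyDisasContext { int mem_index; } ToyDisasContext;

    /* Stand-in for cpu_mmu_index(cs, false); imagine it inspects CPU state. */
    static int toy_mmu_index(void) { return 1; }

    static void toy_tr_init(ToyDisasContext *dc)
    {
        dc->mem_index = toy_mmu_index();    /* computed once per translation block */
    }

    static void toy_gen_load(ToyDisasContext *dc)
    {
        /* real code: tcg_gen_qemu_ld_tl(dst, addr, dc->mem_index, memop); */
        printf("emit load with mem_index %d\n", dc->mem_index);
    }

    int main(void)
    {
        ToyDisasContext dc;
        toy_tr_init(&dc);
        toy_gen_load(&dc);    /* every emitter reuses the cached index */
        return 0;
    }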
*/ @@ -3014,7 +3009,7 @@ static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) dc->clear_x = 1; - insn_len = dc->decoder(env, dc); + insn_len = dc->decoder(cpu_env(cs), dc); dc->ppc = dc->pc; dc->pc += insn_len; dc->base.pc_next += insn_len; @@ -3172,7 +3167,7 @@ static const TranslatorOps cris_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc; translator_loop(cs, tb, max_insns, pc, host_pc, &cris_tr_ops, &dc.base); @@ -3180,8 +3175,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - CRISCPU *cpu = CRIS_CPU(cs); - CPUCRISState *env = &cpu->env; + CPUCRISState *env = cpu_env(cs); const char * const *regnames; const char * const *pregnames; int i; diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc index 6df599fdce8..73fc27c15d4 100644 --- a/target/cris/translate_v10.c.inc +++ b/target/cris/translate_v10.c.inc @@ -91,8 +91,6 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val, static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val, unsigned int size) { - int mem_index = cpu_mmu_index(&dc->cpu->env, false); - /* If we get a fault on a delayslot we must keep the jmp state in the cpu-state to be able to re-execute the jmp. */ if (dc->delayed_branch == 1) { @@ -101,11 +99,11 @@ static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val, /* Conditional writes. */ if (dc->flags_x) { - gen_store_v10_conditional(dc, addr, val, size, mem_index); + gen_store_v10_conditional(dc, addr, val, size, dc->mem_index); return; } - tcg_gen_qemu_st_tl(val, addr, mem_index, ctz32(size) | MO_TE); + tcg_gen_qemu_st_tl(val, addr, dc->mem_index, ctz32(size) | MO_TE); } diff --git a/target/hexagon/README b/target/hexagon/README index 69b2ffe9bb2..746ebec378a 100644 --- a/target/hexagon/README +++ b/target/hexagon/README @@ -189,11 +189,17 @@ the packet, and we mark the implicit writes. After the analysis is performed, we initialize the result register for each of the predicated assignments. In addition to instruction semantics, we use a generator to create the decode -tree. This generation is also a two step process. The first step is to run -target/hexagon/gen_dectree_import.c to produce +tree. This generation is a four step process. 
+Step 1 is to run target/hexagon/gen_dectree_import.c to produce /target/hexagon/iset.py -This file is imported by target/hexagon/dectree.py to produce - /target/hexagon/dectree_generated.h.inc +Step 2 is to import iset.py into target/hexagon/gen_decodetree.py to produce + /target/hexagon/normal_decode_generated + /target/hexagon/hvx_decode_generated + /target/hexagon/subinsn_*_decode_generated +Step 3 is to process the above files with QEMU's decodetree.py to produce + /target/hexagon/decode_*_generated.c.inc +Step 4 is to import iset.py into target/hexagon/gen_trans_funcs.py to produce + /target/hexagon/decodetree_trans_funcs_generated.c.inc *** Key Files *** diff --git a/target/hexagon/attribs_def.h.inc b/target/hexagon/attribs_def.h.inc index 21d457fa4a4..87942d46f47 100644 --- a/target/hexagon/attribs_def.h.inc +++ b/target/hexagon/attribs_def.h.inc @@ -117,7 +117,6 @@ DEF_ATTRIB(IMPLICIT_READS_P1, "Reads the P1 register", "", "") DEF_ATTRIB(IMPLICIT_READS_P2, "Reads the P2 register", "", "") DEF_ATTRIB(IMPLICIT_READS_P3, "Reads the P3 register", "", "") DEF_ATTRIB(IMPLICIT_WRITES_USR, "May write USR", "", "") -DEF_ATTRIB(WRITES_PRED_REG, "Writes a predicate register", "", "") DEF_ATTRIB(COMMUTES, "The operation is communitive", "", "") DEF_ATTRIB(DEALLOCRET, "dealloc_return", "", "") DEF_ATTRIB(DEALLOCFRAME, "deallocframe", "", "") diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index 9d1ffc3b4bb..3a716b9be3c 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -32,26 +32,6 @@ static void hexagon_v69_cpu_init(Object *obj) { } static void hexagon_v71_cpu_init(Object *obj) { } static void hexagon_v73_cpu_init(Object *obj) { } -static void hexagon_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - char *name = g_strdup(object_class_get_name(oc)); - if (g_str_has_suffix(name, HEXAGON_CPU_TYPE_SUFFIX)) { - name[strlen(name) - strlen(HEXAGON_CPU_TYPE_SUFFIX)] = '\0'; - } - qemu_printf(" %s\n", name); - g_free(name); -} - -void hexagon_cpu_list(void) -{ - GSList *list; - list = object_class_get_list_sorted(TYPE_HEXAGON_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, hexagon_cpu_list_entry, NULL); - g_slist_free(list); -} - static ObjectClass *hexagon_cpu_class_by_name(const char *cpu_model) { ObjectClass *oc; @@ -63,9 +43,7 @@ static ObjectClass *hexagon_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(typename); g_strfreev(cpuname); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_HEXAGON_CPU)) { - return NULL; - } + return oc; } @@ -258,10 +236,7 @@ static void hexagon_dump(CPUHexagonState *env, FILE *f, int flags) static void hexagon_dump_state(CPUState *cs, FILE *f, int flags) { - HexagonCPU *cpu = HEXAGON_CPU(cs); - CPUHexagonState *env = &cpu->env; - - hexagon_dump(env, f, flags); + hexagon_dump(cpu_env(cs), f, flags); } void hexagon_debug(CPUHexagonState *env) @@ -271,25 +246,19 @@ void hexagon_debug(CPUHexagonState *env) static void hexagon_cpu_set_pc(CPUState *cs, vaddr value) { - HexagonCPU *cpu = HEXAGON_CPU(cs); - CPUHexagonState *env = &cpu->env; - env->gpr[HEX_REG_PC] = value; + cpu_env(cs)->gpr[HEX_REG_PC] = value; } static vaddr hexagon_cpu_get_pc(CPUState *cs) { - HexagonCPU *cpu = HEXAGON_CPU(cs); - CPUHexagonState *env = &cpu->env; - return env->gpr[HEX_REG_PC]; + return cpu_env(cs)->gpr[HEX_REG_PC]; } static void hexagon_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { - HexagonCPU *cpu = HEXAGON_CPU(cs); - CPUHexagonState *env = &cpu->env; 
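The two deleted lines just above show the pattern this series keeps collapsing: FOO_CPU(cs) followed by &cpu->env becomes a single cpu_env(cs) call. A toy model of why one accessor is enough; the struct layout here is illustrative, not QEMU's ArchCPU.

    #include <stdio.h>

    /* Toy layout: the per-target env is embedded in the CPU object, so one
     * accessor can replace the cast-then-take-address pair at every call site. */
    typedef struct ToyEnv { unsigned pc; } ToyEnv;
    typedef struct ToyCPU { int parent_obj; ToyEnv env; } ToyCPU;

    static ToyEnv *toy_cpu_env(ToyCPU *cs)
    {
        return &cs->env;
    }

    int main(void)
    {
        ToyCPU cpu = { .env = { .pc = 0x100 } };
        toy_cpu_env(&cpu)->pc += 4;
        printf("pc = 0x%x\n", toy_cpu_env(&cpu)->pc);
        return 0;
    }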
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); - env->gpr[HEX_REG_PC] = tb->pc; + cpu_env(cs)->gpr[HEX_REG_PC] = tb->pc; } static bool hexagon_cpu_has_work(CPUState *cs) @@ -301,18 +270,14 @@ static void hexagon_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) { - HexagonCPU *cpu = HEXAGON_CPU(cs); - CPUHexagonState *env = &cpu->env; - - env->gpr[HEX_REG_PC] = data[0]; + cpu_env(cs)->gpr[HEX_REG_PC] = data[0]; } static void hexagon_cpu_reset_hold(Object *obj) { CPUState *cs = CPU(obj); - HexagonCPU *cpu = HEXAGON_CPU(cs); - HexagonCPUClass *mcc = HEXAGON_CPU_GET_CLASS(cpu); - CPUHexagonState *env = &cpu->env; + HexagonCPUClass *mcc = HEXAGON_CPU_GET_CLASS(obj); + CPUHexagonState *env = cpu_env(cs); if (mcc->parent_phases.hold) { mcc->parent_phases.hold(obj); @@ -341,8 +306,7 @@ static void hexagon_cpu_realize(DeviceState *dev, Error **errp) gdb_register_coprocessor(cs, hexagon_hvx_gdb_read_register, hexagon_hvx_gdb_write_register, - NUM_VREGS + NUM_QREGS, - "hexagon-hvx.xml", 0); + gdb_find_static_feature("hexagon-hvx.xml"), 0); qemu_init_vcpu(cs); cpu_reset(cs); @@ -359,7 +323,7 @@ static void hexagon_cpu_init(Object *obj) #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps hexagon_tcg_ops = { +static const TCGCPUOps hexagon_tcg_ops = { .initialize = hexagon_translate_init, .synchronize_from_tb = hexagon_cpu_synchronize_from_tb, .restore_state_to_opc = hexagon_restore_state_to_opc, @@ -385,7 +349,6 @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data) cc->get_pc = hexagon_cpu_get_pc; cc->gdb_read_register = hexagon_gdb_read_register; cc->gdb_write_register = hexagon_gdb_write_register; - cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS; cc->gdb_stop_before_watchpoint = true; cc->gdb_core_xml_file = "hexagon-core.xml"; cc->disas_set_info = hexagon_cpu_disas_set_info; diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index 7d16083c6aa..3eef58fe8fc 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -37,9 +37,6 @@ #define CPU_RESOLVING_TYPE TYPE_HEXAGON_CPU -void hexagon_cpu_list(void); -#define cpu_list hexagon_cpu_list - #define MMU_USER_IDX 0 typedef struct { @@ -149,15 +146,6 @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc, *flags = hex_flags; } -static inline int cpu_mmu_index(CPUHexagonState *env, bool ifetch) -{ -#ifdef CONFIG_USER_ONLY - return MMU_USER_IDX; -#else -#error System mode not supported on Hexagon yet -#endif -} - typedef HexagonCPU ArchCPU; void hexagon_translate_init(void); diff --git a/target/hexagon/decode.c b/target/hexagon/decode.c index 946c55cc71d..a40210ca1e5 100644 --- a/target/hexagon/decode.c +++ b/target/hexagon/decode.c @@ -52,174 +52,41 @@ DEF_REGMAP(R_8, 8, 0, 1, 2, 3, 4, 5, 6, 7) #define DECODE_MAPPED_REG(OPNUM, NAME) \ insn->regno[OPNUM] = DECODE_REGISTER_##NAME[insn->regno[OPNUM]]; -typedef struct { - const struct DectreeTable *table_link; - const struct DectreeTable *table_link_b; - Opcode opcode; - enum { - DECTREE_ENTRY_INVALID, - DECTREE_TABLE_LINK, - DECTREE_SUBINSNS, - DECTREE_EXTSPACE, - DECTREE_TERMINAL - } type; -} DectreeEntry; - -typedef struct DectreeTable { - unsigned int (*lookup_function)(int startbit, int width, uint32_t opcode); - unsigned int size; - unsigned int startbit; - unsigned int width; - const DectreeEntry table[]; -} DectreeTable; - -#define DECODE_NEW_TABLE(TAG, SIZE, WHATNOT) \ - static const DectreeTable dectree_table_##TAG; -#define TABLE_LINK(TABLE) /* NOTHING */ -#define TERMINAL(TAG, ENC) /* NOTHING */ -#define 
SUBINSNS(TAG, CLASSA, CLASSB, ENC) /* NOTHING */ -#define EXTSPACE(TAG, ENC) /* NOTHING */ -#define INVALID() /* NOTHING */ -#define DECODE_END_TABLE(...) /* NOTHING */ -#define DECODE_MATCH_INFO(...) /* NOTHING */ -#define DECODE_LEGACY_MATCH_INFO(...) /* NOTHING */ -#define DECODE_OPINFO(...) /* NOTHING */ - -#include "dectree_generated.h.inc" - -#undef DECODE_OPINFO -#undef DECODE_MATCH_INFO -#undef DECODE_LEGACY_MATCH_INFO -#undef DECODE_END_TABLE -#undef INVALID -#undef TERMINAL -#undef SUBINSNS -#undef EXTSPACE -#undef TABLE_LINK -#undef DECODE_NEW_TABLE -#undef DECODE_SEPARATOR_BITS - -#define DECODE_SEPARATOR_BITS(START, WIDTH) NULL, START, WIDTH -#define DECODE_NEW_TABLE_HELPER(TAG, SIZE, FN, START, WIDTH) \ - static const DectreeTable dectree_table_##TAG = { \ - .size = SIZE, \ - .lookup_function = FN, \ - .startbit = START, \ - .width = WIDTH, \ - .table = { -#define DECODE_NEW_TABLE(TAG, SIZE, WHATNOT) \ - DECODE_NEW_TABLE_HELPER(TAG, SIZE, WHATNOT) - -#define TABLE_LINK(TABLE) \ - { .type = DECTREE_TABLE_LINK, .table_link = &dectree_table_##TABLE }, -#define TERMINAL(TAG, ENC) \ - { .type = DECTREE_TERMINAL, .opcode = TAG }, -#define SUBINSNS(TAG, CLASSA, CLASSB, ENC) \ - { \ - .type = DECTREE_SUBINSNS, \ - .table_link = &dectree_table_DECODE_SUBINSN_##CLASSA, \ - .table_link_b = &dectree_table_DECODE_SUBINSN_##CLASSB \ - }, -#define EXTSPACE(TAG, ENC) { .type = DECTREE_EXTSPACE }, -#define INVALID() { .type = DECTREE_ENTRY_INVALID, .opcode = XX_LAST_OPCODE }, - -#define DECODE_END_TABLE(...) } }; - -#define DECODE_MATCH_INFO(...) /* NOTHING */ -#define DECODE_LEGACY_MATCH_INFO(...) /* NOTHING */ -#define DECODE_OPINFO(...) /* NOTHING */ - -#include "dectree_generated.h.inc" - -#undef DECODE_OPINFO -#undef DECODE_MATCH_INFO -#undef DECODE_LEGACY_MATCH_INFO -#undef DECODE_END_TABLE -#undef INVALID -#undef TERMINAL -#undef SUBINSNS -#undef EXTSPACE -#undef TABLE_LINK -#undef DECODE_NEW_TABLE -#undef DECODE_NEW_TABLE_HELPER -#undef DECODE_SEPARATOR_BITS - -static const DectreeTable dectree_table_DECODE_EXT_EXT_noext = { - .size = 1, .lookup_function = NULL, .startbit = 0, .width = 0, - .table = { - { .type = DECTREE_ENTRY_INVALID, .opcode = XX_LAST_OPCODE }, - } -}; - -static const DectreeTable *ext_trees[XX_LAST_EXT_IDX]; +/* Helper functions for decode_*_generated.c.inc */ +#define DECODE_MAPPED(NAME) \ +static int decode_mapped_reg_##NAME(DisasContext *ctx, int x) \ +{ \ + return DECODE_REGISTER_##NAME[x]; \ +} +DECODE_MAPPED(R_16) +DECODE_MAPPED(R_8) +DECODE_MAPPED(R__8) -static void decode_ext_init(void) +/* Helper function for decodetree_trans_funcs_generated.c.inc */ +static int shift_left(DisasContext *ctx, int x, int n, int immno) { - int i; - for (i = EXT_IDX_noext; i < EXT_IDX_noext_AFTER; i++) { - ext_trees[i] = &dectree_table_DECODE_EXT_EXT_noext; - } - for (i = EXT_IDX_mmvec; i < EXT_IDX_mmvec_AFTER; i++) { - ext_trees[i] = &dectree_table_DECODE_EXT_EXT_mmvec; + int ret = x; + Insn *insn = ctx->insn; + if (!insn->extension_valid || + insn->which_extended != immno) { + ret <<= n; } + return ret; } -typedef struct { - uint32_t mask; - uint32_t match; -} DecodeITableEntry; - -#define DECODE_NEW_TABLE(TAG, SIZE, WHATNOT) /* NOTHING */ -#define TABLE_LINK(TABLE) /* NOTHING */ -#define TERMINAL(TAG, ENC) /* NOTHING */ -#define SUBINSNS(TAG, CLASSA, CLASSB, ENC) /* NOTHING */ -#define EXTSPACE(TAG, ENC) /* NOTHING */ -#define INVALID() /* NOTHING */ -#define DECODE_END_TABLE(...) /* NOTHING */ -#define DECODE_OPINFO(...) 
/* NOTHING */ - -#define DECODE_MATCH_INFO_NORMAL(TAG, MASK, MATCH) \ - [TAG] = { \ - .mask = MASK, \ - .match = MATCH, \ - }, - -#define DECODE_MATCH_INFO_NULL(TAG, MASK, MATCH) \ - [TAG] = { .match = ~0 }, - -#define DECODE_MATCH_INFO(...) DECODE_MATCH_INFO_NORMAL(__VA_ARGS__) -#define DECODE_LEGACY_MATCH_INFO(...) /* NOTHING */ - -static const DecodeITableEntry decode_itable[XX_LAST_OPCODE] = { -#include "dectree_generated.h.inc" -}; - -#undef DECODE_MATCH_INFO -#define DECODE_MATCH_INFO(...) DECODE_MATCH_INFO_NULL(__VA_ARGS__) +/* Include the generated decoder for 32 bit insn */ +#include "decode_normal_generated.c.inc" +#include "decode_hvx_generated.c.inc" -#undef DECODE_LEGACY_MATCH_INFO -#define DECODE_LEGACY_MATCH_INFO(...) DECODE_MATCH_INFO_NORMAL(__VA_ARGS__) +/* Include the generated decoder for 16 bit insn */ +#include "decode_subinsn_a_generated.c.inc" +#include "decode_subinsn_l1_generated.c.inc" +#include "decode_subinsn_l2_generated.c.inc" +#include "decode_subinsn_s1_generated.c.inc" +#include "decode_subinsn_s2_generated.c.inc" -static const DecodeITableEntry decode_legacy_itable[XX_LAST_OPCODE] = { -#include "dectree_generated.h.inc" -}; - -#undef DECODE_OPINFO -#undef DECODE_MATCH_INFO -#undef DECODE_LEGACY_MATCH_INFO -#undef DECODE_END_TABLE -#undef INVALID -#undef TERMINAL -#undef SUBINSNS -#undef EXTSPACE -#undef TABLE_LINK -#undef DECODE_NEW_TABLE -#undef DECODE_SEPARATOR_BITS - -void decode_init(void) -{ - decode_ext_init(); -} +/* Include the generated helpers for the decoder */ +#include "decodetree_trans_funcs_generated.c.inc" void decode_send_insn_to(Packet *packet, int start, int newloc) { @@ -550,7 +417,7 @@ apply_extender(Packet *pkt, int i, uint32_t extender) int immed_num; uint32_t base_immed; - immed_num = opcode_which_immediate_is_extended(pkt->insn[i].opcode); + immed_num = pkt->insn[i].which_extended; base_immed = pkt->insn[i].immed[immed_num]; pkt->insn[i].immed[immed_num] = extender | fZXTN(6, 32, base_immed); @@ -593,186 +460,96 @@ static SlotMask get_valid_slots(const Packet *pkt, unsigned int slot) } } -#define DECODE_NEW_TABLE(TAG, SIZE, WHATNOT) /* NOTHING */ -#define TABLE_LINK(TABLE) /* NOTHING */ -#define TERMINAL(TAG, ENC) /* NOTHING */ -#define SUBINSNS(TAG, CLASSA, CLASSB, ENC) /* NOTHING */ -#define EXTSPACE(TAG, ENC) /* NOTHING */ -#define INVALID() /* NOTHING */ -#define DECODE_END_TABLE(...) /* NOTHING */ -#define DECODE_MATCH_INFO(...) /* NOTHING */ -#define DECODE_LEGACY_MATCH_INFO(...) 
/* NOTHING */ - -#define DECODE_REG(REGNO, WIDTH, STARTBIT) \ - insn->regno[REGNO] = ((encoding >> STARTBIT) & ((1 << WIDTH) - 1)); - -#define DECODE_IMPL_REG(REGNO, VAL) \ - insn->regno[REGNO] = VAL; - -#define DECODE_IMM(IMMNO, WIDTH, STARTBIT, VALSTART) \ - insn->immed[IMMNO] |= (((encoding >> STARTBIT) & ((1 << WIDTH) - 1))) << \ - (VALSTART); - -#define DECODE_IMM_SXT(IMMNO, WIDTH) \ - insn->immed[IMMNO] = ((((int32_t)insn->immed[IMMNO]) << (32 - WIDTH)) >> \ - (32 - WIDTH)); - -#define DECODE_IMM_NEG(IMMNO, WIDTH) \ - insn->immed[IMMNO] = -insn->immed[IMMNO]; - -#define DECODE_IMM_SHIFT(IMMNO, SHAMT) \ - if ((!insn->extension_valid) || \ - (insn->which_extended != IMMNO)) { \ - insn->immed[IMMNO] <<= SHAMT; \ - } - -#define DECODE_OPINFO(TAG, BEH) \ - case TAG: \ - { BEH } \ - break; \ - /* - * Fill in the operands of the instruction - * dectree_generated.h.inc has a DECODE_OPINFO entry for each opcode - * For example, - * DECODE_OPINFO(A2_addi, - * DECODE_REG(0,5,0) - * DECODE_REG(1,5,16) - * DECODE_IMM(0,7,21,9) - * DECODE_IMM(0,9,5,0) - * DECODE_IMM_SXT(0,16) - * with the macros defined above, we'll fill in a switch statement - * where each case is an opcode tag. + * Section 10.3 of the Hexagon V73 Programmer's Reference Manual + * + * A duplex is encoded as a 32-bit instruction with bits [15:14] set to 00. + * The sub-instructions that comprise a duplex are encoded as 13-bit fields + * in the duplex. + * + * Per table 10-4, the 4-bit duplex iclass is encoded in bits 31:29, 13 */ -static void -decode_op(Insn *insn, Opcode tag, uint32_t encoding) +static uint32_t get_duplex_iclass(uint32_t encoding) { - insn->immed[0] = 0; - insn->immed[1] = 0; - insn->opcode = tag; - if (insn->extension_valid) { - insn->which_extended = opcode_which_immediate_is_extended(tag); - } - - switch (tag) { -#include "dectree_generated.h.inc" - default: - break; - } - - insn->generate = opcode_genptr[tag]; - - insn->iclass = iclass_bits(encoding); + uint32_t iclass = extract32(encoding, 13, 1); + iclass = deposit32(iclass, 1, 3, extract32(encoding, 29, 3)); + return iclass; } -#undef DECODE_REG -#undef DECODE_IMPL_REG -#undef DECODE_IMM -#undef DECODE_IMM_SHIFT -#undef DECODE_OPINFO -#undef DECODE_MATCH_INFO -#undef DECODE_LEGACY_MATCH_INFO -#undef DECODE_END_TABLE -#undef INVALID -#undef TERMINAL -#undef SUBINSNS -#undef EXTSPACE -#undef TABLE_LINK -#undef DECODE_NEW_TABLE -#undef DECODE_SEPARATOR_BITS - -static unsigned int -decode_subinsn_tablewalk(Insn *insn, const DectreeTable *table, - uint32_t encoding) -{ - unsigned int i; - Opcode opc; - if (table->lookup_function) { - i = table->lookup_function(table->startbit, table->width, encoding); - } else { - i = extract32(encoding, table->startbit, table->width); - } - if (table->table[i].type == DECTREE_TABLE_LINK) { - return decode_subinsn_tablewalk(insn, table->table[i].table_link, - encoding); - } else if (table->table[i].type == DECTREE_TERMINAL) { - opc = table->table[i].opcode; - if ((encoding & decode_itable[opc].mask) != decode_itable[opc].match) { - return 0; - } - decode_op(insn, opc, encoding); - return 1; - } else { - return 0; - } -} +/* + * Per table 10-5, the duplex ICLASS field values that specify the group of + * each sub-instruction in a duplex + * + * This table points to the decode instruction for each entry in the table + */ +typedef bool (*subinsn_decode_func)(DisasContext *ctx, uint16_t insn); +typedef struct { + subinsn_decode_func decode_slot0_subinsn; + subinsn_decode_func decode_slot1_subinsn; +} subinsn_decode_groups; + 
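Before the dispatch table that follows, it may help to see the field extraction spelled out: bits [15:14] == 00 marks a duplex, the 4-bit iclass is built from bits 31:29 plus bit 13 (as get_duplex_iclass() above does), and the two 13-bit sub-instructions sit at bits 28:16 (slot 1) and 12:0 (slot 0). A standalone sketch; the example word is arbitrary, not a real Hexagon packet.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bits(uint32_t x, int lsb, int len)
    {
        return (x >> lsb) & ((1u << len) - 1);
    }

    int main(void)
    {
        uint32_t word = 0x28003100;              /* arbitrary encoding with bits 15:14 == 00 */

        if (bits(word, 14, 2) != 0) {
            printf("not a duplex\n");
            return 0;
        }
        /* bit 13 is iclass bit 0, bits 31:29 are iclass bits 3:1 */
        uint32_t iclass = (bits(word, 29, 3) << 1) | bits(word, 13, 1);
        printf("iclass %u, slot1 0x%04x, slot0 0x%04x\n",
               iclass, bits(word, 16, 13), bits(word, 0, 13));
        return 0;
    }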
+static const subinsn_decode_groups decode_groups[16] = { + [0x0] = { decode_subinsn_l1, decode_subinsn_l1 }, + [0x1] = { decode_subinsn_l2, decode_subinsn_l1 }, + [0x2] = { decode_subinsn_l2, decode_subinsn_l2 }, + [0x3] = { decode_subinsn_a, decode_subinsn_a }, + [0x4] = { decode_subinsn_l1, decode_subinsn_a }, + [0x5] = { decode_subinsn_l2, decode_subinsn_a }, + [0x6] = { decode_subinsn_s1, decode_subinsn_a }, + [0x7] = { decode_subinsn_s2, decode_subinsn_a }, + [0x8] = { decode_subinsn_s1, decode_subinsn_l1 }, + [0x9] = { decode_subinsn_s1, decode_subinsn_l2 }, + [0xa] = { decode_subinsn_s1, decode_subinsn_s1 }, + [0xb] = { decode_subinsn_s2, decode_subinsn_s1 }, + [0xc] = { decode_subinsn_s2, decode_subinsn_l1 }, + [0xd] = { decode_subinsn_s2, decode_subinsn_l2 }, + [0xe] = { decode_subinsn_s2, decode_subinsn_s2 }, + [0xf] = { NULL, NULL }, /* Reserved */ +}; -static unsigned int get_insn_a(uint32_t encoding) +static uint16_t get_slot0_subinsn(uint32_t encoding) { return extract32(encoding, 0, 13); } -static unsigned int get_insn_b(uint32_t encoding) +static uint16_t get_slot1_subinsn(uint32_t encoding) { return extract32(encoding, 16, 13); } static unsigned int -decode_insns_tablewalk(Insn *insn, const DectreeTable *table, - uint32_t encoding) +decode_insns(DisasContext *ctx, Insn *insn, uint32_t encoding) { - unsigned int i; - unsigned int a, b; - Opcode opc; - if (table->lookup_function) { - i = table->lookup_function(table->startbit, table->width, encoding); - } else { - i = extract32(encoding, table->startbit, table->width); - } - if (table->table[i].type == DECTREE_TABLE_LINK) { - return decode_insns_tablewalk(insn, table->table[i].table_link, - encoding); - } else if (table->table[i].type == DECTREE_SUBINSNS) { - a = get_insn_a(encoding); - b = get_insn_b(encoding); - b = decode_subinsn_tablewalk(insn, table->table[i].table_link_b, b); - a = decode_subinsn_tablewalk(insn + 1, table->table[i].table_link, a); - if ((a == 0) || (b == 0)) { - return 0; + if (parse_bits(encoding) != 0) { + if (decode_normal(ctx, encoding) || + decode_hvx(ctx, encoding)) { + insn->generate = opcode_genptr[insn->opcode]; + insn->iclass = iclass_bits(encoding); + return 1; } - return 2; - } else if (table->table[i].type == DECTREE_TERMINAL) { - opc = table->table[i].opcode; - if ((encoding & decode_itable[opc].mask) != decode_itable[opc].match) { - if ((encoding & decode_legacy_itable[opc].mask) != - decode_legacy_itable[opc].match) { - return 0; + g_assert_not_reached(); + } else { + uint32_t iclass = get_duplex_iclass(encoding); + unsigned int slot0_subinsn = get_slot0_subinsn(encoding); + unsigned int slot1_subinsn = get_slot1_subinsn(encoding); + subinsn_decode_func decode_slot0_subinsn = + decode_groups[iclass].decode_slot0_subinsn; + subinsn_decode_func decode_slot1_subinsn = + decode_groups[iclass].decode_slot1_subinsn; + + /* The slot1 subinsn needs to be in the packet first */ + if (decode_slot1_subinsn(ctx, slot1_subinsn)) { + insn->generate = opcode_genptr[insn->opcode]; + insn->iclass = iclass_bits(encoding); + ctx->insn = ++insn; + if (decode_slot0_subinsn(ctx, slot0_subinsn)) { + insn->generate = opcode_genptr[insn->opcode]; + insn->iclass = iclass_bits(encoding); + return 2; } } - decode_op(insn, opc, encoding); - return 1; - } else if (table->table[i].type == DECTREE_EXTSPACE) { - /* - * For now, HVX will be the only coproc - */ - return decode_insns_tablewalk(insn, ext_trees[EXT_IDX_mmvec], encoding); - } else { - return 0; - } -} - -static unsigned int -decode_insns(Insn *insn, 
uint32_t encoding) -{ - const DectreeTable *table; - if (parse_bits(encoding) != 0) { - /* Start with PP table - 32 bit instructions */ - table = &dectree_table_DECODE_ROOT_32; - } else { - /* start with EE table - duplex instructions */ - table = &dectree_table_DECODE_ROOT_EE; + g_assert_not_reached(); } - return decode_insns_tablewalk(insn, table, encoding); } static void decode_add_endloop_insn(Insn *insn, int loopnum) @@ -916,8 +693,8 @@ decode_set_slot_number(Packet *pkt) * or number of words used on success */ -int decode_packet(int max_words, const uint32_t *words, Packet *pkt, - bool disas_only) +int decode_packet(DisasContext *ctx, int max_words, const uint32_t *words, + Packet *pkt, bool disas_only) { int num_insns = 0; int words_read = 0; @@ -930,9 +707,11 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, memset(pkt, 0, sizeof(*pkt)); /* Try to build packet */ while (!end_of_packet && (words_read < max_words)) { + Insn *insn = &pkt->insn[num_insns]; + ctx->insn = insn; encoding32 = words[words_read]; end_of_packet = is_packet_end(encoding32); - new_insns = decode_insns(&pkt->insn[num_insns], encoding32); + new_insns = decode_insns(ctx, insn, encoding32); g_assert(new_insns > 0); /* * If we saw an extender, mark next word extended so immediate @@ -1006,9 +785,13 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, int disassemble_hexagon(uint32_t *words, int nwords, bfd_vma pc, GString *buf) { + DisasContext ctx; Packet pkt; - if (decode_packet(nwords, words, &pkt, true) > 0) { + memset(&ctx, 0, sizeof(DisasContext)); + ctx.pkt = &pkt; + + if (decode_packet(&ctx, nwords, words, &pkt, true) > 0) { snprint_a_pkt_disas(buf, &pkt, words, pc); return pkt.encod_pkt_size_in_bytes; } else { diff --git a/target/hexagon/decode.h b/target/hexagon/decode.h index c66f5ea64d7..3f3012b978d 100644 --- a/target/hexagon/decode.h +++ b/target/hexagon/decode.h @@ -21,12 +21,13 @@ #include "cpu.h" #include "opcodes.h" #include "insn.h" +#include "translate.h" void decode_init(void); void decode_send_insn_to(Packet *packet, int start, int newloc); -int decode_packet(int max_words, const uint32_t *words, Packet *pkt, - bool disas_only); +int decode_packet(DisasContext *ctx, int max_words, const uint32_t *words, + Packet *pkt, bool disas_only); #endif diff --git a/target/hexagon/dectree.py b/target/hexagon/dectree.py deleted file mode 100755 index 3b32948a04a..00000000000 --- a/target/hexagon/dectree.py +++ /dev/null @@ -1,403 +0,0 @@ -#!/usr/bin/env python3 - -## -## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. -## -## This program is free software; you can redistribute it and/or modify -## it under the terms of the GNU General Public License as published by -## the Free Software Foundation; either version 2 of the License, or -## (at your option) any later version. -## -## This program is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -## GNU General Public License for more details. -## -## You should have received a copy of the GNU General Public License -## along with this program; if not, see . 
-## - -import io -import re - -import sys -import iset - -encs = { - tag: "".join(reversed(iset.iset[tag]["enc"].replace(" ", ""))) - for tag in iset.tags - if iset.iset[tag]["enc"] != "MISSING ENCODING" -} - -enc_classes = set([iset.iset[tag]["enc_class"] for tag in encs.keys()]) -subinsn_enc_classes = set( - [enc_class for enc_class in enc_classes if enc_class.startswith("SUBINSN_")] -) -ext_enc_classes = set( - [ - enc_class - for enc_class in enc_classes - if enc_class not in ("NORMAL", "16BIT") and not enc_class.startswith("SUBINSN_") - ] -) - -try: - subinsn_groupings = iset.subinsn_groupings -except AttributeError: - subinsn_groupings = {} - -for tag, subinsn_grouping in subinsn_groupings.items(): - encs[tag] = "".join(reversed(subinsn_grouping["enc"].replace(" ", ""))) - -dectree_normal = {"leaves": set()} -dectree_16bit = {"leaves": set()} -dectree_subinsn_groupings = {"leaves": set()} -dectree_subinsns = {name: {"leaves": set()} for name in subinsn_enc_classes} -dectree_extensions = {name: {"leaves": set()} for name in ext_enc_classes} - -for tag in encs.keys(): - if tag in subinsn_groupings: - dectree_subinsn_groupings["leaves"].add(tag) - continue - enc_class = iset.iset[tag]["enc_class"] - if enc_class.startswith("SUBINSN_"): - if len(encs[tag]) != 32: - encs[tag] = encs[tag] + "0" * (32 - len(encs[tag])) - dectree_subinsns[enc_class]["leaves"].add(tag) - elif enc_class == "16BIT": - if len(encs[tag]) != 16: - raise Exception( - 'Tag "{}" has enc_class "{}" and not an encoding ' - + "width of 16 bits!".format(tag, enc_class) - ) - dectree_16bit["leaves"].add(tag) - else: - if len(encs[tag]) != 32: - raise Exception( - 'Tag "{}" has enc_class "{}" and not an encoding ' - + "width of 32 bits!".format(tag, enc_class) - ) - if enc_class == "NORMAL": - dectree_normal["leaves"].add(tag) - else: - dectree_extensions[enc_class]["leaves"].add(tag) - -faketags = set() -for tag, enc in iset.enc_ext_spaces.items(): - faketags.add(tag) - encs[tag] = "".join(reversed(enc.replace(" ", ""))) - dectree_normal["leaves"].add(tag) - -faketags |= set(subinsn_groupings.keys()) - - -def every_bit_counts(bitset): - for i in range(1, len(next(iter(bitset)))): - if len(set([bits[:i] + bits[i + 1 :] for bits in bitset])) == len(bitset): - return False - return True - - -def auto_separate(node): - tags = node["leaves"] - if len(tags) <= 1: - return - enc_width = len(encs[next(iter(tags))]) - opcode_bit_for_all = [ - all([encs[tag][i] in "01" for tag in tags]) for i in range(enc_width) - ] - opcode_bit_is_0_for_all = [ - opcode_bit_for_all[i] and all([encs[tag][i] == "0" for tag in tags]) - for i in range(enc_width) - ] - opcode_bit_is_1_for_all = [ - opcode_bit_for_all[i] and all([encs[tag][i] == "1" for tag in tags]) - for i in range(enc_width) - ] - differentiator_opcode_bit = [ - opcode_bit_for_all[i] - and not (opcode_bit_is_0_for_all[i] or opcode_bit_is_1_for_all[i]) - for i in range(enc_width) - ] - best_width = 0 - for width in range(4, 0, -1): - for lsb in range(enc_width - width, -1, -1): - bitset = set([encs[tag][lsb : lsb + width] for tag in tags]) - if all(differentiator_opcode_bit[lsb : lsb + width]) and ( - len(bitset) == len(tags) or every_bit_counts(bitset) - ): - best_width = width - best_lsb = lsb - caught_all_tags = len(bitset) == len(tags) - break - if best_width != 0: - break - if best_width == 0: - raise Exception( - "Could not find a way to differentiate the encodings " - + "of the following tags:\n{}".format("\n".join(tags)) - ) - if caught_all_tags: - for width in range(1, 
best_width): - for lsb in range(enc_width - width, -1, -1): - bitset = set([encs[tag][lsb : lsb + width] for tag in tags]) - if all(differentiator_opcode_bit[lsb : lsb + width]) and len( - bitset - ) == len(tags): - best_width = width - best_lsb = lsb - break - else: - continue - break - node["separator_lsb"] = best_lsb - node["separator_width"] = best_width - node["children"] = [] - for value in range(2**best_width): - child = {} - bits = "".join(reversed("{:0{}b}".format(value, best_width))) - child["leaves"] = set( - [tag for tag in tags if encs[tag][best_lsb : best_lsb + best_width] == bits] - ) - node["children"].append(child) - for child in node["children"]: - auto_separate(child) - - -auto_separate(dectree_normal) -auto_separate(dectree_16bit) -if subinsn_groupings: - auto_separate(dectree_subinsn_groupings) -for dectree_subinsn in dectree_subinsns.values(): - auto_separate(dectree_subinsn) -for dectree_ext in dectree_extensions.values(): - auto_separate(dectree_ext) - -for tag in faketags: - del encs[tag] - - -def table_name(parents, node): - path = parents + [node] - root = path[0] - tag = next(iter(node["leaves"])) - if tag in subinsn_groupings: - enc_width = len(subinsn_groupings[tag]["enc"].replace(" ", "")) - else: - tag = next(iter(node["leaves"] - faketags)) - enc_width = len(encs[tag]) - determining_bits = ["_"] * enc_width - for parent, child in zip(path[:-1], path[1:]): - lsb = parent["separator_lsb"] - width = parent["separator_width"] - value = parent["children"].index(child) - determining_bits[lsb : lsb + width] = list( - reversed("{:0{}b}".format(value, width)) - ) - if tag in subinsn_groupings: - name = "DECODE_ROOT_EE" - else: - enc_class = iset.iset[tag]["enc_class"] - if enc_class in ext_enc_classes: - name = "DECODE_EXT_{}".format(enc_class) - elif enc_class in subinsn_enc_classes: - name = "DECODE_SUBINSN_{}".format(enc_class) - else: - name = "DECODE_ROOT_{}".format(enc_width) - if node != root: - name += "_" + "".join(reversed(determining_bits)) - return name - - -def print_node(f, node, parents): - if len(node["leaves"]) <= 1: - return - name = table_name(parents, node) - lsb = node["separator_lsb"] - width = node["separator_width"] - print( - "DECODE_NEW_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))".format( - name, 2**width, lsb, width - ), - file=f, - ) - for child in node["children"]: - if len(child["leaves"]) == 0: - print("INVALID()", file=f) - elif len(child["leaves"]) == 1: - (tag,) = child["leaves"] - if tag in subinsn_groupings: - class_a = subinsn_groupings[tag]["class_a"] - class_b = subinsn_groupings[tag]["class_b"] - enc = subinsn_groupings[tag]["enc"].replace(" ", "") - if "RESERVED" in tag: - print("INVALID()", file=f) - else: - print( - 'SUBINSNS({},{},{},"{}")'.format(tag, class_a, class_b, enc), - file=f, - ) - elif tag in iset.enc_ext_spaces: - enc = iset.enc_ext_spaces[tag].replace(" ", "") - print('EXTSPACE({},"{}")'.format(tag, enc), file=f) - else: - enc = "".join(reversed(encs[tag])) - print('TERMINAL({},"{}")'.format(tag, enc), file=f) - else: - print("TABLE_LINK({})".format(table_name(parents + [node], child)), file=f) - print( - "DECODE_END_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))".format( - name, 2**width, lsb, width - ), - file=f, - ) - print(file=f) - parents.append(node) - for child in node["children"]: - print_node(f, child, parents) - parents.pop() - - -def print_tree(f, tree): - print_node(f, tree, []) - - -def print_match_info(f): - for tag in sorted(encs.keys(), key=iset.tags.index): - enc = "".join(reversed(encs[tag])) - 
mask = int(re.sub(r"[^1]", r"0", enc.replace("0", "1")), 2) - match = int(re.sub(r"[^01]", r"0", enc), 2) - suffix = "" - print( - "DECODE{}_MATCH_INFO({},0x{:x}U,0x{:x}U)".format(suffix, tag, mask, match), - file=f, - ) - - -regre = re.compile(r"((? 1: - raise Exception('Tag "{}" has split register field!'.format(tag)) - reg_enc_field = reg_enc_fields[0] - if 2 ** len(reg_enc_field) != reg_num_choices: - raise Exception( - 'Tag "{}" has incorrect register field width!'.format(tag) - ) - print( - " DECODE_REG({},{},{})".format( - regno, len(reg_enc_field), enc.index(reg_enc_field) - ), - file=f, - ) - if reg_type in num_registers and reg_num_choices != num_registers[reg_type]: - print( - " DECODE_MAPPED_REG({},{})".format(regno, reg_mapping), - file=f, - ) - regno += 1 - - def implicit_register_key(reg): - return implicit_registers[reg] - - for reg in sorted( - set( - [ - r - for r in ( - iset.iset[tag]["rregs"].split(",") - + iset.iset[tag]["wregs"].split(",") - ) - if r in implicit_registers - ] - ), - key=implicit_register_key, - ): - print( - " DECODE_IMPL_REG({},{})".format(regno, implicit_registers[reg]), - file=f, - ) - regno += 1 - if imms and imms[0][0].isupper(): - imms = reversed(imms) - for imm in imms: - if imm[0].isupper(): - immno = 1 - else: - immno = 0 - imm_type = imm[0] - imm_width = int(imm[1]) - imm_shift = imm[2] - if imm_shift: - imm_shift = int(imm_shift) - else: - imm_shift = 0 - if imm_type.islower(): - imm_letter = "i" - else: - imm_letter = "I" - remainder = imm_width - for m in reversed(list(re.finditer(imm_letter + "+", enc))): - remainder -= m.end() - m.start() - print( - " DECODE_IMM({},{},{},{})".format( - immno, m.end() - m.start(), m.start(), remainder - ), - file=f, - ) - if remainder != 0: - if imm[2]: - imm[2] = ":" + imm[2] - raise Exception( - 'Tag "{}" has an incorrect number of ' - + 'encoding bits for immediate "{}"'.format(tag, "".join(imm)) - ) - if imm_type.lower() in "sr": - print(" DECODE_IMM_SXT({},{})".format(immno, imm_width), file=f) - if imm_type.lower() == "n": - print(" DECODE_IMM_NEG({},{})".format(immno, imm_width), file=f) - if imm_shift: - print( - " DECODE_IMM_SHIFT({},{})".format(immno, imm_shift), file=f - ) - print(")", file=f) - - -if __name__ == "__main__": - with open(sys.argv[1], "w") as f: - print_tree(f, dectree_normal) - print_tree(f, dectree_16bit) - if subinsn_groupings: - print_tree(f, dectree_subinsn_groupings) - for name, dectree_subinsn in sorted(dectree_subinsns.items()): - print_tree(f, dectree_subinsn) - for name, dectree_ext in sorted(dectree_extensions.items()): - print_tree(f, dectree_ext) - print_match_info(f) - print_op_info(f) diff --git a/target/hexagon/gdbstub.c b/target/hexagon/gdbstub.c index 54d37e006e0..502c6987f09 100644 --- a/target/hexagon/gdbstub.c +++ b/target/hexagon/gdbstub.c @@ -22,8 +22,7 @@ int hexagon_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - HexagonCPU *cpu = HEXAGON_CPU(cs); - CPUHexagonState *env = &cpu->env; + CPUHexagonState *env = cpu_env(cs); if (n == HEX_REG_P3_0_ALIASED) { uint32_t p3_0 = 0; @@ -42,8 +41,7 @@ int hexagon_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - HexagonCPU *cpu = HEXAGON_CPU(cs); - CPUHexagonState *env = &cpu->env; + CPUHexagonState *env = cpu_env(cs); if (n == HEX_REG_P3_0_ALIASED) { uint32_t p3_0 = ldtul_p(mem_buf); @@ -81,8 +79,11 @@ static int gdb_get_qreg(CPUHexagonState *env, GByteArray *mem_buf, int n) return total; } -int 
hexagon_hvx_gdb_read_register(CPUHexagonState *env, GByteArray *mem_buf, int n) +int hexagon_hvx_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { + HexagonCPU *cpu = HEXAGON_CPU(cs); + CPUHexagonState *env = &cpu->env; + if (n < NUM_VREGS) { return gdb_get_vreg(env, mem_buf, n); } @@ -115,8 +116,11 @@ static int gdb_put_qreg(CPUHexagonState *env, uint8_t *mem_buf, int n) return MAX_VEC_SIZE_BYTES / 8; } -int hexagon_hvx_gdb_write_register(CPUHexagonState *env, uint8_t *mem_buf, int n) +int hexagon_hvx_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { + HexagonCPU *cpu = HEXAGON_CPU(cs); + CPUHexagonState *env = &cpu->env; + if (n < NUM_VREGS) { return gdb_put_vreg(env, mem_buf, n); } diff --git a/target/hexagon/gen_analyze_funcs.py b/target/hexagon/gen_analyze_funcs.py index c3b521abef7..a9af666cefa 100755 --- a/target/hexagon/gen_analyze_funcs.py +++ b/target/hexagon/gen_analyze_funcs.py @@ -23,162 +23,6 @@ import hex_common -## -## Helpers for gen_analyze_func -## -def is_predicated(tag): - return "A_CONDEXEC" in hex_common.attribdict[tag] - - -def analyze_opn_old(f, tag, regtype, regid, regno): - regN = f"{regtype}{regid}N" - predicated = "true" if is_predicated(tag) else "false" - if regtype == "R": - if regid in {"ss", "tt"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_reg_read_pair(ctx, {regN});\n") - elif regid in {"dd", "ee", "xx", "yy"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_reg_write_pair(ctx, {regN}, {predicated});\n") - elif regid in {"s", "t", "u", "v"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_reg_read(ctx, {regN});\n") - elif regid in {"d", "e", "x", "y"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_reg_write(ctx, {regN}, {predicated});\n") - else: - hex_common.bad_register(regtype, regid) - elif regtype == "P": - if regid in {"s", "t", "u", "v"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_pred_read(ctx, {regN});\n") - elif regid in {"d", "e", "x"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_pred_write(ctx, {regN});\n") - else: - hex_common.bad_register(regtype, regid) - elif regtype == "C": - if regid == "ss": - f.write( - f" const int {regN} = insn->regno[{regno}] " - "+ HEX_REG_SA0;\n" - ) - f.write(f" ctx_log_reg_read_pair(ctx, {regN});\n") - elif regid == "dd": - f.write(f" const int {regN} = insn->regno[{regno}] " "+ HEX_REG_SA0;\n") - f.write(f" ctx_log_reg_write_pair(ctx, {regN}, {predicated});\n") - elif regid == "s": - f.write( - f" const int {regN} = insn->regno[{regno}] " - "+ HEX_REG_SA0;\n" - ) - f.write(f" ctx_log_reg_read(ctx, {regN});\n") - elif regid == "d": - f.write(f" const int {regN} = insn->regno[{regno}] " "+ HEX_REG_SA0;\n") - f.write(f" ctx_log_reg_write(ctx, {regN}, {predicated});\n") - else: - hex_common.bad_register(regtype, regid) - elif regtype == "M": - if regid == "u": - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_reg_read(ctx, {regN});\n") - else: - hex_common.bad_register(regtype, regid) - elif regtype == "V": - newv = "EXT_DFL" - if hex_common.is_new_result(tag): - newv = "EXT_NEW" - elif hex_common.is_tmp_result(tag): - newv = "EXT_TMP" - if regid in {"dd", "xx"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write( - f" ctx_log_vreg_write_pair(ctx, {regN}, {newv}, " f"{predicated});\n" - ) - elif regid in {"uu", "vv"}: - f.write(f" const int {regN} = 
insn->regno[{regno}];\n") - f.write(f" ctx_log_vreg_read_pair(ctx, {regN});\n") - elif regid in {"s", "u", "v", "w"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_vreg_read(ctx, {regN});\n") - elif regid in {"d", "x", "y"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_vreg_write(ctx, {regN}, {newv}, " f"{predicated});\n") - else: - hex_common.bad_register(regtype, regid) - elif regtype == "Q": - if regid in {"d", "e", "x"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_qreg_write(ctx, {regN});\n") - elif regid in {"s", "t", "u", "v"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_qreg_read(ctx, {regN});\n") - else: - hex_common.bad_register(regtype, regid) - elif regtype == "G": - if regid in {"dd"}: - f.write(f"// const int {regN} = insn->regno[{regno}];\n") - elif regid in {"d"}: - f.write(f"// const int {regN} = insn->regno[{regno}];\n") - elif regid in {"ss"}: - f.write(f"// const int {regN} = insn->regno[{regno}];\n") - elif regid in {"s"}: - f.write(f"// const int {regN} = insn->regno[{regno}];\n") - else: - hex_common.bad_register(regtype, regid) - elif regtype == "S": - if regid in {"dd"}: - f.write(f"// const int {regN} = insn->regno[{regno}];\n") - elif regid in {"d"}: - f.write(f"// const int {regN} = insn->regno[{regno}];\n") - elif regid in {"ss"}: - f.write(f"// const int {regN} = insn->regno[{regno}];\n") - elif regid in {"s"}: - f.write(f"// const int {regN} = insn->regno[{regno}];\n") - else: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def analyze_opn_new(f, tag, regtype, regid, regno): - regN = f"{regtype}{regid}N" - if regtype == "N": - if regid in {"s", "t"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_reg_read(ctx, {regN});\n") - else: - hex_common.bad_register(regtype, regid) - elif regtype == "P": - if regid in {"t", "u", "v"}: - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_pred_read(ctx, {regN});\n") - else: - hex_common.bad_register(regtype, regid) - elif regtype == "O": - if regid == "s": - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" ctx_log_vreg_read(ctx, {regN});\n") - else: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def analyze_opn(f, tag, regtype, regid, i): - if hex_common.is_pair(regid): - analyze_opn_old(f, tag, regtype, regid, i) - elif hex_common.is_single(regid): - if hex_common.is_old_val(regtype, regid, tag): - analyze_opn_old(f, tag, regtype, regid, i) - elif hex_common.is_new_val(regtype, regid, tag): - analyze_opn_new(f, tag, regtype, regid, i) - else: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - ## ## Generate the code to analyze the instruction ## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;} @@ -203,7 +47,11 @@ def gen_analyze_func(f, tag, regs, imms): i = 0 ## Analyze all the registers for regtype, regid in regs: - analyze_opn(f, tag, regtype, regid, i) + reg = hex_common.get_register(tag, regtype, regid) + if reg.is_written(): + reg.analyze_write(f, tag, i) + else: + reg.analyze_read(f, i) i += 1 has_generated_helper = not hex_common.skip_qemu_helper( @@ -236,6 +84,7 @@ def main(): if is_idef_parser_enabled: hex_common.read_idef_parser_enabled_file(sys.argv[5]) hex_common.calculate_attribs() + hex_common.init_registers() tagregs = hex_common.get_tagregs() tagimms = 
hex_common.get_tagimms() diff --git a/target/hexagon/gen_decodetree.py b/target/hexagon/gen_decodetree.py new file mode 100755 index 00000000000..a4fcd622c54 --- /dev/null +++ b/target/hexagon/gen_decodetree.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python3 + +## +## Copyright (c) 2024 Taylor Simpson +## +## This program is free software; you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published by +## the Free Software Foundation; either version 2 of the License, or +## (at your option) any later version. +## +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## +## You should have received a copy of the GNU General Public License +## along with this program; if not, see . +## + +import io +import re + +import sys +import textwrap +import iset +import hex_common + +encs = { + tag: "".join(reversed(iset.iset[tag]["enc"].replace(" ", ""))) + for tag in iset.tags + if iset.iset[tag]["enc"] != "MISSING ENCODING" +} + + +regre = re.compile(r"((? 1: + raise Exception(f"{tag} has split register field!") + reg_enc_field = reg_enc_fields[0] + if 2 ** len(reg_enc_field) != reg_num_choices: + raise Exception(f"{tag} has incorrect register field width!") + + f.write(f"%{tag}_{reg_type}{reg_id}\t" + f"{enc.index(reg_enc_field)}:{len(reg_enc_field)}") + + if (reg_type in num_registers and + reg_num_choices != num_registers[reg_type]): + f.write(f"\t!function=decode_mapped_reg_{reg_mapping}") + f.write("\n") + + # Write the field definitions for the immediates + for imm in imms: + immno = 1 if imm[0].isupper() else 0 + imm_type = imm[0] + imm_width = int(imm[1]) + imm_letter = "i" if imm_type.islower() else "I" + fields = [] + sign_mark = "s" if imm_type.lower() in "sr" else "" + for m in reversed(list(re.finditer(imm_letter + "+", enc))): + fields.append(f"{m.start()}:{sign_mark}{m.end() - m.start()}") + sign_mark = "" + field_str = " ".join(fields) + f.write(f"%{tag}_{imm_type}{imm_letter}\t{field_str}\n") + + ## Handle instructions with unused encoding letters + ## Change the unused letters to ignored + if tag in tags_with_unused_d_encoding: + enc_str = enc_str.replace("d", "-") + if tag in tags_with_unused_t_encoding: + enc_str = enc_str.replace("t", "-") + + # Replace the operand letters with . + for x in operand_letters: + enc_str = enc_str.replace(x, ".") + + # Write the instruction format + f.write(f"@{tag}\t{enc_str}") + for reg in regs: + reg_type = reg[0] + reg_id = reg[1] + f.write(f" {reg_type}{reg_id}=%{tag}_{reg_type}{reg_id}") + for imm in imms: + imm_type = imm[0] + imm_letter = "i" if imm_type.islower() else "I" + f.write(f" {imm_type}{imm_letter}=%{tag}_{imm_type}{imm_letter}") + + if not is_subinsn: + f.write(" %PP") + f.write("\n") + + # Replace the 0s and 1s with . 
+ enc_str = enc_str.replace("0", ".").replace("1", ".") + + # Write the instruction pattern + f.write(f"{tag}\t{enc_str} @{tag}\n") + + +if __name__ == "__main__": + hex_common.read_semantics_file(sys.argv[1]) + class_to_decode = sys.argv[2] + with open(sys.argv[3], "w") as f: + gen_decodetree_file(f, class_to_decode) diff --git a/target/hexagon/gen_dectree_import.c b/target/hexagon/gen_dectree_import.c index ee354677fd6..87f20c14f19 100644 --- a/target/hexagon/gen_dectree_import.c +++ b/target/hexagon/gen_dectree_import.c @@ -56,24 +56,6 @@ const char * const opcode_syntax[XX_LAST_OPCODE] = { #undef EXTINSN }; -const char * const opcode_rregs[] = { -#define REGINFO(TAG, REGINFO, RREGS, WREGS) RREGS, -#define IMMINFO(TAG, SIGN, SIZE, SHAMT, SIGN2, SIZE2, SHAMT2) /* nothing */ -#include "op_regs_generated.h.inc" - NULL -#undef REGINFO -#undef IMMINFO -}; - -const char * const opcode_wregs[] = { -#define REGINFO(TAG, REGINFO, RREGS, WREGS) WREGS, -#define IMMINFO(TAG, SIGN, SIZE, SHAMT, SIGN2, SIZE2, SHAMT2) /* nothing */ -#include "op_regs_generated.h.inc" - NULL -#undef REGINFO -#undef IMMINFO -}; - const OpcodeEncoding opcode_encodings[] = { #define DEF_ENC32(TAG, ENCSTR) \ [TAG] = { .encoding = ENCSTR }, @@ -130,8 +112,6 @@ static void gen_iset_table(FILE *out) fprintf(out, "\t\'%s\' : {\n", opcode_names[i]); fprintf(out, "\t\t\'tag\' : \'%s\',\n", opcode_names[i]); fprintf(out, "\t\t\'syntax\' : \'%s\',\n", opcode_syntax[i]); - fprintf(out, "\t\t\'rregs\' : \'%s\',\n", opcode_rregs[i]); - fprintf(out, "\t\t\'wregs\' : \'%s\',\n", opcode_wregs[i]); fprintf(out, "\t\t\'enc\' : \'%s\',\n", get_opcode_enc(i)); fprintf(out, "\t\t\'enc_class\' : \'%s\',\n", get_opcode_enc_class(i)); fprintf(out, "\t},\n"); @@ -150,33 +130,6 @@ static void gen_tags_list(FILE *out) fprintf(out, "];\n\n"); } -static void gen_enc_ext_spaces_table(FILE *out) -{ - fprintf(out, "enc_ext_spaces = {\n"); -#define DEF_EXT_SPACE(SPACEID, ENCSTR) \ - fprintf(out, "\t\'%s\' : \'%s\',\n", #SPACEID, ENCSTR); -#include "imported/encode.def" -#undef DEF_EXT_SPACE - fprintf(out, "};\n\n"); -} - -static void gen_subinsn_groupings_table(FILE *out) -{ - fprintf(out, "subinsn_groupings = {\n"); -#define DEF_PACKED32(TAG, TYPEA, TYPEB, ENCSTR) \ - do { \ - fprintf(out, "\t\'%s\' : {\n", #TAG); \ - fprintf(out, "\t\t\'name\' : \'%s\',\n", #TAG); \ - fprintf(out, "\t\t\'class_a\' : \'%s\',\n", #TYPEA); \ - fprintf(out, "\t\t\'class_b\' : \'%s\',\n", #TYPEB); \ - fprintf(out, "\t\t\'enc\' : \'%s\',\n", ENCSTR); \ - fprintf(out, "\t},\n"); \ - } while (0); -#include "imported/encode.def" -#undef DEF_PACKED32 - fprintf(out, "};\n\n"); -} - int main(int argc, char *argv[]) { FILE *outfile; @@ -193,8 +146,6 @@ int main(int argc, char *argv[]) gen_iset_table(outfile); gen_tags_list(outfile); - gen_enc_ext_spaces_table(outfile); - gen_subinsn_groupings_table(outfile); fclose(outfile); return 0; diff --git a/target/hexagon/gen_helper_funcs.py b/target/hexagon/gen_helper_funcs.py index ce21d3b688e..9cc3d69c49b 100755 --- a/target/hexagon/gen_helper_funcs.py +++ b/target/hexagon/gen_helper_funcs.py @@ -23,181 +23,14 @@ import hex_common -## -## Helpers for gen_helper_function -## -def gen_decl_ea(f): - f.write(" uint32_t EA;\n") - - -def gen_helper_return_type(f, regtype, regid, regno): - if regno > 1: - f.write(", ") - f.write("int32_t") - - -def gen_helper_return_type_pair(f, regtype, regid, regno): - if regno > 1: - f.write(", ") - f.write("int64_t") - - -def gen_helper_arg(f, regtype, regid, regno): - if regno > 0: - f.write(", ") - 
f.write(f"int32_t {regtype}{regid}V") - - -def gen_helper_arg_new(f, regtype, regid, regno): - if regno >= 0: - f.write(", ") - f.write(f"int32_t {regtype}{regid}N") - - -def gen_helper_arg_pair(f, regtype, regid, regno): - if regno >= 0: - f.write(", ") - f.write(f"int64_t {regtype}{regid}V") - - -def gen_helper_arg_ext(f, regtype, regid, regno): - if regno > 0: - f.write(", ") - f.write(f"void *{regtype}{regid}V_void") - - -def gen_helper_arg_ext_pair(f, regtype, regid, regno): - if regno > 0: - f.write(", ") - f.write(f"void *{regtype}{regid}V_void") - - -def gen_helper_arg_opn(f, regtype, regid, i, tag): - if hex_common.is_pair(regid): - if hex_common.is_hvx_reg(regtype): - gen_helper_arg_ext_pair(f, regtype, regid, i) - else: - gen_helper_arg_pair(f, regtype, regid, i) - elif hex_common.is_single(regid): - if hex_common.is_old_val(regtype, regid, tag): - if hex_common.is_hvx_reg(regtype): - gen_helper_arg_ext(f, regtype, regid, i) - else: - gen_helper_arg(f, regtype, regid, i) - elif hex_common.is_new_val(regtype, regid, tag): - gen_helper_arg_new(f, regtype, regid, i) - else: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def gen_helper_arg_imm(f, immlett): - f.write(f", int32_t {hex_common.imm_name(immlett)}") - - -def gen_helper_dest_decl(f, regtype, regid, regno, subfield=""): - f.write(f" int32_t {regtype}{regid}V{subfield} = 0;\n") - - -def gen_helper_dest_decl_pair(f, regtype, regid, regno, subfield=""): - f.write(f" int64_t {regtype}{regid}V{subfield} = 0;\n") - - -def gen_helper_dest_decl_ext(f, regtype, regid): - if regtype == "Q": - f.write( - f" /* {regtype}{regid}V is *(MMQReg *)" f"({regtype}{regid}V_void) */\n" - ) - else: - f.write( - f" /* {regtype}{regid}V is *(MMVector *)" - f"({regtype}{regid}V_void) */\n" - ) - - -def gen_helper_dest_decl_ext_pair(f, regtype, regid, regno): - f.write( - f" /* {regtype}{regid}V is *(MMVectorPair *))" - f"{regtype}{regid}V_void) */\n" - ) - - -def gen_helper_dest_decl_opn(f, regtype, regid, i): - if hex_common.is_pair(regid): - if hex_common.is_hvx_reg(regtype): - gen_helper_dest_decl_ext_pair(f, regtype, regid, i) - else: - gen_helper_dest_decl_pair(f, regtype, regid, i) - elif hex_common.is_single(regid): - if hex_common.is_hvx_reg(regtype): - gen_helper_dest_decl_ext(f, regtype, regid) - else: - gen_helper_dest_decl(f, regtype, regid, i) - else: - hex_common.bad_register(regtype, regid) - - -def gen_helper_src_var_ext(f, regtype, regid): - if regtype == "Q": - f.write( - f" /* {regtype}{regid}V is *(MMQReg *)" f"({regtype}{regid}V_void) */\n" - ) - else: - f.write( - f" /* {regtype}{regid}V is *(MMVector *)" - f"({regtype}{regid}V_void) */\n" - ) - - -def gen_helper_src_var_ext_pair(f, regtype, regid, regno): - f.write( - f" /* {regtype}{regid}V{regno} is *(MMVectorPair *)" - f"({regtype}{regid}V{regno}_void) */\n" - ) - - -def gen_helper_return(f, regtype, regid, regno): - f.write(f" return {regtype}{regid}V;\n") - - -def gen_helper_return_pair(f, regtype, regid, regno): - f.write(f" return {regtype}{regid}V;\n") - - -def gen_helper_dst_write_ext(f, regtype, regid): - return - - -def gen_helper_dst_write_ext_pair(f, regtype, regid): - return - - -def gen_helper_return_opn(f, regtype, regid, i): - if hex_common.is_pair(regid): - if hex_common.is_hvx_reg(regtype): - gen_helper_dst_write_ext_pair(f, regtype, regid) - else: - gen_helper_return_pair(f, regtype, regid, i) - elif hex_common.is_single(regid): - if hex_common.is_hvx_reg(regtype): - gen_helper_dst_write_ext(f, regtype, 
regid) - else: - gen_helper_return(f, regtype, regid, i) - else: - hex_common.bad_register(regtype, regid) - - ## ## Generate the TCG code to call the helper ## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;} ## We produce: ## int32_t HELPER(A2_add)(CPUHexagonState *env, int32_t RsV, int32_t RtV) ## { -## uint32_t slot __attribute__(unused)) = 4; ## int32_t RdV = 0; ## { RdV=RsV+RtV;} -## COUNT_HELPER(A2_add); ## return RdV; ## } ## @@ -205,151 +38,67 @@ def gen_helper_function(f, tag, tagregs, tagimms): regs = tagregs[tag] imms = tagimms[tag] - numresults = 0 - numscalarresults = 0 - numscalarreadwrite = 0 - for regtype, regid in regs: - if hex_common.is_written(regid): - numresults += 1 - if hex_common.is_scalar_reg(regtype): - numscalarresults += 1 - if hex_common.is_readwrite(regid): - if hex_common.is_scalar_reg(regtype): - numscalarreadwrite += 1 - - if numscalarresults > 1: - ## The helper is bogus when there is more than one result - f.write( - f"void HELPER({tag})(CPUHexagonState *env) " f"{{ BOGUS_HELPER({tag}); }}\n" - ) - else: - ## The return type of the function is the type of the destination - ## register (if scalar) - i = 0 + ret_type = hex_common.helper_ret_type(tag, regs).func_arg + + declared = [] + for arg in hex_common.helper_args(tag, regs, imms): + declared.append(arg.func_arg) + + arguments = ", ".join(declared) + f.write(f"{ret_type} HELPER({tag})({arguments})\n") + f.write("{\n") + if hex_common.need_ea(tag): + f.write(hex_common.code_fmt(f"""\ + uint32_t EA; + """)) + ## Declare the return variable + if not hex_common.is_predicated(tag): for regtype, regid in regs: - if hex_common.is_written(regid): - if hex_common.is_pair(regid): - if hex_common.is_hvx_reg(regtype): - continue - else: - gen_helper_return_type_pair(f, regtype, regid, i) - elif hex_common.is_single(regid): - if hex_common.is_hvx_reg(regtype): - continue - else: - gen_helper_return_type(f, regtype, regid, i) - else: - hex_common.bad_register(regtype, regid) - i += 1 + reg = hex_common.get_register(tag, regtype, regid) + if reg.is_writeonly() and not reg.is_hvx_reg(): + f.write(hex_common.code_fmt(f"""\ + {reg.helper_arg_type()} {reg.helper_arg_name()} = 0; + """)) - if numscalarresults == 0: - f.write("void") - f.write(f" HELPER({tag})(CPUHexagonState *env") - - ## Arguments include the vector destination operands - i = 1 - for regtype, regid in regs: - if hex_common.is_written(regid): - if hex_common.is_pair(regid): - if hex_common.is_hvx_reg(regtype): - gen_helper_arg_ext_pair(f, regtype, regid, i) - else: - continue - elif hex_common.is_single(regid): - if hex_common.is_hvx_reg(regtype): - gen_helper_arg_ext(f, regtype, regid, i) - else: - # This is the return value of the function - continue - else: - hex_common.bad_register(regtype, regid) - i += 1 - - ## For conditional instructions, we pass in the destination register - if "A_CONDEXEC" in hex_common.attribdict[tag]: - for regtype, regid in regs: - if hex_common.is_writeonly(regid) and not hex_common.is_hvx_reg( - regtype - ): - gen_helper_arg_opn(f, regtype, regid, i, tag) - i += 1 - - ## Arguments to the helper function are the source regs and immediates - for regtype, regid in regs: - if hex_common.is_read(regid): - if hex_common.is_hvx_reg(regtype) and hex_common.is_readwrite(regid): - continue - gen_helper_arg_opn(f, regtype, regid, i, tag) - i += 1 - for immlett, bits, immshift in imms: - gen_helper_arg_imm(f, immlett) - i += 1 - - if hex_common.need_pkt_has_multi_cof(tag): - f.write(", uint32_t pkt_has_multi_cof") - if 
(hex_common.need_pkt_need_commit(tag)): - f.write(", uint32_t pkt_need_commit") - - if hex_common.need_PC(tag): - if i > 0: - f.write(", ") - f.write("target_ulong PC") - i += 1 - if hex_common.helper_needs_next_PC(tag): - if i > 0: - f.write(", ") - f.write("target_ulong next_PC") - i += 1 - if hex_common.need_slot(tag): - if i > 0: - f.write(", ") - f.write("uint32_t slotval") - i += 1 - if hex_common.need_part1(tag): - if i > 0: - f.write(", ") - f.write("uint32_t part1") - f.write(")\n{\n") - if hex_common.need_ea(tag): - gen_decl_ea(f) - ## Declare the return variable - i = 0 - if "A_CONDEXEC" not in hex_common.attribdict[tag]: - for regtype, regid in regs: - if hex_common.is_writeonly(regid): - gen_helper_dest_decl_opn(f, regtype, regid, i) - i += 1 - - for regtype, regid in regs: - if hex_common.is_read(regid): - if hex_common.is_pair(regid): - if hex_common.is_hvx_reg(regtype): - gen_helper_src_var_ext_pair(f, regtype, regid, i) - elif hex_common.is_single(regid): - if hex_common.is_hvx_reg(regtype): - gen_helper_src_var_ext(f, regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - if hex_common.need_slot(tag): - if "A_LOAD" in hex_common.attribdict[tag]: - f.write(" bool pkt_has_store_s1 = slotval & 0x1;\n") - f.write(" uint32_t slot = slotval >> 1;\n") - - if "A_FPOP" in hex_common.attribdict[tag]: - f.write(" arch_fpop_start(env);\n") - - f.write(f" {hex_common.semdict[tag]}\n") - - if "A_FPOP" in hex_common.attribdict[tag]: - f.write(" arch_fpop_end(env);\n") + ## Print useful information about HVX registers + for regtype, regid in regs: + reg = hex_common.get_register(tag, regtype, regid) + if reg.is_hvx_reg(): + reg.helper_hvx_desc(f) + + if hex_common.need_slot(tag): + if "A_LOAD" in hex_common.attribdict[tag]: + f.write(hex_common.code_fmt(f"""\ + bool pkt_has_store_s1 = slotval & 0x1; + """)) + f.write(hex_common.code_fmt(f"""\ + uint32_t slot = slotval >> 1; + """)) + + if "A_FPOP" in hex_common.attribdict[tag]: + f.write(hex_common.code_fmt(f"""\ + arch_fpop_start(env); + """)) + + f.write(hex_common.code_fmt(f"""\ + {hex_common.semdict[tag]} + """)) + + if "A_FPOP" in hex_common.attribdict[tag]: + f.write(hex_common.code_fmt(f"""\ + arch_fpop_end(env); + """)) + + ## Return the scalar result + for regtype, regid in regs: + reg = hex_common.get_register(tag, regtype, regid) + if reg.is_written() and not reg.is_hvx_reg(): + f.write(hex_common.code_fmt(f"""\ + return {reg.helper_arg_name()}; + """)) - ## Save/return the return variable - for regtype, regid in regs: - if hex_common.is_written(regid): - gen_helper_return_opn(f, regtype, regid, i) - f.write("}\n\n") - ## End of the helper definition + f.write("}\n\n") + ## End of the helper definition def main(): @@ -370,6 +119,7 @@ def main(): if is_idef_parser_enabled: hex_common.read_idef_parser_enabled_file(sys.argv[5]) hex_common.calculate_attribs() + hex_common.init_registers() tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() diff --git a/target/hexagon/gen_helper_protos.py b/target/hexagon/gen_helper_protos.py index 131043795a7..c82b0f54e4d 100755 --- a/target/hexagon/gen_helper_protos.py +++ b/target/hexagon/gen_helper_protos.py @@ -22,39 +22,6 @@ import string import hex_common -## -## Helpers for gen_helper_prototype -## -def_helper_types = { - "N": "s32", - "O": "s32", - "P": "s32", - "M": "s32", - "C": "s32", - "R": "s32", - "V": "ptr", - "Q": "ptr", -} - -def_helper_types_pair = { - "R": "s64", - "C": "s64", - "S": "s64", - "G": "s64", - "V": "ptr", - "Q": "ptr", -} - - -def 
gen_def_helper_opn(f, tag, regtype, regid, i): - if hex_common.is_pair(regid): - f.write(f", {def_helper_types_pair[regtype]}") - elif hex_common.is_single(regid): - f.write(f", {def_helper_types[regtype]}") - else: - hex_common.bad_register(regtype, regid) - - ## ## Generate the DEF_HELPER prototype for an instruction ## For A2_add: Rd32=add(Rs32,Rt32) @@ -65,116 +32,15 @@ def gen_helper_prototype(f, tag, tagregs, tagimms): regs = tagregs[tag] imms = tagimms[tag] - numresults = 0 - numscalarresults = 0 - numscalarreadwrite = 0 - for regtype, regid in regs: - if hex_common.is_written(regid): - numresults += 1 - if hex_common.is_scalar_reg(regtype): - numscalarresults += 1 - if hex_common.is_readwrite(regid): - if hex_common.is_scalar_reg(regtype): - numscalarreadwrite += 1 - - if numscalarresults > 1: - ## The helper is bogus when there is more than one result - f.write(f"DEF_HELPER_1({tag}, void, env)\n") - else: - ## Figure out how many arguments the helper will take - if numscalarresults == 0: - def_helper_size = len(regs) + len(imms) + numscalarreadwrite + 1 - if hex_common.need_pkt_has_multi_cof(tag): - def_helper_size += 1 - if hex_common.need_pkt_need_commit(tag): - def_helper_size += 1 - if hex_common.need_part1(tag): - def_helper_size += 1 - if hex_common.need_slot(tag): - def_helper_size += 1 - if hex_common.need_PC(tag): - def_helper_size += 1 - if hex_common.helper_needs_next_PC(tag): - def_helper_size += 1 - if hex_common.need_condexec_reg(tag, regs): - def_helper_size += 1 - f.write(f"DEF_HELPER_{def_helper_size}({tag}") - ## The return type is void - f.write(", void") - else: - def_helper_size = len(regs) + len(imms) + numscalarreadwrite - if hex_common.need_pkt_has_multi_cof(tag): - def_helper_size += 1 - if hex_common.need_pkt_need_commit(tag): - def_helper_size += 1 - if hex_common.need_part1(tag): - def_helper_size += 1 - if hex_common.need_slot(tag): - def_helper_size += 1 - if hex_common.need_PC(tag): - def_helper_size += 1 - if hex_common.need_condexec_reg(tag, regs): - def_helper_size += 1 - if hex_common.helper_needs_next_PC(tag): - def_helper_size += 1 - f.write(f"DEF_HELPER_{def_helper_size}({tag}") - - ## Generate the qemu DEF_HELPER type for each result - ## Iterate over this list twice - ## - Emit the scalar result - ## - Emit the vector result - i = 0 - for regtype, regid in regs: - if hex_common.is_written(regid): - if not hex_common.is_hvx_reg(regtype): - gen_def_helper_opn(f, tag, regtype, regid, i) - i += 1 - - ## Put the env between the outputs and inputs - f.write(", env") - i += 1 - - # Second pass - for regtype, regid in regs: - if hex_common.is_written(regid): - if hex_common.is_hvx_reg(regtype): - gen_def_helper_opn(f, tag, regtype, regid, i) - i += 1 - - ## For conditional instructions, we pass in the destination register - if "A_CONDEXEC" in hex_common.attribdict[tag]: - for regtype, regid in regs: - if hex_common.is_writeonly(regid) and not hex_common.is_hvx_reg( - regtype - ): - gen_def_helper_opn(f, tag, regtype, regid, i) - i += 1 + declared = [] + ret_type = hex_common.helper_ret_type(tag, regs).proto_arg + declared.append(ret_type) - ## Generate the qemu type for each input operand (regs and immediates) - for regtype, regid in regs: - if hex_common.is_read(regid): - if hex_common.is_hvx_reg(regtype) and hex_common.is_readwrite(regid): - continue - gen_def_helper_opn(f, tag, regtype, regid, i) - i += 1 - for immlett, bits, immshift in imms: - f.write(", s32") + for arg in hex_common.helper_args(tag, regs, imms): + 
declared.append(arg.proto_arg) - ## Add the arguments for the instruction pkt_has_multi_cof, - ## pkt_needs_commit, PC, next_PC, slot, and part1 (if needed) - if hex_common.need_pkt_has_multi_cof(tag): - f.write(", i32") - if hex_common.need_pkt_need_commit(tag): - f.write(', i32') - if hex_common.need_PC(tag): - f.write(", i32") - if hex_common.helper_needs_next_PC(tag): - f.write(", i32") - if hex_common.need_slot(tag): - f.write(", i32") - if hex_common.need_part1(tag): - f.write(" , i32") - f.write(")\n") + arguments = ", ".join(declared) + f.write(f"DEF_HELPER_{len(declared) - 1}({tag}, {arguments})\n") def main(): @@ -195,6 +61,7 @@ def main(): if is_idef_parser_enabled: hex_common.read_idef_parser_enabled_file(sys.argv[5]) hex_common.calculate_attribs() + hex_common.init_registers() tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() diff --git a/target/hexagon/gen_idef_parser_funcs.py b/target/hexagon/gen_idef_parser_funcs.py index f4518e653f5..550a48cb7be 100644 --- a/target/hexagon/gen_idef_parser_funcs.py +++ b/target/hexagon/gen_idef_parser_funcs.py @@ -46,6 +46,7 @@ def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) hex_common.calculate_attribs() + hex_common.init_registers() tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() @@ -132,22 +133,9 @@ def main(): arguments = [] for regtype, regid in regs: - prefix = "in " if hex_common.is_read(regid) else "" - - is_pair = hex_common.is_pair(regid) - is_single_old = hex_common.is_single(regid) and hex_common.is_old_val( - regtype, regid, tag - ) - is_single_new = hex_common.is_single(regid) and hex_common.is_new_val( - regtype, regid, tag - ) - - if is_pair or is_single_old: - arguments.append(f"{prefix}{regtype}{regid}V") - elif is_single_new: - arguments.append(f"{prefix}{regtype}{regid}N") - else: - hex_common.bad_register(regtype, regid) + reg = hex_common.get_register(tag, regtype, regid) + prefix = "in " if reg.is_read() else "" + arguments.append(f"{prefix}{reg.reg_tcg()}") for immlett, bits, immshift in imms: arguments.append(hex_common.imm_name(immlett)) diff --git a/target/hexagon/gen_op_regs.py b/target/hexagon/gen_op_regs.py index a8a7712129c..7b7b33895ab 100755 --- a/target/hexagon/gen_op_regs.py +++ b/target/hexagon/gen_op_regs.py @@ -70,6 +70,7 @@ def strip_reg_prefix(x): def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) + hex_common.init_registers() tagregs = hex_common.get_tagregs(full=True) tagimms = hex_common.get_tagimms() @@ -80,11 +81,12 @@ def main(): wregs = [] regids = "" for regtype, regid, _, numregs in regs: - if hex_common.is_read(regid): + reg = hex_common.get_register(tag, regtype, regid) + if reg.is_read(): if regid[0] not in regids: regids += regid[0] rregs.append(regtype + regid + numregs) - if hex_common.is_written(regid): + if reg.is_written(): wregs.append(regtype + regid + numregs) if regid[0] not in regids: regids += regid[0] diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h index d992059fce8..1c4391b4156 100644 --- a/target/hexagon/gen_tcg.h +++ b/target/hexagon/gen_tcg.h @@ -68,15 +68,14 @@ do { \ TCGv tcgv_siV = tcg_constant_tl(siV); \ tcg_gen_mov_tl(EA, RxV); \ - gen_helper_fcircadd(RxV, RxV, tcgv_siV, MuV, \ - hex_gpr[HEX_REG_CS0 + MuN]); \ + gen_helper_fcircadd(RxV, RxV, tcgv_siV, MuV, CS); \ } while (0) #define GET_EA_pcr(SHIFT) \ do { \ TCGv ireg = tcg_temp_new(); \ tcg_gen_mov_tl(EA, RxV); \ gen_read_ireg(ireg, MuV, (SHIFT)); \ - 
gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ + gen_helper_fcircadd(RxV, RxV, ireg, MuV, CS); \ } while (0) /* Instructions with multiple definitions */ @@ -113,7 +112,7 @@ TCGv ireg = tcg_temp_new(); \ tcg_gen_mov_tl(EA, RxV); \ gen_read_ireg(ireg, MuV, SHIFT); \ - gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ + gen_helper_fcircadd(RxV, RxV, ireg, MuV, CS); \ LOAD; \ } while (0) @@ -427,7 +426,7 @@ TCGv BYTE G_GNUC_UNUSED = tcg_temp_new(); \ tcg_gen_mov_tl(EA, RxV); \ gen_read_ireg(ireg, MuV, SHIFT); \ - gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ + gen_helper_fcircadd(RxV, RxV, ireg, MuV, CS); \ STORE; \ } while (0) diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py index f5246cee6df..3d8e3cb6a26 100755 --- a/target/hexagon/gen_tcg_funcs.py +++ b/target/hexagon/gen_tcg_funcs.py @@ -23,461 +23,13 @@ import hex_common -## -## Helpers for gen_tcg_func -## -def gen_decl_ea_tcg(f, tag): - f.write(" TCGv EA G_GNUC_UNUSED = tcg_temp_new();\n") - - -def genptr_decl_pair_writable(f, tag, regtype, regid, regno): - regN = f"{regtype}{regid}N" - if regtype == "R": - f.write(f" const int {regN} = insn->regno[{regno}];\n") - elif regtype == "C": - f.write(f" const int {regN} = insn->regno[{regno}] + HEX_REG_SA0;\n") - else: - hex_common.bad_register(regtype, regid) - f.write(f" TCGv_i64 {regtype}{regid}V = " f"get_result_gpr_pair(ctx, {regN});\n") - - -def genptr_decl_writable(f, tag, regtype, regid, regno): - regN = f"{regtype}{regid}N" - if regtype == "R": - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" TCGv {regtype}{regid}V = get_result_gpr(ctx, {regN});\n") - elif regtype == "C": - f.write(f" const int {regN} = insn->regno[{regno}] + HEX_REG_SA0;\n") - f.write(f" TCGv {regtype}{regid}V = get_result_gpr(ctx, {regN});\n") - elif regtype == "P": - f.write(f" const int {regN} = insn->regno[{regno}];\n") - f.write(f" TCGv {regtype}{regid}V = tcg_temp_new();\n") - else: - hex_common.bad_register(regtype, regid) - - -def genptr_decl(f, tag, regtype, regid, regno): - regN = f"{regtype}{regid}N" - if regtype == "R": - if regid in {"ss", "tt"}: - f.write(f" TCGv_i64 {regtype}{regid}V = tcg_temp_new_i64();\n") - f.write(f" const int {regN} = insn->regno[{regno}];\n") - elif regid in {"dd", "ee", "xx", "yy"}: - genptr_decl_pair_writable(f, tag, regtype, regid, regno) - elif regid in {"s", "t", "u", "v"}: - f.write( - f" TCGv {regtype}{regid}V = " f"hex_gpr[insn->regno[{regno}]];\n" - ) - elif regid in {"d", "e", "x", "y"}: - genptr_decl_writable(f, tag, regtype, regid, regno) - else: - hex_common.bad_register(regtype, regid) - elif regtype == "P": - if regid in {"s", "t", "u", "v"}: - f.write( - f" TCGv {regtype}{regid}V = " f"hex_pred[insn->regno[{regno}]];\n" - ) - elif regid in {"d", "e", "x"}: - genptr_decl_writable(f, tag, regtype, regid, regno) - else: - hex_common.bad_register(regtype, regid) - elif regtype == "C": - if regid == "ss": - f.write(f" TCGv_i64 {regtype}{regid}V = " f"tcg_temp_new_i64();\n") - f.write(f" const int {regN} = insn->regno[{regno}] + " "HEX_REG_SA0;\n") - elif regid == "dd": - genptr_decl_pair_writable(f, tag, regtype, regid, regno) - elif regid == "s": - f.write(f" TCGv {regtype}{regid}V = tcg_temp_new();\n") - f.write( - f" const int {regtype}{regid}N = insn->regno[{regno}] + " - "HEX_REG_SA0;\n" - ) - elif regid == "d": - genptr_decl_writable(f, tag, regtype, regid, regno) - else: - hex_common.bad_register(regtype, regid) - elif regtype == "M": - 
if regid == "u": - f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") - f.write( - f" TCGv {regtype}{regid}V = hex_gpr[{regtype}{regid}N + " - "HEX_REG_M0];\n" - ) - else: - hex_common.bad_register(regtype, regid) - elif regtype == "V": - if regid in {"dd"}: - f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") - f.write(f" const intptr_t {regtype}{regid}V_off =\n") - if hex_common.is_tmp_result(tag): - f.write( - f" ctx_tmp_vreg_off(ctx, {regtype}{regid}N, 2, " "true);\n" - ) - else: - f.write(f" ctx_future_vreg_off(ctx, {regtype}{regid}N,") - f.write(" 2, true);\n") - if not hex_common.skip_qemu_helper(tag): - f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") - f.write( - f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, " - f"{regtype}{regid}V_off);\n" - ) - elif regid in {"uu", "vv", "xx"}: - f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") - f.write(f" const intptr_t {regtype}{regid}V_off =\n") - f.write(f" offsetof(CPUHexagonState, {regtype}{regid}V);\n") - if not hex_common.skip_qemu_helper(tag): - f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") - f.write( - f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, " - f"{regtype}{regid}V_off);\n" - ) - elif regid in {"s", "u", "v", "w"}: - f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") - f.write(f" const intptr_t {regtype}{regid}V_off =\n") - f.write(f" vreg_src_off(ctx, {regtype}{regid}N);\n") - if not hex_common.skip_qemu_helper(tag): - f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") - elif regid in {"d", "x", "y"}: - f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") - f.write(f" const intptr_t {regtype}{regid}V_off =\n") - if regid == "y": - f.write(" offsetof(CPUHexagonState, vtmp);\n") - elif hex_common.is_tmp_result(tag): - f.write( - f" ctx_tmp_vreg_off(ctx, {regtype}{regid}N, 1, " "true);\n" - ) - else: - f.write(f" ctx_future_vreg_off(ctx, {regtype}{regid}N,") - f.write(" 1, true);\n") - - if not hex_common.skip_qemu_helper(tag): - f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") - f.write( - f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, " - f"{regtype}{regid}V_off);\n" - ) - else: - hex_common.bad_register(regtype, regid) - elif regtype == "Q": - if regid in {"d", "e", "x"}: - f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") - f.write(f" const intptr_t {regtype}{regid}V_off =\n") - f.write(f" get_result_qreg(ctx, {regtype}{regid}N);\n") - if not hex_common.skip_qemu_helper(tag): - f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") - f.write( - f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, " - f"{regtype}{regid}V_off);\n" - ) - elif regid in {"s", "t", "u", "v"}: - f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") - f.write(f" const intptr_t {regtype}{regid}V_off =\n") - f.write( - f" offsetof(CPUHexagonState, " f"QRegs[{regtype}{regid}N]);\n" - ) - if not hex_common.skip_qemu_helper(tag): - f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") - else: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def genptr_decl_new(f, tag, regtype, regid, regno): - if regtype == "N": - if regid in {"s", "t"}: - f.write( - f" TCGv {regtype}{regid}N = " - f"get_result_gpr(ctx, insn->regno[{regno}]);\n" - ) - else: - hex_common.bad_register(regtype, regid) - elif regtype == "P": - if regid in {"t", "u", "v"}: - f.write( - f" TCGv {regtype}{regid}N = " 
- f"ctx->new_pred_value[insn->regno[{regno}]];\n" - ) - else: - hex_common.bad_register(regtype, regid) - elif regtype == "O": - if regid == "s": - f.write( - f" const intptr_t {regtype}{regid}N_num = " - f"insn->regno[{regno}];\n" - ) - if hex_common.skip_qemu_helper(tag): - f.write(f" const intptr_t {regtype}{regid}N_off =\n") - f.write(" ctx_future_vreg_off(ctx, " f"{regtype}{regid}N_num,") - f.write(" 1, true);\n") - else: - f.write( - f" TCGv {regtype}{regid}N = " - f"tcg_constant_tl({regtype}{regid}N_num);\n" - ) - else: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def genptr_decl_opn(f, tag, regtype, regid, i): - if hex_common.is_pair(regid): - genptr_decl(f, tag, regtype, regid, i) - elif hex_common.is_single(regid): - if hex_common.is_old_val(regtype, regid, tag): - genptr_decl(f, tag, regtype, regid, i) - elif hex_common.is_new_val(regtype, regid, tag): - genptr_decl_new(f, tag, regtype, regid, i) - else: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def genptr_decl_imm(f, immlett): - if immlett.isupper(): - i = 1 - else: - i = 0 - f.write(f" int {hex_common.imm_name(immlett)} = insn->immed[{i}];\n") - - -def genptr_src_read(f, tag, regtype, regid): - if regtype == "R": - if regid in {"ss", "tt", "xx", "yy"}: - f.write( - f" tcg_gen_concat_i32_i64({regtype}{regid}V, " - f"hex_gpr[{regtype}{regid}N],\n" - ) - f.write( - f" hex_gpr[{regtype}" - f"{regid}N + 1]);\n" - ) - elif regid in {"x", "y"}: - ## For read/write registers, we need to get the original value into - ## the result TCGv. For conditional instructions, this is done in - ## gen_start_packet. For unconditional instructions, we do it here. - if "A_CONDEXEC" not in hex_common.attribdict[tag]: - f.write( - f" tcg_gen_mov_tl({regtype}{regid}V, " - f"hex_gpr[{regtype}{regid}N]);\n" - ) - elif regid not in {"s", "t", "u", "v"}: - hex_common.bad_register(regtype, regid) - elif regtype == "P": - if regid == "x": - f.write( - f" tcg_gen_mov_tl({regtype}{regid}V, " - f"hex_pred[{regtype}{regid}N]);\n" - ) - elif regid not in {"s", "t", "u", "v"}: - hex_common.bad_register(regtype, regid) - elif regtype == "C": - if regid == "ss": - f.write( - f" gen_read_ctrl_reg_pair(ctx, {regtype}{regid}N, " - f"{regtype}{regid}V);\n" - ) - elif regid == "s": - f.write( - f" gen_read_ctrl_reg(ctx, {regtype}{regid}N, " - f"{regtype}{regid}V);\n" - ) - else: - hex_common.bad_register(regtype, regid) - elif regtype == "M": - if regid != "u": - hex_common.bad_register(regtype, regid) - elif regtype == "V": - if regid in {"uu", "vv", "xx"}: - f.write(f" tcg_gen_gvec_mov(MO_64, {regtype}{regid}V_off,\n") - f.write(f" vreg_src_off(ctx, {regtype}{regid}N),\n") - f.write(" sizeof(MMVector), sizeof(MMVector));\n") - f.write(" tcg_gen_gvec_mov(MO_64,\n") - f.write(f" {regtype}{regid}V_off + sizeof(MMVector),\n") - f.write(f" vreg_src_off(ctx, {regtype}{regid}N ^ 1),\n") - f.write(" sizeof(MMVector), sizeof(MMVector));\n") - elif regid in {"s", "u", "v", "w"}: - if not hex_common.skip_qemu_helper(tag): - f.write( - f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, " - f"{regtype}{regid}V_off);\n" - ) - elif regid in {"x", "y"}: - f.write(f" tcg_gen_gvec_mov(MO_64, {regtype}{regid}V_off,\n") - f.write(f" vreg_src_off(ctx, {regtype}{regid}N),\n") - f.write(" sizeof(MMVector), sizeof(MMVector));\n") - else: - hex_common.bad_register(regtype, regid) - elif regtype == "Q": - if regid in {"s", "t", "u", "v"}: - if not hex_common.skip_qemu_helper(tag): - f.write( 
- f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, " - f"{regtype}{regid}V_off);\n" - ) - elif regid in {"x"}: - f.write(f" tcg_gen_gvec_mov(MO_64, {regtype}{regid}V_off,\n") - f.write( - f" offsetof(CPUHexagonState, " f"QRegs[{regtype}{regid}N]),\n" - ) - f.write(" sizeof(MMQReg), sizeof(MMQReg));\n") - else: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def genptr_src_read_new(f, regtype, regid): - if regtype == "N": - if regid not in {"s", "t"}: - hex_common.bad_register(regtype, regid) - elif regtype == "P": - if regid not in {"t", "u", "v"}: - hex_common.bad_register(regtype, regid) - elif regtype == "O": - if regid != "s": - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def genptr_src_read_opn(f, regtype, regid, tag): - if hex_common.is_pair(regid): - genptr_src_read(f, tag, regtype, regid) - elif hex_common.is_single(regid): - if hex_common.is_old_val(regtype, regid, tag): - genptr_src_read(f, tag, regtype, regid) - elif hex_common.is_new_val(regtype, regid, tag): - genptr_src_read_new(f, regtype, regid) - else: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def gen_helper_call_opn(f, tag, regtype, regid, i): - if i > 0: - f.write(", ") - if hex_common.is_pair(regid): - f.write(f"{regtype}{regid}V") - elif hex_common.is_single(regid): - if hex_common.is_old_val(regtype, regid, tag): - f.write(f"{regtype}{regid}V") - elif hex_common.is_new_val(regtype, regid, tag): - f.write(f"{regtype}{regid}N") - else: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def gen_helper_decl_imm(f, immlett): - f.write( - f" TCGv tcgv_{hex_common.imm_name(immlett)} = " - f"tcg_constant_tl({hex_common.imm_name(immlett)});\n" - ) - - -def gen_helper_call_imm(f, immlett): - f.write(f", tcgv_{hex_common.imm_name(immlett)}") - - -def genptr_dst_write_pair(f, tag, regtype, regid): - f.write(f" gen_log_reg_write_pair(ctx, {regtype}{regid}N, " - f"{regtype}{regid}V);\n") - - -def genptr_dst_write(f, tag, regtype, regid): - if regtype == "R": - if regid in {"dd", "xx", "yy"}: - genptr_dst_write_pair(f, tag, regtype, regid) - elif regid in {"d", "e", "x", "y"}: - f.write( - f" gen_log_reg_write(ctx, {regtype}{regid}N, " - f"{regtype}{regid}V);\n" - ) - else: - hex_common.bad_register(regtype, regid) - elif regtype == "P": - if regid in {"d", "e", "x"}: - f.write( - f" gen_log_pred_write(ctx, {regtype}{regid}N, " - f"{regtype}{regid}V);\n" - ) - else: - hex_common.bad_register(regtype, regid) - elif regtype == "C": - if regid == "dd": - f.write( - f" gen_write_ctrl_reg_pair(ctx, {regtype}{regid}N, " - f"{regtype}{regid}V);\n" - ) - elif regid == "d": - f.write( - f" gen_write_ctrl_reg(ctx, {regtype}{regid}N, " - f"{regtype}{regid}V);\n" - ) - else: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def genptr_dst_write_ext(f, tag, regtype, regid, newv="EXT_DFL"): - if regtype == "V": - if regid in {"xx"}: - f.write( - f" gen_log_vreg_write_pair(ctx, {regtype}{regid}V_off, " - f"{regtype}{regid}N, {newv});\n" - ) - elif regid in {"y"}: - f.write( - f" gen_log_vreg_write(ctx, {regtype}{regid}V_off, " - f"{regtype}{regid}N, {newv});\n" - ) - elif regid not in {"dd", "d", "x"}: - hex_common.bad_register(regtype, regid) - elif regtype == "Q": - if regid not in {"d", "e", "x"}: - hex_common.bad_register(regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - -def 
genptr_dst_write_opn(f, regtype, regid, tag): - if hex_common.is_pair(regid): - if hex_common.is_hvx_reg(regtype): - if hex_common.is_tmp_result(tag): - genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP") - else: - genptr_dst_write_ext(f, tag, regtype, regid) - else: - genptr_dst_write(f, tag, regtype, regid) - elif hex_common.is_single(regid): - if hex_common.is_hvx_reg(regtype): - if hex_common.is_new_result(tag): - genptr_dst_write_ext(f, tag, regtype, regid, "EXT_NEW") - elif hex_common.is_tmp_result(tag): - genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP") - else: - genptr_dst_write_ext(f, tag, regtype, regid, "EXT_DFL") - else: - genptr_dst_write(f, tag, regtype, regid) - else: - hex_common.bad_register(regtype, regid) - - ## ## Generate the TCG code to call the helper ## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;} ## We produce: ## static void generate_A2_add(DisasContext *ctx) ## { -## Insn *insn __attribute__((unused)) = ctx->insn; +## Insn *insn G_GNUC_UNUSED = ctx->insn; ## const int RdN = insn->regno[0]; ## TCGv RdV = get_result_gpr(ctx, RdN); ## TCGv RsV = hex_gpr[insn->regno[1]]; @@ -496,44 +48,27 @@ def gen_tcg_func(f, tag, regs, imms): f.write(f"static void generate_{tag}(DisasContext *ctx)\n") f.write("{\n") - f.write(" Insn *insn __attribute__((unused)) = ctx->insn;\n") + f.write(" Insn *insn G_GNUC_UNUSED = ctx->insn;\n") if hex_common.need_ea(tag): - gen_decl_ea_tcg(f, tag) - i = 0 + f.write(" TCGv EA G_GNUC_UNUSED = tcg_temp_new();\n") + ## Declare all the operands (regs and immediates) + i = 0 for regtype, regid in regs: - genptr_decl_opn(f, tag, regtype, regid, i) + reg = hex_common.get_register(tag, regtype, regid) + reg.decl_tcg(f, tag, i) i += 1 for immlett, bits, immshift in imms: - genptr_decl_imm(f, immlett) - - if "A_PRIV" in hex_common.attribdict[tag]: - f.write(" fCHECKFORPRIV();\n") - if "A_GUEST" in hex_common.attribdict[tag]: - f.write(" fCHECKFORGUEST();\n") - - ## Read all the inputs - for regtype, regid in regs: - if hex_common.is_read(regid): - genptr_src_read_opn(f, regtype, regid, tag) + i = 1 if immlett.isupper() else 0 + f.write(f" int {hex_common.imm_name(immlett)} = insn->immed[{i}];\n") if hex_common.is_idef_parser_enabled(tag): declared = [] ## Handle registers for regtype, regid in regs: - if hex_common.is_pair(regid) or ( - hex_common.is_single(regid) - and hex_common.is_old_val(regtype, regid, tag) - ): - declared.append(f"{regtype}{regid}V") - if regtype == "M": - declared.append(f"{regtype}{regid}N") - elif hex_common.is_new_val(regtype, regid, tag): - declared.append(f"{regtype}{regid}N") - else: - hex_common.bad_register(regtype, regid) - + reg = hex_common.get_register(tag, regtype, regid) + reg.idef_arg(declared) ## Handle immediates for immlett, bits, immshift in imms: declared.append(hex_common.imm_name(immlett)) @@ -545,76 +80,22 @@ def gen_tcg_func(f, tag, regs, imms): f.write(f" fGEN_TCG_{tag}({hex_common.semdict[tag]});\n") else: ## Generate the call to the helper - for immlett, bits, immshift in imms: - gen_helper_decl_imm(f, immlett) - if hex_common.need_pkt_has_multi_cof(tag): - f.write(" TCGv pkt_has_multi_cof = ") - f.write("tcg_constant_tl(ctx->pkt->pkt_has_multi_cof);\n") - if hex_common.need_pkt_need_commit(tag): - f.write(" TCGv pkt_need_commit = ") - f.write("tcg_constant_tl(ctx->need_commit);\n") - if hex_common.need_part1(tag): - f.write(" TCGv part1 = tcg_constant_tl(insn->part1);\n") - if hex_common.need_slot(tag): - f.write(" TCGv slotval = gen_slotval(ctx);\n") - if hex_common.need_PC(tag): - 
f.write(" TCGv PC = tcg_constant_tl(ctx->pkt->pc);\n") - if hex_common.helper_needs_next_PC(tag): - f.write(" TCGv next_PC = tcg_constant_tl(ctx->next_PC);\n") - f.write(f" gen_helper_{tag}(") - i = 0 - ## If there is a scalar result, it is the return type - for regtype, regid in regs: - if hex_common.is_written(regid): - if hex_common.is_hvx_reg(regtype): - continue - gen_helper_call_opn(f, tag, regtype, regid, i) - i += 1 - if i > 0: - f.write(", ") - f.write("tcg_env") - i = 1 - ## For conditional instructions, we pass in the destination register - if "A_CONDEXEC" in hex_common.attribdict[tag]: - for regtype, regid in regs: - if hex_common.is_writeonly(regid) and not hex_common.is_hvx_reg( - regtype - ): - gen_helper_call_opn(f, tag, regtype, regid, i) - i += 1 - for regtype, regid in regs: - if hex_common.is_written(regid): - if not hex_common.is_hvx_reg(regtype): - continue - gen_helper_call_opn(f, tag, regtype, regid, i) - i += 1 - for regtype, regid in regs: - if hex_common.is_read(regid): - if hex_common.is_hvx_reg(regtype) and hex_common.is_readwrite(regid): - continue - gen_helper_call_opn(f, tag, regtype, regid, i) - i += 1 - for immlett, bits, immshift in imms: - gen_helper_call_imm(f, immlett) + declared = [] + ret_type = hex_common.helper_ret_type(tag, regs).call_arg + if ret_type != "void": + declared.append(ret_type) + + for arg in hex_common.helper_args(tag, regs, imms): + declared.append(arg.call_arg) - if hex_common.need_pkt_has_multi_cof(tag): - f.write(", pkt_has_multi_cof") - if hex_common.need_pkt_need_commit(tag): - f.write(", pkt_need_commit") - if hex_common.need_PC(tag): - f.write(", PC") - if hex_common.helper_needs_next_PC(tag): - f.write(", next_PC") - if hex_common.need_slot(tag): - f.write(", slotval") - if hex_common.need_part1(tag): - f.write(", part1") - f.write(");\n") + arguments = ", ".join(declared) + f.write(f" gen_helper_{tag}({arguments});\n") ## Write all the outputs for regtype, regid in regs: - if hex_common.is_written(regid): - genptr_dst_write_opn(f, regtype, regid, tag) + reg = hex_common.get_register(tag, regtype, regid) + if reg.is_written(): + reg.log_write(f, tag) f.write("}\n\n") @@ -632,6 +113,7 @@ def main(): hex_common.read_overrides_file(sys.argv[3]) hex_common.read_overrides_file(sys.argv[4]) hex_common.calculate_attribs() + hex_common.init_registers() ## Whether or not idef-parser is enabled is ## determined by the number of arguments to ## this script: diff --git a/target/hexagon/gen_trans_funcs.py b/target/hexagon/gen_trans_funcs.py new file mode 100755 index 00000000000..53e844a44be --- /dev/null +++ b/target/hexagon/gen_trans_funcs.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python3 + +## +## Copyright (c) 2024 Taylor Simpson +## +## This program is free software; you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published by +## the Free Software Foundation; either version 2 of the License, or +## (at your option) any later version. +## +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## +## You should have received a copy of the GNU General Public License +## along with this program; if not, see . 
+## + +import io +import re + +import sys +import textwrap +import iset +import hex_common + +encs = { + tag: "".join(reversed(iset.iset[tag]["enc"].replace(" ", ""))) + for tag in iset.tags + if iset.iset[tag]["enc"] != "MISSING ENCODING" +} + + +regre = re.compile(r"((?which_extended = {0 if letter.islower() else 1}; + """)) + +## +## Generate the QEMU decodetree trans_ function for each instruction +## For A2_add: Rd32=add(Rs32,Rt32) +## We produce: +## static bool trans_A2_add(DisasContext *ctx, arg_A2_add *args) +## { +## Insn *insn = ctx->insn; +## insn->opcode = A2_add; +## insn->regno[0] = args->Rd; +## insn->regno[1] = args->Rs; +## insn->regno[2] = args->Rt; +## return true; +## } +## +def gen_trans_funcs(f): + f.write(f"/* DO NOT MODIFY - This file is generated by {sys.argv[0]} */\n\n") + for tag in sorted(encs.keys(), key=iset.tags.index): + regs = ordered_unique(regre.findall(iset.iset[tag]["syntax"])) + imms = ordered_unique(immre.findall(iset.iset[tag]["syntax"])) + + f.write(textwrap.dedent(f"""\ + static bool trans_{tag}(DisasContext *ctx, arg_{tag} *args) + {open_curly} + Insn *insn = ctx->insn; + insn->opcode = {tag}; + """)) + + regno = 0 + for reg in regs: + reg_type = reg[0] + reg_id = reg[1] + f.write(code_fmt(f"""\ + insn->regno[{regno}] = args->{reg_type}{reg_id}; + """)) + regno += 1 + + if len(imms) != 0: + mark_which_imm_extended(f, tag) + + for imm in imms: + imm_type = imm[0] + imm_letter = "i" if imm_type.islower() else "I" + immno = 0 if imm_type.islower() else 1 + imm_shift = int(imm[2]) if imm[2] else 0 + if imm_shift: + f.write(code_fmt(f"""\ + insn->immed[{immno}] = + shift_left(ctx, args->{imm_type}{imm_letter}, + {imm_shift}, {immno}); + """)) + else: + f.write(code_fmt(f"""\ + insn->immed[{immno}] = args->{imm_type}{imm_letter}; + """)) + + f.write(textwrap.dedent(f"""\ + return true; + {close_curly} + """)) + + +if __name__ == "__main__": + hex_common.read_semantics_file(sys.argv[1]) + with open(sys.argv[2], "w") as f: + gen_trans_funcs(f) diff --git a/target/hexagon/hex_common.py b/target/hexagon/hex_common.py index 0da65d6dd6a..195620c7ecd 100755 --- a/target/hexagon/hex_common.py +++ b/target/hexagon/hex_common.py @@ -20,19 +20,19 @@ import sys import re import string +import textwrap behdict = {} # tag ->behavior semdict = {} # tag -> semantics attribdict = {} # tag -> attributes macros = {} # macro -> macro information... attribinfo = {} # Register information and misc +registers = {} # register -> register functions +new_registers = {} tags = [] # list of all tags overrides = {} # tags with helper overrides idef_parser_enabled = {} # tags enabled for idef-parser -def bad_register(regtype, regid): - raise Exception(f"Bad register parse: regtype '{regtype}' regid '{regid}'") - # We should do this as a hash for performance, # but to keep order let's keep it as a list. 
def uniquify(seq): @@ -91,10 +91,6 @@ def is_cond_call(tag): def calculate_attribs(): add_qemu_macro_attrib("fREAD_PC", "A_IMPLICIT_READS_PC") add_qemu_macro_attrib("fTRAP", "A_IMPLICIT_READS_PC") - add_qemu_macro_attrib("fWRITE_P0", "A_WRITES_PRED_REG") - add_qemu_macro_attrib("fWRITE_P1", "A_WRITES_PRED_REG") - add_qemu_macro_attrib("fWRITE_P2", "A_WRITES_PRED_REG") - add_qemu_macro_attrib("fWRITE_P3", "A_WRITES_PRED_REG") add_qemu_macro_attrib("fSET_OVERFLOW", "A_IMPLICIT_WRITES_USR") add_qemu_macro_attrib("fSET_LPCFG", "A_IMPLICIT_WRITES_USR") add_qemu_macro_attrib("fLOAD", "A_SCALAR_LOAD") @@ -119,13 +115,6 @@ def calculate_attribs(): continue macro = macros[macname] attribdict[tag] |= set(macro.attribs) - # Figure out which instructions write predicate registers - tagregs = get_tagregs() - for tag in tags: - regs = tagregs[tag] - for regtype, regid in regs: - if regtype == "P" and is_written(regid): - attribdict[tag].add("A_WRITES_PRED_REG") # Mark conditional jumps and calls # Not all instructions are properly marked with A_CONDEXEC for tag in tags: @@ -208,46 +197,6 @@ def get_tagimms(): return dict(zip(tags, list(map(compute_tag_immediates, tags)))) -def is_pair(regid): - return len(regid) == 2 - - -def is_single(regid): - return len(regid) == 1 - - -def is_written(regid): - return regid[0] in "dexy" - - -def is_writeonly(regid): - return regid[0] in "de" - - -def is_read(regid): - return regid[0] in "stuvwxy" - - -def is_readwrite(regid): - return regid[0] in "xy" - - -def is_scalar_reg(regtype): - return regtype in "RPC" - - -def is_hvx_reg(regtype): - return regtype in "VQ" - - -def is_old_val(regtype, regid, tag): - return regtype + regid + "V" in semdict[tag] - - -def is_new_val(regtype, regid, tag): - return regtype + regid + "N" in semdict[tag] - - def need_slot(tag): if ( "A_CVI_SCATTER" not in attribdict[tag] @@ -272,7 +221,7 @@ def need_PC(tag): return "A_IMPLICIT_READS_PC" in attribdict[tag] -def helper_needs_next_PC(tag): +def need_next_PC(tag): return "A_CALL" in attribdict[tag] @@ -283,26 +232,11 @@ def need_pkt_has_multi_cof(tag): def need_pkt_need_commit(tag): return 'A_IMPLICIT_WRITES_USR' in attribdict[tag] -def need_condexec_reg(tag, regs): - if "A_CONDEXEC" in attribdict[tag]: - for regtype, regid in regs: - if is_writeonly(regid) and not is_hvx_reg(regtype): - return True - return False - def skip_qemu_helper(tag): return tag in overrides.keys() -def is_tmp_result(tag): - return "A_CVI_TMP" in attribdict[tag] or "A_CVI_TMP_DST" in attribdict[tag] - - -def is_new_result(tag): - return "A_CVI_NEW" in attribdict[tag] - - def is_idef_parser_enabled(tag): return tag in idef_parser_enabled @@ -350,3 +284,850 @@ def read_idef_parser_enabled_file(name): with open(name, "r") as idef_parser_enabled_file: lines = idef_parser_enabled_file.read().strip().split("\n") idef_parser_enabled = set(lines) + + +def is_predicated(tag): + return "A_CONDEXEC" in attribdict[tag] + + +def code_fmt(txt): + return textwrap.indent(textwrap.dedent(txt), " ") + + +def hvx_newv(tag): + if "A_CVI_NEW" in attribdict[tag]: + return "EXT_NEW" + elif "A_CVI_TMP" in attribdict[tag] or "A_CVI_TMP_DST" in attribdict[tag]: + return "EXT_TMP" + else: + return "EXT_DFL" + +def vreg_offset_func(tag): + if "A_CVI_TMP" in attribdict[tag] or "A_CVI_TMP_DST" in attribdict[tag]: + return "ctx_tmp_vreg_off" + else: + return "ctx_future_vreg_off" + +class HelperArg: + def __init__(self, proto_arg, call_arg, func_arg): + self.proto_arg = proto_arg + self.call_arg = call_arg + self.func_arg = func_arg + +class 
Register: + def __init__(self, regtype, regid): + self.regtype = regtype + self.regid = regid + self.reg_num = f"{regtype}{regid}N" + def decl_reg_num(self, f, regno): + f.write(code_fmt(f"""\ + const int {self.reg_num} = insn->regno[{regno}]; + """)) + def idef_arg(self, declared): + declared.append(self.reg_tcg()) + def helper_arg(self): + return HelperArg( + self.helper_proto_type(), + self.reg_tcg(), + f"{self.helper_arg_type()} {self.helper_arg_name()}" + ) + +# +# Every register is either Single or Pair or Hvx +# +class Scalar: + def is_scalar_reg(self): + return True + def is_hvx_reg(self): + return False + def helper_arg_name(self): + return self.reg_tcg() + +class Single(Scalar): + def helper_proto_type(self): + return "s32" + def helper_arg_type(self): + return "int32_t" + +class Pair(Scalar): + def helper_proto_type(self): + return "s64" + def helper_arg_type(self): + return "int64_t" + +class Hvx: + def is_scalar_reg(self): + return False + def is_hvx_reg(self): + return True + def hvx_off(self): + return f"{self.reg_tcg()}_off" + def helper_proto_type(self): + return "ptr" + def helper_arg_type(self): + return "void *" + def helper_arg_name(self): + return f"{self.reg_tcg()}_void" + +# +# Every register is either Dest or OldSource or NewSource or ReadWrite +# +class Dest: + def reg_tcg(self): + return f"{self.regtype}{self.regid}V" + def is_written(self): + return True + def is_writeonly(self): + return True + def is_read(self): + return False + def is_readwrite(self): + return False + +class Source: + def is_written(self): + return False + def is_writeonly(self): + return False + def is_read(self): + return True + def is_readwrite(self): + return False + +class OldSource(Source): + def reg_tcg(self): + return f"{self.regtype}{self.regid}V" + +class NewSource(Source): + def reg_tcg(self): + return f"{self.regtype}{self.regid}N" + +class ReadWrite: + def reg_tcg(self): + return f"{self.regtype}{self.regid}V" + def is_written(self): + return True + def is_writeonly(self): + return False + def is_read(self): + return True + def is_readwrite(self): + return True + +class GprDest(Register, Single, Dest): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv {self.reg_tcg()} = get_result_gpr(ctx, {self.reg_num}); + """)) + def log_write(self, f, tag): + f.write(code_fmt(f"""\ + gen_log_reg_write(ctx, {self.reg_num}, {self.reg_tcg()}); + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + predicated = "true" if is_predicated(tag) else "false" + f.write(code_fmt(f"""\ + ctx_log_reg_write(ctx, {self.reg_num}, {predicated}); + """)) + +class GprSource(Register, Single, OldSource): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv {self.reg_tcg()} = hex_gpr[{self.reg_num}]; + """)) + def analyze_read(self, f, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_reg_read(ctx, {self.reg_num}); + """)) + +class GprNewSource(Register, Single, NewSource): + def decl_tcg(self, f, tag, regno): + f.write(code_fmt(f"""\ + TCGv {self.reg_tcg()} = get_result_gpr(ctx, insn->regno[{regno}]); + """)) + def analyze_read(self, f, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_reg_read(ctx, {self.reg_num}); + """)) + +class GprReadWrite(Register, Single, ReadWrite): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv {self.reg_tcg()} = get_result_gpr(ctx, {self.reg_num}); + """)) + ## For 
read/write registers, we need to get the original value into + ## the result TCGv. For predicated instructions, this is done in + ## gen_start_packet. For un-predicated instructions, we do it here. + if not is_predicated(tag): + f.write(code_fmt(f"""\ + tcg_gen_mov_tl({self.reg_tcg()}, hex_gpr[{self.reg_num}]); + """)) + def log_write(self, f, tag): + f.write(code_fmt(f"""\ + gen_log_reg_write(ctx, {self.reg_num}, {self.reg_tcg()}); + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + predicated = "true" if is_predicated(tag) else "false" + f.write(code_fmt(f"""\ + ctx_log_reg_write(ctx, {self.reg_num}, {predicated}); + """)) + +class ControlDest(Register, Single, Dest): + def decl_reg_num(self, f, regno): + f.write(code_fmt(f"""\ + const int {self.reg_num} = insn->regno[{regno}] + HEX_REG_SA0; + """)) + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv {self.reg_tcg()} = get_result_gpr(ctx, {self.reg_num}); + """)) + def log_write(self, f, tag): + f.write(code_fmt(f"""\ + gen_write_ctrl_reg(ctx, {self.reg_num}, {self.reg_tcg()}); + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + predicated = "true" if is_predicated(tag) else "false" + f.write(code_fmt(f"""\ + ctx_log_reg_write(ctx, {self.reg_num}, {predicated}); + """)) + +class ControlSource(Register, Single, OldSource): + def decl_reg_num(self, f, regno): + f.write(code_fmt(f"""\ + const int {self.reg_num} = insn->regno[{regno}] + HEX_REG_SA0; + """)) + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno); + f.write(code_fmt(f"""\ + TCGv {self.reg_tcg()} = tcg_temp_new(); + gen_read_ctrl_reg(ctx, {self.reg_num}, {self.reg_tcg()}); + """)) + def analyze_read(self, f, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_reg_read(ctx, {self.reg_num}); + """)) + +class ModifierSource(Register, Single, OldSource): + def decl_reg_num(self, f, regno): + f.write(code_fmt(f"""\ + const int {self.reg_num} = insn->regno[{regno}] + HEX_REG_M0; + """)) + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv {self.reg_tcg()} = hex_gpr[{self.reg_num}]; + TCGv CS G_GNUC_UNUSED = + hex_gpr[{self.reg_num} - HEX_REG_M0 + HEX_REG_CS0]; + """)) + def idef_arg(self, declared): + declared.append(self.reg_tcg()) + declared.append("CS") + def analyze_read(self, f, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_reg_read(ctx, {self.reg_num}); + """)) + +class PredDest(Register, Single, Dest): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv {self.reg_tcg()} = tcg_temp_new(); + """)) + def log_write(self, f, tag): + f.write(code_fmt(f"""\ + gen_log_pred_write(ctx, {self.reg_num}, {self.reg_tcg()}); + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_pred_write(ctx, {self.reg_num}); + """)) + +class PredSource(Register, Single, OldSource): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv {self.reg_tcg()} = hex_pred[{self.reg_num}]; + """)) + def analyze_read(self, f, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_pred_read(ctx, {self.reg_num}); + """)) + +class PredNewSource(Register, Single, NewSource): + def decl_tcg(self, f, tag, regno): + f.write(code_fmt(f"""\ + TCGv {self.reg_tcg()} = get_result_pred(ctx, insn->regno[{regno}]); + """)) + def analyze_read(self, f, 
regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_pred_read(ctx, {self.reg_num}); + """)) + +class PredReadWrite(Register, Single, ReadWrite): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv {self.reg_tcg()} = tcg_temp_new(); + tcg_gen_mov_tl({self.reg_tcg()}, hex_pred[{self.reg_num}]); + """)) + def log_write(self, f, tag): + f.write(code_fmt(f"""\ + gen_log_pred_write(ctx, {self.reg_num}, {self.reg_tcg()}); + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_pred_write(ctx, {self.reg_num}); + """)) + +class PairDest(Register, Pair, Dest): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv_i64 {self.reg_tcg()} = + get_result_gpr_pair(ctx, {self.reg_num}); + """)) + def log_write(self, f, tag): + f.write(code_fmt(f"""\ + gen_log_reg_write_pair(ctx, {self.reg_num}, {self.reg_tcg()}); + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + predicated = "true" if is_predicated(tag) else "false" + f.write(code_fmt(f"""\ + ctx_log_reg_write_pair(ctx, {self.reg_num}, {predicated}); + """)) + +class PairSource(Register, Pair, OldSource): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv_i64 {self.reg_tcg()} = tcg_temp_new_i64(); + tcg_gen_concat_i32_i64({self.reg_tcg()}, + hex_gpr[{self.reg_num}], + hex_gpr[{self.reg_num} + 1]); + """)) + def analyze_read(self, f, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_reg_read_pair(ctx, {self.reg_num}); + """)) + +class PairReadWrite(Register, Pair, ReadWrite): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv_i64 {self.reg_tcg()} = + get_result_gpr_pair(ctx, {self.reg_num}); + tcg_gen_concat_i32_i64({self.reg_tcg()}, + hex_gpr[{self.reg_num}], + hex_gpr[{self.reg_num} + 1]); + """)) + def log_write(self, f, tag): + f.write(code_fmt(f"""\ + gen_log_reg_write_pair(ctx, {self.reg_num}, {self.reg_tcg()}); + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + predicated = "true" if is_predicated(tag) else "false" + f.write(code_fmt(f"""\ + ctx_log_reg_write_pair(ctx, {self.reg_num}, {predicated}); + """)) + +class ControlPairDest(Register, Pair, Dest): + def decl_reg_num(self, f, regno): + f.write(code_fmt(f"""\ + const int {self.reg_num} = insn->regno[{regno}] + HEX_REG_SA0; + """)) + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv_i64 {self.reg_tcg()} = + get_result_gpr_pair(ctx, {self.reg_num}); + """)) + def log_write(self, f, tag): + f.write(code_fmt(f"""\ + gen_write_ctrl_reg_pair(ctx, {self.reg_num}, {self.reg_tcg()}); + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + predicated = "true" if is_predicated(tag) else "false" + f.write(code_fmt(f"""\ + ctx_log_reg_write_pair(ctx, {self.reg_num}, {predicated}); + """)) + +class ControlPairSource(Register, Pair, OldSource): + def decl_reg_num(self, f, regno): + f.write(code_fmt(f"""\ + const int {self.reg_num} = insn->regno[{regno}] + HEX_REG_SA0; + """)) + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + TCGv_i64 {self.reg_tcg()} = tcg_temp_new_i64(); + gen_read_ctrl_reg_pair(ctx, {self.reg_num}, {self.reg_tcg()}); + """)) + def analyze_read(self, f, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + 
ctx_log_reg_read_pair(ctx, {self.reg_num}); + """)) + +class VRegDest(Register, Hvx, Dest): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + const intptr_t {self.hvx_off()} = + {vreg_offset_func(tag)}(ctx, {self.reg_num}, 1, true); + """)) + if not skip_qemu_helper(tag): + f.write(code_fmt(f"""\ + TCGv_ptr {self.reg_tcg()} = tcg_temp_new_ptr(); + tcg_gen_addi_ptr({self.reg_tcg()}, tcg_env, {self.hvx_off()}); + """)) + def log_write(self, f, tag): + pass + def helper_hvx_desc(self, f): + f.write(code_fmt(f"""\ + /* {self.reg_tcg()} is *(MMVector *)({self.helper_arg_name()}) */ + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + newv = hvx_newv(tag) + predicated = "true" if is_predicated(tag) else "false" + f.write(code_fmt(f"""\ + ctx_log_vreg_write(ctx, {self.reg_num}, {newv}, {predicated}); + """)) + +class VRegSource(Register, Hvx, OldSource): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + const intptr_t {self.hvx_off()} = vreg_src_off(ctx, {self.reg_num}); + """)) + if not skip_qemu_helper(tag): + f.write(code_fmt(f"""\ + TCGv_ptr {self.reg_tcg()} = tcg_temp_new_ptr(); + tcg_gen_addi_ptr({self.reg_tcg()}, tcg_env, {self.hvx_off()}); + """)) + def helper_hvx_desc(self, f): + f.write(code_fmt(f"""\ + /* {self.reg_tcg()} is *(MMVector *)({self.helper_arg_name()}) */ + """)) + def analyze_read(self, f, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_vreg_read(ctx, {self.reg_num}); + """)) + +class VRegNewSource(Register, Hvx, NewSource): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + if skip_qemu_helper(tag): + f.write(code_fmt(f"""\ + const intptr_t {self.hvx_off()} = + ctx_future_vreg_off(ctx, {self.reg_num}, 1, true); + """)) + def helper_hvx_desc(self, f): + f.write(code_fmt(f"""\ + /* {self.reg_tcg()} is *(MMVector *)({self.helper_arg_name()}) */ + """)) + def analyze_read(self, f, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_vreg_read(ctx, {self.reg_num}); + """)) + +class VRegReadWrite(Register, Hvx, ReadWrite): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + const intptr_t {self.hvx_off()} = + {vreg_offset_func(tag)}(ctx, {self.reg_num}, 1, true); + tcg_gen_gvec_mov(MO_64, {self.hvx_off()}, + vreg_src_off(ctx, {self.reg_num}), + sizeof(MMVector), sizeof(MMVector)); + """)) + if not skip_qemu_helper(tag): + f.write(code_fmt(f"""\ + TCGv_ptr {self.reg_tcg()} = tcg_temp_new_ptr(); + tcg_gen_addi_ptr({self.reg_tcg()}, tcg_env, {self.hvx_off()}); + """)) + def log_write(self, f, tag): + pass + def helper_hvx_desc(self, f): + f.write(code_fmt(f"""\ + /* {self.reg_tcg()} is *(MMVector *)({self.helper_arg_name()}) */ + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + newv = hvx_newv(tag) + predicated = "true" if is_predicated(tag) else "false" + f.write(code_fmt(f"""\ + ctx_log_vreg_write(ctx, {self.reg_num}, {newv}, {predicated}); + """)) + +class VRegTmp(Register, Hvx, ReadWrite): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + const intptr_t {self.hvx_off()} = offsetof(CPUHexagonState, vtmp); + """)) + if not skip_qemu_helper(tag): + f.write(code_fmt(f"""\ + TCGv_ptr {self.reg_tcg()} = tcg_temp_new_ptr(); + tcg_gen_addi_ptr({self.reg_tcg()}, tcg_env, {self.hvx_off()}); + tcg_gen_gvec_mov(MO_64, {self.hvx_off()}, + vreg_src_off(ctx, {self.reg_num}), + sizeof(MMVector), 
sizeof(MMVector)); + """)) + def log_write(self, f, tag): + f.write(code_fmt(f"""\ + gen_log_vreg_write(ctx, {self.hvx_off()}, {self.reg_num}, + {hvx_newv(tag)}); + """)) + def helper_hvx_desc(self, f): + f.write(code_fmt(f"""\ + /* {self.reg_tcg()} is *(MMVector *)({self.helper_arg_name()}) */ + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + newv = hvx_newv(tag) + predicated = "true" if is_predicated(tag) else "false" + f.write(code_fmt(f"""\ + ctx_log_vreg_write(ctx, {self.reg_num}, {newv}, {predicated}); + """)) + +class VRegPairDest(Register, Hvx, Dest): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + const intptr_t {self.hvx_off()} = + {vreg_offset_func(tag)}(ctx, {self.reg_num}, 2, true); + """)) + if not skip_qemu_helper(tag): + f.write(code_fmt(f"""\ + TCGv_ptr {self.reg_tcg()} = tcg_temp_new_ptr(); + tcg_gen_addi_ptr({self.reg_tcg()}, tcg_env, {self.hvx_off()}); + """)) + def log_write(self, f, tag): + pass + def helper_hvx_desc(self, f): + f.write(code_fmt(f"""\ + /* {self.reg_tcg()} is *(MMVectorPair *)({self.helper_arg_name()}) */ + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + newv = hvx_newv(tag) + predicated = "true" if is_predicated(tag) else "false" + f.write(code_fmt(f"""\ + ctx_log_vreg_write_pair(ctx, {self.reg_num}, {newv}, {predicated}); + """)) + +class VRegPairSource(Register, Hvx, OldSource): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + const intptr_t {self.hvx_off()} = + offsetof(CPUHexagonState, {self.reg_tcg()}); + tcg_gen_gvec_mov(MO_64, {self.hvx_off()}, + vreg_src_off(ctx, {self.reg_num}), + sizeof(MMVector), sizeof(MMVector)); + tcg_gen_gvec_mov(MO_64, {self.hvx_off()} + sizeof(MMVector), + vreg_src_off(ctx, {self.reg_num} ^ 1), + sizeof(MMVector), sizeof(MMVector)); + """)) + if not skip_qemu_helper(tag): + f.write(code_fmt(f"""\ + TCGv_ptr {self.reg_tcg()} = tcg_temp_new_ptr(); + tcg_gen_addi_ptr({self.reg_tcg()}, tcg_env, {self.hvx_off()}); + """)) + def helper_hvx_desc(self, f): + f.write(code_fmt(f"""\ + /* {self.reg_tcg()} is *(MMVectorPair *)({self.helper_arg_name()}) */ + """)) + def analyze_read(self, f, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_vreg_read_pair(ctx, {self.reg_num}); + """)) + +class VRegPairReadWrite(Register, Hvx, ReadWrite): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + const intptr_t {self.hvx_off()} = + offsetof(CPUHexagonState, {self.reg_tcg()}); + tcg_gen_gvec_mov(MO_64, {self.hvx_off()}, + vreg_src_off(ctx, {self.reg_num}), + sizeof(MMVector), sizeof(MMVector)); + tcg_gen_gvec_mov(MO_64, {self.hvx_off()} + sizeof(MMVector), + vreg_src_off(ctx, {self.reg_num} ^ 1), + sizeof(MMVector), sizeof(MMVector)); + """)) + if not skip_qemu_helper(tag): + f.write(code_fmt(f"""\ + TCGv_ptr {self.reg_tcg()} = tcg_temp_new_ptr(); + tcg_gen_addi_ptr({self.reg_tcg()}, tcg_env, {self.hvx_off()}); + """)) + def log_write(self, f, tag): + f.write(code_fmt(f"""\ + gen_log_vreg_write_pair(ctx, {self.hvx_off()}, {self.reg_num}, + {hvx_newv(tag)}); + """)) + def helper_hvx_desc(self, f): + f.write(code_fmt(f"""\ + /* {self.reg_tcg()} is *(MMVectorPair *)({self.helper_arg_name()}) */ + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + newv = hvx_newv(tag) + predicated = "true" if is_predicated(tag) else "false" + f.write(code_fmt(f"""\ + ctx_log_vreg_write_pair(ctx, {self.reg_num}, 
{newv}, {predicated}); + """)) + +class QRegDest(Register, Hvx, Dest): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + const intptr_t {self.hvx_off()} = + get_result_qreg(ctx, {self.reg_num}); + """)) + if not skip_qemu_helper(tag): + f.write(code_fmt(f"""\ + TCGv_ptr {self.reg_tcg()} = tcg_temp_new_ptr(); + tcg_gen_addi_ptr({self.reg_tcg()}, tcg_env, {self.hvx_off()}); + """)) + def log_write(self, f, tag): + pass + def helper_hvx_desc(self, f): + f.write(code_fmt(f"""\ + /* {self.reg_tcg()} is *(MMQReg *)({self.helper_arg_name()}) */ + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_qreg_write(ctx, {self.reg_num}); + """)) + +class QRegSource(Register, Hvx, OldSource): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + const intptr_t {self.hvx_off()} = + offsetof(CPUHexagonState, QRegs[{self.reg_num}]); + """)) + if not skip_qemu_helper(tag): + f.write(code_fmt(f"""\ + TCGv_ptr {self.reg_tcg()} = tcg_temp_new_ptr(); + tcg_gen_addi_ptr({self.reg_tcg()}, tcg_env, {self.hvx_off()}); + """)) + def helper_hvx_desc(self, f): + f.write(code_fmt(f"""\ + /* {self.reg_tcg()} is *(MMQReg *)({self.helper_arg_name()}) */ + """)) + def analyze_read(self, f, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_qreg_read(ctx, {self.reg_num}); + """)) + +class QRegReadWrite(Register, Hvx, ReadWrite): + def decl_tcg(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + const intptr_t {self.hvx_off()} = + get_result_qreg(ctx, {self.reg_num}); + tcg_gen_gvec_mov(MO_64, {self.hvx_off()}, + offsetof(CPUHexagonState, QRegs[{self.reg_num}]), + sizeof(MMQReg), sizeof(MMQReg)); + """)) + if not skip_qemu_helper(tag): + f.write(code_fmt(f"""\ + TCGv_ptr {self.reg_tcg()} = tcg_temp_new_ptr(); + tcg_gen_addi_ptr({self.reg_tcg()}, tcg_env, {self.hvx_off()}); + """)) + def log_write(self, f, tag): + pass + def helper_hvx_desc(self, f): + f.write(code_fmt(f"""\ + /* {self.reg_tcg()} is *(MMQReg *)({self.helper_arg_name()}) */ + """)) + def analyze_write(self, f, tag, regno): + self.decl_reg_num(f, regno) + f.write(code_fmt(f"""\ + ctx_log_qreg_write(ctx, {self.reg_num}); + """)) + +def init_registers(): + regs = { + GprDest("R", "d"), + GprDest("R", "e"), + GprSource("R", "s"), + GprSource("R", "t"), + GprSource("R", "u"), + GprSource("R", "v"), + GprReadWrite("R", "x"), + GprReadWrite("R", "y"), + ControlDest("C", "d"), + ControlSource("C", "s"), + ModifierSource("M", "u"), + PredDest("P", "d"), + PredDest("P", "e"), + PredSource("P", "s"), + PredSource("P", "t"), + PredSource("P", "u"), + PredSource("P", "v"), + PredReadWrite("P", "x"), + PairDest("R", "dd"), + PairDest("R", "ee"), + PairSource("R", "ss"), + PairSource("R", "tt"), + PairReadWrite("R", "xx"), + PairReadWrite("R", "yy"), + ControlPairDest("C", "dd"), + ControlPairSource("C", "ss"), + VRegDest("V", "d"), + VRegSource("V", "s"), + VRegSource("V", "u"), + VRegSource("V", "v"), + VRegSource("V", "w"), + VRegReadWrite("V", "x"), + VRegTmp("V", "y"), + VRegPairDest("V", "dd"), + VRegPairSource("V", "uu"), + VRegPairSource("V", "vv"), + VRegPairReadWrite("V", "xx"), + QRegDest("Q", "d"), + QRegDest("Q", "e"), + QRegSource("Q", "s"), + QRegSource("Q", "t"), + QRegSource("Q", "u"), + QRegSource("Q", "v"), + QRegReadWrite("Q", "x"), + } + for reg in regs: + registers[f"{reg.regtype}{reg.regid}"] = reg + + new_regs = { + GprNewSource("N", "s"), + GprNewSource("N", 
"t"), + PredNewSource("P", "t"), + PredNewSource("P", "u"), + PredNewSource("P", "v"), + VRegNewSource("O", "s"), + } + for reg in new_regs: + new_registers[f"{reg.regtype}{reg.regid}"] = reg + +def get_register(tag, regtype, regid): + if f"{regtype}{regid}V" in semdict[tag]: + return registers[f"{regtype}{regid}"] + else: + return new_registers[f"{regtype}{regid}"] + +def helper_ret_type(tag, regs): + ## If there is a scalar result, it is the return type + return_type = HelperArg( "void", "void", "void") + numscalarresults = 0 + for regtype, regid in regs: + reg = get_register(tag, regtype, regid) + if reg.is_written() and reg.is_scalar_reg(): + return_type = HelperArg( + reg.helper_proto_type(), + reg.reg_tcg(), + reg.helper_arg_type() + ) + if numscalarresults > 1: + raise Exception("numscalarresults > 1") + return return_type + +def helper_args(tag, regs, imms): + args = [] + + ## First argument is the CPU state + args.append(HelperArg( + "env", + "tcg_env", + "CPUHexagonState *env" + )) + + ## For predicated instructions, we pass in the destination register + if is_predicated(tag): + for regtype, regid in regs: + reg = get_register(tag, regtype, regid) + if reg.is_writeonly() and not reg.is_hvx_reg(): + args.append(reg.helper_arg()) + + ## Pass the HVX destination registers + for regtype, regid in regs: + reg = get_register(tag, regtype, regid) + if reg.is_written() and reg.is_hvx_reg(): + args.append(reg.helper_arg()) + + ## Pass the source registers + for regtype, regid in regs: + reg = get_register(tag, regtype, regid) + if reg.is_read() and not (reg.is_hvx_reg() and reg.is_readwrite()): + args.append(reg.helper_arg()) + + ## Pass the immediates + for immlett, bits, immshift in imms: + args.append(HelperArg( + "s32", + f"tcg_constant_tl({imm_name(immlett)})", + f"int32_t {imm_name(immlett)}" + )) + + ## Other stuff the helper might need + if need_pkt_has_multi_cof(tag): + args.append(HelperArg( + "i32", + "tcg_constant_tl(ctx->pkt->pkt_has_multi_cof)", + "uint32_t pkt_has_multi_cof" + )) + if need_pkt_need_commit(tag): + args.append(HelperArg( + "i32", + "tcg_constant_tl(ctx->need_commit)", + "uint32_t pkt_need_commit" + )) + if need_PC(tag): + args.append(HelperArg( + "i32", + "tcg_constant_tl(ctx->pkt->pc)", + "target_ulong PC" + )) + if need_next_PC(tag): + args.append(HelperArg( + "i32", + "tcg_constant_tl(ctx->next_PC)", + "target_ulong next_PC" + )) + if need_slot(tag): + args.append(HelperArg( + "i32", + "gen_slotval(ctx)", + "uint32_t slotval" + )) + if need_part1(tag): + args.append(HelperArg( + "i32", + "tcg_constant_tl(insn->part1)" + "uint32_t part1" + )) + return args diff --git a/target/hexagon/idef-parser/macros.inc b/target/hexagon/idef-parser/macros.inc index 7478d4db171..94975d95832 100644 --- a/target/hexagon/idef-parser/macros.inc +++ b/target/hexagon/idef-parser/macros.inc @@ -127,5 +127,5 @@ /* Include fHIDE macros which hide type declarations */ #define fHIDE(A) A -/* Purge non-relavant parts */ +/* Purge non-relevant parts */ #define fBRANCH_SPECULATE_STALL(A, B, C, D, E) diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c index 4af020933aa..95f2b430764 100644 --- a/target/hexagon/idef-parser/parser-helpers.c +++ b/target/hexagon/idef-parser/parser-helpers.c @@ -1541,10 +1541,8 @@ void gen_circ_op(Context *c, HexValue *increment, HexValue *modifier) { - HexValue cs = gen_tmp(c, locp, 32, UNSIGNED); HexValue increment_m = *increment; increment_m = rvalue_materialize(c, locp, &increment_m); - OUT(c, locp, 
"gen_read_reg(", &cs, ", HEX_REG_CS0 + MuN);\n"); OUT(c, locp, "gen_helper_fcircadd(", @@ -1555,7 +1553,7 @@ void gen_circ_op(Context *c, &increment_m, ", ", modifier); - OUT(c, locp, ", ", &cs, ");\n"); + OUT(c, locp, ", CS);\n"); } HexValue gen_locnt_op(Context *c, YYLTYPE *locp, HexValue *src) @@ -2080,9 +2078,9 @@ void emit_arg(Context *c, YYLTYPE *locp, HexValue *arg) char reg_id[5]; reg_compose(c, locp, &(arg->reg), reg_id); EMIT_SIG(c, ", %s %s", type, reg_id); - /* MuV register requires also MuN to provide its index */ + /* MuV register requires also CS for circular addressing*/ if (arg->reg.type == MODIFIER) { - EMIT_SIG(c, ", int MuN"); + EMIT_SIG(c, ", TCGv CS"); } } break; diff --git a/target/hexagon/internal.h b/target/hexagon/internal.h index d732b6bb3c7..beb08cb7e38 100644 --- a/target/hexagon/internal.h +++ b/target/hexagon/internal.h @@ -33,8 +33,8 @@ int hexagon_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int hexagon_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -int hexagon_hvx_gdb_read_register(CPUHexagonState *env, GByteArray *mem_buf, int n); -int hexagon_hvx_gdb_write_register(CPUHexagonState *env, uint8_t *mem_buf, int n); +int hexagon_hvx_gdb_read_register(CPUState *env, GByteArray *mem_buf, int n); +int hexagon_hvx_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n); void hexagon_debug_vreg(CPUHexagonState *env, int regnum); void hexagon_debug_qreg(CPUHexagonState *env, int regnum); diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h index 9a51b5709be..1376d6ccc18 100644 --- a/target/hexagon/macros.h +++ b/target/hexagon/macros.h @@ -93,13 +93,13 @@ #define CHECK_NOSHUF_PRED(GET_EA, SIZE, PRED) \ do { \ - TCGLabel *label = gen_new_label(); \ - tcg_gen_brcondi_tl(TCG_COND_EQ, PRED, 0, label); \ + TCGLabel *noshuf_label = gen_new_label(); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, PRED, 0, noshuf_label); \ GET_EA; \ if (insn->slot == 0 && ctx->pkt->pkt_has_store_s1) { \ probe_noshuf_load(EA, SIZE, ctx->mem_idx); \ } \ - gen_set_label(label); \ + gen_set_label(noshuf_label); \ if (insn->slot == 0 && ctx->pkt->pkt_has_store_s1) { \ process_store(ctx, 1); \ } \ @@ -462,8 +462,7 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) #define fPM_CIRI(REG, IMM, MVAL) \ do { \ TCGv tcgv_siV = tcg_constant_tl(siV); \ - gen_helper_fcircadd(REG, REG, tcgv_siV, MuV, \ - hex_gpr[HEX_REG_CS0 + MuN]); \ + gen_helper_fcircadd(REG, REG, tcgv_siV, MuV, CS); \ } while (0) #else #define fEA_IMM(IMM) do { EA = (IMM); } while (0) diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build index da8e608d006..fb480afc03b 100644 --- a/target/hexagon/meson.build +++ b/target/hexagon/meson.build @@ -122,16 +122,149 @@ hexagon_ss.add(iset_py) # # Step 4 -# We use the dectree.py script to generate the decode tree header file +# Generate the input to the QEMU decodetree.py script # -dectree_generated = custom_target( - 'dectree_generated.h.inc', - output: 'dectree_generated.h.inc', - depends: [iset_py], +normal_decode_generated = custom_target( + 'normal_decode_generated', + output: 'normal_decode_generated', + depends: [iset_py, semantics_generated], env: {'PYTHONPATH': meson.current_build_dir()}, - command: [python, files('dectree.py'), '@OUTPUT@'], + command: [python, files('gen_decodetree.py'), semantics_generated, 'NORMAL', '@OUTPUT@'], ) -hexagon_ss.add(dectree_generated) +hexagon_ss.add(normal_decode_generated) + +hvx_decode_generated = custom_target( + 'hvx_decode_generated', + output: 'hvx_decode_generated', + depends: 
[iset_py, semantics_generated], + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files('gen_decodetree.py'), semantics_generated, 'EXT_mmvec', '@OUTPUT@'], +) +hexagon_ss.add(hvx_decode_generated) + +subinsn_a_decode_generated = custom_target( + 'subinsn_a_decode_generated', + output: 'subinsn_a_decode_generated', + depends: [iset_py, semantics_generated], + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files('gen_decodetree.py'), semantics_generated, 'SUBINSN_A', '@OUTPUT@'], +) +hexagon_ss.add(subinsn_a_decode_generated) + +subinsn_l1_decode_generated = custom_target( + 'subinsn_l1_decode_generated', + output: 'subinsn_l1_decode_generated', + depends: [iset_py, semantics_generated], + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files('gen_decodetree.py'), semantics_generated, 'SUBINSN_L1', '@OUTPUT@'], +) +hexagon_ss.add(subinsn_l1_decode_generated) + +subinsn_l2_decode_generated = custom_target( + 'subinsn_l2_decode_generated', + output: 'subinsn_l2_decode_generated', + depends: [iset_py, semantics_generated], + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files('gen_decodetree.py'), semantics_generated, 'SUBINSN_L2', '@OUTPUT@'], +) +hexagon_ss.add(subinsn_l2_decode_generated) + +subinsn_s1_decode_generated = custom_target( + 'subinsn_s1_decode_generated', + output: 'subinsn_s1_decode_generated', + depends: [iset_py, semantics_generated], + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files('gen_decodetree.py'), semantics_generated, 'SUBINSN_S1', '@OUTPUT@'], +) +hexagon_ss.add(subinsn_s1_decode_generated) + +subinsn_s2_decode_generated = custom_target( + 'subinsn_s2_decode_generated', + output: 'subinsn_s2_decode_generated', + depends: [iset_py, semantics_generated], + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files('gen_decodetree.py'), semantics_generated, 'SUBINSN_S2', '@OUTPUT@'], +) +hexagon_ss.add(subinsn_s2_decode_generated) + +# +# Run the QEMU decodetree.py script to produce the instruction decoder +# +decodetree_py = meson.current_source_dir() / '../../scripts/decodetree.py' +decode_normal_generated = custom_target( + 'decode_normal_generated.c.inc', + output: 'decode_normal_generated.c.inc', + input: normal_decode_generated, + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files(decodetree_py), normal_decode_generated, '--static-decode=decode_normal', '-o', '@OUTPUT@'], +) +hexagon_ss.add(decode_normal_generated) + +decode_hvx_generated = custom_target( + 'decode_hvx_generated.c.inc', + output: 'decode_hvx_generated.c.inc', + input: hvx_decode_generated, + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files(decodetree_py), hvx_decode_generated, '--static-decode=decode_hvx', '-o', '@OUTPUT@'], +) +hexagon_ss.add(decode_hvx_generated) + +decode_subinsn_a_generated = custom_target( + 'decode_subinsn_a_generated.c.inc', + output: 'decode_subinsn_a_generated.c.inc', + input: subinsn_a_decode_generated, + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files(decodetree_py), subinsn_a_decode_generated, ['--static-decode=decode_subinsn_a', '--insnwidth=16'], '-o', '@OUTPUT@'], +) +hexagon_ss.add(decode_subinsn_a_generated) + +decode_subinsn_l1_generated = custom_target( + 'decode_subinsn_l1_generated.c.inc', + output: 'decode_subinsn_l1_generated.c.inc', + input: subinsn_l1_decode_generated, + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files(decodetree_py), 
subinsn_l1_decode_generated, ['--static-decode=decode_subinsn_l1', '--insnwidth=16'], '-o', '@OUTPUT@'], +) +hexagon_ss.add(decode_subinsn_l1_generated) + +decode_subinsn_l2_generated = custom_target( + 'decode_subinsn_l2_generated.c.inc', + output: 'decode_subinsn_l2_generated.c.inc', + input: subinsn_l2_decode_generated, + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files(decodetree_py), subinsn_l2_decode_generated, ['--static-decode=decode_subinsn_l2', '--insnwidth=16'], '-o', '@OUTPUT@'], +) +hexagon_ss.add(decode_subinsn_l2_generated) + +decode_subinsn_s1_generated = custom_target( + 'decode_subinsn_s1_generated.c.inc', + output: 'decode_subinsn_s1_generated.c.inc', + input: subinsn_s1_decode_generated, + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files(decodetree_py), subinsn_s1_decode_generated, ['--static-decode=decode_subinsn_s1', '--insnwidth=16'], '-o', '@OUTPUT@'], +) +hexagon_ss.add(decode_subinsn_s1_generated) + +decode_subinsn_s2_generated = custom_target( + 'decode_subinsn_s2_generated.c.inc', + output: 'decode_subinsn_s2_generated.c.inc', + input: subinsn_s2_decode_generated, + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files(decodetree_py), subinsn_s2_decode_generated, ['--static-decode=decode_subinsn_s2', '--insnwidth=16'], '-o', '@OUTPUT@'], +) +hexagon_ss.add(decode_subinsn_s2_generated) + +# +# Generate the trans_* functions that the decoder will use +# +decodetree_trans_funcs_generated = custom_target( + 'decodetree_trans_funcs_generated.c.inc', + output: 'decodetree_trans_funcs_generated.c.inc', + depends: [iset_py, semantics_generated], + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files('gen_trans_funcs.py'), semantics_generated, '@OUTPUT@'], +) +hexagon_ss.add(decodetree_trans_funcs_generated) hexagon_ss.add(files( 'cpu.c', diff --git a/target/hexagon/mmvec/decode_ext_mmvec.c b/target/hexagon/mmvec/decode_ext_mmvec.c index 174eb3b78b2..202d84c7c00 100644 --- a/target/hexagon/mmvec/decode_ext_mmvec.c +++ b/target/hexagon/mmvec/decode_ext_mmvec.c @@ -33,7 +33,6 @@ check_new_value(Packet *pkt) const char *dststr = NULL; uint16_t def_opcode; char letter; - int def_regnum; for (i = 1; i < pkt->num_insns; i++) { uint16_t use_opcode = pkt->insn[i].opcode; @@ -78,7 +77,6 @@ check_new_value(Packet *pkt) } } if ((dststr == NULL) && GET_ATTRIB(def_opcode, A_CVI_GATHER)) { - def_regnum = 0; pkt->insn[i].regno[use_regidx] = def_oreg; pkt->insn[i].new_value_producer_slot = pkt->insn[def_idx].slot; } else { @@ -86,7 +84,7 @@ check_new_value(Packet *pkt) /* still not there, we have a bad packet */ g_assert_not_reached(); } - def_regnum = pkt->insn[def_idx].regno[dststr - reginfo]; + int def_regnum = pkt->insn[def_idx].regno[dststr - reginfo]; /* Now patch up the consumer with the register number */ pkt->insn[i].regno[use_regidx] = def_regnum ^ def_oreg; /* special case for (Vx,Vy) */ diff --git a/target/hexagon/opcodes.c b/target/hexagon/opcodes.c index 35d790cdd5b..1f7f3def38d 100644 --- a/target/hexagon/opcodes.c +++ b/target/hexagon/opcodes.c @@ -111,33 +111,4 @@ void opcode_init(void) #include "op_attribs_generated.h.inc" #undef OP_ATTRIB #undef ATTRIBS - - decode_init(); -} - - -#define NEEDLE "IMMEXT(" - -int opcode_which_immediate_is_extended(Opcode opcode) -{ - const char *p; - - g_assert(opcode < XX_LAST_OPCODE); - g_assert(GET_ATTRIB(opcode, A_EXTENDABLE)); - - p = opcode_short_semantics[opcode]; - p = strstr(p, NEEDLE); - g_assert(p); - p += strlen(NEEDLE); - while 
(isspace(*p)) { - p++; - } - /* lower is always imm 0, upper always imm 1. */ - if (islower(*p)) { - return 0; - } else if (isupper(*p)) { - return 1; - } else { - g_assert_not_reached(); - } } diff --git a/target/hexagon/opcodes.h b/target/hexagon/opcodes.h index 6e90e00fe22..fa7e3219504 100644 --- a/target/hexagon/opcodes.h +++ b/target/hexagon/opcodes.h @@ -53,6 +53,4 @@ extern const OpcodeEncoding opcode_encodings[XX_LAST_OPCODE]; void opcode_init(void); -int opcode_which_immediate_is_extended(Opcode opcode); - #endif diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c index 666c0611802..f163eefe97d 100644 --- a/target/hexagon/translate.c +++ b/target/hexagon/translate.c @@ -234,7 +234,8 @@ static int read_packet_words(CPUHexagonState *env, DisasContext *ctx, g_assert(ctx->base.num_insns == 1); } - HEX_DEBUG_LOG("decode_packet: pc = 0x%x\n", ctx->base.pc_next); + HEX_DEBUG_LOG("decode_packet: pc = 0x%" VADDR_PRIx "\n", + ctx->base.pc_next); HEX_DEBUG_LOG(" words = { "); for (int i = 0; i < nwords; i++) { HEX_DEBUG_LOG("0x%x, ", words[i]); @@ -1033,10 +1034,10 @@ static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx) return; } - if (decode_packet(nwords, words, &pkt, false) > 0) { + ctx->pkt = &pkt; + if (decode_packet(ctx, nwords, words, &pkt, false) > 0) { pkt.pc = ctx->base.pc_next; HEX_DEBUG_PRINT_PKT(&pkt); - ctx->pkt = &pkt; gen_start_packet(ctx); for (i = 0; i < pkt.num_insns; i++) { ctx->insn = &pkt.insn[i]; @@ -1154,7 +1155,7 @@ static const TranslatorOps hexagon_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index fda32d7f590..3831cb6db27 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -94,6 +94,17 @@ static bool hppa_cpu_has_work(CPUState *cs) return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); } +static int hppa_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + CPUHPPAState *env = cpu_env(cs); + + if (env->psw & (ifetch ? PSW_C : PSW_D)) { + return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P); + } + /* mmu disabled */ + return env->psw & PSW_W ? 
MMU_ABS_W_IDX : MMU_ABS_IDX; +} + static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info) { info->mach = bfd_mach_hppa20; @@ -110,9 +121,10 @@ void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr, CPUHPPAState *env = &cpu->env; cs->exception_index = EXCP_UNALIGN; + cpu_restore_state(cs, retaddr); hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx)); - cpu_loop_exit_restore(cs, retaddr); + cpu_loop_exit(cs); } #endif /* CONFIG_USER_ONLY */ @@ -156,38 +168,8 @@ static void hppa_cpu_initfn(Object *obj) static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model) { g_autofree char *typename = g_strconcat(cpu_model, "-cpu", NULL); - ObjectClass *oc = object_class_by_name(typename); - - if (oc && - !object_class_is_abstract(oc) && - object_class_dynamic_cast(oc, TYPE_HPPA_CPU)) { - return oc; - } - return NULL; -} - -static void hppa_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - CPUClass *cc = CPU_CLASS(oc); - const char *tname = object_class_get_name(oc); - g_autofree char *name = g_strndup(tname, strchr(tname, '-') - tname); - - if (cc->deprecation_note) { - qemu_printf(" %s (deprecated)\n", name); - } else { - qemu_printf(" %s\n", name); - } -} - -void hppa_cpu_list(void) -{ - GSList *list; - list = object_class_get_list_sorted(TYPE_HPPA_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, hppa_cpu_list_entry, NULL); - g_slist_free(list); + return object_class_by_name(typename); } #ifndef CONFIG_USER_ONLY @@ -200,7 +182,7 @@ static const struct SysemuCPUOps hppa_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps hppa_tcg_ops = { +static const TCGCPUOps hppa_tcg_ops = { .initialize = hppa_translate_init, .synchronize_from_tb = hppa_cpu_synchronize_from_tb, .restore_state_to_opc = hppa_restore_state_to_opc, @@ -210,6 +192,7 @@ static const struct TCGCPUOps hppa_tcg_ops = { .cpu_exec_interrupt = hppa_cpu_exec_interrupt, .do_interrupt = hppa_cpu_do_interrupt, .do_unaligned_access = hppa_cpu_do_unaligned_access, + .do_transaction_failed = hppa_cpu_do_transaction_failed, #endif /* !CONFIG_USER_ONLY */ }; @@ -224,6 +207,7 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = hppa_cpu_class_by_name; cc->has_work = hppa_cpu_has_work; + cc->mmu_index = hppa_cpu_mmu_index; cc->dump_state = hppa_cpu_dump_state; cc->set_pc = hppa_cpu_set_pc; cc->get_pc = hppa_cpu_get_pc; diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h index 9556e95fab4..a072d0bb636 100644 --- a/target/hppa/cpu.h +++ b/target/hppa/cpu.h @@ -281,31 +281,24 @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env) return hppa_is_pa20(env) ? 0 : PA10_BTLB_FIXED + PA10_BTLB_VARIABLE; } -static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch) -{ -#ifdef CONFIG_USER_ONLY - return MMU_USER_IDX; -#else - if (env->psw & (ifetch ? PSW_C : PSW_D)) { - return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P); - } - /* mmu disabled */ - return env->psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX; -#endif -} - void hppa_translate_init(void); #define CPU_RESOLVING_TYPE TYPE_HPPA_CPU +static inline uint64_t gva_offset_mask(target_ulong psw) +{ + return (psw & PSW_W + ? MAKE_64BIT_MASK(0, 62) + : MAKE_64BIT_MASK(0, 32)); +} + static inline target_ulong hppa_form_gva_psw(target_ulong psw, uint64_t spc, target_ulong off) { #ifdef CONFIG_USER_ONLY return off; #else - off &= psw & PSW_W ? 
MAKE_64BIT_MASK(0, 62) : MAKE_64BIT_MASK(0, 32); - return spc | off; + return spc | (off & gva_offset_mask(psw)); #endif } @@ -394,6 +387,11 @@ bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req); int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, int type, hwaddr *pphys, int *pprot, HPPATLBEntry **tlb_entry); +void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr); extern const MemoryRegionOps hppa_io_eir_ops; extern const VMStateDescription vmstate_hppa_cpu; void hppa_cpu_alarm_timer(void *); @@ -403,7 +401,4 @@ G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra); #define CPU_RESOLVING_TYPE TYPE_HPPA_CPU -#define cpu_list hppa_cpu_list -void hppa_cpu_list(void); - #endif /* HPPA_CPU_H */ diff --git a/target/hppa/helper.c b/target/hppa/helper.c index 859644c47af..9d217d051c1 100644 --- a/target/hppa/helper.c +++ b/target/hppa/helper.c @@ -76,7 +76,8 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw) } psw &= ~reserved; - env->psw = psw & ~(PSW_N | PSW_V | PSW_CB); + env->psw = psw & (uint32_t)~(PSW_N | PSW_V | PSW_CB); + env->psw_n = (psw / PSW_N) & 1; env->psw_v = -((psw / PSW_V) & 1); diff --git a/target/hppa/helper.h b/target/hppa/helper.h index 20698f68ed0..5900fd70bcb 100644 --- a/target/hppa/helper.h +++ b/target/hppa/helper.h @@ -86,12 +86,10 @@ DEF_HELPER_FLAGS_0(read_interval_timer, TCG_CALL_NO_RWG, tl) #ifndef CONFIG_USER_ONLY DEF_HELPER_1(halt, noreturn, env) DEF_HELPER_1(reset, noreturn, env) -DEF_HELPER_1(getshadowregs, void, env) DEF_HELPER_1(rfi, void, env) DEF_HELPER_1(rfi_r, void, env) DEF_HELPER_FLAGS_2(write_interval_timer, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(write_eirr, TCG_CALL_NO_RWG, void, env, tl) -DEF_HELPER_FLAGS_2(write_eiem, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(swap_system_mask, TCG_CALL_NO_RWG, tl, env, tl) DEF_HELPER_FLAGS_3(itlba_pa11, TCG_CALL_NO_RWG, void, env, tl, tl) DEF_HELPER_FLAGS_3(itlbp_pa11, TCG_CALL_NO_RWG, void, env, tl, tl) @@ -103,4 +101,5 @@ DEF_HELPER_FLAGS_1(ptlbe, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_2(lpa, TCG_CALL_NO_WG, tl, env, tl) DEF_HELPER_FLAGS_1(change_prot_id, TCG_CALL_NO_RWG, void, env) DEF_HELPER_1(diag_btlb, void, env) +DEF_HELPER_1(diag_console_output, void, env) #endif diff --git a/target/hppa/insns.decode b/target/hppa/insns.decode index f5a3f02fd15..71074a64c18 100644 --- a/target/hppa/insns.decode +++ b/target/hppa/insns.decode @@ -24,16 +24,17 @@ %assemble_sr3 13:1 14:2 %assemble_sr3x 13:1 14:2 !function=expand_sr3x -%assemble_11a 0:s1 4:10 !function=expand_shl3 +%assemble_11a 4:12 0:1 !function=expand_11a %assemble_12 0:s1 2:1 3:10 !function=expand_shl2 -%assemble_12a 0:s1 3:11 !function=expand_shl2 +%assemble_12a 3:13 0:1 !function=expand_12a +%assemble_16 0:16 !function=expand_16 %assemble_17 0:s1 16:5 2:1 3:10 !function=expand_shl2 %assemble_22 0:s1 16:10 2:1 3:10 !function=expand_shl2 +%assemble_sp 14:2 !function=sp0_if_wide %assemble_21 0:s1 1:11 14:2 16:5 12:2 !function=expand_shl11 %lowsign_11 0:s1 1:10 -%lowsign_14 0:s1 1:13 %sm_imm 16:10 !function=expand_sm_imm @@ -56,11 +57,16 @@ %neg_to_m 0:1 !function=neg_to_m %a_to_m 2:1 !function=neg_to_m %cmpbid_c 13:2 !function=cmpbid_c +%d_5 5:1 !function=pa20_d +%d_11 11:1 !function=pa20_d +%d_13 13:1 !function=pa20_d #### # Argument set definitions #### +&empty + # All insns that need to form a virtual address should use this 
set. &ldst t b x disp sp m scale size @@ -83,15 +89,16 @@ # Format definitions #### -@rr_cf_d ...... r:5 ..... cf:4 ...... d:1 t:5 &rr_cf_d +@rr_cf_d ...... r:5 ..... cf:4 ...... . t:5 &rr_cf_d d=%d_5 @rrr ...... r2:5 r1:5 .... ....... t:5 &rrr @rrr_cf ...... r2:5 r1:5 cf:4 ....... t:5 &rrr_cf -@rrr_cf_d ...... r2:5 r1:5 cf:4 ...... d:1 t:5 &rrr_cf_d +@rrr_cf_d ...... r2:5 r1:5 cf:4 ...... . t:5 &rrr_cf_d d=%d_5 @rrr_sh ...... r2:5 r1:5 ........ sh:2 . t:5 &rrr_sh -@rrr_cf_d_sh ...... r2:5 r1:5 cf:4 .... sh:2 d:1 t:5 &rrr_cf_d_sh -@rrr_cf_d_sh0 ...... r2:5 r1:5 cf:4 ...... d:1 t:5 &rrr_cf_d_sh sh=0 +@rrr_cf_d_sh ...... r2:5 r1:5 cf:4 .... sh:2 . t:5 &rrr_cf_d_sh d=%d_5 +@rrr_cf_d_sh0 ...... r2:5 r1:5 cf:4 ...... . t:5 &rrr_cf_d_sh d=%d_5 sh=0 @rri_cf ...... r:5 t:5 cf:4 . ........... &rri_cf i=%lowsign_11 -@rri_cf_d ...... r:5 t:5 cf:4 d:1 ........... &rri_cf_d i=%lowsign_11 +@rri_cf_d ...... r:5 t:5 cf:4 . ........... \ + &rri_cf_d d=%d_11 i=%lowsign_11 @rrb_cf ...... r2:5 r1:5 c:3 ........... n:1 . \ &rrb_c_f disp=%assemble_12 @@ -143,9 +150,9 @@ getshadowregs 1111 1111 1111 1101 1110 1010 1101 0010 nop 000001 ----- ----- -- 11001010 0 ----- # fdc, disp nop_addrx 000001 ..... ..... -- 01001010 . ----- @addrx # fdc, index nop_addrx 000001 ..... ..... -- 01001011 . ----- @addrx # fdce -nop_addrx 000001 ..... ..... --- 0001010 . ----- @addrx # fic 0x0a -nop_addrx 000001 ..... ..... -- 01001111 . 00000 @addrx # fic 0x4f -nop_addrx 000001 ..... ..... --- 0001011 . ----- @addrx # fice +fic 000001 ..... ..... --- 0001010 . ----- @addrx # fic 0x0a +fic 000001 ..... ..... -- 01001111 . 00000 @addrx # fic 0x4f +fic 000001 ..... ..... --- 0001011 . ----- @addrx # fice nop_addrx 000001 ..... ..... -- 01001110 . 00000 @addrx # pdc probe 000001 b:5 ri:5 sp:2 imm:1 100011 write:1 0 t:5 @@ -221,7 +228,7 @@ sub_b_tsv 000010 ..... ..... .... 110100 . ..... @rrr_cf_d ldil 001000 t:5 ..................... i=%assemble_21 addil 001010 r:5 ..................... i=%assemble_21 -ldo 001101 b:5 t:5 -- .............. i=%lowsign_14 +ldo 001101 b:5 t:5 ................ i=%assemble_16 addi 101101 ..... ..... .... 0 ........... @rri_cf addi_tsv 101101 ..... ..... .... 1 ........... @rri_cf @@ -304,14 +311,18 @@ fstd 001011 ..... ..... .. . 1 -- 100 0 . ..... @fldstdi # Offset Mem #### -@ldstim11 ...... b:5 t:5 sp:2 .............. \ - &ldst disp=%assemble_11a m=%ma2_to_m x=0 scale=0 size=3 -@ldstim14 ...... b:5 t:5 sp:2 .............. \ - &ldst disp=%lowsign_14 x=0 scale=0 m=0 -@ldstim14m ...... b:5 t:5 sp:2 .............. \ - &ldst disp=%lowsign_14 x=0 scale=0 m=%neg_to_m -@ldstim12m ...... b:5 t:5 sp:2 .............. \ - &ldst disp=%assemble_12a x=0 scale=0 m=%pos_to_m +@ldstim11 ...... b:5 t:5 ................ \ + &ldst sp=%assemble_sp disp=%assemble_11a \ + m=%ma2_to_m x=0 scale=0 size=3 +@ldstim14 ...... b:5 t:5 ................ \ + &ldst sp=%assemble_sp disp=%assemble_16 \ + x=0 scale=0 m=0 +@ldstim14m ...... b:5 t:5 ................ \ + &ldst sp=%assemble_sp disp=%assemble_16 \ + x=0 scale=0 m=%neg_to_m +@ldstim12m ...... b:5 t:5 ................ \ + &ldst sp=%assemble_sp disp=%assemble_12a \ + x=0 scale=0 m=%pos_to_m # LDB, LDH, LDW, LDWM ld 010000 ..... ..... .. .............. @ldstim14 size=0 @@ -327,15 +338,19 @@ st 011010 ..... ..... .. .............. @ldstim14 size=2 st 011011 ..... ..... .. .............. @ldstim14m size=2 st 011111 ..... ..... .. ...........10. @ldstim12m size=2 -fldw 010110 b:5 ..... sp:2 .............. 
\ - &ldst disp=%assemble_12a t=%rm64 m=%a_to_m x=0 scale=0 size=2 -fldw 010111 b:5 ..... sp:2 ...........0.. \ - &ldst disp=%assemble_12a t=%rm64 m=0 x=0 scale=0 size=2 +fldw 010110 b:5 ..... ................ \ + &ldst disp=%assemble_12a sp=%assemble_sp \ + t=%rm64 m=%a_to_m x=0 scale=0 size=2 +fldw 010111 b:5 ..... .............0.. \ + &ldst disp=%assemble_12a sp=%assemble_sp \ + t=%rm64 m=0 x=0 scale=0 size=2 -fstw 011110 b:5 ..... sp:2 .............. \ - &ldst disp=%assemble_12a t=%rm64 m=%a_to_m x=0 scale=0 size=2 -fstw 011111 b:5 ..... sp:2 ...........0.. \ - &ldst disp=%assemble_12a t=%rm64 m=0 x=0 scale=0 size=2 +fstw 011110 b:5 ..... ................ \ + &ldst disp=%assemble_12a sp=%assemble_sp \ + t=%rm64 m=%a_to_m x=0 scale=0 size=2 +fstw 011111 b:5 ..... .............0.. \ + &ldst disp=%assemble_12a sp=%assemble_sp \ + t=%rm64 m=0 x=0 scale=0 size=2 ld 010100 ..... ..... .. ............0. @ldstim11 fldd 010100 ..... ..... .. ............1. @ldstim11 @@ -359,8 +374,10 @@ fmpysub_d 100110 ..... ..... ..... ..... 1 ..... @mpyadd # Conditional Branches #### -bb_sar 110000 00000 r:5 c:1 1 d:1 ........... n:1 . disp=%assemble_12 -bb_imm 110001 p:5 r:5 c:1 1 d:1 ........... n:1 . disp=%assemble_12 +bb_sar 110000 00000 r:5 c:1 1 . ........... n:1 . \ + disp=%assemble_12 d=%d_13 +bb_imm 110001 p:5 r:5 c:1 1 . ........... n:1 . \ + disp=%assemble_12 d=%d_13 movb 110010 ..... ..... ... ........... . . @rrb_cf f=0 movbi 110011 ..... ..... ... ........... . . @rib_cf f=0 @@ -619,4 +636,18 @@ fdiv_d 001110 ..... ..... 011 ..... ... ..... @f0e_d_3 xmpyu 001110 ..... ..... 010 .0111 .00 t:5 r1=%ra64 r2=%rb64 # diag -diag 000101 i:26 +{ + [ + diag_btlb 000101 00 0000 0000 0000 0001 0000 0000 + diag_cout 000101 00 0000 0000 0000 0001 0000 0001 + + # For 32-bit PA-7300LC (PCX-L2) + diag_getshadowregs_pa1 000101 00 0000 0000 0001 1010 0000 0000 + diag_putshadowregs_pa1 000101 00 0000 0000 0001 1010 0100 0000 + + # For 64-bit PA8700 (PCX-W2) + diag_getshadowregs_pa2 000101 00 0111 1000 0001 1000 0100 0000 + diag_putshadowregs_pa2 000101 00 0111 0000 0001 1000 0100 0000 + ] + diag_unimp 000101 i:26 +} diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c index 98e9d688f64..a667ee380d7 100644 --- a/target/hppa/int_helper.c +++ b/target/hppa/int_helper.c @@ -28,7 +28,7 @@ static void eval_interrupt(HPPACPU *cpu) { CPUState *cs = CPU(cpu); - if (cpu->env.cr[CR_EIRR] & cpu->env.cr[CR_EIEM]) { + if (cpu->env.cr[CR_EIRR]) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); @@ -84,17 +84,9 @@ void hppa_cpu_alarm_timer(void *opaque) void HELPER(write_eirr)(CPUHPPAState *env, target_ulong val) { env->cr[CR_EIRR] &= ~val; - qemu_mutex_lock_iothread(); + bql_lock(); eval_interrupt(env_archcpu(env)); - qemu_mutex_unlock_iothread(); -} - -void HELPER(write_eiem)(CPUHPPAState *env, target_ulong val) -{ - env->cr[CR_EIEM] = val; - qemu_mutex_lock_iothread(); - eval_interrupt(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void hppa_cpu_do_interrupt(CPUState *cs) @@ -115,14 +107,10 @@ void hppa_cpu_do_interrupt(CPUState *cs) /* step 3 */ /* - * For pa1.x, IIASQ is simply a copy of IASQ. - * For pa2.0, IIASQ is the top bits of the virtual address, - * or zero if translation is disabled. + * IIASQ is the top bits of the virtual address, or zero if translation + * is disabled -- with PSW_W == 0, this will reduce to the space. 
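Note: the narrow/wide handling of the interruption offset queue introduced below can be sanity-checked with a small stand-alone sketch. The function and variable names here are illustrative only, not the actual QEMU helpers.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the IIAOQ rule on interrupt entry: wide mode (PSW.W set)
     * preserves the full offset, narrow mode keeps only the low 32 bits. */
    static uint64_t fold_iiaoq(uint64_t iaoq, bool psw_w)
    {
        return psw_w ? iaoq : (uint32_t)iaoq;
    }

    int main(void)
    {
        assert(fold_iiaoq(0x123456789abcull, true)  == 0x123456789abcull);
        assert(fold_iiaoq(0x123456789abcull, false) == 0x56789abcull);
        return 0;
    }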
*/ - if (!hppa_is_pa20(env)) { - env->cr[CR_IIASQ] = env->iasq_f >> 32; - env->cr_back[0] = env->iasq_b >> 32; - } else if (old_psw & PSW_C) { + if (old_psw & PSW_C) { env->cr[CR_IIASQ] = hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32; env->cr_back[0] = @@ -131,8 +119,14 @@ void hppa_cpu_do_interrupt(CPUState *cs) env->cr[CR_IIASQ] = 0; env->cr_back[0] = 0; } - env->cr[CR_IIAOQ] = env->iaoq_f; - env->cr_back[1] = env->iaoq_b; + /* IIAOQ is the full offset for wide mode, or 32 bits for narrow mode. */ + if (old_psw & PSW_W) { + env->cr[CR_IIAOQ] = env->iaoq_f; + env->cr_back[1] = env->iaoq_b; + } else { + env->cr[CR_IIAOQ] = (uint32_t)env->iaoq_f; + env->cr_back[1] = (uint32_t)env->iaoq_b; + } if (old_psw & PSW_Q) { /* step 5 */ @@ -280,7 +274,9 @@ bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request) } /* If interrupts are requested and enabled, raise them. */ - if ((env->psw & PSW_I) && (interrupt_request & CPU_INTERRUPT_HARD)) { + if ((interrupt_request & CPU_INTERRUPT_HARD) + && (env->psw & PSW_I) + && (env->cr[CR_EIRR] & env->cr[CR_EIEM])) { cs->exception_index = EXCP_EXT_INTERRUPT; hppa_cpu_do_interrupt(cs); return true; diff --git a/target/hppa/machine.c b/target/hppa/machine.c index 15cbc5e6d03..211bfcf6407 100644 --- a/target/hppa/machine.c +++ b/target/hppa/machine.c @@ -201,7 +201,7 @@ static const VMStateField vmstate_env_fields[] = { VMSTATE_END_OF_LIST() }; -static const VMStateDescription *vmstate_env_subsections[] = { +static const VMStateDescription * const vmstate_env_subsections[] = { &vmstate_tlb, NULL }; diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c index 4fcc612754b..84785b5a5c6 100644 --- a/target/hppa/mem_helper.c +++ b/target/hppa/mem_helper.c @@ -152,6 +152,49 @@ static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env) return ent; } +#define ACCESS_ID_MASK 0xffff + +/* Return the set of protections allowed by a PID match. */ +static int match_prot_id_1(uint32_t access_id, uint32_t prot_id) +{ + if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) { + return (prot_id & 1 + ? PAGE_EXEC | PAGE_READ + : PAGE_EXEC | PAGE_READ | PAGE_WRITE); + } + return 0; +} + +static int match_prot_id32(CPUHPPAState *env, uint32_t access_id) +{ + int r, i; + + for (i = CR_PID1; i <= CR_PID4; ++i) { + r = match_prot_id_1(access_id, env->cr[i]); + if (r) { + return r; + } + } + return 0; +} + +static int match_prot_id64(CPUHPPAState *env, uint32_t access_id) +{ + int r, i; + + for (i = CR_PID1; i <= CR_PID4; ++i) { + r = match_prot_id_1(access_id, env->cr[i]); + if (r) { + return r; + } + r = match_prot_id_1(access_id, env->cr[i] >> 32); + if (r) { + return r; + } + } + return 0; +} + int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, int type, hwaddr *pphys, int *pprot, HPPATLBEntry **tlb_entry) @@ -224,29 +267,30 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, break; } + /* + * No guest access type indicates a non-architectural access from + * within QEMU. Bypass checks for access, D, B, P and T bits. + */ + if (type == 0) { + goto egress; + } + /* access_id == 0 means public page and no check is performed */ if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) { - /* If bits [31:1] match, and bit 0 is set, suppress write. 
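For reference, the protection-ID match rule used by match_prot_id_1() above can be restated stand-alone with a couple of worked values. The PAGE_* constants below are local stand-ins, not QEMU's definitions.

    #include <assert.h>
    #include <stdint.h>

    enum { PAGE_READ = 1, PAGE_WRITE = 2, PAGE_EXEC = 4 };  /* local stand-ins */

    /* A PID register holds the protection ID in bits [16:1]; bit 0 is the
     * write-disable flag.  A match grants R/X, and W only if WD is clear. */
    static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
    {
        if (((access_id ^ (prot_id >> 1)) & 0xffff) == 0) {
            return prot_id & 1 ? PAGE_EXEC | PAGE_READ
                               : PAGE_EXEC | PAGE_READ | PAGE_WRITE;
        }
        return 0;
    }

    int main(void)
    {
        /* Matching ID, WD clear: read/write/execute allowed. */
        assert(match_prot_id_1(0x1234, 0x1234 << 1) ==
               (PAGE_EXEC | PAGE_READ | PAGE_WRITE));
        /* Matching ID, WD set: write suppressed. */
        assert(match_prot_id_1(0x1234, (0x1234 << 1) | 1) ==
               (PAGE_EXEC | PAGE_READ));
        /* Non-matching ID: no access granted by this PID register. */
        assert(match_prot_id_1(0x1234, 0x4321 << 1) == 0);
        return 0;
    }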
*/ - int match = ent->access_id * 2 + 1; - - if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] || - match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) { - prot &= PAGE_READ | PAGE_EXEC; - if (type == PAGE_WRITE) { - ret = EXCP_DMPI; - goto egress; - } + int access_prot = (hppa_is_pa20(env) + ? match_prot_id64(env, ent->access_id) + : match_prot_id32(env, ent->access_id)); + if (unlikely(!(type & access_prot))) { + /* Not allowed -- Inst/Data Memory Protection Id Fault. */ + ret = type & PAGE_EXEC ? EXCP_IMP : EXCP_DMPI; + goto egress; } - } - - /* No guest access type indicates a non-architectural access from - within QEMU. Bypass checks for access, D, B and T bits. */ - if (type == 0) { - goto egress; + /* Otherwise exclude permissions not allowed (i.e WD). */ + prot &= access_prot; } if (unlikely(!(prot & type))) { - /* The access isn't allowed -- Inst/Data Memory Protection Fault. */ + /* Not allowed -- Inst/Data Memory Access Rights Fault. */ ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR; goto egress; } @@ -348,9 +392,29 @@ raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr, CPUState *cs = env_cpu(env); cs->exception_index = excp; + cpu_restore_state(cs, retaddr); hppa_set_ior_and_isr(env, addr, mmu_disabled); - cpu_loop_exit_restore(cs, retaddr); + cpu_loop_exit(cs); +} + +void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr) +{ + CPUHPPAState *env = cpu_env(cs); + + qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx + " while accessing I/O at %#08" HWADDR_PRIx "\n", + env->iasq_f, env->iaoq_f, physaddr); + + /* FIXME: Enable HPMC exceptions when firmware has clean device probing */ + if (0) { + raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr, + MMU_IDX_MMU_DISABLED(mmu_idx)); + } } bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, @@ -518,7 +582,6 @@ void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2) /* Purge (Insn/Data) TLB. */ static void ptlb_work(CPUState *cpu, run_on_cpu_data data) { - CPUHPPAState *env = cpu_env(cpu); vaddr start = data.target_ptr; vaddr end; @@ -532,7 +595,7 @@ static void ptlb_work(CPUState *cpu, run_on_cpu_data data) end = (vaddr)TARGET_PAGE_SIZE << (2 * end); end = start + end - 1; - hppa_flush_tlb_range(env, start, end); + hppa_flush_tlb_range(cpu_env(cpu), start, end); } /* This is local to the current cpu. 
*/ @@ -646,7 +709,7 @@ int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr) void HELPER(diag_btlb)(CPUHPPAState *env) { unsigned int phys_page, len, slot; - int mmu_idx = cpu_mmu_index(env, 0); + int mmu_idx = cpu_mmu_index(env_cpu(env), 0); uintptr_t ra = GETPC(); HPPATLBEntry *btlb; uint64_t virt_page; @@ -665,7 +728,7 @@ void HELPER(diag_btlb)(CPUHPPAState *env) case 0: /* return BTLB parameters */ qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n"); - vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong), + vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t), MMU_DATA_STORE, mmu_idx, ra); if (vaddr == NULL) { env->gr[28] = -10; /* invalid argument */ diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c index ce15469465e..6cf49f33b7c 100644 --- a/target/hppa/op_helper.c +++ b/target/hppa/op_helper.c @@ -59,7 +59,7 @@ void HELPER(tcond)(CPUHPPAState *env, target_ulong cond) static void atomic_store_mask32(CPUHPPAState *env, target_ulong addr, uint32_t val, uint32_t mask, uintptr_t ra) { - int mmu_idx = cpu_mmu_index(env, 0); + int mmu_idx = cpu_mmu_index(env_cpu(env), 0); uint32_t old, new, cmp, *haddr; void *vaddr; @@ -86,7 +86,7 @@ static void atomic_store_mask64(CPUHPPAState *env, target_ulong addr, int size, uintptr_t ra) { #ifdef CONFIG_ATOMIC64 - int mmu_idx = cpu_mmu_index(env, 0); + int mmu_idx = cpu_mmu_index(env_cpu(env), 0); uint64_t old, new, cmp, *haddr; void *vaddr; @@ -235,7 +235,7 @@ static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val, default: /* Nothing is stored, but protection is checked and the cacheline is marked dirty. */ - probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra); + probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra); break; } } @@ -281,22 +281,22 @@ static void do_stdby_e(CPUHPPAState *env, target_ulong addr, uint64_t val, case 3: /* The 3 byte store must appear atomic. */ if (parallel) { - atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra); + atomic_store_mask32(env, addr - 3, val >> 32, 0xffffff00u, ra); } else { - cpu_stw_data_ra(env, addr - 3, val >> 16, ra); - cpu_stb_data_ra(env, addr - 1, val >> 8, ra); + cpu_stw_data_ra(env, addr - 3, val >> 48, ra); + cpu_stb_data_ra(env, addr - 1, val >> 40, ra); } break; case 2: - cpu_stw_data_ra(env, addr - 2, val >> 16, ra); + cpu_stw_data_ra(env, addr - 2, val >> 48, ra); break; case 1: - cpu_stb_data_ra(env, addr - 1, val >> 24, ra); + cpu_stb_data_ra(env, addr - 1, val >> 56, ra); break; default: /* Nothing is stored, but protection is checked and the cacheline is marked dirty. 
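The corrected shift counts in do_stdby_e() above follow from the "store bytes, ending" cases writing the most-significant bytes of the now 64-bit register image. A small sketch of that extraction, with an illustrative helper name:

    #include <assert.h>
    #include <stdint.h>

    /* Most-significant n bytes of a 64-bit register image, as written by
     * the ending cases: n=1 -> val>>56, n=2 -> val>>48, n=3 -> val>>40. */
    static uint64_t high_bytes(uint64_t val, int n)
    {
        return n ? val >> (64 - 8 * n) : 0;
    }

    int main(void)
    {
        uint64_t val = 0x1122334455667788ull;

        assert(high_bytes(val, 1) == 0x11);
        assert(high_bytes(val, 2) == 0x1122);
        assert(high_bytes(val, 3) == 0x112233);
        return 0;
    }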
*/ - probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra); + probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra); break; } } @@ -351,11 +351,12 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr, excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys, &prot, NULL); if (excp >= 0) { + cpu_restore_state(env_cpu(env), GETPC()); hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx)); if (excp == EXCP_DTLB_MISS) { excp = EXCP_NA_DTLB_MISS; } - hppa_dynamic_excp(env, excp, GETPC()); + helper_excp(env, excp); } return (want & prot) != 0; #endif diff --git a/target/hppa/sys_helper.c b/target/hppa/sys_helper.c index a59245eed36..22d6c899647 100644 --- a/target/hppa/sys_helper.c +++ b/target/hppa/sys_helper.c @@ -23,6 +23,8 @@ #include "exec/helper-proto.h" #include "qemu/timer.h" #include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "chardev/char-fe.h" void HELPER(write_interval_timer)(CPUHPPAState *env, target_ulong val) { @@ -76,24 +78,24 @@ target_ulong HELPER(swap_system_mask)(CPUHPPAState *env, target_ulong nsm) void HELPER(rfi)(CPUHPPAState *env) { - env->iasq_f = (uint64_t)env->cr[CR_IIASQ] << 32; - env->iasq_b = (uint64_t)env->cr_back[0] << 32; - env->iaoq_f = env->cr[CR_IIAOQ]; - env->iaoq_b = env->cr_back[1]; + uint64_t mask; + + cpu_hppa_put_psw(env, env->cr[CR_IPSW]); /* * For pa2.0, IIASQ is the top bits of the virtual address. * To recreate the space identifier, remove the offset bits. + * For pa1.x, the mask reduces to no change to space. */ - if (hppa_is_pa20(env)) { - env->iasq_f &= ~env->iaoq_f; - env->iasq_b &= ~env->iaoq_b; - } + mask = gva_offset_mask(env->psw); - cpu_hppa_put_psw(env, env->cr[CR_IPSW]); + env->iaoq_f = env->cr[CR_IIAOQ]; + env->iaoq_b = env->cr_back[1]; + env->iasq_f = (env->cr[CR_IIASQ] << 32) & ~(env->iaoq_f & mask); + env->iasq_b = (env->cr_back[0] << 32) & ~(env->iaoq_b & mask); } -void HELPER(getshadowregs)(CPUHPPAState *env) +static void getshadowregs(CPUHPPAState *env) { env->gr[1] = env->shadow[0]; env->gr[8] = env->shadow[1]; @@ -106,6 +108,40 @@ void HELPER(getshadowregs)(CPUHPPAState *env) void HELPER(rfi_r)(CPUHPPAState *env) { - helper_getshadowregs(env); + getshadowregs(env); helper_rfi(env); } + +#ifndef CONFIG_USER_ONLY +/* + * diag_console_output() is a helper function used during the initial bootup + * process of the SeaBIOS-hppa firmware. During the bootup phase, addresses of + * serial ports on e.g. PCI busses are unknown and most other devices haven't + * been initialized and configured yet. With help of a simple "diag" assembler + * instruction and an ASCII character code in register %r26 firmware can easily + * print debug output without any dependencies to the first serial port and use + * that as serial console. 
+ */ +void HELPER(diag_console_output)(CPUHPPAState *env) +{ + CharBackend *serial_backend; + Chardev *serial_port; + unsigned char c; + + /* find first serial port */ + serial_port = serial_hd(0); + if (!serial_port) { + return; + } + + /* get serial_backend for the serial port */ + serial_backend = serial_port->be; + if (!serial_backend || + !qemu_chr_fe_backend_connected(serial_backend)) { + return; + } + + c = (unsigned char)env->gr[26]; + qemu_chr_fe_write(serial_backend, &c, sizeof(c)); +} +#endif diff --git a/target/hppa/translate.c b/target/hppa/translate.c index 3ef39b1bd74..42fa4809504 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -44,7 +44,6 @@ typedef struct DisasCond { typedef struct DisasContext { DisasContextBase base; CPUState *cs; - TCGOp *insn_start; uint64_t iaoq_f; uint64_t iaoq_b; @@ -62,6 +61,7 @@ typedef struct DisasContext { int privilege; bool psw_n_nonzero; bool is_pa20; + bool insn_start_updated; #ifdef CONFIG_USER_ONLY MemOp unalign; @@ -121,12 +121,6 @@ static int expand_shl2(DisasContext *ctx, int val) return val << 2; } -/* Used for fp memory ops. */ -static int expand_shl3(DisasContext *ctx, int val) -{ - return val << 3; -} - /* Used for assemble_21. */ static int expand_shl11(DisasContext *ctx, int val) { @@ -144,12 +138,76 @@ static int assemble_6(DisasContext *ctx, int val) return (val ^ 31) + 1; } +/* Expander for assemble_16a(s,cat(im10a,0),i). */ +static int expand_11a(DisasContext *ctx, int val) +{ + /* + * @val is bit 0 and bits [4:15]. + * Swizzle thing around depending on PSW.W. + */ + int im10a = extract32(val, 1, 10); + int s = extract32(val, 11, 2); + int i = (-(val & 1) << 13) | (im10a << 3); + + if (ctx->tb_flags & PSW_W) { + i ^= s << 13; + } + return i; +} + +/* Expander for assemble_16a(s,im11a,i). */ +static int expand_12a(DisasContext *ctx, int val) +{ + /* + * @val is bit 0 and bits [3:15]. + * Swizzle thing around depending on PSW.W. + */ + int im11a = extract32(val, 1, 11); + int s = extract32(val, 12, 2); + int i = (-(val & 1) << 13) | (im11a << 2); + + if (ctx->tb_flags & PSW_W) { + i ^= s << 13; + } + return i; +} + +/* Expander for assemble_16(s,im14). */ +static int expand_16(DisasContext *ctx, int val) +{ + /* + * @val is bits [0:15], containing both im14 and s. + * Swizzle thing around depending on PSW.W. + */ + int s = extract32(val, 14, 2); + int i = (-(val & 1) << 13) | extract32(val, 1, 13); + + if (ctx->tb_flags & PSW_W) { + i ^= s << 13; + } + return i; +} + +/* The sp field is only present with !PSW_W. */ +static int sp0_if_wide(DisasContext *ctx, int sp) +{ + return ctx->tb_flags & PSW_W ? 0 : sp; +} + /* Translate CMPI doubleword conditions to standard. */ static int cmpbid_c(DisasContext *ctx, int val) { return val ? val : 4; /* 0 == "*<<" */ } +/* + * In many places pa1.x did not decode the bit that later became + * the pa2.0 D bit. Suppress D unless the cpu is pa2.0. + */ +static int pa20_d(DisasContext *ctx, int val) +{ + return ctx->is_pa20 & val; +} /* Include the auto-generated decoder. 
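The new immediate expanders added further down (expand_11a/expand_12a/expand_16) are hard to follow from the bit positions alone. The stand-alone restatement below mirrors expand_16(): bit 0 is the low-sign bit, bits [1:13] the magnitude, and bits [14:15] the space field, which is folded into bits [13:14] of the result only in wide mode. It is an equivalent reformulation, not a copy of the QEMU code.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static int expand_16(uint32_t val, bool psw_w)
    {
        int s = (val >> 14) & 3;
        /* Equivalent to the decoder's "low sign | magnitude" form. */
        int i = (int)((val >> 1) & 0x1fff) - ((val & 1) ? 0x2000 : 0);

        if (psw_w) {
            i ^= s << 13;            /* space bits only matter with PSW.W */
        }
        return i;
    }

    int main(void)
    {
        assert(expand_16(0x0002, false) == 1);       /* +1 */
        assert(expand_16(0x0001, false) == -8192);   /* low-sign set */
        assert(expand_16(0x0001, true)  == -8192);   /* s == 0: unchanged */
        assert(expand_16(0x8001, true)  == -24576);  /* s == 2 flips bit 14 */
        return 0;
    }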
*/ #include "decode-insns.c.inc" @@ -242,9 +300,9 @@ void hppa_translate_init(void) static void set_insn_breg(DisasContext *ctx, int breg) { - assert(ctx->insn_start != NULL); - tcg_set_insn_start_param(ctx->insn_start, 2, breg); - ctx->insn_start = NULL; + assert(!ctx->insn_start_updated); + ctx->insn_start_updated = true; + tcg_set_insn_start_param(ctx->base.insn_start, 2, breg); } static DisasCond cond_make_f(void) @@ -536,17 +594,10 @@ static bool nullify_end(DisasContext *ctx) return true; } -static uint64_t gva_offset_mask(DisasContext *ctx) -{ - return (ctx->tb_flags & PSW_W - ? MAKE_64BIT_MASK(0, 62) - : MAKE_64BIT_MASK(0, 32)); -} - static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest, uint64_t ival, TCGv_i64 vval) { - uint64_t mask = gva_offset_mask(ctx); + uint64_t mask = gva_offset_mask(ctx->tb_flags); if (ival != -1) { tcg_gen_movi_i64(dest, ival & mask); @@ -650,19 +701,13 @@ static bool cond_need_cb(int c) return c == 4 || c == 5; } -/* Need extensions from TCGv_i32 to TCGv_i64. */ -static bool cond_need_ext(DisasContext *ctx, bool d) -{ - return !(ctx->is_pa20 && d); -} - /* * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of * the Parisc 1.1 Architecture Reference Manual for details. */ static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d, - TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv) + TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv) { DisasCond cond; TCGv_i64 tmp; @@ -672,7 +717,7 @@ static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d, cond = cond_make_f(); break; case 1: /* = / <> (Z / !Z) */ - if (cond_need_ext(ctx, d)) { + if (!d) { tmp = tcg_temp_new_i64(); tcg_gen_ext32u_i64(tmp, res); res = tmp; @@ -682,7 +727,7 @@ static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d, case 2: /* < / >= (N ^ V / !(N ^ V) */ tmp = tcg_temp_new_i64(); tcg_gen_xor_i64(tmp, res, sv); - if (cond_need_ext(ctx, d)) { + if (!d) { tcg_gen_ext32s_i64(tmp, tmp); } cond = cond_make_0_tmp(TCG_COND_LT, tmp); @@ -699,7 +744,7 @@ static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d, */ tmp = tcg_temp_new_i64(); tcg_gen_eqv_i64(tmp, res, sv); - if (cond_need_ext(ctx, d)) { + if (!d) { tcg_gen_sextract_i64(tmp, tmp, 31, 1); tcg_gen_and_i64(tmp, tmp, res); tcg_gen_ext32u_i64(tmp, tmp); @@ -709,21 +754,19 @@ static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d, } cond = cond_make_0_tmp(TCG_COND_EQ, tmp); break; - case 4: /* NUV / UV (!C / C) */ - /* Only bit 0 of cb_msb is ever set. 
*/ - cond = cond_make_0(TCG_COND_EQ, cb_msb); + case 4: /* NUV / UV (!UV / UV) */ + cond = cond_make_0(TCG_COND_EQ, uv); break; - case 5: /* ZNV / VNZ (!C | Z / C & !Z) */ + case 5: /* ZNV / VNZ (!UV | Z / UV & !Z) */ tmp = tcg_temp_new_i64(); - tcg_gen_neg_i64(tmp, cb_msb); - tcg_gen_and_i64(tmp, tmp, res); - if (cond_need_ext(ctx, d)) { + tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res); + if (!d) { tcg_gen_ext32u_i64(tmp, tmp); } cond = cond_make_0_tmp(TCG_COND_EQ, tmp); break; case 6: /* SV / NSV (V / !V) */ - if (cond_need_ext(ctx, d)) { + if (!d) { tmp = tcg_temp_new_i64(); tcg_gen_ext32s_i64(tmp, sv); sv = tmp; @@ -784,7 +827,7 @@ static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d, if (cf & 1) { tc = tcg_invert_cond(tc); } - if (cond_need_ext(ctx, d)) { + if (!d) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); @@ -861,7 +904,7 @@ static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d, g_assert_not_reached(); } - if (cond_need_ext(ctx, d)) { + if (!d) { TCGv_i64 tmp = tcg_temp_new_i64(); if (ext_uns) { @@ -893,83 +936,50 @@ static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d, return do_log_cond(ctx, c * 2 + f, d, res); } -/* Similar, but for unit conditions. */ - -static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res, - TCGv_i64 in1, TCGv_i64 in2) +/* Similar, but for unit zero conditions. */ +static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res) { - DisasCond cond; - TCGv_i64 tmp, cb = NULL; + TCGv_i64 tmp; uint64_t d_repl = d ? 0x0000000100000001ull : 1; - - if (cf & 8) { - /* Since we want to test lots of carry-out bits all at once, do not - * do our normal thing and compute carry-in of bit B+1 since that - * leaves us with carry bits spread across two words. - */ - cb = tcg_temp_new_i64(); - tmp = tcg_temp_new_i64(); - tcg_gen_or_i64(cb, in1, in2); - tcg_gen_and_i64(tmp, in1, in2); - tcg_gen_andc_i64(cb, cb, res); - tcg_gen_or_i64(cb, cb, tmp); - } + uint64_t ones = 0, sgns = 0; switch (cf >> 1) { - case 0: /* never / TR */ - case 1: /* undefined */ - case 5: /* undefined */ - cond = cond_make_f(); + case 1: /* SBW / NBW */ + if (d) { + ones = d_repl; + sgns = d_repl << 31; + } break; - case 2: /* SBZ / NBZ */ - /* See hasless(v,1) from - * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord - */ - tmp = tcg_temp_new_i64(); - tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u); - tcg_gen_andc_i64(tmp, tmp, res); - tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u); - cond = cond_make_0(TCG_COND_NE, tmp); + ones = d_repl * 0x01010101u; + sgns = ones << 7; break; - case 3: /* SHZ / NHZ */ - tmp = tcg_temp_new_i64(); - tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u); - tcg_gen_andc_i64(tmp, tmp, res); - tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u); - cond = cond_make_0(TCG_COND_NE, tmp); - break; - - case 4: /* SDC / NDC */ - tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u); - cond = cond_make_0(TCG_COND_NE, cb); - break; - - case 6: /* SBC / NBC */ - tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u); - cond = cond_make_0(TCG_COND_NE, cb); - break; - - case 7: /* SHC / NHC */ - tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u); - cond = cond_make_0(TCG_COND_NE, cb); + ones = d_repl * 0x00010001u; + sgns = ones << 15; break; - - default: - g_assert_not_reached(); } - if (cf & 1) { - cond.c = tcg_invert_cond(cond.c); + if (ones == 0) { + /* Undefined, or 0/1 (never/always). */ + return cf & 1 ? 
cond_make_t() : cond_make_f(); } - return cond; + /* + * See hasless(v,1) from + * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord + */ + tmp = tcg_temp_new_i64(); + tcg_gen_subi_i64(tmp, res, ones); + tcg_gen_andc_i64(tmp, tmp, res); + tcg_gen_andi_i64(tmp, tmp, sgns); + + return cond_make_0_tmp(cf & 1 ? TCG_COND_EQ : TCG_COND_NE, tmp); } static TCGv_i64 get_carry(DisasContext *ctx, bool d, TCGv_i64 cb, TCGv_i64 cb_msb) { - if (cond_need_ext(ctx, d)) { + if (!d) { TCGv_i64 t = tcg_temp_new_i64(); tcg_gen_extract_i64(t, cb, 32, 1); return t; @@ -984,7 +994,8 @@ static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d) /* Compute signed overflow for addition. */ static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res, - TCGv_i64 in1, TCGv_i64 in2) + TCGv_i64 in1, TCGv_i64 in2, + TCGv_i64 orig_in1, int shift, bool d) { TCGv_i64 sv = tcg_temp_new_i64(); TCGv_i64 tmp = tcg_temp_new_i64(); @@ -993,9 +1004,49 @@ static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res, tcg_gen_xor_i64(tmp, in1, in2); tcg_gen_andc_i64(sv, sv, tmp); + switch (shift) { + case 0: + break; + case 1: + /* Shift left by one and compare the sign. */ + tcg_gen_add_i64(tmp, orig_in1, orig_in1); + tcg_gen_xor_i64(tmp, tmp, orig_in1); + /* Incorporate into the overflow. */ + tcg_gen_or_i64(sv, sv, tmp); + break; + default: + { + int sign_bit = d ? 63 : 31; + + /* Compare the sign against all lower bits. */ + tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1); + tcg_gen_xor_i64(tmp, tmp, orig_in1); + /* + * If one of the bits shifting into or through the sign + * differs, then we have overflow. + */ + tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift); + tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero, + tcg_constant_i64(-1), sv); + } + } return sv; } +/* Compute unsigned overflow for addition. */ +static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb, + TCGv_i64 in1, int shift, bool d) +{ + if (shift == 0) { + return get_carry(ctx, d, cb, cb_msb); + } else { + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_extract_i64(tmp, in1, (d ? 63 : 31) - shift, shift); + tcg_gen_or_i64(tmp, tmp, get_carry(ctx, d, cb, cb_msb)); + return tmp; + } +} + /* Compute signed overflow for subtraction. */ static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res, TCGv_i64 in1, TCGv_i64 in2) @@ -1010,19 +1061,19 @@ static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res, return sv; } -static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1, +static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1, TCGv_i64 in2, unsigned shift, bool is_l, bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d) { - TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp; + TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp; unsigned c = cf >> 1; DisasCond cond; dest = tcg_temp_new_i64(); cb = NULL; cb_msb = NULL; - cb_cond = NULL; + in1 = orig_in1; if (shift) { tmp = tcg_temp_new_i64(); tcg_gen_shli_i64(tmp, in1, shift); @@ -1040,9 +1091,6 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1, } tcg_gen_xor_i64(cb, in1, in2); tcg_gen_xor_i64(cb, cb, dest); - if (cond_need_cb(c)) { - cb_cond = get_carry(ctx, d, cb, cb_msb); - } } else { tcg_gen_add_i64(dest, in1, in2); if (is_c) { @@ -1053,15 +1101,23 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1, /* Compute signed overflow if required. */ sv = NULL; if (is_tsv || cond_need_sv(c)) { - sv = do_add_sv(ctx, dest, in1, in2); + sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d); if (is_tsv) { - /* ??? 
Need to include overflow from shift. */ + if (!d) { + tcg_gen_ext32s_i64(sv, sv); + } gen_helper_tsv(tcg_env, sv); } } + /* Compute unsigned overflow if required. */ + uv = NULL; + if (cond_need_cb(c)) { + uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d); + } + /* Emit any conditional trap before any writeback. */ - cond = do_cond(ctx, cf, d, dest, cb_cond, sv); + cond = do_cond(ctx, cf, d, dest, uv, sv); if (is_tc) { tmp = tcg_temp_new_i64(); tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1); @@ -1146,6 +1202,9 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1, if (is_tsv || cond_need_sv(c)) { sv = do_sub_sv(ctx, dest, in1, in2); if (is_tsv) { + if (!d) { + tcg_gen_ext32s_i64(sv, sv); + } gen_helper_tsv(tcg_env, sv); } } @@ -1260,34 +1319,86 @@ static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a, return nullify_end(ctx); } -static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1, - TCGv_i64 in2, unsigned cf, bool d, bool is_tc, - void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64)) +static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1, + TCGv_i64 in2, unsigned cf, bool d, + bool is_tc, bool is_add) { - TCGv_i64 dest; + TCGv_i64 dest = tcg_temp_new_i64(); + uint64_t test_cb = 0; DisasCond cond; - if (cf == 0) { - dest = dest_gpr(ctx, rt); - fn(dest, in1, in2); - save_gpr(ctx, rt, dest); - cond_free(&ctx->null_cond); - } else { - dest = tcg_temp_new_i64(); - fn(dest, in1, in2); + /* Select which carry-out bits to test. */ + switch (cf >> 1) { + case 4: /* NDC / SDC -- 4-bit carries */ + test_cb = dup_const(MO_8, 0x88); + break; + case 5: /* NWC / SWC -- 32-bit carries */ + if (d) { + test_cb = dup_const(MO_32, INT32_MIN); + } else { + cf &= 1; /* undefined -- map to never/always */ + } + break; + case 6: /* NBC / SBC -- 8-bit carries */ + test_cb = dup_const(MO_8, INT8_MIN); + break; + case 7: /* NHC / SHC -- 16-bit carries */ + test_cb = dup_const(MO_16, INT16_MIN); + break; + } + if (!d) { + test_cb = (uint32_t)test_cb; + } - cond = do_unit_cond(cf, d, dest, in1, in2); + if (!test_cb) { + /* No need to compute carries if we don't need to test them. */ + if (is_add) { + tcg_gen_add_i64(dest, in1, in2); + } else { + tcg_gen_sub_i64(dest, in1, in2); + } + cond = do_unit_zero_cond(cf, d, dest); + } else { + TCGv_i64 cb = tcg_temp_new_i64(); - if (is_tc) { - TCGv_i64 tmp = tcg_temp_new_i64(); - tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1); - gen_helper_tcond(tcg_env, tmp); + if (d) { + TCGv_i64 cb_msb = tcg_temp_new_i64(); + if (is_add) { + tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero); + tcg_gen_xor_i64(cb, in1, in2); + } else { + /* See do_sub, !is_b. */ + TCGv_i64 one = tcg_constant_i64(1); + tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero); + tcg_gen_eqv_i64(cb, in1, in2); + } + tcg_gen_xor_i64(cb, cb, dest); + tcg_gen_extract2_i64(cb, cb, cb_msb, 1); + } else { + if (is_add) { + tcg_gen_add_i64(dest, in1, in2); + tcg_gen_xor_i64(cb, in1, in2); + } else { + tcg_gen_sub_i64(dest, in1, in2); + tcg_gen_eqv_i64(cb, in1, in2); + } + tcg_gen_xor_i64(cb, cb, dest); + tcg_gen_shri_i64(cb, cb, 1); } - save_gpr(ctx, rt, dest); - cond_free(&ctx->null_cond); - ctx->null_cond = cond; + tcg_gen_andi_i64(cb, cb, test_cb); + cond = cond_make_0_tmp(cf & 1 ? 
TCG_COND_EQ : TCG_COND_NE, cb); } + + if (is_tc) { + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1); + gen_helper_tcond(tcg_env, tmp); + } + save_gpr(ctx, rt, dest); + + cond_free(&ctx->null_cond); + ctx->null_cond = cond; } #ifndef CONFIG_USER_ONLY @@ -1353,7 +1464,8 @@ static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs, *pofs = ofs; *pgva = addr = tcg_temp_new_i64(); - tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx)); + tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, + gva_offset_mask(ctx->tb_flags)); #ifndef CONFIG_USER_ONLY if (!is_phys) { tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base)); @@ -1961,7 +2073,7 @@ static bool trans_mfia(DisasContext *ctx, arg_mfia *a) { unsigned rt = a->t; TCGv_i64 tmp = dest_gpr(ctx, rt); - tcg_gen_movi_i64(tmp, ctx->iaoq_f); + tcg_gen_movi_i64(tmp, ctx->iaoq_f & ~3ULL); save_gpr(ctx, rt, tmp); cond_free(&ctx->null_cond); @@ -2005,11 +2117,9 @@ static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a) nullify_over(ctx); tmp = dest_gpr(ctx, rt); if (translator_io_start(&ctx->base)) { - gen_helper_read_interval_timer(tmp); ctx->base.is_jmp = DISAS_IAQ_N_STALE; - } else { - gen_helper_read_interval_timer(tmp); } + gen_helper_read_interval_timer(tmp); save_gpr(ctx, rt, tmp); return nullify_end(ctx); case 26: @@ -2085,13 +2195,16 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) switch (ctl) { case CR_IT: + if (translator_io_start(&ctx->base)) { + ctx->base.is_jmp = DISAS_IAQ_N_STALE; + } gen_helper_write_interval_timer(tcg_env, reg); break; case CR_EIRR: + /* Helper modifies interrupt lines and is therefore IO. */ + translator_io_start(&ctx->base); gen_helper_write_eirr(tcg_env, reg); - break; - case CR_EIEM: - gen_helper_write_eiem(tcg_env, reg); + /* Exit to re-evaluate interrupts in the main loop. */ ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; break; @@ -2117,6 +2230,10 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) #endif break; + case CR_EIEM: + /* Exit to re-evaluate interrupts in the main loop. 
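The control-register changes below go with the new interrupt model: pending EIRR bits raise CPU_INTERRUPT_HARD unconditionally, and EIEM is only consulted when the interrupt is about to be taken, which is why writes to EIRR and EIEM force an exit to the main loop. A minimal sketch of the delivery gate, with illustrative names:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* An external interrupt is taken only when one is pending, PSW.I is
     * set, and at least one pending bit is unmasked by EIEM. */
    static bool ext_irq_deliverable(bool hard_pending, bool psw_i,
                                    uint64_t eirr, uint64_t eiem)
    {
        return hard_pending && psw_i && (eirr & eiem) != 0;
    }

    int main(void)
    {
        assert(!ext_irq_deliverable(true, true, 0x4, 0x3));  /* masked */
        assert(ext_irq_deliverable(true, true, 0x4, 0x4));
        assert(!ext_irq_deliverable(true, false, 0x4, 0x4)); /* PSW.I clear */
        return 0;
    }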
*/ + ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; + /* FALLTHRU */ default: tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl])); break; @@ -2156,10 +2273,16 @@ static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a) static bool trans_rsm(DisasContext *ctx, arg_rsm *a) { +#ifdef CONFIG_USER_ONLY CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); -#ifndef CONFIG_USER_ONLY +#else TCGv_i64 tmp; + /* HP-UX 11i and HP ODE use rsm for read-access to PSW */ + if (a->i) { + CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); + } + nullify_over(ctx); tmp = tcg_temp_new_i64(); @@ -2262,14 +2385,37 @@ static bool trans_reset(DisasContext *ctx, arg_reset *a) #endif } -static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a) +static bool do_getshadowregs(DisasContext *ctx) { CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); -#ifndef CONFIG_USER_ONLY nullify_over(ctx); - gen_helper_getshadowregs(tcg_env); + tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0])); + tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1])); + tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2])); + tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3])); + tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4])); + tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5])); + tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6])); return nullify_end(ctx); -#endif +} + +static bool do_putshadowregs(DisasContext *ctx) +{ + CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); + nullify_over(ctx); + tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0])); + tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1])); + tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2])); + tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3])); + tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4])); + tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5])); + tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6])); + return nullify_end(ctx); +} + +static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a) +{ + return do_getshadowregs(ctx); } static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a) @@ -2287,6 +2433,13 @@ static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a) return true; } +static bool trans_fic(DisasContext *ctx, arg_ldst *a) +{ + /* End TB for flush instruction cache, so we pick up new insns. 
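For reference, the shadow register set copied by do_getshadowregs()/do_putshadowregs() above covers GRs 1, 8, 9, 16, 17, 24 and 25. A table-driven sketch of the same copy, for illustration only (QEMU open-codes the individual loads and stores):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* The seven general registers that have architected shadow copies. */
    static const int shadow_gr[] = { 1, 8, 9, 16, 17, 24, 25 };

    static void get_shadowregs(uint64_t gr[32], const uint64_t shadow[7])
    {
        for (size_t i = 0; i < 7; i++) {
            gr[shadow_gr[i]] = shadow[i];
        }
    }

    static void put_shadowregs(const uint64_t gr[32], uint64_t shadow[7])
    {
        for (size_t i = 0; i < 7; i++) {
            shadow[i] = gr[shadow_gr[i]];
        }
    }

    int main(void)
    {
        uint64_t gr[32] = { 0 }, shadow[7] = { 1, 2, 3, 4, 5, 6, 7 };

        get_shadowregs(gr, shadow);
        assert(gr[25] == 7);
        put_shadowregs(gr, shadow);
        assert(shadow[0] == gr[1]);
        return 0;
    }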
*/ + ctx->base.is_jmp = DISAS_IAQ_N_STALE; + return trans_nop_addrx(ctx, a); +} + static bool trans_probe(DisasContext *ctx, arg_probe *a) { TCGv_i64 dest, ofs; @@ -2659,14 +2812,24 @@ static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a) static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a) { - TCGv_i64 tcg_r1, tcg_r2; + TCGv_i64 tcg_r1, tcg_r2, dest; if (a->cf) { nullify_over(ctx); } + tcg_r1 = load_gpr(ctx, a->r1); tcg_r2 = load_gpr(ctx, a->r2); - do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64); + dest = dest_gpr(ctx, a->t); + + tcg_gen_xor_i64(dest, tcg_r1, tcg_r2); + save_gpr(ctx, a->t, dest); + + cond_free(&ctx->null_cond); + if (a->cf) { + ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest); + } + return nullify_end(ctx); } @@ -2674,14 +2837,34 @@ static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc) { TCGv_i64 tcg_r1, tcg_r2, tmp; - if (a->cf) { - nullify_over(ctx); + if (a->cf == 0) { + tcg_r2 = load_gpr(ctx, a->r2); + tmp = dest_gpr(ctx, a->t); + + if (a->r1 == 0) { + /* UADDCM r0,src,dst is the common idiom for dst = ~src. */ + tcg_gen_not_i64(tmp, tcg_r2); + } else { + /* + * Recall that r1 - r2 == r1 + ~r2 + 1. + * Thus r1 + ~r2 == r1 - r2 - 1, + * which does not require an extra temporary. + */ + tcg_r1 = load_gpr(ctx, a->r1); + tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2); + tcg_gen_subi_i64(tmp, tmp, 1); + } + save_gpr(ctx, a->t, tmp); + cond_free(&ctx->null_cond); + return true; } + + nullify_over(ctx); tcg_r1 = load_gpr(ctx, a->r1); tcg_r2 = load_gpr(ctx, a->r2); tmp = tcg_temp_new_i64(); tcg_gen_not_i64(tmp, tcg_r2); - do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64); + do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true); return nullify_end(ctx); } @@ -2702,14 +2885,14 @@ static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i) nullify_over(ctx); tmp = tcg_temp_new_i64(); - tcg_gen_shri_i64(tmp, cpu_psw_cb, 3); + tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4); if (!is_i) { tcg_gen_not_i64(tmp, tmp); } tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull); tcg_gen_muli_i64(tmp, tmp, 6); - do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false, - is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64); + do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp, + a->cf, a->d, false, is_i); return nullify_end(ctx); } @@ -2726,7 +2909,6 @@ static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a) static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) { TCGv_i64 dest, add1, add2, addc, in1, in2; - TCGv_i64 cout; nullify_over(ctx); @@ -2763,19 +2945,23 @@ static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) tcg_gen_xor_i64(cpu_psw_cb, add1, add2); tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest); - /* Write back PSW[V] for the division step. */ - cout = get_psw_carry(ctx, false); - tcg_gen_neg_i64(cpu_psw_v, cout); + /* + * Write back PSW[V] for the division step. + * Shift cb{8} from where it lives in bit 32 to bit 31, + * so that it overlaps r2{32} in bit 31. + */ + tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1); tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2); /* Install the new nullification. */ if (a->cf) { - TCGv_i64 sv = NULL; + TCGv_i64 sv = NULL, uv = NULL; if (cond_need_sv(a->cf >> 1)) { - /* ??? The lshift is supposed to contribute to overflow. 
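The special-cased UADDCM path below relies on two's-complement identities that are easy to sanity check in isolation:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t r1 = 0x123456789abcdef0ull, r2 = 0x0fedcba987654321ull;
        uint64_t zero = 0;

        /* UADDCM computes r1 + ~r2; with r1 == 0 that is simply ~r2 ... */
        assert(zero + ~r2 == ~r2);
        /* ... and in general r1 + ~r2 == r1 - r2 - 1 (mod 2^64), so no
         * temporary for the complement is needed. */
        assert(r1 + ~r2 == r1 - r2 - 1);
        return 0;
    }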
*/ - sv = do_add_sv(ctx, dest, add1, add2); + sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false); + } else if (cond_need_cb(a->cf >> 1)) { + uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false); } - ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv); + ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv); } return nullify_end(ctx); @@ -3079,7 +3265,7 @@ static bool trans_ldc(DisasContext *ctx, arg_ldst *a) dest = dest_gpr(ctx, a->t); } - form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0, + form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0, a->disp, a->sp, a->m, MMU_DISABLED(ctx)); /* @@ -3302,7 +3488,7 @@ static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1, tcg_gen_add_i64(dest, in1, in2); } if (cond_need_sv(c)) { - sv = do_add_sv(ctx, dest, in1, in2); + sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d); } cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv); @@ -3331,12 +3517,12 @@ static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a) tmp = tcg_temp_new_i64(); tcg_r = load_gpr(ctx, a->r); - if (cond_need_ext(ctx, a->d)) { + if (a->d) { + tcg_gen_shl_i64(tmp, tcg_r, cpu_sar); + } else { /* Force shift into [32,63] */ tcg_gen_ori_i64(tmp, cpu_sar, 32); tcg_gen_shl_i64(tmp, tcg_r, tmp); - } else { - tcg_gen_shl_i64(tmp, tcg_r, cpu_sar); } cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); @@ -3353,7 +3539,7 @@ static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a) tmp = tcg_temp_new_i64(); tcg_r = load_gpr(ctx, a->r); - p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0); + p = a->p | (a->d ? 0 : 32); tcg_gen_shli_i64(tmp, tcg_r, p); cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); @@ -3456,7 +3642,7 @@ static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a) /* Install the new nullification. */ cond_free(&ctx->null_cond); if (a->c) { - ctx->null_cond = do_sed_cond(ctx, a->c, false, dest); + ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); } return nullify_end(ctx); } @@ -3499,7 +3685,7 @@ static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a) /* Install the new nullification. */ cond_free(&ctx->null_cond); if (a->c) { - ctx->null_cond = do_sed_cond(ctx, a->c, false, dest); + ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); } return nullify_end(ctx); } @@ -3754,7 +3940,7 @@ static bool trans_be(DisasContext *ctx, arg_be *a) load_spr(ctx, new_spc, a->sp); if (a->l) { copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var); - tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f); + tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b); } if (a->n && use_nullify_skip(ctx)) { copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp); @@ -3762,6 +3948,7 @@ static bool trans_be(DisasContext *ctx, arg_be *a) copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp); tcg_gen_mov_i64(cpu_iasq_f, new_spc); tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f); + nullify_set(ctx, 0); } else { copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); if (ctx->iaoq_b == -1) { @@ -3805,8 +3992,7 @@ static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a) #ifndef CONFIG_USER_ONLY if (ctx->tb_flags & PSW_C) { - CPUHPPAState *env = cpu_env(ctx->cs); - int type = hppa_artype_for_page(env, ctx->base.pc_next); + int type = hppa_artype_for_page(cpu_env(ctx->cs), ctx->base.pc_next); /* If we could not find a TLB entry, then we need to generate an ITLB miss exception so the kernel will provide it. 
The resulting TLB fill operation will invalidate this TB and @@ -3818,7 +4004,7 @@ static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a) } /* No change for non-gateway pages or for priv decrease. */ if (type >= 4 && type - 4 < ctx->privilege) { - dest = deposit32(dest, 0, 2, type - 4); + dest = deposit64(dest, 0, 2, type - 4); } } else { dest &= -4; /* priv = 0 */ @@ -4401,17 +4587,51 @@ static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a) return nullify_end(ctx); } -static bool trans_diag(DisasContext *ctx, arg_diag *a) +/* Emulate PDC BTLB, called by SeaBIOS-hppa */ +static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a) { CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); #ifndef CONFIG_USER_ONLY - if (a->i == 0x100) { - /* emulate PDC BTLB, called by SeaBIOS-hppa */ - nullify_over(ctx); - gen_helper_diag_btlb(tcg_env); - return nullify_end(ctx); - } + nullify_over(ctx); + gen_helper_diag_btlb(tcg_env); + return nullify_end(ctx); +#endif +} + +/* Print char in %r26 to first serial console, used by SeaBIOS-hppa */ +static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a) +{ + CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); +#ifndef CONFIG_USER_ONLY + nullify_over(ctx); + gen_helper_diag_console_output(tcg_env); + return nullify_end(ctx); #endif +} + +static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a) +{ + return !ctx->is_pa20 && do_getshadowregs(ctx); +} + +static bool trans_diag_getshadowregs_pa2(DisasContext *ctx, arg_empty *a) +{ + return ctx->is_pa20 && do_getshadowregs(ctx); +} + +static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a) +{ + return !ctx->is_pa20 && do_putshadowregs(ctx); +} + +static bool trans_diag_putshadowregs_pa2(DisasContext *ctx, arg_empty *a) +{ + return ctx->is_pa20 && do_putshadowregs(ctx); +} + +static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a) +{ + CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i); return true; } @@ -4474,7 +4694,7 @@ static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) DisasContext *ctx = container_of(dcbase, DisasContext, base); tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0); - ctx->insn_start = tcg_last_op(); + ctx->insn_start_updated = false; } static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) @@ -4631,7 +4851,7 @@ static const TranslatorOps hppa_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext ctx; translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base); diff --git a/target/i386/cpu-sysemu.c b/target/i386/cpu-sysemu.c index 2375e48178f..3f9093d2857 100644 --- a/target/i386/cpu-sysemu.c +++ b/target/i386/cpu-sysemu.c @@ -25,6 +25,7 @@ #include "qapi/error.h" #include "qapi/qapi-visit-run-state.h" #include "qapi/qmp/qdict.h" +#include "qapi/qobject-input-visitor.h" #include "qom/qom-qobject.h" #include "qapi/qapi-commands-machine-target.h" #include "hw/qdev-properties.h" @@ -129,20 +130,36 @@ static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) } } -static void object_apply_props(Object *obj, QDict *props, Error **errp) +static void object_apply_props(Object *obj, QObject *props, + const char *props_arg_name, Error **errp) { + Visitor *visitor; + QDict *qdict; const QDictEntry *prop; - for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { - if (!object_property_set_qobject(obj, qdict_entry_key(prop), - 
qdict_entry_value(prop), errp)) { - break; + visitor = qobject_input_visitor_new(props); + if (!visit_start_struct(visitor, props_arg_name, NULL, 0, errp)) { + visit_free(visitor); + return; + } + + qdict = qobject_to(QDict, props); + for (prop = qdict_first(qdict); prop; prop = qdict_next(qdict, prop)) { + if (!object_property_set(obj, qdict_entry_key(prop), + visitor, errp)) { + goto out; } } + + visit_check_struct(visitor, errp); +out: + visit_end_struct(visitor, NULL); + visit_free(visitor); } /* Create X86CPU object according to model+props specification */ -static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) +static X86CPU *x86_cpu_from_model(const char *model, QObject *props, + const char *props_arg_name, Error **errp) { X86CPU *xc = NULL; X86CPUClass *xcc; @@ -156,7 +173,7 @@ static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); if (props) { - object_apply_props(OBJECT(xc), props, &err); + object_apply_props(OBJECT(xc), props, props_arg_name, &err); if (err) { goto out; } @@ -187,8 +204,7 @@ qmp_query_cpu_model_expansion(CpuModelExpansionType type, QDict *props = NULL; const char *base_name; - xc = x86_cpu_from_model(model->name, qobject_to(QDict, model->props), - &err); + xc = x86_cpu_from_model(model->name, model->props, "model.props", &err); if (err) { goto out; } @@ -235,6 +251,16 @@ void cpu_clear_apic_feature(CPUX86State *env) env->features[FEAT_1_EDX] &= ~CPUID_APIC; } +void cpu_set_apic_feature(CPUX86State *env) +{ + env->features[FEAT_1_EDX] |= CPUID_APIC; +} + +bool cpu_has_x2apic_feature(CPUX86State *env) +{ + return env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC; +} + bool cpu_is_bsp(X86CPU *cpu) { return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; @@ -281,11 +307,17 @@ void x86_cpu_apic_create(X86CPU *cpu, Error **errp) OBJECT(cpu->apic_state)); object_unref(OBJECT(cpu->apic_state)); - qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); /* TODO: convert to link<> */ apic = APIC_COMMON(cpu->apic_state); apic->cpu = cpu; apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; + + /* + * apic_common_set_id needs to check if the CPU has x2APIC + * feature in case APIC ID >= 255, so we need to set apic->cpu + * before setting APIC ID + */ + qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); } void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 941e9209413..e4f25ffecaa 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -24,7 +24,6 @@ #include "qemu/hw-version.h" #include "cpu.h" #include "tcg/helper-tcg.h" -#include "sysemu/reset.h" #include "sysemu/hvf.h" #include "hvf/hvf-i386.h" #include "kvm/kvm_i386.h" @@ -37,6 +36,7 @@ #include "hw/qdev-properties.h" #include "hw/i386/topology.h" #ifndef CONFIG_USER_ONLY +#include "sysemu/reset.h" #include "qapi/qapi-commands-machine-target.h" #include "exec/address-spaces.h" #include "hw/boards.h" @@ -634,8 +634,8 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, * in CPL=3; remove them if they are ever implemented for system emulation. 
*/ #if defined CONFIG_USER_ONLY -#define CPUID_EXT_KERNEL_FEATURES (CPUID_EXT_PCID | CPUID_EXT_TSC_DEADLINE_TIMER | \ - CPUID_EXT_X2APIC) +#define CPUID_EXT_KERNEL_FEATURES \ + (CPUID_EXT_PCID | CPUID_EXT_TSC_DEADLINE_TIMER) #else #define CPUID_EXT_KERNEL_FEATURES 0 #endif @@ -645,12 +645,13 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ CPUID_EXT_RDRAND | CPUID_EXT_AVX | CPUID_EXT_F16C | \ - CPUID_EXT_FMA | CPUID_EXT_KERNEL_FEATURES) + CPUID_EXT_FMA | CPUID_EXT_X2APIC | CPUID_EXT_KERNEL_FEATURES) /* missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, - CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER */ + CPUID_EXT_TSC_DEADLINE_TIMER + */ #ifdef TARGET_X86_64 #define TCG_EXT2_X86_64_FEATURES CPUID_EXT2_LM @@ -741,7 +742,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_7_0_EDX_FEATURES (CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_KERNEL_FEATURES) #define TCG_7_1_EAX_FEATURES (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | \ - CPUID_7_1_EAX_FSRC) + CPUID_7_1_EAX_FSRC | CPUID_7_1_EAX_CMPCCXADD) #define TCG_7_1_EDX_FEATURES 0 #define TCG_7_2_EDX_FEATURES 0 #define TCG_APM_FEATURES 0 @@ -859,7 +860,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .feat_names = { "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", - NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", + NULL, "kvm-pv-tlb-flush", "kvm-asyncpf-vmexit", "kvm-pv-ipi", "kvm-poll-control", "kvm-pv-sched-yield", "kvm-asyncpf-int", "kvm-msi-ext-dest-id", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -1747,8 +1748,7 @@ static char *x86_cpu_class_get_model_name(X86CPUClass *cc) { const char *class_name = object_class_get_name(OBJECT_CLASS(cc)); assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX)); - return g_strndup(class_name, - strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX)); + return cpu_model_from_type(class_name); } typedef struct X86CPUVersionDefinition { @@ -2182,7 +2182,7 @@ static const CPUCaches epyc_genoa_cache_info = { * Conceal VM entries from PT * Enable ENCLS exiting * Mode-based execute control (XS/XU) - s TSC scaling (Skylake Server and newer) + * TSC scaling (Skylake Server and newer) * GPA translation for PT (IceLake and newer) * User wait and pause * ENCLV exiting @@ -6415,6 +6415,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; } + /* + * If these are changed, they should stay in sync with + * x86_cpu_filter_features(). 
+ */ if (count == 0) { *eax = INTEL_PT_MAX_SUBLEAF; *ebx = INTEL_PT_MINIMAL_EBX; @@ -6694,9 +6698,9 @@ static void x86_cpu_set_sgxlepubkeyhash(CPUX86State *env) static void x86_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(obj); - X86CPU *cpu = X86_CPU(s); - X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); + CPUState *cs = CPU(obj); + X86CPU *cpu = X86_CPU(cs); + X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); CPUX86State *env = &cpu->env; target_ulong cr4; uint64_t xcr0; @@ -6784,8 +6788,8 @@ static void x86_cpu_reset_hold(Object *obj) memset(env->dr, 0, sizeof(env->dr)); env->dr[6] = DR6_FIXED_1; env->dr[7] = DR7_FIXED_1; - cpu_breakpoint_remove_all(s, BP_CPU); - cpu_watchpoint_remove_all(s, BP_CPU); + cpu_breakpoint_remove_all(cs, BP_CPU); + cpu_watchpoint_remove_all(cs, BP_CPU); cr4 = 0; xcr0 = XSTATE_FP_MASK; @@ -6836,9 +6840,9 @@ static void x86_cpu_reset_hold(Object *obj) env->triple_fault_pending = false; #if !defined(CONFIG_USER_ONLY) /* We hard-wire the BSP to the first CPU. */ - apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); + apic_designate_bsp(cpu->apic_state, cs->cpu_index == 0); - s->halted = !cpu_is_bsp(cpu); + cs->halted = !cpu_is_bsp(cpu); if (kvm_enabled()) { kvm_arch_reset_vcpu(cpu); @@ -7159,7 +7163,12 @@ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) mark_unavailable_features(cpu, w, unavailable_features, prefix); } - if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) { + /* + * Check that KVM actually allows the processor tracing features that + * are advertised by cpu_x86_cpuid(). Keep these two in sync. + */ + if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && + kvm_enabled()) { uint32_t eax_0, ebx_0, ecx_0, edx_0_unused; uint32_t eax_1, ebx_1, ecx_1_unused, edx_1_unused; @@ -7227,8 +7236,8 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) static bool ht_warned; unsigned requested_lbr_fmt; +#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) /* Use pc-relative instructions in system-mode */ -#ifndef CONFIG_USER_ONLY cs->tcg_cflags |= CF_PCREL; #endif @@ -7729,6 +7738,18 @@ static bool x86_cpu_has_work(CPUState *cs) return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; } +static int x86_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + CPUX86State *env = cpu_env(cs); + int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 0 : 1; + int mmu_index_base = + (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER64_IDX : + !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX : + (env->eflags & AC_MASK) ? 
MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX; + + return mmu_index_base + mmu_index_32; +} + static void x86_disas_set_info(CPUState *cs, disassemble_info *info) { X86CPU *cpu = X86_CPU(cs); @@ -7963,6 +7984,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data) cc->class_by_name = x86_cpu_class_by_name; cc->parse_features = x86_cpu_parse_featurestr; cc->has_work = x86_cpu_has_work; + cc->mmu_index = x86_cpu_mmu_index; cc->dump_state = x86_cpu_dump_state; cc->set_pc = x86_cpu_set_pc; cc->get_pc = x86_cpu_get_pc; @@ -7977,10 +7999,8 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data) cc->gdb_arch_name = x86_gdb_arch_name; #ifdef TARGET_X86_64 cc->gdb_core_xml_file = "i386-64bit.xml"; - cc->gdb_num_core_regs = 66; #else cc->gdb_core_xml_file = "i386-32bit.xml"; - cc->gdb_num_core_regs = 50; #endif cc->disas_set_info = x86_disas_set_info; diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 2c6a6554d6b..bf0b5bac99d 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -379,6 +379,10 @@ typedef enum X86Seg { #define MSR_IA32_APICBASE_ENABLE (1<<11) #define MSR_IA32_APICBASE_EXTD (1 << 10) #define MSR_IA32_APICBASE_BASE (0xfffffU<<12) +#define MSR_IA32_APICBASE_RESERVED \ + (~(uint64_t)(MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE \ + | MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_BASE)) + #define MSR_IA32_FEATURE_CONTROL 0x0000003a #define MSR_TSC_ADJUST 0x0000003b #define MSR_IA32_SPEC_CTRL 0x48 @@ -545,6 +549,9 @@ typedef enum X86Seg { #define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 #define MSR_IA32_VMX_VMFUNC 0x00000491 +#define MSR_APIC_START 0x00000800 +#define MSR_APIC_END 0x000008ff + #define XSTATE_FP_BIT 0 #define XSTATE_SSE_BIT 1 #define XSTATE_YMM_BIT 2 @@ -1285,6 +1292,7 @@ typedef enum { CC_OP_NB, } CCOp; +QEMU_BUILD_BUG_ON(CC_OP_NB >= 128); typedef struct SegmentCache { uint32_t selector; @@ -2237,8 +2245,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); void cpu_clear_apic_feature(CPUX86State *env); +void cpu_set_apic_feature(CPUX86State *env); void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); +bool cpu_has_x2apic_feature(CPUX86State *env); /* helper.c */ void x86_cpu_set_a20(X86CPU *cpu, int a20_state); @@ -2291,17 +2301,31 @@ uint64_t cpu_get_tsc(CPUX86State *env); #define cpu_list x86_cpu_list /* MMU modes definitions */ -#define MMU_KSMAP_IDX 0 -#define MMU_USER_IDX 1 -#define MMU_KNOSMAP_IDX 2 -#define MMU_NESTED_IDX 3 -#define MMU_PHYS_IDX 4 +#define MMU_KSMAP64_IDX 0 +#define MMU_KSMAP32_IDX 1 +#define MMU_USER64_IDX 2 +#define MMU_USER32_IDX 3 +#define MMU_KNOSMAP64_IDX 4 +#define MMU_KNOSMAP32_IDX 5 +#define MMU_PHYS_IDX 6 +#define MMU_NESTED_IDX 7 + +#ifdef CONFIG_USER_ONLY +#ifdef TARGET_X86_64 +#define MMU_USER_IDX MMU_USER64_IDX +#else +#define MMU_USER_IDX MMU_USER32_IDX +#endif +#endif + +static inline bool is_mmu_index_smap(int mmu_index) +{ + return (mmu_index & ~1) == MMU_KSMAP64_IDX; +} -static inline int cpu_mmu_index(CPUX86State *env, bool ifetch) +static inline bool is_mmu_index_user(int mmu_index) { - return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : - (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK)) - ? 
MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; + return (mmu_index & ~1) == MMU_USER64_IDX; } static inline bool is_mmu_index_32(int mmu_index) @@ -2312,9 +2336,12 @@ static inline bool is_mmu_index_32(int mmu_index) static inline int cpu_mmu_index_kernel(CPUX86State *env) { - return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX : - ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) - ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; + int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1; + int mmu_index_base = + !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX : + ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX; + + return mmu_index_base + mmu_index_32; } #define CC_DST (env->cc_dst) @@ -2352,13 +2379,13 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, uint64_t status, uint64_t mcg_status, uint64_t addr, uint64_t misc, int flags); -uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); +uint32_t cpu_cc_compute_all(CPUX86State *env1); static inline uint32_t cpu_compute_eflags(CPUX86State *env) { uint32_t eflags = env->eflags; if (tcg_enabled()) { - eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK); + eflags |= cpu_cc_compute_all(env) | (env->df & DF_MASK); } return eflags; } diff --git a/target/i386/helper.c b/target/i386/helper.c index 2070dd0dda1..23ccb23a5b4 100644 --- a/target/i386/helper.c +++ b/target/i386/helper.c @@ -430,7 +430,7 @@ static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data) if (need_reset) { emit_guest_memory_failure(MEMORY_FAILURE_ACTION_RESET, ar, recursive); - monitor_puts(params->mon, msg); + monitor_printf(params->mon, "%s", msg); qemu_log_mask(CPU_LOG_RESET, "%s\n", msg); qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); return; diff --git a/target/i386/hvf/README.md b/target/i386/hvf/README.md index 2d33477aca5..64a8935237c 100644 --- a/target/i386/hvf/README.md +++ b/target/i386/hvf/README.md @@ -4,4 +4,4 @@ These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desk 1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, xsave_buf) due to historical differences + QEMU needing to handle more emulation targets. 2. Removal of `apic_page` and hyperv-related functionality. -3. More relaxed use of `qemu_mutex_lock_iothread`. +3. More relaxed use of `bql_lock`. 
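Taken together, the MMU-index changes above (x86_cpu_mmu_index(), cpu_mmu_index_kernel() and the is_mmu_index_*() helpers) encode the 32-bit/64-bit distinction in the low bit of the index and the access mode in the bits above it. A minimal standalone sketch of that encoding; make_mmu_index() and the checks in main() are illustrative only, not code from this series:

    #include <assert.h>
    #include <stdbool.h>

    enum {
        MMU_KSMAP64_IDX,   MMU_KSMAP32_IDX,     /* kernel, SMAP enforced   */
        MMU_USER64_IDX,    MMU_USER32_IDX,      /* CPL 3                   */
        MMU_KNOSMAP64_IDX, MMU_KNOSMAP32_IDX,   /* kernel, SMAP not active */
        MMU_PHYS_IDX,      MMU_NESTED_IDX,
    };

    static bool is_mmu_index_smap(int mmu_index) { return (mmu_index & ~1) == MMU_KSMAP64_IDX; }
    static bool is_mmu_index_user(int mmu_index) { return (mmu_index & ~1) == MMU_USER64_IDX; }
    static bool is_mmu_index_32(int mmu_index)   { return mmu_index < MMU_PHYS_IDX && (mmu_index & 1); }

    /* user: CPL == 3; smap: SMAP applies to this access (CR4.SMAP set and
     * EFLAGS.AC clear); code64: 64-bit code segment (or LMA, for the kernel
     * variant). */
    static int make_mmu_index(bool user, bool smap, bool code64)
    {
        int base = user ? MMU_USER64_IDX
                 : smap ? MMU_KSMAP64_IDX
                 :        MMU_KNOSMAP64_IDX;
        return base + (code64 ? 0 : 1);     /* +1 selects the 32-bit variant */
    }

    int main(void)
    {
        assert(is_mmu_index_user(make_mmu_index(true, false, true)));   /* MMU_USER64_IDX    */
        assert(is_mmu_index_32(make_mmu_index(false, true, false)));    /* MMU_KSMAP32_IDX   */
        assert(!is_mmu_index_smap(make_mmu_index(false, false, true))); /* MMU_KNOSMAP64_IDX */
        return 0;
    }

Because the 32-bit variant of each mode sits immediately after its 64-bit counterpart, is_mmu_index_smap() and is_mmu_index_user() only need to mask off the low bit.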
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index 20b9ca3ef51..1ed8ed5154a 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -429,9 +429,9 @@ int hvf_vcpu_exec(CPUState *cpu) } vmx_update_tpr(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) { - qemu_mutex_lock_iothread(); + bql_lock(); return EXCP_HLT; } @@ -450,7 +450,7 @@ int hvf_vcpu_exec(CPUState *cpu) rip = rreg(cpu->accel->fd, HV_X86_RIP); env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS); - qemu_mutex_lock_iothread(); + bql_lock(); update_apic_tpr(cpu); current_cpu = cpu; @@ -708,7 +708,7 @@ void hvf_arch_update_guest_debug(CPUState *cpu) { } -inline bool hvf_arch_supports_guest_debug(void) +bool hvf_arch_supports_guest_debug(void) { return false; } diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c index 8ceea6398e7..80e36136d04 100644 --- a/target/i386/hvf/x86.c +++ b/target/i386/hvf/x86.c @@ -46,7 +46,7 @@ return ar; }*/ -bool x86_read_segment_descriptor(struct CPUState *cpu, +bool x86_read_segment_descriptor(CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel) { @@ -76,7 +76,7 @@ bool x86_read_segment_descriptor(struct CPUState *cpu, return true; } -bool x86_write_segment_descriptor(struct CPUState *cpu, +bool x86_write_segment_descriptor(CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel) { @@ -99,7 +99,7 @@ bool x86_write_segment_descriptor(struct CPUState *cpu, return true; } -bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, +bool x86_read_call_gate(CPUState *cpu, struct x86_call_gate *idt_desc, int gate) { target_ulong base = rvmcs(cpu->accel->fd, VMCS_GUEST_IDTR_BASE); @@ -115,30 +115,30 @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, return true; } -bool x86_is_protected(struct CPUState *cpu) +bool x86_is_protected(CPUState *cpu) { uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0); return cr0 & CR0_PE_MASK; } -bool x86_is_real(struct CPUState *cpu) +bool x86_is_real(CPUState *cpu) { return !x86_is_protected(cpu); } -bool x86_is_v8086(struct CPUState *cpu) +bool x86_is_v8086(CPUState *cpu) { X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; return x86_is_protected(cpu) && (env->eflags & VM_MASK); } -bool x86_is_long_mode(struct CPUState *cpu) +bool x86_is_long_mode(CPUState *cpu) { return rvmcs(cpu->accel->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA; } -bool x86_is_long64_mode(struct CPUState *cpu) +bool x86_is_long64_mode(CPUState *cpu) { struct vmx_segment desc; vmx_read_segment_descriptor(cpu, &desc, R_CS); @@ -146,24 +146,24 @@ bool x86_is_long64_mode(struct CPUState *cpu) return x86_is_long_mode(cpu) && ((desc.ar >> 13) & 1); } -bool x86_is_paging_mode(struct CPUState *cpu) +bool x86_is_paging_mode(CPUState *cpu) { uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0); return cr0 & CR0_PG_MASK; } -bool x86_is_pae_enabled(struct CPUState *cpu) +bool x86_is_pae_enabled(CPUState *cpu) { uint64_t cr4 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR4); return cr4 & CR4_PAE_MASK; } -target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, X86Seg seg) +target_ulong linear_addr(CPUState *cpu, target_ulong addr, X86Seg seg) { return vmx_read_segment_base(cpu, seg) + addr; } -target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size, +target_ulong linear_addr_size(CPUState *cpu, target_ulong addr, int size, X86Seg seg) { switch (size) { @@ -179,7 +179,7 @@ target_ulong linear_addr_size(struct 
CPUState *cpu, target_ulong addr, int size, return linear_addr(cpu, addr, seg); } -target_ulong linear_rip(struct CPUState *cpu, target_ulong rip) +target_ulong linear_rip(CPUState *cpu, target_ulong rip) { return linear_addr(cpu, rip, R_CS); } diff --git a/target/i386/hvf/x86.h b/target/i386/hvf/x86.h index 947b98da414..3570f29aa9d 100644 --- a/target/i386/hvf/x86.h +++ b/target/i386/hvf/x86.h @@ -248,30 +248,30 @@ typedef struct x68_segment_selector { #define BH(cpu) RH(cpu, R_EBX) /* deal with GDT/LDT descriptors in memory */ -bool x86_read_segment_descriptor(struct CPUState *cpu, +bool x86_read_segment_descriptor(CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel); -bool x86_write_segment_descriptor(struct CPUState *cpu, +bool x86_write_segment_descriptor(CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel); -bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, +bool x86_read_call_gate(CPUState *cpu, struct x86_call_gate *idt_desc, int gate); /* helpers */ -bool x86_is_protected(struct CPUState *cpu); -bool x86_is_real(struct CPUState *cpu); -bool x86_is_v8086(struct CPUState *cpu); -bool x86_is_long_mode(struct CPUState *cpu); -bool x86_is_long64_mode(struct CPUState *cpu); -bool x86_is_paging_mode(struct CPUState *cpu); -bool x86_is_pae_enabled(struct CPUState *cpu); +bool x86_is_protected(CPUState *cpu); +bool x86_is_real(CPUState *cpu); +bool x86_is_v8086(CPUState *cpu); +bool x86_is_long_mode(CPUState *cpu); +bool x86_is_long64_mode(CPUState *cpu); +bool x86_is_paging_mode(CPUState *cpu); +bool x86_is_pae_enabled(CPUState *cpu); enum X86Seg; -target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, enum X86Seg seg); -target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size, +target_ulong linear_addr(CPUState *cpu, target_ulong addr, enum X86Seg seg); +target_ulong linear_addr_size(CPUState *cpu, target_ulong addr, int size, enum X86Seg seg); -target_ulong linear_rip(struct CPUState *cpu, target_ulong rip); +target_ulong linear_rip(CPUState *cpu, target_ulong rip); static inline uint64_t rdtscp(void) { diff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c index c2d2e9ee844..f33836d6cba 100644 --- a/target/i386/hvf/x86_descr.c +++ b/target/i386/hvf/x86_descr.c @@ -67,12 +67,12 @@ x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg) return sel; } -void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, X86Seg seg) +void vmx_write_segment_selector(CPUState *cpu, x68_segment_selector selector, X86Seg seg) { wvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector, selector.sel); } -void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, X86Seg seg) +void vmx_read_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Seg seg) { desc->sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector); desc->base = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base); @@ -90,7 +90,9 @@ void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Se wvmcs(cpu->accel->fd, sf->ar_bytes, desc->ar); } -void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc) +void x86_segment_descriptor_to_vmx(CPUState *cpu, x68_segment_selector selector, + struct x86_segment_descriptor *desc, + struct vmx_segment *vmx_desc) { vmx_desc->sel = selector.sel; vmx_desc->base = x86_segment_base(desc); @@ 
-107,7 +109,8 @@ void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector se desc->type; } -void vmx_segment_to_x86_descriptor(struct CPUState *cpu, struct vmx_segment *vmx_desc, struct x86_segment_descriptor *desc) +void vmx_segment_to_x86_descriptor(CPUState *cpu, struct vmx_segment *vmx_desc, + struct x86_segment_descriptor *desc) { x86_set_segment_limit(desc, vmx_desc->limit); x86_set_segment_base(desc, vmx_desc->base); diff --git a/target/i386/hvf/x86_descr.h b/target/i386/hvf/x86_descr.h index c356932fa4a..9f06014b56a 100644 --- a/target/i386/hvf/x86_descr.h +++ b/target/i386/hvf/x86_descr.h @@ -29,29 +29,29 @@ typedef struct vmx_segment { } vmx_segment; /* deal with vmstate descriptors */ -void vmx_read_segment_descriptor(struct CPUState *cpu, +void vmx_read_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, enum X86Seg seg); void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, enum X86Seg seg); -x68_segment_selector vmx_read_segment_selector(struct CPUState *cpu, +x68_segment_selector vmx_read_segment_selector(CPUState *cpu, enum X86Seg seg); -void vmx_write_segment_selector(struct CPUState *cpu, +void vmx_write_segment_selector(CPUState *cpu, x68_segment_selector selector, enum X86Seg seg); -uint64_t vmx_read_segment_base(struct CPUState *cpu, enum X86Seg seg); -void vmx_write_segment_base(struct CPUState *cpu, enum X86Seg seg, +uint64_t vmx_read_segment_base(CPUState *cpu, enum X86Seg seg); +void vmx_write_segment_base(CPUState *cpu, enum X86Seg seg, uint64_t base); -void x86_segment_descriptor_to_vmx(struct CPUState *cpu, +void x86_segment_descriptor_to_vmx(CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc); uint32_t vmx_read_segment_limit(CPUState *cpu, enum X86Seg seg); uint32_t vmx_read_segment_ar(CPUState *cpu, enum X86Seg seg); -void vmx_segment_to_x86_descriptor(struct CPUState *cpu, +void vmx_segment_to_x86_descriptor(CPUState *cpu, struct vmx_segment *vmx_desc, struct x86_segment_descriptor *desc); diff --git a/target/i386/hvf/x86_emu.h b/target/i386/hvf/x86_emu.h index 4b846ba80ee..8bd97608c42 100644 --- a/target/i386/hvf/x86_emu.h +++ b/target/i386/hvf/x86_emu.h @@ -26,8 +26,8 @@ void init_emu(void); bool exec_instruction(CPUX86State *env, struct x86_decode *ins); -void load_regs(struct CPUState *cpu); -void store_regs(struct CPUState *cpu); +void load_regs(CPUState *cpu); +void store_regs(CPUState *cpu); void simulate_rdmsr(CPUX86State *env); void simulate_wrmsr(CPUX86State *env); diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c index 8cd08622a1e..649074a7d24 100644 --- a/target/i386/hvf/x86_mmu.c +++ b/target/i386/hvf/x86_mmu.c @@ -49,7 +49,7 @@ struct gpt_translation { bool exec_access; }; -static int gpt_top_level(struct CPUState *cpu, bool pae) +static int gpt_top_level(CPUState *cpu, bool pae) { if (!pae) { return 2; @@ -73,7 +73,7 @@ static inline int pte_size(bool pae) } -static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, +static bool get_pt_entry(CPUState *cpu, struct gpt_translation *pt, int level, bool pae) { int index; @@ -95,7 +95,7 @@ static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, } /* test page table entry */ -static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, +static bool test_pt_entry(CPUState *cpu, struct gpt_translation *pt, int level, bool *is_large, bool pae) { uint64_t pte = pt->pte[level]; @@ -166,7 +166,7 @@ static inline uint64_t 
large_page_gpa(struct gpt_translation *pt, bool pae) -static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code, +static bool walk_gpt(CPUState *cpu, target_ulong addr, int err_code, struct gpt_translation *pt, bool pae) { int top_level, level; @@ -205,7 +205,7 @@ static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code, } -bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa) +bool mmu_gva_to_gpa(CPUState *cpu, target_ulong gva, uint64_t *gpa) { bool res; struct gpt_translation pt; @@ -225,7 +225,7 @@ bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa) return false; } -void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes) +void vmx_write_mem(CPUState *cpu, target_ulong gva, void *data, int bytes) { uint64_t gpa; @@ -246,7 +246,7 @@ void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes } } -void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes) +void vmx_read_mem(CPUState *cpu, void *data, target_ulong gva, int bytes) { uint64_t gpa; diff --git a/target/i386/hvf/x86_mmu.h b/target/i386/hvf/x86_mmu.h index 9ae8a548de3..9447ae072cd 100644 --- a/target/i386/hvf/x86_mmu.h +++ b/target/i386/hvf/x86_mmu.h @@ -36,9 +36,9 @@ #define MMU_PAGE_US (1 << 2) #define MMU_PAGE_NX (1 << 3) -bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa); +bool mmu_gva_to_gpa(CPUState *cpu, target_ulong gva, uint64_t *gpa); -void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes); -void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes); +void vmx_write_mem(CPUState *cpu, target_ulong gva, void *data, int bytes); +void vmx_read_mem(CPUState *cpu, void *data, target_ulong gva, int bytes); #endif /* X86_MMU_H */ diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c index 3b1ef5f49a8..be2c46246e9 100644 --- a/target/i386/hvf/x86hvf.c +++ b/target/i386/hvf/x86hvf.c @@ -408,7 +408,7 @@ bool hvf_inject_interrupts(CPUState *cs) if (!(env->hflags & HF_INHIBIT_IRQ_MASK) && (cs->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) { - int line = cpu_get_pic_interrupt(&x86cpu->env); + int line = cpu_get_pic_interrupt(env); cs->interrupt_request &= ~CPU_INTERRUPT_HARD; if (line >= 0) { wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, line | diff --git a/target/i386/kvm/hyperv-stub.c b/target/i386/kvm/hyperv-stub.c index 778ed782e6f..3263dcf05d3 100644 --- a/target/i386/kvm/hyperv-stub.c +++ b/target/i386/kvm/hyperv-stub.c @@ -52,3 +52,7 @@ void hyperv_x86_synic_reset(X86CPU *cpu) void hyperv_x86_synic_update(X86CPU *cpu) { } + +void hyperv_x86_set_vmbus_recommended_features_enabled(void) +{ +} diff --git a/target/i386/kvm/hyperv.c b/target/i386/kvm/hyperv.c index e3ac978648b..f2a3fe650a1 100644 --- a/target/i386/kvm/hyperv.c +++ b/target/i386/kvm/hyperv.c @@ -45,9 +45,9 @@ void hyperv_x86_synic_update(X86CPU *cpu) static void async_synic_update(CPUState *cs, run_on_cpu_data data) { - qemu_mutex_lock_iothread(); + bql_lock(); hyperv_x86_synic_update(X86_CPU(cs)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) @@ -149,3 +149,8 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) return -1; } } + +void hyperv_x86_set_vmbus_recommended_features_enabled(void) +{ + hyperv_set_vmbus_recommended_features_enabled(); +} diff --git a/target/i386/kvm/hyperv.h 
b/target/i386/kvm/hyperv.h index 67543296c3a..e3982c8f4dd 100644 --- a/target/i386/kvm/hyperv.h +++ b/target/i386/kvm/hyperv.h @@ -26,4 +26,6 @@ int hyperv_x86_synic_add(X86CPU *cpu); void hyperv_x86_synic_reset(X86CPU *cpu); void hyperv_x86_synic_update(X86CPU *cpu); +void hyperv_x86_set_vmbus_recommended_features_enabled(void); + #endif diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index a0bc9ea7b19..e68cbe92930 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -1650,6 +1650,13 @@ static int hyperv_init_vcpu(X86CPU *cpu) } } + /* Skip SynIC and VP_INDEX since they are hard deps already */ + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_STIMER) && + hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) && + hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) { + hyperv_x86_set_vmbus_recommended_features_enabled(); + } + return 0; } @@ -1926,10 +1933,6 @@ int kvm_arch_init_vcpu(CPUState *cs) break; } - if (i == 0x1f && j == 64) { - break; - } - c->function = i; c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; c->index = j; @@ -4714,9 +4717,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) /* Inject NMI */ if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; - qemu_mutex_unlock_iothread(); + bql_unlock(); DPRINTF("injected NMI\n"); ret = kvm_vcpu_ioctl(cpu, KVM_NMI); if (ret < 0) { @@ -4725,9 +4728,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) } } if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; - qemu_mutex_unlock_iothread(); + bql_unlock(); DPRINTF("injected SMI\n"); ret = kvm_vcpu_ioctl(cpu, KVM_SMI); if (ret < 0) { @@ -4738,7 +4741,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) } if (!kvm_pic_in_kernel()) { - qemu_mutex_lock_iothread(); + bql_lock(); } /* Force the VCPU out of its inner loop to process any INIT requests @@ -4791,7 +4794,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) DPRINTF("setting tpr\n"); run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -4839,12 +4842,12 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) /* We need to protect the apic state against concurrent accesses from * different threads in case the userspace irqchip is used. 
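Throughout this series the Big QEMU Lock API is renamed: qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() become bql_lock()/bql_unlock(), QEMU_IOTHREAD_LOCK_GUARD() becomes BQL_LOCK_GUARD(), qemu_mutex_iothread_locked() becomes bql_locked(), and qemu_cond_wait_iothread() becomes qemu_cond_wait_bql(), as the xen-emu.c and nvmm hunks below show. A minimal usage sketch, assuming the declarations still live in qemu/main-loop.h as they did before the rename:

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"

    /* Sketch only: serialize a device-state update with the BQL.  The guard
     * form releases the lock automatically when the scope is left. */
    static void update_under_bql(void (*update)(void *), void *opaque)
    {
        BQL_LOCK_GUARD();
        assert(bql_locked());
        update(opaque);
    }

The explicit bql_lock()/bql_unlock() pairs in these hunks are one-for-one replacements of the old calls, so no locking behaviour changes.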
*/ if (!kvm_irqchip_in_kernel()) { - qemu_mutex_lock_iothread(); + bql_lock(); } cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8); cpu_set_apic_base(x86_cpu->apic_state, run->apic_base); if (!kvm_irqchip_in_kernel()) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } return cpu_get_mem_attrs(env); } @@ -5278,17 +5281,17 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) switch (run->exit_reason) { case KVM_EXIT_HLT: DPRINTF("handle_hlt\n"); - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_handle_halt(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case KVM_EXIT_SET_TPR: ret = 0; break; case KVM_EXIT_TPR_ACCESS: - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_handle_tpr_access(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case KVM_EXIT_FAIL_ENTRY: code = run->fail_entry.hardware_entry_failure_reason; @@ -5314,9 +5317,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) break; case KVM_EXIT_DEBUG: DPRINTF("kvm_exit_debug\n"); - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_handle_debug(cpu, &run->debug.arch); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case KVM_EXIT_HYPERV: ret = kvm_hv_handle_exit(cpu, &run->hyperv); diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c index c0631f9cf43..fc2c2321acd 100644 --- a/target/i386/kvm/xen-emu.c +++ b/target/i386/kvm/xen-emu.c @@ -403,7 +403,7 @@ void kvm_xen_maybe_deassert_callback(CPUState *cs) /* If the evtchn_upcall_pending flag is cleared, turn the GSI off. */ if (!vi->evtchn_upcall_pending) { - qemu_mutex_lock_iothread(); + bql_lock(); /* * Check again now we have the lock, because it may have been * asserted in the interim. And we don't want to take the lock @@ -413,7 +413,7 @@ void kvm_xen_maybe_deassert_callback(CPUState *cs) X86_CPU(cs)->env.xen_callback_asserted = false; xen_evtchn_set_callback_level(0); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -581,7 +581,7 @@ static int xen_set_shared_info(uint64_t gfn) uint64_t gpa = gfn << TARGET_PAGE_BITS; int i, err; - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); /* * The xen_overlay device tells KVM about it too, since it had to @@ -773,9 +773,9 @@ static bool handle_set_param(struct kvm_xen_exit *exit, X86CPU *cpu, switch (hp.index) { case HVM_PARAM_CALLBACK_IRQ: - qemu_mutex_lock_iothread(); + bql_lock(); err = xen_evtchn_set_callback_param(hp.value); - qemu_mutex_unlock_iothread(); + bql_unlock(); xen_set_long_mode(exit->u.hcall.longmode); break; default: @@ -1408,7 +1408,7 @@ int kvm_xen_soft_reset(void) CPUState *cpu; int err; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); trace_kvm_xen_soft_reset(); @@ -1481,9 +1481,9 @@ static int schedop_shutdown(CPUState *cs, uint64_t arg) break; case SHUTDOWN_soft_reset: - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_xen_soft_reset(); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; default: diff --git a/target/i386/machine.c b/target/i386/machine.c index a1041ef828c..c3ae3208147 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -18,7 +18,7 @@ static const VMStateDescription vmstate_segment = { .name = "segment", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(selector, SegmentCache), VMSTATE_UINTTL(base, SegmentCache), VMSTATE_UINT32(limit, SegmentCache), @@ -43,7 +43,7 @@ static const VMStateDescription vmstate_xmm_reg = { .name = "xmm_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + 
.fields = (const VMStateField[]) { VMSTATE_UINT64(ZMM_Q(0), ZMMReg), VMSTATE_UINT64(ZMM_Q(1), ZMMReg), VMSTATE_END_OF_LIST() @@ -59,7 +59,7 @@ static const VMStateDescription vmstate_ymmh_reg = { .name = "ymmh_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ZMM_Q(2), ZMMReg), VMSTATE_UINT64(ZMM_Q(3), ZMMReg), VMSTATE_END_OF_LIST() @@ -74,7 +74,7 @@ static const VMStateDescription vmstate_zmmh_reg = { .name = "zmmh_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ZMM_Q(4), ZMMReg), VMSTATE_UINT64(ZMM_Q(5), ZMMReg), VMSTATE_UINT64(ZMM_Q(6), ZMMReg), @@ -92,7 +92,7 @@ static const VMStateDescription vmstate_hi16_zmm_reg = { .name = "hi16_zmm_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ZMM_Q(0), ZMMReg), VMSTATE_UINT64(ZMM_Q(1), ZMMReg), VMSTATE_UINT64(ZMM_Q(2), ZMMReg), @@ -114,7 +114,7 @@ static const VMStateDescription vmstate_bnd_regs = { .name = "bnd_regs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(lb, BNDReg), VMSTATE_UINT64(ub, BNDReg), VMSTATE_END_OF_LIST() @@ -128,7 +128,7 @@ static const VMStateDescription vmstate_mtrr_var = { .name = "mtrr_var", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(base, MTRRVar), VMSTATE_UINT64(mask, MTRRVar), VMSTATE_END_OF_LIST() @@ -142,7 +142,7 @@ static const VMStateDescription vmstate_lbr_records_var = { .name = "lbr_records_var", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(from, LBREntry), VMSTATE_UINT64(to, LBREntry), VMSTATE_UINT64(info, LBREntry), @@ -201,7 +201,7 @@ static const VMStateDescription vmstate_fpreg_tmp = { .name = "fpreg_tmp", .post_load = fpreg_post_load, .pre_save = fpreg_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp), VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp), VMSTATE_END_OF_LIST() @@ -210,7 +210,7 @@ static const VMStateDescription vmstate_fpreg_tmp = { static const VMStateDescription vmstate_fpreg = { .name = "fpreg", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp), VMSTATE_END_OF_LIST() } @@ -453,7 +453,7 @@ static const VMStateDescription vmstate_exception_info = { .version_id = 1, .minimum_version_id = 1, .needed = exception_info_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(env.exception_pending, X86CPU), VMSTATE_UINT8(env.exception_injected, X86CPU), VMSTATE_UINT8(env.exception_has_payload, X86CPU), @@ -475,7 +475,7 @@ static const VMStateDescription vmstate_steal_time_msr = { .version_id = 1, .minimum_version_id = 1, .needed = steal_time_msr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.steal_time_msr, X86CPU), VMSTATE_END_OF_LIST() } @@ -486,7 +486,7 @@ static const VMStateDescription vmstate_async_pf_msr = { .version_id = 1, .minimum_version_id = 1, .needed = async_pf_msr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.async_pf_en_msr, X86CPU), VMSTATE_END_OF_LIST() } @@ -497,7 +497,7 @@ static const VMStateDescription vmstate_async_pf_int_msr = { 
.version_id = 1, .minimum_version_id = 1, .needed = async_pf_int_msr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.async_pf_int_msr, X86CPU), VMSTATE_END_OF_LIST() } @@ -508,7 +508,7 @@ static const VMStateDescription vmstate_pv_eoi_msr = { .version_id = 1, .minimum_version_id = 1, .needed = pv_eoi_msr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU), VMSTATE_END_OF_LIST() } @@ -519,7 +519,7 @@ static const VMStateDescription vmstate_poll_control_msr = { .version_id = 1, .minimum_version_id = 1, .needed = poll_control_msr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.poll_control_msr, X86CPU), VMSTATE_END_OF_LIST() } @@ -538,7 +538,7 @@ static const VMStateDescription vmstate_fpop_ip_dp = { .version_id = 1, .minimum_version_id = 1, .needed = fpop_ip_dp_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(env.fpop, X86CPU), VMSTATE_UINT64(env.fpip, X86CPU), VMSTATE_UINT64(env.fpdp, X86CPU), @@ -559,7 +559,7 @@ static const VMStateDescription vmstate_msr_tsc_adjust = { .version_id = 1, .minimum_version_id = 1, .needed = tsc_adjust_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.tsc_adjust, X86CPU), VMSTATE_END_OF_LIST() } @@ -578,7 +578,7 @@ static const VMStateDescription vmstate_msr_smi_count = { .version_id = 1, .minimum_version_id = 1, .needed = msr_smi_count_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_smi_count, X86CPU), VMSTATE_END_OF_LIST() } @@ -597,7 +597,7 @@ static const VMStateDescription vmstate_msr_tscdeadline = { .version_id = 1, .minimum_version_id = 1, .needed = tscdeadline_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.tsc_deadline, X86CPU), VMSTATE_END_OF_LIST() } @@ -624,7 +624,7 @@ static const VMStateDescription vmstate_msr_ia32_misc_enable = { .version_id = 1, .minimum_version_id = 1, .needed = misc_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU), VMSTATE_END_OF_LIST() } @@ -635,7 +635,7 @@ static const VMStateDescription vmstate_msr_ia32_feature_control = { .version_id = 1, .minimum_version_id = 1, .needed = feature_control_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU), VMSTATE_END_OF_LIST() } @@ -670,7 +670,7 @@ static const VMStateDescription vmstate_msr_architectural_pmu = { .version_id = 1, .minimum_version_id = 1, .needed = pmu_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU), VMSTATE_UINT64(env.msr_global_ctrl, X86CPU), VMSTATE_UINT64(env.msr_global_status, X86CPU), @@ -706,7 +706,7 @@ static const VMStateDescription vmstate_mpx = { .version_id = 1, .minimum_version_id = 1, .needed = mpx_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4), VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU), VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU), @@ -728,7 +728,7 @@ static const VMStateDescription vmstate_msr_hyperv_hypercall = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_hypercall_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU), 
VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU), VMSTATE_END_OF_LIST() @@ -748,7 +748,7 @@ static const VMStateDescription vmstate_msr_hyperv_vapic = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_vapic_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_vapic, X86CPU), VMSTATE_END_OF_LIST() } @@ -767,7 +767,7 @@ static const VMStateDescription vmstate_msr_hyperv_time = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_time_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_tsc, X86CPU), VMSTATE_END_OF_LIST() } @@ -792,7 +792,7 @@ static const VMStateDescription vmstate_msr_hyperv_crash = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_crash_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS), VMSTATE_END_OF_LIST() } @@ -815,7 +815,7 @@ static const VMStateDescription vmstate_msr_hyperv_runtime = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_runtime_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_runtime, X86CPU), VMSTATE_END_OF_LIST() } @@ -855,7 +855,7 @@ static const VMStateDescription vmstate_msr_hyperv_synic = { .minimum_version_id = 1, .needed = hyperv_synic_enable_needed, .post_load = hyperv_synic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU), VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU), VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU), @@ -883,7 +883,7 @@ static const VMStateDescription vmstate_msr_hyperv_stimer = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_stimer_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU, HV_STIMER_COUNT), VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT), @@ -926,7 +926,7 @@ static const VMStateDescription vmstate_msr_hyperv_reenlightenment = { .minimum_version_id = 1, .needed = hyperv_reenlightenment_enable_needed, .post_load = hyperv_reenlightenment_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU), VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU), VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU), @@ -970,7 +970,7 @@ static const VMStateDescription vmstate_avx512 = { .version_id = 1, .minimum_version_id = 1, .needed = avx512_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS), VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0), #ifdef TARGET_X86_64 @@ -993,7 +993,7 @@ static const VMStateDescription vmstate_xss = { .version_id = 1, .minimum_version_id = 1, .needed = xss_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.xss, X86CPU), VMSTATE_END_OF_LIST() } @@ -1012,7 +1012,7 @@ static const VMStateDescription vmstate_umwait = { .version_id = 1, .minimum_version_id = 1, .needed = umwait_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.umwait, X86CPU), VMSTATE_END_OF_LIST() } @@ -1031,7 +1031,7 @@ static const VMStateDescription vmstate_pkru = { .version_id = 1, .minimum_version_id = 1, .needed = pkru_needed, - .fields = (VMStateField[]){ + .fields = 
(const VMStateField[]){ VMSTATE_UINT32(env.pkru, X86CPU), VMSTATE_END_OF_LIST() } @@ -1050,7 +1050,7 @@ static const VMStateDescription vmstate_pkrs = { .version_id = 1, .minimum_version_id = 1, .needed = pkrs_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT32(env.pkrs, X86CPU), VMSTATE_END_OF_LIST() } @@ -1070,7 +1070,7 @@ static const VMStateDescription vmstate_tsc_khz = { .version_id = 1, .minimum_version_id = 1, .needed = tsc_khz_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(env.tsc_khz, X86CPU), VMSTATE_END_OF_LIST() } @@ -1090,7 +1090,7 @@ static const VMStateDescription vmstate_vmx_vmcs12 = { .version_id = 1, .minimum_version_id = 1, .needed = vmx_vmcs12_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12, struct kvm_nested_state, KVM_STATE_NESTED_VMX_VMCS_SIZE), @@ -1110,7 +1110,7 @@ static const VMStateDescription vmstate_vmx_shadow_vmcs12 = { .version_id = 1, .minimum_version_id = 1, .needed = vmx_shadow_vmcs12_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12, struct kvm_nested_state, KVM_STATE_NESTED_VMX_VMCS_SIZE), @@ -1131,13 +1131,13 @@ static const VMStateDescription vmstate_vmx_nested_state = { .version_id = 1, .minimum_version_id = 1, .needed = vmx_nested_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state), VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state), VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_vmx_vmcs12, &vmstate_vmx_shadow_vmcs12, NULL, @@ -1162,7 +1162,7 @@ static const VMStateDescription vmstate_svm_nested_state = { .version_id = 1, .minimum_version_id = 1, .needed = svm_nested_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state), VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12, struct kvm_nested_state, @@ -1232,13 +1232,13 @@ static const VMStateDescription vmstate_kvm_nested_state = { .name = "cpu/kvm_nested_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_U16(flags, struct kvm_nested_state), VMSTATE_U16(format, struct kvm_nested_state), VMSTATE_U32(size, struct kvm_nested_state), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_vmx_nested_state, &vmstate_svm_nested_state, NULL @@ -1251,7 +1251,7 @@ static const VMStateDescription vmstate_nested_state = { .minimum_version_id = 1, .needed = nested_state_needed, .post_load = nested_state_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU, vmstate_kvm_nested_state, struct kvm_nested_state), @@ -1269,7 +1269,7 @@ static const VMStateDescription vmstate_xen_vcpu = { .version_id = 1, .minimum_version_id = 1, .needed = xen_vcpu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU), VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU), VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU), @@ -1295,7 +1295,7 @@ static const VMStateDescription vmstate_mcg_ext_ctl = { .version_id = 1, 
.minimum_version_id = 1, .needed = mcg_ext_ctl_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU), VMSTATE_END_OF_LIST() } @@ -1314,7 +1314,7 @@ static const VMStateDescription vmstate_spec_ctrl = { .version_id = 1, .minimum_version_id = 1, .needed = spec_ctrl_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT64(env.spec_ctrl, X86CPU), VMSTATE_END_OF_LIST() } @@ -1334,7 +1334,7 @@ static const VMStateDescription amd_tsc_scale_msr_ctrl = { .version_id = 1, .minimum_version_id = 1, .needed = amd_tsc_scale_msr_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT64(env.amd_tsc_scale_msr, X86CPU), VMSTATE_END_OF_LIST() } @@ -1367,7 +1367,7 @@ static const VMStateDescription vmstate_msr_intel_pt = { .version_id = 1, .minimum_version_id = 1, .needed = intel_pt_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU), VMSTATE_UINT64(env.msr_rtit_status, X86CPU), VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU), @@ -1391,7 +1391,7 @@ static const VMStateDescription vmstate_msr_virt_ssbd = { .version_id = 1, .minimum_version_id = 1, .needed = virt_ssbd_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT64(env.virt_ssbd, X86CPU), VMSTATE_END_OF_LIST() } @@ -1410,7 +1410,7 @@ static const VMStateDescription vmstate_svm_npt = { .version_id = 1, .minimum_version_id = 1, .needed = svm_npt_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT64(env.nested_cr3, X86CPU), VMSTATE_UINT32(env.nested_pg_mode, X86CPU), VMSTATE_END_OF_LIST() @@ -1430,7 +1430,7 @@ static const VMStateDescription vmstate_svm_guest = { .version_id = 1, .minimum_version_id = 1, .needed = svm_guest_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT32(env.int_ctl, X86CPU), VMSTATE_END_OF_LIST() } @@ -1450,7 +1450,7 @@ static const VMStateDescription vmstate_efer32 = { .version_id = 1, .minimum_version_id = 1, .needed = intel_efer32_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.efer, X86CPU), VMSTATE_END_OF_LIST() } @@ -1470,7 +1470,7 @@ static const VMStateDescription vmstate_msr_tsx_ctrl = { .version_id = 1, .minimum_version_id = 1, .needed = msr_tsx_ctrl_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.tsx_ctrl, X86CPU), VMSTATE_END_OF_LIST() } @@ -1489,7 +1489,7 @@ static const VMStateDescription vmstate_msr_intel_sgx = { .version_id = 1, .minimum_version_id = 1, .needed = intel_sgx_msrs_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.msr_ia32_sgxlepubkeyhash, X86CPU, 4), VMSTATE_END_OF_LIST() } @@ -1517,7 +1517,7 @@ static const VMStateDescription vmstate_pdptrs = { .minimum_version_id = 1, .needed = pdptrs_needed, .post_load = pdptrs_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.pdptrs, X86CPU, 4), VMSTATE_END_OF_LIST() } @@ -1536,7 +1536,7 @@ static const VMStateDescription vmstate_msr_xfd = { .version_id = 1, .minimum_version_id = 1, .needed = xfd_msrs_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_xfd, X86CPU), VMSTATE_UINT64(env.msr_xfd_err, X86CPU), VMSTATE_END_OF_LIST() @@ -1557,7 +1557,7 @@ static const VMStateDescription vmstate_amx_xtile = { .version_id = 1, 
.minimum_version_id = 1, .needed = amx_xtile_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64), VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192), VMSTATE_END_OF_LIST() @@ -1578,7 +1578,7 @@ static const VMStateDescription vmstate_arch_lbr = { .version_id = 1, .minimum_version_id = 1, .needed = arch_lbr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_lbr_ctl, X86CPU), VMSTATE_UINT64(env.msr_lbr_depth, X86CPU), VMSTATE_LBR_VARS(env.lbr_records, X86CPU, ARCH_LBR_NR_ENTRIES, 1), @@ -1599,7 +1599,7 @@ static const VMStateDescription vmstate_triple_fault = { .version_id = 1, .minimum_version_id = 1, .needed = triple_fault_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(env.triple_fault_pending, X86CPU), VMSTATE_END_OF_LIST() } @@ -1611,7 +1611,7 @@ const VMStateDescription vmstate_x86_cpu = { .minimum_version_id = 11, .pre_save = cpu_pre_save, .post_load = cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS), VMSTATE_UINTTL(env.eip, X86CPU), VMSTATE_UINTTL(env.eflags, X86CPU), @@ -1699,7 +1699,7 @@ const VMStateDescription vmstate_x86_cpu = { VMSTATE_END_OF_LIST() /* The above list is not sorted /wrt version numbers, watch out! */ }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_exception_info, &vmstate_async_pf_msr, &vmstate_async_pf_int_msr, diff --git a/target/i386/monitor.c b/target/i386/monitor.c index 950ff9ccbc5..3a281dab027 100644 --- a/target/i386/monitor.c +++ b/target/i386/monitor.c @@ -33,7 +33,6 @@ #include "qapi/error.h" #include "qapi/qapi-commands-misc-target.h" #include "qapi/qapi-commands-misc.h" -#include "hw/i386/pc.h" /* Perform linear address sign extension */ static hwaddr addr_canonical(CPUArchState *env, hwaddr addr) diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c index 6c46101ac1a..6b2bfd9b9c3 100644 --- a/target/i386/nvmm/nvmm-accel-ops.c +++ b/target/i386/nvmm/nvmm-accel-ops.c @@ -25,7 +25,7 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); current_cpu = cpu; @@ -48,14 +48,14 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg) } } while (cpu_thread_is_idle(cpu)) { - qemu_cond_wait_iothread(cpu->halt_cond); + qemu_cond_wait_bql(cpu->halt_cond); } qemu_wait_io_event_common(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); nvmm_destroy_vcpu(cpu); cpu_thread_signal_destroyed(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index 7d752bc5e00..49a3a3b9169 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -340,7 +340,6 @@ nvmm_get_registers(CPUState *cpu) static bool nvmm_can_take_int(CPUState *cpu) { - CPUX86State *env = cpu_env(cpu); AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; struct nvmm_machine *mach = get_nvmm_mach(); @@ -349,7 +348,7 @@ nvmm_can_take_int(CPUState *cpu) return false; } - if (qcpu->int_shadow || !(env->eflags & IF_MASK)) { + if (qcpu->int_shadow || !(cpu_env(cpu)->eflags & IF_MASK)) { struct nvmm_x64_state *state = vcpu->state; /* Exit on interrupt window. 
*/ @@ -399,7 +398,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) uint8_t tpr; int ret; - qemu_mutex_lock_iothread(); + bql_lock(); tpr = cpu_get_apic_tpr(x86_cpu->apic_state); if (tpr != qcpu->tpr) { @@ -462,7 +461,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* @@ -485,9 +484,9 @@ nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit) tpr = exit->exitstate.cr8; if (qcpu->tpr != tpr) { qcpu->tpr = tpr; - qemu_mutex_lock_iothread(); + bql_lock(); cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -645,20 +644,19 @@ static int nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu, struct nvmm_vcpu_exit *exit) { - CPUX86State *env = cpu_env(cpu); int ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && - (env->eflags & IF_MASK)) && + (cpu_env(cpu)->eflags & IF_MASK)) && !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) { cpu->exception_index = EXCP_HLT; cpu->halted = true; ret = 1; } - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -721,7 +719,7 @@ nvmm_vcpu_loop(CPUState *cpu) return 0; } - qemu_mutex_unlock_iothread(); + bql_unlock(); cpu_exec_start(cpu); /* @@ -806,16 +804,16 @@ nvmm_vcpu_loop(CPUState *cpu) error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]", exit->reason, exit->u.inv.hwcode); nvmm_get_registers(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_system_guest_panicked(cpu_get_crash_info(cpu)); - qemu_mutex_unlock_iothread(); + bql_unlock(); ret = -1; break; } } while (ret == 0); cpu_exec_end(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); qatomic_set(&cpu->exit_request, false); diff --git a/target/i386/sev.c b/target/i386/sev.c index 9a712466825..72930ff0dcc 100644 --- a/target/i386/sev.c +++ b/target/i386/sev.c @@ -167,7 +167,7 @@ sev_ioctl(int fd, int cmd, void *data, int *error) input.id = cmd; input.sev_fd = fd; - input.data = (__u64)(unsigned long)data; + input.data = (uintptr_t)data; r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &input); @@ -240,7 +240,7 @@ sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size, return; } - range.addr = (__u64)(unsigned long)host; + range.addr = (uintptr_t)host; range.size = max_size; trace_kvm_memcrypt_register_region(host, max_size); @@ -270,7 +270,7 @@ sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size, return; } - range.addr = (__u64)(unsigned long)host; + range.addr = (uintptr_t)host; range.size = max_size; trace_kvm_memcrypt_unregister_region(host, max_size); @@ -767,7 +767,7 @@ sev_launch_update_data(SevGuestState *sev, uint8_t *addr, uint64_t len) return 1; } - update.uaddr = (__u64)(unsigned long)addr; + update.uaddr = (uintptr_t)addr; update.len = len; trace_kvm_sev_launch_update_data(addr, len); ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA, @@ -1044,6 +1044,7 @@ sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp) int sev_inject_launch_secret(const char *packet_hdr, const char *secret, uint64_t gpa, Error **errp) { + ERRP_GUARD(); struct kvm_sev_launch_secret input; g_autofree guchar *data = NULL, *hdr = NULL; int error, ret = 1; diff --git a/target/i386/tcg/cc_helper.c b/target/i386/tcg/cc_helper.c index c310bd842f1..f76e9cb8cfb 100644 --- a/target/i386/tcg/cc_helper.c +++ b/target/i386/tcg/cc_helper.c @@ -220,9 +220,9 @@ target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1, } } -uint32_t cpu_cc_compute_all(CPUX86State *env, int op) +uint32_t 
cpu_cc_compute_all(CPUX86State *env) { - return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op); + return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, CC_OP); } target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1, @@ -335,7 +335,7 @@ target_ulong helper_read_eflags(CPUX86State *env) { uint32_t eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); eflags |= (env->df & DF_MASK); eflags |= env->eflags & ~(VM_MASK | RF_MASK); return eflags; diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc index 2bdbb1bba0f..426c4594120 100644 --- a/target/i386/tcg/decode-new.c.inc +++ b/target/i386/tcg/decode-new.c.inc @@ -26,6 +26,13 @@ * size (X86_SIZE_*) codes used in the manual. There are a few differences * though. * + * Operand sizes + * ------------- + * + * The manual lists d64 ("cannot encode 32-bit size in 64-bit mode") and f64 + * ("cannot encode 16-bit or 32-bit size in 64-bit mode") as modifiers of the + * "v" or "z" sizes. The decoder simply makes them separate operand sizes. + * * Vector operands * --------------- * @@ -44,6 +51,11 @@ * if the difference is expressed via prefixes. Individual instructions * are separated by prefix in the generator functions. * + * There is a custom size "xh" used to address half of a SSE/AVX operand. + * This points to a 64-bit operand for SSE operations, 128-bit operand + * for 256-bit AVX operands, etc. It is used for conversion operations + * such as VCVTPH2PS or VCVTSS2SD. + * * There are a couple cases in which instructions (e.g. MOVD) write the * whole XMM or MM register but are established incorrectly in the manual * as "d" or "q". These have to be fixed for the decoder to work correctly. @@ -139,10 +151,13 @@ #define cpuid(feat) .cpuid = X86_FEAT_##feat, #define xchg .special = X86_SPECIAL_Locked, +#define lock .special = X86_SPECIAL_HasLock, #define mmx .special = X86_SPECIAL_MMX, -#define zext0 .special = X86_SPECIAL_ZExtOp0, -#define zext2 .special = X86_SPECIAL_ZExtOp2, +#define op0_Rd .special = X86_SPECIAL_Op0_Rd, +#define op2_Ry .special = X86_SPECIAL_Op2_Ry, #define avx_movx .special = X86_SPECIAL_AVXExtMov, +#define sextT0 .special = X86_SPECIAL_SExtT0, +#define zextT0 .special = X86_SPECIAL_ZExtT0, #define vex1 .vex_class = 1, #define vex1_rep3 .vex_class = 1, .vex_special = X86_VEX_REPScalar, @@ -523,6 +538,28 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = { [0xdd] = X86_OP_ENTRY3(VAESENCLAST, V,x, H,x, W,x, vex4 cpuid(AES) p_66), [0xde] = X86_OP_ENTRY3(VAESDEC, V,x, H,x, W,x, vex4 cpuid(AES) p_66), [0xdf] = X86_OP_ENTRY3(VAESDECLAST, V,x, H,x, W,x, vex4 cpuid(AES) p_66), + + /* + * REG selects srcdest2 operand, VEX.vvvv selects src3. VEX class not found + * in manual, assumed to be 13 from the VEX.L0 constraint. 
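The sixteen rows that follow (0F 38 E0..EF) all decode to the same CMPccXADD generator and differ only in their condition code, which corresponds to the low nibble of the opcode byte in the usual Jcc/SETcc order (O, NO, B, AE, Z, NZ, BE, A, S, NS, P, NP, L, GE, LE, G). A small illustrative helper, not part of the patch:

    #include <assert.h>

    /* Map a 0F 38 E0..EF opcode byte to its condition-code index; e.g.
     * 0xE4 -> 4 (Z, CMPZXADD) and 0xEF -> 15 (NLE/G, CMPNLEXADD). */
    static int cmpccxadd_cond(unsigned opcode)
    {
        assert(opcode >= 0xe0 && opcode <= 0xef);
        return opcode & 0xf;
    }

The xchg marker on each row reuses the "always locked" handling that XCHG gets, reflecting that CMPccXADD accesses its memory operand atomically.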
+ */ + [0xe0] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe1] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe2] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe3] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe4] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe5] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe6] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe7] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + + [0xe8] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe9] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xea] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xeb] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xec] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xed] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xee] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xef] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), }; /* five rows for no prefix, 66, F3, F2, 66+F2 */ @@ -558,8 +595,8 @@ static const X86OpEntry opcodes_0F38_F0toFF[16][5] = { [5] = { X86_OP_ENTRY3(BZHI, G,y, E,y, B,y, vex13 cpuid(BMI1)), {}, - X86_OP_ENTRY3(PEXT, G,y, B,y, E,y, vex13 cpuid(BMI2)), - X86_OP_ENTRY3(PDEP, G,y, B,y, E,y, vex13 cpuid(BMI2)), + X86_OP_ENTRY3(PEXT, G,y, B,y, E,y, vex13 zextT0 cpuid(BMI2)), + X86_OP_ENTRY3(PDEP, G,y, B,y, E,y, vex13 zextT0 cpuid(BMI2)), {}, }, [6] = { @@ -570,10 +607,10 @@ static const X86OpEntry opcodes_0F38_F0toFF[16][5] = { {}, }, [7] = { - X86_OP_ENTRY3(BEXTR, G,y, E,y, B,y, vex13 cpuid(BMI1)), + X86_OP_ENTRY3(BEXTR, G,y, E,y, B,y, vex13 zextT0 cpuid(BMI1)), X86_OP_ENTRY3(SHLX, G,y, E,y, B,y, vex13 cpuid(BMI1)), - X86_OP_ENTRY3(SARX, G,y, E,y, B,y, vex13 cpuid(BMI1)), - X86_OP_ENTRY3(SHRX, G,y, E,y, B,y, vex13 cpuid(BMI1)), + X86_OP_ENTRY3(SARX, G,y, E,y, B,y, vex13 sextT0 cpuid(BMI1)), + X86_OP_ENTRY3(SHRX, G,y, E,y, B,y, vex13 zextT0 cpuid(BMI1)), {}, }, }; @@ -619,13 +656,13 @@ static const X86OpEntry opcodes_0F3A[256] = { [0x05] = X86_OP_ENTRY3(VPERMILPD_i, V,x, W,x, I,b, vex6 chk(W0) cpuid(AVX) p_66), [0x06] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX) p_66), - [0x14] = X86_OP_ENTRY3(PEXTRB, E,b, V,dq, I,b, vex5 cpuid(SSE41) zext0 p_66), - [0x15] = X86_OP_ENTRY3(PEXTRW, E,w, V,dq, I,b, vex5 cpuid(SSE41) zext0 p_66), + [0x14] = X86_OP_ENTRY3(PEXTRB, E,b, V,dq, I,b, vex5 cpuid(SSE41) op0_Rd p_66), + [0x15] = X86_OP_ENTRY3(PEXTRW, E,w, V,dq, I,b, vex5 cpuid(SSE41) op0_Rd p_66), [0x16] = X86_OP_ENTRY3(PEXTR, E,y, V,dq, I,b, vex5 cpuid(SSE41) p_66), [0x17] = X86_OP_ENTRY3(VEXTRACTPS, E,d, V,dq, I,b, vex5 cpuid(SSE41) p_66), [0x1d] = X86_OP_ENTRY3(VCVTPS2PH, W,xh, V,x, I,b, vex11 chk(W0) cpuid(F16C) p_66), - [0x20] = X86_OP_ENTRY4(PINSRB, V,dq, H,dq, E,b, vex5 cpuid(SSE41) zext2 p_66), + [0x20] = X86_OP_ENTRY4(PINSRB, V,dq, H,dq, E,b, vex5 cpuid(SSE41) op2_Ry p_66), [0x21] = X86_OP_GROUP0(VINSERTPS), [0x22] = X86_OP_ENTRY4(PINSR, V,dq, H,dq, E,y, vex5 cpuid(SSE41) 
p_66), @@ -1091,10 +1128,6 @@ static int decode_modrm(DisasContext *s, CPUX86State *env, X86DecodedInsn *decod { int modrm = get_modrm(s, env); if ((modrm >> 6) == 3) { - if (s->prefix & PREFIX_LOCK) { - decode->e.gen = gen_illegal; - return 0xff; - } op->n = (modrm & 7); if (type != X86_TYPE_Q && type != X86_TYPE_N) { op->n |= REX_B(s); @@ -1201,6 +1234,8 @@ static bool decode_op(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, case X86_TYPE_None: /* Implicit or absent */ case X86_TYPE_A: /* Implicit */ case X86_TYPE_F: /* EFLAGS/RFLAGS */ + case X86_TYPE_X: /* string source */ + case X86_TYPE_Y: /* string destination */ break; case X86_TYPE_B: /* VEX.vvvv selects a GPR */ @@ -1316,43 +1351,15 @@ static bool decode_op(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, } case X86_TYPE_I: /* Immediate */ - op->unit = X86_OP_IMM; - decode->immediate = insn_get_signed(env, s, op->ot); - break; - case X86_TYPE_J: /* Relative offset for a jump */ op->unit = X86_OP_IMM; decode->immediate = insn_get_signed(env, s, op->ot); - decode->immediate += s->pc - s->cs_base; - if (s->dflag == MO_16) { - decode->immediate &= 0xffff; - } else if (!CODE64(s)) { - decode->immediate &= 0xffffffffu; - } break; case X86_TYPE_L: /* The upper 4 bits of the immediate select a 128-bit register */ op->n = insn_get(env, s, op->ot) >> 4; break; - case X86_TYPE_X: /* string source */ - op->n = -1; - decode->mem = (AddressParts) { - .def_seg = R_DS, - .base = R_ESI, - .index = -1, - }; - break; - - case X86_TYPE_Y: /* string destination */ - op->n = -1; - decode->mem = (AddressParts) { - .def_seg = R_ES, - .base = R_EDI, - .index = -1, - }; - break; - case X86_TYPE_2op: *op = decode->op[0]; break; @@ -1518,6 +1525,9 @@ static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid) return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_AVX2); case X86_FEAT_SHA_NI: return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SHA_NI); + + case X86_FEAT_CMPCCXADD: + return (s->cpuid_7_1_eax_features & CPUID_7_1_EAX_CMPCCXADD); } g_assert_not_reached(); } @@ -1677,6 +1687,7 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) bool first = true; X86DecodedInsn decode; X86DecodeFunc decode_func = decode_root; + uint8_t cc_live; s->has_modrm = false; @@ -1830,6 +1841,7 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) } memset(&decode, 0, sizeof(decode)); + decode.cc_op = -1; decode.b = b; if (!decode_insn(s, env, decode_func, &decode)) { goto illegal_op; @@ -1869,19 +1881,22 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) if (decode.op[0].has_ea) { s->prefix |= PREFIX_LOCK; } + decode.e.special = X86_SPECIAL_HasLock; + /* fallthrough */ + case X86_SPECIAL_HasLock: break; - case X86_SPECIAL_ZExtOp0: + case X86_SPECIAL_Op0_Rd: assert(decode.op[0].unit == X86_OP_INT); if (!decode.op[0].has_ea) { decode.op[0].ot = MO_32; } break; - case X86_SPECIAL_ZExtOp2: + case X86_SPECIAL_Op2_Ry: assert(decode.op[2].unit == X86_OP_INT); if (!decode.op[2].has_ea) { - decode.op[2].ot = MO_32; + decode.op[2].ot = s->dflag == MO_16 ? MO_32 : s->dflag; } break; @@ -1893,10 +1908,22 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) } break; + case X86_SPECIAL_SExtT0: + case X86_SPECIAL_ZExtT0: + /* Handled in gen_load. 
*/ + assert(decode.op[1].unit == X86_OP_INT); + break; + default: break; } + if (s->prefix & PREFIX_LOCK) { + if (decode.e.special != X86_SPECIAL_HasLock || !decode.op[0].has_ea) { + goto illegal_op; + } + } + if (!validate_vex(s, &decode)) { return; } @@ -1940,9 +1967,6 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) gen_load_ea(s, &decode.mem, decode.e.vex_class == 12); } if (s->prefix & PREFIX_LOCK) { - if (decode.op[0].unit != X86_OP_INT || !decode.op[0].has_ea) { - goto illegal_op; - } gen_load(s, &decode, 2, s->T1); decode.e.gen(s, env, &decode); } else { @@ -1956,6 +1980,38 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) decode.e.gen(s, env, &decode); gen_writeback(s, &decode, 0, s->T0); } + + /* + * Write back flags after last memory access. Some newer ALU instructions, as + * well as SSE instructions, write flags in the gen_* function, but that can + * cause incorrect tracking of CC_OP for instructions that write to both memory + * and flags. + */ + if (decode.cc_op != -1) { + if (decode.cc_dst) { + tcg_gen_mov_tl(cpu_cc_dst, decode.cc_dst); + } + if (decode.cc_src) { + tcg_gen_mov_tl(cpu_cc_src, decode.cc_src); + } + if (decode.cc_src2) { + tcg_gen_mov_tl(cpu_cc_src2, decode.cc_src2); + } + if (decode.cc_op == CC_OP_DYNAMIC) { + tcg_gen_mov_i32(cpu_cc_op, decode.cc_op_dynamic); + } + set_cc_op(s, decode.cc_op); + cc_live = cc_op_live[decode.cc_op]; + } else { + cc_live = 0; + } + if (decode.cc_op != CC_OP_DYNAMIC) { + assert(!decode.cc_op_dynamic); + assert(!!decode.cc_dst == !!(cc_live & USES_CC_DST)); + assert(!!decode.cc_src == !!(cc_live & USES_CC_SRC)); + assert(!!decode.cc_src2 == !!(cc_live & USES_CC_SRC2)); + } + return; gp_fault: gen_exception_gpf(s); diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h index e6c904a3192..15e6bfef4b1 100644 --- a/target/i386/tcg/decode-new.h +++ b/target/i386/tcg/decode-new.h @@ -104,6 +104,7 @@ typedef enum X86CPUIDFeature { X86_FEAT_AVX2, X86_FEAT_BMI1, X86_FEAT_BMI2, + X86_FEAT_CMPCCXADD, X86_FEAT_F16C, X86_FEAT_FMA, X86_FEAT_MOVBE, @@ -158,15 +159,27 @@ typedef enum X86InsnCheck { typedef enum X86InsnSpecial { X86_SPECIAL_None, + /* Accepts LOCK prefix; LOCKed operations do not load or writeback operand 0 */ + X86_SPECIAL_HasLock, + /* Always locked if it has a memory operand (XCHG) */ X86_SPECIAL_Locked, /* - * Register operand 0/2 is zero extended to 32 bits. Rd/Mb or Rd/Mw - * in the manual. + * Rd/Mb or Rd/Mw in the manual: register operand 0 is treated as 32 bits + * (and writeback zero-extends it to 64 bits if applicable). PREFIX_DATA + * does not trigger 16-bit writeback and, as a side effect, high-byte + * registers are never used. + */ + X86_SPECIAL_Op0_Rd, + + /* + * Ry/Mb in the manual (PINSRB). However, the high bits are never used by + * the instruction in either the register or memory cases; the *real* effect + * of this modifier is that high-byte registers are never used, even without + * a REX prefix. Therefore, PINSRW does not need it despite having Ry/Mw. */ - X86_SPECIAL_ZExtOp0, - X86_SPECIAL_ZExtOp2, + X86_SPECIAL_Op2_Ry, /* * Register operand 2 is extended to full width, while a memory operand @@ -179,6 +192,10 @@ typedef enum X86InsnSpecial { * become P/P/Q/N, and size "x" becomes "q". */ X86_SPECIAL_MMX, + + /* When loaded into s->T0, register operand 1 is zero/sign extended. 
*/ + X86_SPECIAL_SExtT0, + X86_SPECIAL_ZExtT0, } X86InsnSpecial; /* @@ -267,6 +284,10 @@ struct X86DecodedInsn { target_ulong immediate; AddressParts mem; + TCGv cc_dst, cc_src, cc_src2; + TCGv_i32 cc_op_dynamic; + int8_t cc_op; + uint8_t b; }; diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc index 82da5488d47..6bcf88ecd71 100644 --- a/target/i386/tcg/emit.c.inc +++ b/target/i386/tcg/emit.c.inc @@ -55,11 +55,6 @@ static void gen_NM_exception(DisasContext *s) gen_exception(s, EXCP07_PREX); } -static void gen_illegal(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) -{ - gen_illegal_opcode(s); -} - static void gen_load_ea(DisasContext *s, AddressParts *mem, bool is_vsib) { TCGv ea = gen_lea_modrm_1(s, *mem, is_vsib); @@ -237,9 +232,30 @@ static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v) break; case X86_OP_INT: if (op->has_ea) { - gen_op_ld_v(s, op->ot, v, s->A0); + if (v == s->T0 && decode->e.special == X86_SPECIAL_SExtT0) { + gen_op_ld_v(s, op->ot | MO_SIGN, v, s->A0); + } else { + gen_op_ld_v(s, op->ot, v, s->A0); + } + + } else if (op->ot == MO_8 && byte_reg_is_xH(s, op->n)) { + if (v == s->T0 && decode->e.special == X86_SPECIAL_SExtT0) { + tcg_gen_sextract_tl(v, cpu_regs[op->n - 4], 8, 8); + } else { + tcg_gen_extract_tl(v, cpu_regs[op->n - 4], 8, 8); + } + + } else if (op->ot < MO_TL && v == s->T0 && + (decode->e.special == X86_SPECIAL_SExtT0 || + decode->e.special == X86_SPECIAL_ZExtT0)) { + if (decode->e.special == X86_SPECIAL_SExtT0) { + tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot | MO_SIGN); + } else { + tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot); + } + } else { - gen_op_mov_v_reg(s, op->ot, v, op->n); + tcg_gen_mov_tl(v, cpu_regs[op->n]); } break; case X86_OP_IMM: @@ -323,6 +339,19 @@ static inline int vector_len(DisasContext *s, X86DecodedInsn *decode) return s->vex_l ? 32 : 16; } +static void prepare_update1_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op) +{ + decode->cc_dst = s->T0; + decode->cc_op = op; +} + +static void prepare_update2_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op) +{ + decode->cc_src = s->T1; + decode->cc_dst = s->T0; + decode->cc_op = op; +} + static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs) { MemOp ot = decode->op[0].ot; @@ -1011,6 +1040,7 @@ static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decod VSIB_AVX(VPGATHERD, vpgatherd) VSIB_AVX(VPGATHERQ, vpgatherq) +/* ADCX/ADOX do not have memory operands and can use set_cc_op. */ static void gen_ADCOX(DisasContext *s, CPUX86State *env, MemOp ot, int cc_op) { int opposite_cc_op; @@ -1073,8 +1103,7 @@ static void gen_ANDN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) MemOp ot = decode->op[0].ot; tcg_gen_andc_tl(s->T0, s->T1, s->T0); - gen_op_update1_cc(s); - set_cc_op(s, CC_OP_LOGICB + ot); + prepare_update1_cc(decode, s, CC_OP_LOGICB + ot); } static void gen_BEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -1089,9 +1118,6 @@ static void gen_BEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) * Shifts larger than operand size get zeros. 
*/ tcg_gen_ext8u_tl(s->A0, s->T1); - if (TARGET_LONG_BITS == 64 && ot == MO_32) { - tcg_gen_ext32u_tl(s->T0, s->T0); - } tcg_gen_shr_tl(s->T0, s->T0, s->A0); tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, s->T0, zero); @@ -1105,10 +1131,10 @@ static void gen_BEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) tcg_gen_movcond_tl(TCG_COND_LEU, s->T1, s->A0, bound, s->T1, zero); tcg_gen_andc_tl(s->T0, s->T0, s->T1); - gen_op_update1_cc(s); - set_cc_op(s, CC_OP_LOGICB + ot); + prepare_update1_cc(decode, s, CC_OP_LOGICB + ot); } +/* BLSI do not have memory operands and can use set_cc_op. */ static void gen_BLSI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; @@ -1120,6 +1146,7 @@ static void gen_BLSI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) set_cc_op(s, CC_OP_BMILGB + ot); } +/* BLSMSK do not have memory operands and can use set_cc_op. */ static void gen_BLSMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; @@ -1131,6 +1158,7 @@ static void gen_BLSMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode set_cc_op(s, CC_OP_BMILGB + ot); } +/* BLSR do not have memory operands and can use set_cc_op. */ static void gen_BLSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; @@ -1151,18 +1179,119 @@ static void gen_BZHI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) tcg_gen_ext8u_tl(s->T1, s->T1); + tcg_gen_shl_tl(s->A0, mone, s->T1); + tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->T1, bound, s->A0, zero); + tcg_gen_andc_tl(s->T0, s->T0, s->A0); /* * Note that since we're using BMILG (in order to get O * cleared) we need to store the inverse into C. */ - tcg_gen_setcond_tl(TCG_COND_LEU, cpu_cc_src, s->T1, bound); + tcg_gen_setcond_tl(TCG_COND_LEU, s->T1, s->T1, bound); + prepare_update2_cc(decode, s, CC_OP_BMILGB + ot); +} + +static void gen_CMPccXADD(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGLabel *label_top = gen_new_label(); + TCGLabel *label_bottom = gen_new_label(); + TCGv oldv = tcg_temp_new(); + TCGv newv = tcg_temp_new(); + TCGv cmpv = tcg_temp_new(); + TCGCond cond; + + TCGv cmp_lhs, cmp_rhs; + MemOp ot, ot_full; + + int jcc_op = (decode->b >> 1) & 7; + static const TCGCond cond_table[8] = { + [JCC_O] = TCG_COND_LT, /* test sign bit by comparing against 0 */ + [JCC_B] = TCG_COND_LTU, + [JCC_Z] = TCG_COND_EQ, + [JCC_BE] = TCG_COND_LEU, + [JCC_S] = TCG_COND_LT, /* test sign bit by comparing against 0 */ + [JCC_P] = TCG_COND_EQ, /* even parity - tests low bit of popcount */ + [JCC_L] = TCG_COND_LT, + [JCC_LE] = TCG_COND_LE, + }; - tcg_gen_shl_tl(s->A0, mone, s->T1); - tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->T1, bound, s->A0, zero); - tcg_gen_andc_tl(s->T0, s->T0, s->A0); + cond = cond_table[jcc_op]; + if (decode->b & 1) { + cond = tcg_invert_cond(cond); + } - gen_op_update1_cc(s); - set_cc_op(s, CC_OP_BMILGB + ot); + ot = decode->op[0].ot; + ot_full = ot | MO_LE; + if (jcc_op >= JCC_S) { + /* + * Sign-extend values before subtracting for S, P (zero/sign extension + * does not matter there) L, LE and their inverses. + */ + ot_full |= MO_SIGN; + } + + /* + * cmpv will be moved to cc_src *after* cpu_regs[] is written back, so use + * tcg_gen_ext_tl instead of gen_ext_tl. + */ + tcg_gen_ext_tl(cmpv, cpu_regs[decode->op[1].n], ot_full); + + /* + * Cmpxchg loop starts here. 
+ * - s->T1: addition operand (from decoder) + * - s->A0: dest address (from decoder) + * - s->cc_srcT: memory operand (lhs for comparison) + * - cmpv: rhs for comparison + */ + gen_set_label(label_top); + gen_op_ld_v(s, ot_full, s->cc_srcT, s->A0); + tcg_gen_sub_tl(s->T0, s->cc_srcT, cmpv); + + /* Compute the comparison result by hand, to avoid clobbering cc_*. */ + switch (jcc_op) { + case JCC_O: + /* (src1 ^ src2) & (src1 ^ dst). newv is only used here for a moment */ + tcg_gen_xor_tl(newv, s->cc_srcT, s->T0); + tcg_gen_xor_tl(s->tmp0, s->cc_srcT, cmpv); + tcg_gen_and_tl(s->tmp0, s->tmp0, newv); + tcg_gen_sextract_tl(s->tmp0, s->tmp0, 0, 8 << ot); + cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0); + break; + + case JCC_P: + tcg_gen_ext8u_tl(s->tmp0, s->T0); + tcg_gen_ctpop_tl(s->tmp0, s->tmp0); + tcg_gen_andi_tl(s->tmp0, s->tmp0, 1); + cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0); + break; + + case JCC_S: + tcg_gen_sextract_tl(s->tmp0, s->T0, 0, 8 << ot); + cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0); + break; + + default: + cmp_lhs = s->cc_srcT, cmp_rhs = cmpv; + break; + } + + /* Compute new value: if condition does not hold, just store back s->cc_srcT */ + tcg_gen_add_tl(newv, s->cc_srcT, s->T1); + tcg_gen_movcond_tl(cond, newv, cmp_lhs, cmp_rhs, newv, s->cc_srcT); + tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, s->cc_srcT, newv, s->mem_index, ot_full); + + /* Exit unconditionally if cmpxchg succeeded. */ + tcg_gen_brcond_tl(TCG_COND_EQ, oldv, s->cc_srcT, label_bottom); + + /* Try again if there was actually a store to make. */ + tcg_gen_brcond_tl(cond, cmp_lhs, cmp_rhs, label_top); + gen_set_label(label_bottom); + + /* Store old value to registers only after a successful store. */ + gen_writeback(s, decode, 1, s->cc_srcT); + + decode->cc_dst = s->T0; + decode->cc_src = cmpv; + decode->cc_op = CC_OP_SUBB + ot; } static void gen_CRC32(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -1242,9 +1371,7 @@ static void gen_LDMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decod static void gen_MASKMOV(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { - tcg_gen_mov_tl(s->A0, cpu_regs[R_EDI]); - gen_extu(s->aflag, s->A0); - gen_add_A0_ds_seg(s); + gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_DS, s->override); if (s->prefix & PREFIX_DATA) { gen_helper_maskmov_xmm(tcg_env, OP_PTR1, OP_PTR2, s->A0); @@ -1355,7 +1482,8 @@ static void gen_MULX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) /* low part of result in VEX.vvvv, high in MODRM */ switch (ot) { - default: + case MO_32: +#ifdef TARGET_X86_64 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32, @@ -1363,13 +1491,15 @@ static void gen_MULX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32); tcg_gen_extu_i32_tl(s->T0, s->tmp3_i32); break; -#ifdef TARGET_X86_64 + case MO_64: - tcg_gen_mulu2_i64(cpu_regs[s->vex_v], s->T0, s->T0, s->T1); - break; #endif - } + tcg_gen_mulu2_tl(cpu_regs[s->vex_v], s->T0, s->T0, s->T1); + break; + default: + g_assert_not_reached(); + } } static void gen_PALIGNR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -1432,19 +1562,11 @@ static void gen_PCMPISTRM(DisasContext *s, CPUX86State *env, X86DecodedInsn *dec static void gen_PDEP(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { - MemOp ot = decode->op[1].ot; - if (ot < MO_64) { - tcg_gen_ext32u_tl(s->T0, s->T0); - } gen_helper_pdep(s->T0, s->T0, s->T1); } 
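As a cross-check of the cmpxchg loop and the hand-computed condition flags above, the following is a minimal host-side C11 sketch, not a translation of the TCG code: it assumes a signed "less than" variant of CMPccXADD, and the function and symbol names (cond_fetch_add_lt, parity_even, sub_overflow32) are invented for the example. The real instruction performs the whole update as a single locked read-modify-write; here the retry structure only mirrors the "store back unchanged if the condition fails, retry only when a store was actually needed" shape of the loop, and the two small helpers spot-check the OF and PF recipes used for JCC_O and JCC_P.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* PF recipe used for JCC_P above: even parity of the low byte of the result. */
static int parity_even(uint8_t b)
{
    b ^= b >> 4;
    b ^= b >> 2;
    b ^= b >> 1;
    return (b & 1) == 0;
}

/* OF recipe used for JCC_O above: sign bit of (src1 ^ src2) & (src1 ^ dst)
 * for dst = src1 - src2. */
static int sub_overflow32(uint32_t src1, uint32_t src2)
{
    uint32_t dst = src1 - src2;
    return ((src1 ^ src2) & (src1 ^ dst)) >> 31;
}

/* Signed-less CMPccXADD-style update (hypothetical helper): if *mem < cmp,
 * add addend to *mem; the old memory value is always returned (it goes to
 * the destination register, and the flags come from *mem - cmp). */
static int64_t cond_fetch_add_lt(_Atomic int64_t *mem, int64_t cmp, int64_t addend)
{
    int64_t old = atomic_load(mem);
    for (;;) {
        /* if the condition fails, the "new" value is just the old one */
        int64_t newv = (old < cmp) ? old + addend : old;
        if (atomic_compare_exchange_strong(mem, &old, newv)) {
            return old;                 /* exchange succeeded */
        }
        if (!(old < cmp)) {
            return old;                 /* no store needed for the fresh value */
        }
        /* a store was actually needed and we lost a race: retry */
    }
}

int main(void)
{
    _Atomic int64_t mem = 5;

    assert(cond_fetch_add_lt(&mem, 10, 100) == 5);
    assert(atomic_load(&mem) == 105);               /* 5 < 10, so the add happened */
    assert(cond_fetch_add_lt(&mem, 10, 100) == 105);
    assert(atomic_load(&mem) == 105);               /* 105 >= 10, no store */

    assert(sub_overflow32(0x80000000u, 1) == 1);    /* INT32_MIN - 1 overflows */
    assert(sub_overflow32(5, 3) == 0);
    assert(parity_even(0x03) == 1);                 /* two bits set: PF = 1 */
    assert(parity_even(0x01) == 0);

    printf("ok\n");
    return 0;
}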
static void gen_PEXT(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { - MemOp ot = decode->op[1].ot; - if (ot < MO_64) { - tcg_gen_ext32u_tl(s->T0, s->T0); - } gen_helper_pext(s->T0, s->T0, s->T1); } @@ -1772,14 +1894,24 @@ static void gen_PSLLDQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco static void gen_RORX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; - int b = decode->immediate; + int mask = ot == MO_64 ? 63 : 31; + int b = decode->immediate & mask; - if (ot == MO_64) { - tcg_gen_rotri_tl(s->T0, s->T0, b & 63); - } else { + switch (ot) { + case MO_32: +#ifdef TARGET_X86_64 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b & 31); + tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b); tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); + break; + + case MO_64: +#endif + tcg_gen_rotri_tl(s->T0, s->T0, b); + break; + + default: + g_assert_not_reached(); } } @@ -1790,9 +1922,6 @@ static void gen_SARX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) mask = ot == MO_64 ? 63 : 31; tcg_gen_andi_tl(s->T1, s->T1, mask); - if (ot != MO_64) { - tcg_gen_ext32s_tl(s->T0, s->T0); - } tcg_gen_sar_tl(s->T0, s->T0, s->T1); } @@ -1867,9 +1996,6 @@ static void gen_SHRX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) mask = ot == MO_64 ? 63 : 31; tcg_gen_andi_tl(s->T1, s->T1, mask); - if (ot != MO_64) { - tcg_gen_ext32u_tl(s->T0, s->T0); - } tcg_gen_shr_tl(s->T0, s->T0, s->T1); } diff --git a/target/i386/tcg/excp_helper.c b/target/i386/tcg/excp_helper.c index 7c3c8dc7fe8..65e37ae2a0c 100644 --- a/target/i386/tcg/excp_helper.c +++ b/target/i386/tcg/excp_helper.c @@ -28,7 +28,7 @@ G_NORETURN void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend) { - raise_interrupt(env, intno, 1, 0, next_eip_addend); + raise_interrupt(env, intno, next_eip_addend); } G_NORETURN void helper_raise_exception(CPUX86State *env, int exception_index) @@ -112,10 +112,9 @@ void raise_interrupt2(CPUX86State *env, int intno, /* shortcuts to generate exceptions */ -G_NORETURN void raise_interrupt(CPUX86State *env, int intno, int is_int, - int error_code, int next_eip_addend) +G_NORETURN void raise_interrupt(CPUX86State *env, int intno, int next_eip_addend) { - raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0); + raise_interrupt2(env, intno, 1, 0, next_eip_addend, 0); } G_NORETURN void raise_exception_err(CPUX86State *env, int exception_index, diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c index 4430d3d380c..4b965a5d6c4 100644 --- a/target/i386/tcg/fpu_helper.c +++ b/target/i386/tcg/fpu_helper.c @@ -484,9 +484,8 @@ void helper_fcomi_ST0_FT0(CPUX86State *env) FloatRelation ret; ret = floatx80_compare(ST0, FT0, &env->fp_status); - eflags = cpu_cc_compute_all(env, CC_OP); - eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; - CC_SRC = eflags; + eflags = cpu_cc_compute_all(env) & ~(CC_Z | CC_P | CC_C); + CC_SRC = eflags | fcomi_ccval[ret + 1]; merge_exception_flags(env, old_flags); } @@ -497,9 +496,8 @@ void helper_fucomi_ST0_FT0(CPUX86State *env) FloatRelation ret; ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); - eflags = cpu_cc_compute_all(env, CC_OP); - eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; - CC_SRC = eflags; + eflags = cpu_cc_compute_all(env) & ~(CC_Z | CC_P | CC_C); + CC_SRC = eflags | fcomi_ccval[ret + 1]; merge_exception_flags(env, old_flags); } diff --git a/target/i386/tcg/helper-tcg.h 
b/target/i386/tcg/helper-tcg.h index cd1723389ad..effc2c1c984 100644 --- a/target/i386/tcg/helper-tcg.h +++ b/target/i386/tcg/helper-tcg.h @@ -39,6 +39,8 @@ QEMU_BUILD_BUG_ON(TCG_PHYS_ADDR_BITS > TARGET_PHYS_ADDR_SPACE_BITS); */ void x86_cpu_do_interrupt(CPUState *cpu); #ifndef CONFIG_USER_ONLY +void x86_cpu_exec_halt(CPUState *cpu); +bool x86_need_replay_interrupt(int interrupt_request); bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req); #endif @@ -65,8 +67,7 @@ G_NORETURN void raise_exception_err(CPUX86State *env, int exception_index, int error_code); G_NORETURN void raise_exception_err_ra(CPUX86State *env, int exception_index, int error_code, uintptr_t retaddr); -G_NORETURN void raise_interrupt(CPUX86State *nenv, int intno, int is_int, - int error_code, int next_eip_addend); +G_NORETURN void raise_interrupt(CPUX86State *nenv, int intno, int next_eip_addend); G_NORETURN void handle_unaligned_access(CPUX86State *env, vaddr vaddr, MMUAccessType access_type, uintptr_t retaddr); diff --git a/target/i386/tcg/int_helper.c b/target/i386/tcg/int_helper.c index 05418f181f1..ab85dc55400 100644 --- a/target/i386/tcg/int_helper.c +++ b/target/i386/tcg/int_helper.c @@ -190,7 +190,7 @@ void helper_aaa(CPUX86State *env) int al, ah, af; int eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); af = eflags & CC_A; al = env->regs[R_EAX] & 0xff; ah = (env->regs[R_EAX] >> 8) & 0xff; @@ -214,7 +214,7 @@ void helper_aas(CPUX86State *env) int al, ah, af; int eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); af = eflags & CC_A; al = env->regs[R_EAX] & 0xff; ah = (env->regs[R_EAX] >> 8) & 0xff; @@ -237,7 +237,7 @@ void helper_daa(CPUX86State *env) int old_al, al, af, cf; int eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); cf = eflags & CC_C; af = eflags & CC_A; old_al = al = env->regs[R_EAX] & 0xff; @@ -264,7 +264,7 @@ void helper_das(CPUX86State *env) int al, al1, af, cf; int eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); cf = eflags & CC_C; af = eflags & CC_A; al = env->regs[R_EAX] & 0xff; diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c index babff061864..b0f0f7b893b 100644 --- a/target/i386/tcg/misc_helper.c +++ b/target/i386/tcg/misc_helper.c @@ -41,9 +41,9 @@ void helper_into(CPUX86State *env, int next_eip_addend) { int eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); if (eflags & CC_O) { - raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend); + raise_interrupt(env, EXCP04_INTO, next_eip_addend); } } diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c index eb29a1fd4e7..34ccabd8ce3 100644 --- a/target/i386/tcg/seg_helper.c +++ b/target/i386/tcg/seg_helper.c @@ -2230,7 +2230,7 @@ target_ulong helper_lsl(CPUX86State *env, target_ulong selector1) int rpl, dpl, cpl, type; selector = selector1 & 0xffff; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); if ((selector & 0xfffc) == 0) { goto fail; } @@ -2277,7 +2277,7 @@ target_ulong helper_lar(CPUX86State *env, target_ulong selector1) int rpl, dpl, cpl, type; selector = selector1 & 0xffff; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); if ((selector & 0xfffc) == 0) { goto fail; } @@ -2326,7 +2326,7 @@ void helper_verr(CPUX86State *env, target_ulong selector1) int rpl, dpl, cpl; selector = selector1 & 0xffff; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = 
cpu_cc_compute_all(env); if ((selector & 0xfffc) == 0) { goto fail; } @@ -2364,7 +2364,7 @@ void helper_verw(CPUX86State *env, target_ulong selector1) int rpl, dpl, cpl; selector = selector1 & 0xffff; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); if ((selector & 0xfffc) == 0) { goto fail; } diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c index e16d3a69d19..7a57b7dd10b 100644 --- a/target/i386/tcg/sysemu/excp_helper.c +++ b/target/i386/tcg/sysemu/excp_helper.c @@ -59,14 +59,14 @@ typedef struct PTETranslate { hwaddr gaddr; } PTETranslate; -static bool ptw_translate(PTETranslate *inout, hwaddr addr) +static bool ptw_translate(PTETranslate *inout, hwaddr addr, uint64_t ra) { CPUTLBEntryFull *full; int flags; inout->gaddr = addr; flags = probe_access_full(inout->env, addr, 0, MMU_DATA_STORE, - inout->ptw_idx, true, &inout->haddr, &full, 0); + inout->ptw_idx, true, &inout->haddr, &full, ra); if (unlikely(flags & TLB_INVALID_MASK)) { TranslateFault *err = inout->err; @@ -82,20 +82,20 @@ static bool ptw_translate(PTETranslate *inout, hwaddr addr) return true; } -static inline uint32_t ptw_ldl(const PTETranslate *in) +static inline uint32_t ptw_ldl(const PTETranslate *in, uint64_t ra) { if (likely(in->haddr)) { return ldl_p(in->haddr); } - return cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0); + return cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra); } -static inline uint64_t ptw_ldq(const PTETranslate *in) +static inline uint64_t ptw_ldq(const PTETranslate *in, uint64_t ra) { if (likely(in->haddr)) { return ldq_p(in->haddr); } - return cpu_ldq_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0); + return cpu_ldq_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra); } /* @@ -132,11 +132,12 @@ static inline bool ptw_setl(const PTETranslate *in, uint32_t old, uint32_t set) } static bool mmu_translate(CPUX86State *env, const TranslateParams *in, - TranslateResult *out, TranslateFault *err) + TranslateResult *out, TranslateFault *err, + uint64_t ra) { const target_ulong addr = in->addr; const int pg_mode = in->pg_mode; - const bool is_user = (in->mmu_idx == MMU_USER_IDX); + const bool is_user = is_mmu_index_user(in->mmu_idx); const MMUAccessType access_type = in->access_type; uint64_t ptep, pte, rsvd_mask; PTETranslate pte_trans = { @@ -164,11 +165,11 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 5 */ pte_addr = (in->cr3 & ~0xfff) + (((addr >> 48) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr)) { + if (!ptw_translate(&pte_trans, pte_addr, ra)) { return false; } restart_5: - pte = ptw_ldq(&pte_trans); + pte = ptw_ldq(&pte_trans, ra); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } @@ -188,11 +189,11 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 4 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 39) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr)) { + if (!ptw_translate(&pte_trans, pte_addr, ra)) { return false; } restart_4: - pte = ptw_ldq(&pte_trans); + pte = ptw_ldq(&pte_trans, ra); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } @@ -208,11 +209,11 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 3 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr)) { + if (!ptw_translate(&pte_trans, pte_addr, ra)) { return false; } restart_3_lma: - pte = ptw_ldq(&pte_trans); + pte = ptw_ldq(&pte_trans, ra); if (!(pte 
& PG_PRESENT_MASK)) { goto do_fault; } @@ -235,12 +236,12 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 3 */ pte_addr = (in->cr3 & 0xffffffe0ULL) + ((addr >> 27) & 0x18); - if (!ptw_translate(&pte_trans, pte_addr)) { + if (!ptw_translate(&pte_trans, pte_addr, ra)) { return false; } rsvd_mask |= PG_HI_USER_MASK; restart_3_nolma: - pte = ptw_ldq(&pte_trans); + pte = ptw_ldq(&pte_trans, ra); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } @@ -257,11 +258,11 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 2 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr)) { + if (!ptw_translate(&pte_trans, pte_addr, ra)) { return false; } restart_2_pae: - pte = ptw_ldq(&pte_trans); + pte = ptw_ldq(&pte_trans, ra); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } @@ -283,10 +284,10 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 1 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr)) { + if (!ptw_translate(&pte_trans, pte_addr, ra)) { return false; } - pte = ptw_ldq(&pte_trans); + pte = ptw_ldq(&pte_trans, ra); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } @@ -301,11 +302,11 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 2 */ pte_addr = (in->cr3 & 0xfffff000ULL) + ((addr >> 20) & 0xffc); - if (!ptw_translate(&pte_trans, pte_addr)) { + if (!ptw_translate(&pte_trans, pte_addr, ra)) { return false; } restart_2_nopae: - pte = ptw_ldl(&pte_trans); + pte = ptw_ldl(&pte_trans, ra); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } @@ -330,10 +331,10 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 1 */ pte_addr = (pte & ~0xfffu) + ((addr >> 10) & 0xffc); - if (!ptw_translate(&pte_trans, pte_addr)) { + if (!ptw_translate(&pte_trans, pte_addr, ra)) { return false; } - pte = ptw_ldl(&pte_trans); + pte = ptw_ldl(&pte_trans, ra); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } @@ -357,7 +358,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, } int prot = 0; - if (in->mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) { + if (!is_mmu_index_smap(in->mmu_idx) || !(ptep & PG_USER_MASK)) { prot |= PAGE_READ; if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) { prot |= PAGE_WRITE; @@ -526,7 +527,8 @@ static G_NORETURN void raise_stage2(CPUX86State *env, TranslateFault *err, static bool get_physical_address(CPUX86State *env, vaddr addr, MMUAccessType access_type, int mmu_idx, - TranslateResult *out, TranslateFault *err) + TranslateResult *out, TranslateFault *err, + uint64_t ra) { TranslateParams in; bool use_stage2 = env->hflags2 & HF2_NPT_MASK; @@ -542,10 +544,11 @@ static bool get_physical_address(CPUX86State *env, vaddr addr, if (likely(use_stage2)) { in.cr3 = env->nested_cr3; in.pg_mode = env->nested_pg_mode; - in.mmu_idx = MMU_USER_IDX; + in.mmu_idx = + env->nested_pg_mode & PG_MODE_LMA ? 
MMU_USER64_IDX : MMU_USER32_IDX; in.ptw_idx = MMU_PHYS_IDX; - if (!mmu_translate(env, &in, out, err)) { + if (!mmu_translate(env, &in, out, err, ra)) { err->stage2 = S2_GPA; return false; } @@ -576,7 +579,7 @@ static bool get_physical_address(CPUX86State *env, vaddr addr, return false; } } - return mmu_translate(env, &in, out, err); + return mmu_translate(env, &in, out, err, ra); } break; } @@ -596,7 +599,8 @@ bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, TranslateResult out; TranslateFault err; - if (get_physical_address(env, addr, access_type, mmu_idx, &out, &err)) { + if (get_physical_address(env, addr, access_type, mmu_idx, &out, &err, + retaddr)) { /* * Even if 4MB pages, we map only one 4KB page in the cache to * avoid filling it too fast. diff --git a/target/i386/tcg/sysemu/fpu_helper.c b/target/i386/tcg/sysemu/fpu_helper.c index 93506cdd94e..e0305ba2345 100644 --- a/target/i386/tcg/sysemu/fpu_helper.c +++ b/target/i386/tcg/sysemu/fpu_helper.c @@ -32,9 +32,9 @@ void x86_register_ferr_irq(qemu_irq irq) void fpu_check_raise_ferr_irq(CPUX86State *env) { if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) { - qemu_mutex_lock_iothread(); + bql_lock(); qemu_irq_raise(ferr_irq); - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } } @@ -49,7 +49,7 @@ void cpu_set_ignne(void) { CPUX86State *env = &X86_CPU(first_cpu)->env; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); env->hflags2 |= HF2_IGNNE_MASK; /* diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c index 1901712ecef..edb7c3d8940 100644 --- a/target/i386/tcg/sysemu/misc_helper.c +++ b/target/i386/tcg/sysemu/misc_helper.c @@ -25,6 +25,7 @@ #include "exec/address-spaces.h" #include "exec/exec-all.h" #include "tcg/helper-tcg.h" +#include "hw/i386/apic.h" void helper_outb(CPUX86State *env, uint32_t port, uint32_t data) { @@ -118,9 +119,9 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) break; case 8: if (!(env->hflags2 & HF2_VINTR_MASK)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0); - qemu_mutex_unlock_iothread(); + bql_unlock(); } env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK); @@ -157,9 +158,19 @@ void helper_wrmsr(CPUX86State *env) case MSR_IA32_SYSENTER_EIP: env->sysenter_eip = val; break; - case MSR_IA32_APICBASE: - cpu_set_apic_base(env_archcpu(env)->apic_state, val); + case MSR_IA32_APICBASE: { + int ret; + + if (val & MSR_IA32_APICBASE_RESERVED) { + goto error; + } + + ret = cpu_set_apic_base(env_archcpu(env)->apic_state, val); + if (ret < 0) { + goto error; + } break; + } case MSR_EFER: { uint64_t update_mask; @@ -292,6 +303,19 @@ void helper_wrmsr(CPUX86State *env) env->msr_bndcfgs = val; cpu_sync_bndcs_hflags(env); break; + case MSR_APIC_START ... MSR_APIC_END: { + int ret; + int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START; + + bql_lock(); + ret = apic_msr_write(index, val); + bql_unlock(); + if (ret < 0) { + goto error; + } + + break; + } default: if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + @@ -458,6 +482,19 @@ void helper_rdmsr(CPUX86State *env) val = (cs->nr_threads * cs->nr_cores) | (cs->nr_cores << 16); break; } + case MSR_APIC_START ... 
MSR_APIC_END: { + int ret; + int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START; + + bql_lock(); + ret = apic_msr_read(index, &val); + bql_unlock(); + if (ret < 0) { + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); + } + + break; + } default: if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + diff --git a/target/i386/tcg/sysemu/seg_helper.c b/target/i386/tcg/sysemu/seg_helper.c index 1cb5a0db45c..2db8083748e 100644 --- a/target/i386/tcg/sysemu/seg_helper.c +++ b/target/i386/tcg/sysemu/seg_helper.c @@ -20,6 +20,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" +#include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" @@ -127,6 +128,28 @@ void x86_cpu_do_interrupt(CPUState *cs) } } +void x86_cpu_exec_halt(CPUState *cpu) +{ + if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { + X86CPU *x86_cpu = X86_CPU(cpu); + + bql_lock(); + apic_poll_irq(x86_cpu->apic_state); + cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); + bql_unlock(); + } +} + +bool x86_need_replay_interrupt(int interrupt_request) +{ + /* + * CPU_INTERRUPT_POLL is a virtual event which gets converted into a + * "real" interrupt event later. It does not need to be recorded for + * replay purposes. + */ + return !(interrupt_request & CPU_INTERRUPT_POLL); +} + bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { X86CPU *cpu = X86_CPU(cs); diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c index 1d54164bdfa..cca19cd40e8 100644 --- a/target/i386/tcg/tcg-cpu.c +++ b/target/i386/tcg/tcg-cpu.c @@ -106,7 +106,7 @@ static bool x86_debug_check_breakpoint(CPUState *cs) #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps x86_tcg_ops = { +static const TCGCPUOps x86_tcg_ops = { .initialize = tcg_x86_init, .synchronize_from_tb = x86_cpu_synchronize_from_tb, .restore_state_to_opc = x86_restore_state_to_opc, @@ -119,25 +119,27 @@ static const struct TCGCPUOps x86_tcg_ops = { #else .tlb_fill = x86_cpu_tlb_fill, .do_interrupt = x86_cpu_do_interrupt, + .cpu_exec_halt = x86_cpu_exec_halt, .cpu_exec_interrupt = x86_cpu_exec_interrupt, .do_unaligned_access = x86_cpu_do_unaligned_access, .debug_excp_handler = breakpoint_handler, .debug_check_breakpoint = x86_debug_check_breakpoint, + .need_replay_interrupt = x86_need_replay_interrupt, #endif /* !CONFIG_USER_ONLY */ }; -static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) +static void x86_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) { /* for x86, all cpus use the same set of operations */ cc->tcg_ops = &x86_tcg_ops; } -static void tcg_cpu_class_init(CPUClass *cc) +static void x86_tcg_cpu_class_init(CPUClass *cc) { - cc->init_accel_cpu = tcg_cpu_init_ops; + cc->init_accel_cpu = x86_tcg_cpu_init_ops; } -static void tcg_cpu_xsave_init(void) +static void x86_tcg_cpu_xsave_init(void) { #define XO(bit, field) \ x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field); @@ -159,25 +161,25 @@ static void tcg_cpu_xsave_init(void) * TCG-specific defaults that override cpudef models when using TCG. * Only for builtin_x86_defs models initialized with x86_register_cpudef_types. 
*/ -static PropValue tcg_default_props[] = { +static PropValue x86_tcg_default_props[] = { { "vme", "off" }, { NULL, NULL }, }; -static void tcg_cpu_instance_init(CPUState *cs) +static void x86_tcg_cpu_instance_init(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); if (xcc->model) { /* Special cases not set in the X86CPUDefinition structs: */ - x86_cpu_apply_props(cpu, tcg_default_props); + x86_cpu_apply_props(cpu, x86_tcg_default_props); } - tcg_cpu_xsave_init(); + x86_tcg_cpu_xsave_init(); } -static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data) +static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, void *data) { AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); @@ -185,18 +187,18 @@ static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data) acc->cpu_target_realize = tcg_cpu_realizefn; #endif /* CONFIG_USER_ONLY */ - acc->cpu_class_init = tcg_cpu_class_init; - acc->cpu_instance_init = tcg_cpu_instance_init; + acc->cpu_class_init = x86_tcg_cpu_class_init; + acc->cpu_instance_init = x86_tcg_cpu_instance_init; } -static const TypeInfo tcg_cpu_accel_type_info = { +static const TypeInfo x86_tcg_cpu_accel_type_info = { .name = ACCEL_CPU_NAME("tcg"), .parent = TYPE_ACCEL_CPU, - .class_init = tcg_cpu_accel_class_init, + .class_init = x86_tcg_cpu_accel_class_init, .abstract = true, }; -static void tcg_cpu_accel_register_types(void) +static void x86_tcg_cpu_accel_register_types(void) { - type_register_static(&tcg_cpu_accel_type_info); + type_register_static(&x86_tcg_cpu_accel_type_info); } -type_init(tcg_cpu_accel_register_types); +type_init(x86_tcg_cpu_accel_register_types); diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index f6fb2ebbddf..83b9fa49390 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -122,6 +122,7 @@ typedef struct DisasContext { int cpuid_ext3_features; int cpuid_7_0_ebx_features; int cpuid_7_0_ecx_features; + int cpuid_7_1_eax_features; int cpuid_xsave_features; /* TCG local temps */ @@ -138,6 +139,7 @@ typedef struct DisasContext { TCGv_i64 tmp1_i64; sigjmp_buf jmpbuf; + TCGOp *prev_insn_start; TCGOp *prev_insn_end; } DisasContext; @@ -522,9 +524,9 @@ void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val) gen_op_mov_reg_v(s, size, reg, s->tmp0); } -static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg) +static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val) { - tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0); + tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val); gen_op_mov_reg_v(s, size, reg, s->tmp0); } @@ -635,17 +637,17 @@ static TCGv eip_cur_tl(DisasContext *s) } } -/* Compute SEG:REG into A0. SEG is selected from the override segment +/* Compute SEG:REG into DEST. SEG is selected from the override segment (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to indicate no override. 
*/ -static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, - int def_seg, int ovr_seg) +static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0, + int def_seg, int ovr_seg) { switch (aflag) { #ifdef TARGET_X86_64 case MO_64: if (ovr_seg < 0) { - tcg_gen_mov_tl(s->A0, a0); + tcg_gen_mov_tl(dest, a0); return; } break; @@ -656,14 +658,14 @@ static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, ovr_seg = def_seg; } if (ovr_seg < 0) { - tcg_gen_ext32u_tl(s->A0, a0); + tcg_gen_ext32u_tl(dest, a0); return; } break; case MO_16: /* 16 bit address */ - tcg_gen_ext16u_tl(s->A0, a0); - a0 = s->A0; + tcg_gen_ext16u_tl(dest, a0); + a0 = dest; if (ovr_seg < 0) { if (ADDSEG(s)) { ovr_seg = def_seg; @@ -680,17 +682,23 @@ static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, TCGv seg = cpu_seg_base[ovr_seg]; if (aflag == MO_64) { - tcg_gen_add_tl(s->A0, a0, seg); + tcg_gen_add_tl(dest, a0, seg); } else if (CODE64(s)) { - tcg_gen_ext32u_tl(s->A0, a0); - tcg_gen_add_tl(s->A0, s->A0, seg); + tcg_gen_ext32u_tl(dest, a0); + tcg_gen_add_tl(dest, dest, seg); } else { - tcg_gen_add_tl(s->A0, a0, seg); - tcg_gen_ext32u_tl(s->A0, s->A0); + tcg_gen_add_tl(dest, a0, seg); + tcg_gen_ext32u_tl(dest, dest); } } } +static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, + int def_seg, int ovr_seg) +{ + gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg); +} + static inline void gen_string_movl_A0_ESI(DisasContext *s) { gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override); @@ -701,10 +709,12 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s) gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1); } -static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot) +static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot) { - tcg_gen_ld32s_tl(s->T0, tcg_env, offsetof(CPUX86State, df)); - tcg_gen_shli_tl(s->T0, s->T0, ot); + TCGv dshift = tcg_temp_new(); + tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df)); + tcg_gen_shli_tl(dshift, dshift, ot); + return dshift; }; static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign) @@ -712,6 +722,9 @@ static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign) if (size == MO_TL) { return src; } + if (!dst) { + dst = tcg_temp_new(); + } tcg_gen_ext_tl(dst, src, size | (sign ? 
MO_SIGN : 0)); return dst; } @@ -728,9 +741,9 @@ static void gen_exts(MemOp ot, TCGv reg) static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1) { - tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]); - gen_extu(s->aflag, s->tmp0); - tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1); + TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false); + + tcg_gen_brcondi_tl(cond, tmp, 0, label1); } static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1) @@ -812,13 +825,16 @@ static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port, static void gen_movs(DisasContext *s, MemOp ot) { + TCGv dshift; + gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); gen_string_movl_A0_EDI(s); gen_op_st_v(s, ot, s->T0, s->A0); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_ESI); - gen_op_add_reg_T0(s, s->aflag, R_EDI); + + dshift = gen_compute_Dshift(s, ot); + gen_op_add_reg(s, s->aflag, R_ESI, dshift); + gen_op_add_reg(s, s->aflag, R_EDI, dshift); } static void gen_op_update1_cc(DisasContext *s) @@ -851,22 +867,22 @@ static void gen_op_update_neg_cc(DisasContext *s) tcg_gen_movi_tl(s->cc_srcT, 0); } -/* compute all eflags to cc_src */ -static void gen_compute_eflags(DisasContext *s) +/* compute all eflags to reg */ +static void gen_mov_eflags(DisasContext *s, TCGv reg) { - TCGv zero, dst, src1, src2; + TCGv dst, src1, src2; + TCGv_i32 cc_op; int live, dead; if (s->cc_op == CC_OP_EFLAGS) { + tcg_gen_mov_tl(reg, cpu_cc_src); return; } if (s->cc_op == CC_OP_CLR) { - tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P); - set_cc_op(s, CC_OP_EFLAGS); + tcg_gen_movi_tl(reg, CC_Z | CC_P); return; } - zero = NULL; dst = cpu_cc_dst; src1 = cpu_cc_src; src2 = cpu_cc_src2; @@ -875,7 +891,7 @@ static void gen_compute_eflags(DisasContext *s) live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); if (dead) { - zero = tcg_constant_tl(0); + TCGv zero = tcg_constant_tl(0); if (dead & USES_CC_DST) { dst = zero; } @@ -887,8 +903,18 @@ static void gen_compute_eflags(DisasContext *s) } } - gen_update_cc_op(s); - gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op); + if (s->cc_op != CC_OP_DYNAMIC) { + cc_op = tcg_constant_i32(s->cc_op); + } else { + cc_op = cpu_cc_op; + } + gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op); +} + +/* compute all eflags to cc_src */ +static void gen_compute_eflags(DisasContext *s) +{ + gen_mov_eflags(s, cpu_cc_src); set_cc_op(s, CC_OP_EFLAGS); } @@ -1020,6 +1046,9 @@ static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg) case CC_OP_CLR: case CC_OP_POPCNT: return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; + case CC_OP_MULB ... 
CC_OP_MULQ: + return (CCPrepare) { .cond = TCG_COND_NE, + .reg = cpu_cc_src, .mask = -1 }; default: gen_compute_eflags(s); return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, @@ -1126,10 +1155,9 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) if (reg == cpu_cc_src) { reg = s->tmp0; } - tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ - tcg_gen_xor_tl(reg, reg, cpu_cc_src); + tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S); cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, - .mask = CC_S }; + .mask = CC_O }; break; default: case JCC_LE: @@ -1137,10 +1165,9 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) if (reg == cpu_cc_src) { reg = s->tmp0; } - tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ - tcg_gen_xor_tl(reg, reg, cpu_cc_src); + tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S); cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, - .mask = CC_S | CC_Z }; + .mask = CC_O | CC_Z }; break; } break; @@ -1239,11 +1266,9 @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s) static void gen_stos(DisasContext *s, MemOp ot) { - gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); gen_string_movl_A0_EDI(s); gen_op_st_v(s, ot, s->T0, s->A0); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_EDI); + gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); } static void gen_lods(DisasContext *s, MemOp ot) @@ -1251,28 +1276,33 @@ static void gen_lods(DisasContext *s, MemOp ot) gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); gen_op_mov_reg_v(s, ot, R_EAX, s->T0); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_ESI); + gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot)); } static void gen_scas(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); - gen_op(s, OP_CMPL, ot, R_EAX); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_EDI); + tcg_gen_mov_tl(cpu_cc_src, s->T1); + tcg_gen_mov_tl(s->cc_srcT, s->T0); + tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1); + set_cc_op(s, CC_OP_SUBB + ot); + + gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); } static void gen_cmps(DisasContext *s, MemOp ot) { + TCGv dshift; + gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); gen_string_movl_A0_ESI(s); gen_op(s, OP_CMPL, ot, OR_TMP0); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_ESI); - gen_op_add_reg_T0(s, s->aflag, R_EDI); + + dshift = gen_compute_Dshift(s, ot); + gen_op_add_reg(s, s->aflag, R_ESI, dshift); + gen_op_add_reg(s, s->aflag, R_EDI, dshift); } static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) @@ -1300,8 +1330,7 @@ static void gen_ins(DisasContext *s, MemOp ot) tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); gen_helper_in_func(ot, s->T0, s->tmp2_i32); gen_op_st_v(s, ot, s->T0, s->A0); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_EDI); + gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); gen_bpt_io(s, s->tmp2_i32, ot); } @@ -1314,8 +1343,7 @@ static void gen_outs(DisasContext *s, MemOp ot) tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0); gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_ESI); + gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot)); gen_bpt_io(s, s->tmp2_i32, ot); } @@ -2475,14 +2503,10 @@ static void gen_jcc(DisasContext *s, int b, int diff) gen_jmp_rel(s, s->dflag, diff, 0); } -static void gen_cmovcc1(CPUX86State 
*env, DisasContext *s, MemOp ot, int b, - int modrm, int reg) +static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src) { - CCPrepare cc; + CCPrepare cc = gen_prepare_cc(s, b, s->T1); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - - cc = gen_prepare_cc(s, b, s->T1); if (cc.mask != -1) { TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cc.reg, cc.mask); @@ -2492,9 +2516,7 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, cc.reg2 = tcg_constant_tl(cc.imm); } - tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2, - s->T0, cpu_regs[reg]); - gen_op_mov_reg_v(s, ot, reg, s->T0); + tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest); } static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg) @@ -2561,7 +2583,7 @@ static void gen_push_v(DisasContext *s, TCGv val) if (!CODE64(s)) { if (ADDSEG(s)) { - new_esp = s->tmp4; + new_esp = tcg_temp_new(); tcg_gen_mov_tl(new_esp, s->A0); } gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); @@ -2576,8 +2598,8 @@ static MemOp gen_pop_T0(DisasContext *s) { MemOp d_ot = mo_pushpop(s, s->dflag); - gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1); - gen_op_ld_v(s, d_ot, s->T0, s->A0); + gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1); + gen_op_ld_v(s, d_ot, s->T0, s->T0); return d_ot; } @@ -3102,6 +3124,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) /* END TODO */ s->base.num_insns--; tcg_remove_ops_after(s->prev_insn_end); + s->base.insn_start = s->prev_insn_start; s->base.is_jmp = DISAS_TOO_MANY; return false; default: @@ -4190,7 +4213,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]); tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]); tcg_gen_add_tl(s->A0, s->A0, s->T0); - gen_extu(s->aflag, s->A0); gen_add_A0_ds_seg(s); gen_op_ld_v(s, MO_8, s->T0, s->A0); gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); @@ -4938,6 +4960,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) case 0xaa: /* stosS */ case 0xab: ot = mo_b_d(b, dflag); + gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_stos(s, ot); } else { @@ -4956,6 +4979,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) case 0xae: /* scasS */ case 0xaf: ot = mo_b_d(b, dflag); + gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); if (prefixes & PREFIX_REPNZ) { gen_repz_scas(s, ot, 1); } else if (prefixes & PREFIX_REPZ) { @@ -5236,7 +5260,9 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | REX_R(s); - gen_cmovcc1(env, s, ot, b, modrm, reg); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_cmovcc1(s, b ^ 1, s->T0, cpu_regs[reg]); + gen_op_mov_reg_v(s, ot, reg, s->T0); break; /************************/ @@ -5869,7 +5895,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) gen_update_cc_op(s); gen_update_eip_cur(s); tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]); - gen_extu(s->aflag, s->A0); gen_add_A0_ds_seg(s); gen_helper_monitor(tcg_env, s->A0); break; @@ -6969,13 +6994,14 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->cc_op_dirty = false; dc->popl_esp_hack = 0; /* select memory access functions */ - dc->mem_index = cpu_mmu_index(env, false); + dc->mem_index = cpu_mmu_index(cpu, false); dc->cpuid_features = env->features[FEAT_1_EDX]; dc->cpuid_ext_features = env->features[FEAT_1_ECX]; dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX]; dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; 
dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX]; + dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX]; dc->cpuid_xsave_features = env->features[FEAT_XSAVE]; dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) || (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))); @@ -7007,6 +7033,7 @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) DisasContext *dc = container_of(dcbase, DisasContext, base); target_ulong pc_arg = dc->base.pc_next; + dc->prev_insn_start = dc->base.insn_start; dc->prev_insn_end = tcg_last_op(); if (tb_cflags(dcbase->tb) & CF_PCREL) { pc_arg &= ~TARGET_PAGE_MASK; @@ -7101,7 +7128,7 @@ static const TranslatorOps i386_tr_ops = { /* generate intermediate code for basic block 'tb'. */ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc; diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c index 67cad867207..189ae0f1406 100644 --- a/target/i386/whpx/whpx-accel-ops.c +++ b/target/i386/whpx/whpx-accel-ops.c @@ -25,7 +25,7 @@ static void *whpx_cpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); current_cpu = cpu; @@ -48,14 +48,14 @@ static void *whpx_cpu_thread_fn(void *arg) } } while (cpu_thread_is_idle(cpu)) { - qemu_cond_wait_iothread(cpu->halt_cond); + qemu_cond_wait_bql(cpu->halt_cond); } qemu_wait_io_event_common(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); whpx_destroy_vcpu(cpu); cpu_thread_signal_destroyed(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index d29ba916a0c..31eec7048c5 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -300,7 +300,6 @@ static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs) /* X64 Extended Control Registers */ static void whpx_set_xcrs(CPUState *cpu) { - CPUX86State *env = cpu_env(cpu); HRESULT hr; struct whpx_state *whpx = &whpx_global; WHV_REGISTER_VALUE xcr0; @@ -311,7 +310,7 @@ static void whpx_set_xcrs(CPUState *cpu) } /* Only xcr0 is supported by the hypervisor currently */ - xcr0.Reg64 = env->xcr0; + xcr0.Reg64 = cpu_env(cpu)->xcr0; hr = whp_dispatch.WHvSetVirtualProcessorRegisters( whpx->partition, cpu->cpu_index, &xcr0_name, 1, &xcr0); if (FAILED(hr)) { @@ -321,7 +320,6 @@ static void whpx_set_xcrs(CPUState *cpu) static int whpx_set_tsc(CPUState *cpu) { - CPUX86State *env = cpu_env(cpu); WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc; WHV_REGISTER_VALUE tsc_val; HRESULT hr; @@ -345,7 +343,7 @@ static int whpx_set_tsc(CPUState *cpu) } } - tsc_val.Reg64 = env->tsc; + tsc_val.Reg64 = cpu_env(cpu)->tsc; hr = whp_dispatch.WHvSetVirtualProcessorRegisters( whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val); if (FAILED(hr)) { @@ -556,7 +554,6 @@ static void whpx_set_registers(CPUState *cpu, int level) static int whpx_get_tsc(CPUState *cpu) { - CPUX86State *env = cpu_env(cpu); WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc; WHV_REGISTER_VALUE tsc_val; HRESULT hr; @@ -569,14 +566,13 @@ static int whpx_get_tsc(CPUState *cpu) return -1; } - env->tsc = tsc_val.Reg64; + cpu_env(cpu)->tsc = tsc_val.Reg64; return 0; } /* X64 Extended Control Registers */ static void whpx_get_xcrs(CPUState *cpu) { - CPUX86State *env = cpu_env(cpu); HRESULT hr; struct whpx_state 
*whpx = &whpx_global; WHV_REGISTER_VALUE xcr0; @@ -594,7 +590,7 @@ static void whpx_get_xcrs(CPUState *cpu) return; } - env->xcr0 = xcr0.Reg64; + cpu_env(cpu)->xcr0 = xcr0.Reg64; } static void whpx_get_registers(CPUState *cpu) @@ -1324,7 +1320,7 @@ static int whpx_first_vcpu_starting(CPUState *cpu) struct whpx_state *whpx = &whpx_global; HRESULT hr; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (!QTAILQ_EMPTY(&cpu->breakpoints) || (whpx->breakpoints.breakpoints && @@ -1400,8 +1396,7 @@ static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid) { if (cpu->vcpu_dirty) { /* The CPU registers have been modified by other parts of QEMU. */ - CPUArchState *env = cpu_env(cpu); - return env->eip; + return cpu_env(cpu)->eip; } else if (exit_context_valid) { /* * The CPU registers have not been modified by neither other parts @@ -1439,18 +1434,17 @@ static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid) static int whpx_handle_halt(CPUState *cpu) { - CPUX86State *env = cpu_env(cpu); int ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && - (env->eflags & IF_MASK)) && + (cpu_env(cpu)->eflags & IF_MASK)) && !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) { cpu->exception_index = EXCP_HLT; cpu->halted = true; ret = 1; } - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -1472,7 +1466,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) memset(&new_int, 0, sizeof(new_int)); memset(reg_values, 0, sizeof(reg_values)); - qemu_mutex_lock_iothread(); + bql_lock(); /* Inject NMI */ if (!vcpu->interruption_pending && @@ -1563,7 +1557,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) reg_count += 1; } - qemu_mutex_unlock_iothread(); + bql_unlock(); vcpu->ready_for_pic_interrupt = false; if (reg_count) { @@ -1590,9 +1584,9 @@ static void whpx_vcpu_post_run(CPUState *cpu) uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8; if (vcpu->tpr != tpr) { vcpu->tpr = tpr; - qemu_mutex_lock_iothread(); + bql_lock(); cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(vcpu->tpr)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } vcpu->interruption_pending = @@ -1652,7 +1646,7 @@ static int whpx_vcpu_run(CPUState *cpu) WhpxStepMode exclusive_step_mode = WHPX_STEP_NONE; int ret; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (whpx->running_cpus++ == 0) { /* Insert breakpoints into memory, update exception exit bitmap. 
*/ @@ -1690,7 +1684,7 @@ static int whpx_vcpu_run(CPUState *cpu) } } - qemu_mutex_unlock_iothread(); + bql_unlock(); if (exclusive_step_mode != WHPX_STEP_NONE) { start_exclusive(); @@ -2028,9 +2022,9 @@ static int whpx_vcpu_run(CPUState *cpu) error_report("WHPX: Unexpected VP exit code %d", vcpu->exit_ctx.ExitReason); whpx_get_registers(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_system_guest_panicked(cpu_get_crash_info(cpu)); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; } @@ -2055,7 +2049,7 @@ static int whpx_vcpu_run(CPUState *cpu) cpu_exec_end(cpu); } - qemu_mutex_lock_iothread(); + bql_lock(); current_cpu = cpu; if (--whpx->running_cpus == 0) { diff --git a/target/i386/whpx/whpx-apic.c b/target/i386/whpx/whpx-apic.c index 8710e37567d..7e14ded9788 100644 --- a/target/i386/whpx/whpx-apic.c +++ b/target/i386/whpx/whpx-apic.c @@ -90,9 +90,10 @@ static void whpx_get_apic_state(APICCommonState *s, apic_next_timer(s, s->initial_count_load_time); } -static void whpx_apic_set_base(APICCommonState *s, uint64_t val) +static int whpx_apic_set_base(APICCommonState *s, uint64_t val) { s->apicbase = val; + return 0; } static void whpx_put_apic_base(CPUState *cpu, uint64_t val) diff --git a/target/loongarch/cpu-csr.h b/target/loongarch/cpu-csr.h index c59d7a9fcbc..0834e91f30e 100644 --- a/target/loongarch/cpu-csr.h +++ b/target/loongarch/cpu-csr.h @@ -67,6 +67,9 @@ FIELD(TLBENTRY, D, 1, 1) FIELD(TLBENTRY, PLV, 2, 2) FIELD(TLBENTRY, MAT, 4, 2) FIELD(TLBENTRY, G, 6, 1) +FIELD(TLBENTRY, HUGE, 6, 1) +FIELD(TLBENTRY, HGLOBAL, 12, 1) +FIELD(TLBENTRY, LEVEL, 13, 2) FIELD(TLBENTRY_32, PPN, 8, 24) FIELD(TLBENTRY_64, PPN, 12, 36) FIELD(TLBENTRY_64, NR, 61, 1) diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index fc075952e63..203a349055c 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -11,15 +11,25 @@ #include "qapi/error.h" #include "qemu/module.h" #include "sysemu/qtest.h" -#include "exec/cpu_ldst.h" +#include "sysemu/tcg.h" +#include "sysemu/kvm.h" +#include "kvm/kvm_loongarch.h" #include "exec/exec-all.h" #include "cpu.h" #include "internals.h" #include "fpu/softfloat-helpers.h" #include "cpu-csr.h" +#ifndef CONFIG_USER_ONLY #include "sysemu/reset.h" -#include "tcg/tcg.h" +#endif #include "vec.h" +#ifdef CONFIG_KVM +#include <linux/kvm.h> +#endif +#ifdef CONFIG_TCG +#include "exec/cpu_ldst.h" +#include "tcg/tcg.h" +#endif const char * const regnames[32] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", @@ -35,33 +45,45 @@ const char * const fregnames[32] = { "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", }; -static const char * const excp_names[] = { - [EXCCODE_INT] = "Interrupt", - [EXCCODE_PIL] = "Page invalid exception for load", - [EXCCODE_PIS] = "Page invalid exception for store", - [EXCCODE_PIF] = "Page invalid exception for fetch", - [EXCCODE_PME] = "Page modified exception", - [EXCCODE_PNR] = "Page Not Readable exception", - [EXCCODE_PNX] = "Page Not Executable exception", - [EXCCODE_PPI] = "Page Privilege error", - [EXCCODE_ADEF] = "Address error for instruction fetch", - [EXCCODE_ADEM] = "Address error for Memory access", - [EXCCODE_SYS] = "Syscall", - [EXCCODE_BRK] = "Break", - [EXCCODE_INE] = "Instruction Non-Existent", - [EXCCODE_IPE] = "Instruction privilege error", - [EXCCODE_FPD] = "Floating Point Disabled", - [EXCCODE_FPE] = "Floating Point Exception", - [EXCCODE_DBP] = "Debug breakpoint", - [EXCCODE_BCE] = "Bound Check Exception", - [EXCCODE_SXD] = "128 bit vector instructions Disable exception", - [EXCCODE_ASXD] = "256 bit vector
instructions Disable exception", +struct TypeExcp { + int32_t exccode; + const char * const name; +}; + +static const struct TypeExcp excp_names[] = { + {EXCCODE_INT, "Interrupt"}, + {EXCCODE_PIL, "Page invalid exception for load"}, + {EXCCODE_PIS, "Page invalid exception for store"}, + {EXCCODE_PIF, "Page invalid exception for fetch"}, + {EXCCODE_PME, "Page modified exception"}, + {EXCCODE_PNR, "Page Not Readable exception"}, + {EXCCODE_PNX, "Page Not Executable exception"}, + {EXCCODE_PPI, "Page Privilege error"}, + {EXCCODE_ADEF, "Address error for instruction fetch"}, + {EXCCODE_ADEM, "Address error for Memory access"}, + {EXCCODE_SYS, "Syscall"}, + {EXCCODE_BRK, "Break"}, + {EXCCODE_INE, "Instruction Non-Existent"}, + {EXCCODE_IPE, "Instruction privilege error"}, + {EXCCODE_FPD, "Floating Point Disabled"}, + {EXCCODE_FPE, "Floating Point Exception"}, + {EXCCODE_DBP, "Debug breakpoint"}, + {EXCCODE_BCE, "Bound Check Exception"}, + {EXCCODE_SXD, "128 bit vector instructions Disable exception"}, + {EXCCODE_ASXD, "256 bit vector instructions Disable exception"}, + {EXCP_HLT, "EXCP_HLT"}, }; const char *loongarch_exception_name(int32_t exception) { - assert(excp_names[exception]); - return excp_names[exception]; + int i; + + for (i = 0; i < ARRAY_SIZE(excp_names); i++) { + if (excp_names[i].exccode == exception) { + return excp_names[i].name; + } + } + return "Unknown"; } void G_NORETURN do_raise_exception(CPULoongArchState *env, @@ -70,7 +92,7 @@ void G_NORETURN do_raise_exception(CPULoongArchState *env, { CPUState *cs = env_cpu(env); - qemu_log_mask(CPU_LOG_INT, "%s: %d (%s)\n", + qemu_log_mask(CPU_LOG_INT, "%s: expection: %d (%s)\n", __func__, exception, loongarch_exception_name(exception)); @@ -81,18 +103,12 @@ void G_NORETURN do_raise_exception(CPULoongArchState *env, static void loongarch_cpu_set_pc(CPUState *cs, vaddr value) { - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; - - set_pc(env, value); + set_pc(cpu_env(cs), value); } static vaddr loongarch_cpu_get_pc(CPUState *cs) { - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; - - return env->pc; + return cpu_env(cs)->pc; } #ifndef CONFIG_USER_ONLY @@ -108,12 +124,15 @@ void loongarch_cpu_set_irq(void *opaque, int irq, int level) return; } - env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0); - - if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) { - cpu_interrupt(cs, CPU_INTERRUPT_HARD); - } else { - cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + if (kvm_enabled()) { + kvm_loongarch_set_interrupt(cpu, irq, level); + } else if (tcg_enabled()) { + env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0); + if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } } } @@ -138,29 +157,25 @@ static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env) return (pending & status) != 0; } +#endif +#ifdef CONFIG_TCG +#ifndef CONFIG_USER_ONLY static void loongarch_cpu_do_interrupt(CPUState *cs) { - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; + CPULoongArchState *env = cpu_env(cs); bool update_badinstr = 1; int cause = -1; - const char *name; bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR); uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS); if (cs->exception_index != EXCCODE_INT) { - if (cs->exception_index < 0 || - cs->exception_index >= ARRAY_SIZE(excp_names)) { - name = "unknown"; - } else { - name 
= excp_names[cs->exception_index]; - } - qemu_log_mask(CPU_LOG_INT, "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx - " TLBRERA " TARGET_FMT_lx " %s exception\n", __func__, - env->pc, env->CSR_ERA, env->CSR_TLBRERA, name); + " TLBRERA " TARGET_FMT_lx " exception: %d (%s)\n", + __func__, env->pc, env->CSR_ERA, env->CSR_TLBRERA, + cs->exception_index, + loongarch_exception_name(cs->exception_index)); } switch (cs->exception_index) { @@ -292,8 +307,7 @@ static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, MemTxResult response, uintptr_t retaddr) { - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; + CPULoongArchState *env = cpu_env(cs); if (access_type == MMU_INST_FETCH) { do_raise_exception(env, EXCCODE_ADEF, retaddr); @@ -305,8 +319,7 @@ static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { if (interrupt_request & CPU_INTERRUPT_HARD) { - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; + CPULoongArchState *env = cpu_env(cs); if (cpu_loongarch_hw_interrupts_enabled(env) && cpu_loongarch_hw_interrupts_pending(env)) { @@ -320,25 +333,18 @@ static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request) } #endif -#ifdef CONFIG_TCG static void loongarch_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; - tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); - set_pc(env, tb->pc); + set_pc(cpu_env(cs), tb->pc); } static void loongarch_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) { - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; - - set_pc(env, data[0]); + set_pc(cpu_env(cs), data[0]); } #endif /* CONFIG_TCG */ @@ -347,12 +353,10 @@ static bool loongarch_cpu_has_work(CPUState *cs) #ifdef CONFIG_USER_ONLY return true; #else - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; bool has_work = false; if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && - cpu_loongarch_hw_interrupts_pending(env)) { + cpu_loongarch_hw_interrupts_pending(cpu_env(cs))) { has_work = true; } @@ -360,6 +364,16 @@ static bool loongarch_cpu_has_work(CPUState *cs) #endif } +static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + CPULoongArchState *env = cpu_env(cs); + + if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) { + return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV); + } + return MMU_DA_IDX; +} + static void loongarch_la464_initfn(Object *obj) { LoongArchCPU *cpu = LOONGARCH_CPU(obj); @@ -481,27 +495,11 @@ static void loongarch_max_initfn(Object *obj) loongarch_la464_initfn(obj); } -static void loongarch_cpu_list_entry(gpointer data, gpointer user_data) -{ - const char *typename = object_class_get_name(OBJECT_CLASS(data)); - - qemu_printf("%s\n", typename); -} - -void loongarch_cpu_list(void) -{ - GSList *list; - list = object_class_get_list_sorted(TYPE_LOONGARCH_CPU, false); - g_slist_foreach(list, loongarch_cpu_list_entry, NULL); - g_slist_free(list); -} - static void loongarch_cpu_reset_hold(Object *obj) { CPUState *cs = CPU(obj); - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(cpu); - CPULoongArchState *env = &cpu->env; + LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(obj); + CPULoongArchState *env = cpu_env(cs); if (lacc->parent_phases.hold) { lacc->parent_phases.hold(obj); @@ 
-531,10 +529,12 @@ static void loongarch_cpu_reset_hold(Object *obj) env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2)); env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0); + env->CSR_CPUID = cs->cpu_index; env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0); env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0); env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0); env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0); + env->CSR_TID = cs->cpu_index; env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2); env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63); @@ -551,9 +551,14 @@ static void loongarch_cpu_reset_hold(Object *obj) #ifndef CONFIG_USER_ONLY env->pc = 0x1c000000; memset(env->tlb, 0, sizeof(env->tlb)); + if (kvm_enabled()) { + kvm_arch_reset_vcpu(env); + } #endif +#ifdef CONFIG_TCG restore_fp_status(env); +#endif cs->exception_index = -1; } @@ -582,47 +587,6 @@ static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp) lacc->parent_realize(dev, errp); } -#ifndef CONFIG_USER_ONLY -static void loongarch_qemu_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size) -{ - qemu_log_mask(LOG_UNIMP, "[%s]: Unimplemented reg 0x%" HWADDR_PRIx "\n", - __func__, addr); -} - -static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size) -{ - switch (addr) { - case VERSION_REG: - return 0x11ULL; - case FEATURE_REG: - return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI | - 1ULL << IOCSRF_CSRIPI; - case VENDOR_REG: - return 0x6e6f73676e6f6f4cULL; /* "Loongson" */ - case CPUNAME_REG: - return 0x303030354133ULL; /* "3A5000" */ - case MISC_FUNC_REG: - return 1ULL << IOCSRM_EXTIOI_EN; - } - return 0ULL; -} - -static const MemoryRegionOps loongarch_qemu_ops = { - .read = loongarch_qemu_read, - .write = loongarch_qemu_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 8, - }, - .impl = { - .min_access_size = 8, - .max_access_size = 8, - }, -}; -#endif - static bool loongarch_get_lsx(Object *obj, Error **errp) { LoongArchCPU *cpu = LOONGARCH_CPU(obj); @@ -693,17 +657,12 @@ static void loongarch_cpu_init(Object *obj) { #ifndef CONFIG_USER_ONLY LoongArchCPU *cpu = LOONGARCH_CPU(obj); - CPULoongArchState *env = &cpu->env; qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS); +#ifdef CONFIG_TCG timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL, &loongarch_constant_timer_cb, cpu); - memory_region_init_io(&env->system_iocsr, OBJECT(cpu), NULL, - env, "iocsr", UINT64_MAX); - address_space_init(&env->address_space_iocsr, &env->system_iocsr, "IOCSR"); - memory_region_init_io(&env->iocsr_mem, OBJECT(cpu), &loongarch_qemu_ops, - NULL, "iocsr_misc", 0x428); - memory_region_add_subregion(&env->system_iocsr, 0, &env->iocsr_mem); +#endif #endif } @@ -716,21 +675,14 @@ static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model) g_autofree char *typename = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); - if (!oc) { - return NULL; - } } - if (object_class_dynamic_cast(oc, TYPE_LOONGARCH_CPU)) { - return oc; - } - return NULL; + return oc; } void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; + CPULoongArchState *env = cpu_env(cs); int i; qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc); @@ -762,6 +714,8 @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) 
qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY); qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV); qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA); + qemu_fprintf(f, "TCFG=%016" PRIx64 "\n", env->CSR_TCFG); + qemu_fprintf(f, "TVAL=%016" PRIx64 "\n", env->CSR_TVAL); /* fpr */ if (flags & CPU_DUMP_FPU) { @@ -777,7 +731,7 @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) #ifdef CONFIG_TCG #include "hw/core/tcg-cpu-ops.h" -static struct TCGCPUOps loongarch_tcg_ops = { +static const TCGCPUOps loongarch_tcg_ops = { .initialize = loongarch_translate_init, .synchronize_from_tb = loongarch_cpu_synchronize_from_tb, .restore_state_to_opc = loongarch_restore_state_to_opc, @@ -820,6 +774,7 @@ static void loongarch_cpu_class_init(ObjectClass *c, void *data) cc->class_by_name = loongarch_cpu_class_by_name; cc->has_work = loongarch_cpu_has_work; + cc->mmu_index = loongarch_cpu_mmu_index; cc->dump_state = loongarch_cpu_dump_state; cc->set_pc = loongarch_cpu_set_pc; cc->get_pc = loongarch_cpu_get_pc; @@ -847,7 +802,6 @@ static void loongarch32_cpu_class_init(ObjectClass *c, void *data) { CPUClass *cc = CPU_CLASS(c); - cc->gdb_num_core_regs = 35; cc->gdb_core_xml_file = "loongarch-base32.xml"; cc->gdb_arch_name = loongarch32_gdb_arch_name; } @@ -861,7 +815,6 @@ static void loongarch64_cpu_class_init(ObjectClass *c, void *data) { CPUClass *cc = CPU_CLASS(c); - cc->gdb_num_core_regs = 35; cc->gdb_core_xml_file = "loongarch-base64.xml"; cc->gdb_arch_name = loongarch64_gdb_arch_name; } diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h index 00d1fba597f..ec37579fd6c 100644 --- a/target/loongarch/cpu.h +++ b/target/loongarch/cpu.h @@ -319,6 +319,7 @@ typedef struct CPUArchState { uint64_t CSR_PWCH; uint64_t CSR_STLBPS; uint64_t CSR_RVACFG; + uint64_t CSR_CPUID; uint64_t CSR_PRCFG1; uint64_t CSR_PRCFG2; uint64_t CSR_PRCFG3; @@ -350,16 +351,14 @@ typedef struct CPUArchState { uint64_t CSR_DBG; uint64_t CSR_DERA; uint64_t CSR_DSAVE; - uint64_t CSR_CPUID; #ifndef CONFIG_USER_ONLY LoongArchTLB tlb[LOONGARCH_TLB_MAX]; - AddressSpace address_space_iocsr; - MemoryRegion system_iocsr; - MemoryRegion iocsr_mem; + AddressSpace *address_space_iocsr; bool load_elf; uint64_t elf_address; + uint32_t mp_state; /* Store ipistate to access from this struct */ DeviceState *ipistate; #endif @@ -380,6 +379,8 @@ struct ArchCPU { /* 'compatible' string for this CPU for Linux device trees */ const char *dtb_compatible; + /* used by KVM_REG_LOONGARCH_COUNTER ioctl to access guest time counters */ + uint64_t kvm_state_counter; }; /** @@ -403,21 +404,9 @@ struct LoongArchCPUClass { */ #define MMU_PLV_KERNEL 0 #define MMU_PLV_USER 3 -#define MMU_IDX_KERNEL MMU_PLV_KERNEL -#define MMU_IDX_USER MMU_PLV_USER -#define MMU_IDX_DA 4 - -static inline int cpu_mmu_index(CPULoongArchState *env, bool ifetch) -{ -#ifdef CONFIG_USER_ONLY - return MMU_IDX_USER; -#else - if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) { - return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV); - } - return MMU_IDX_DA; -#endif -} +#define MMU_KERNEL_IDX MMU_PLV_KERNEL +#define MMU_USER_IDX MMU_PLV_USER +#define MMU_DA_IDX 4 static inline bool is_la64(CPULoongArchState *env) { @@ -466,10 +455,6 @@ static inline void cpu_get_tb_cpu_state(CPULoongArchState *env, vaddr *pc, *flags |= is_va32(env) * HW_FLAGS_VA32; } -void loongarch_cpu_list(void); - -#define cpu_list loongarch_cpu_list - #include "exec/cpu-all.h" #define CPU_RESOLVING_TYPE TYPE_LOONGARCH_CPU diff --git a/target/loongarch/cpu_helper.c 
b/target/loongarch/cpu_helper.c new file mode 100644 index 00000000000..960eec95674 --- /dev/null +++ b/target/loongarch/cpu_helper.c @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch CPU helpers for qemu + * + * Copyright (c) 2024 Loongson Technology Corporation Limited + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internals.h" +#include "cpu-csr.h" + +static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + int access_type, int index, int mmu_idx) +{ + LoongArchTLB *tlb = &env->tlb[index]; + uint64_t plv = mmu_idx; + uint64_t tlb_entry, tlb_ppn; + uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv; + + if (index >= LOONGARCH_STLB) { + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + } else { + tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + } + n = (address >> tlb_ps) & 0x1;/* Odd or even */ + + tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0; + tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V); + tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D); + tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV); + if (is_la64(env)) { + tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN); + tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX); + tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR); + tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV); + } else { + tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN); + tlb_nx = 0; + tlb_nr = 0; + tlb_rplv = 0; + } + + /* Remove sw bit between bit12 -- bit PS*/ + tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) -1)); + + /* Check access rights */ + if (!tlb_v) { + return TLBRET_INVALID; + } + + if (access_type == MMU_INST_FETCH && tlb_nx) { + return TLBRET_XI; + } + + if (access_type == MMU_DATA_LOAD && tlb_nr) { + return TLBRET_RI; + } + + if (((tlb_rplv == 0) && (plv > tlb_plv)) || + ((tlb_rplv == 1) && (plv != tlb_plv))) { + return TLBRET_PE; + } + + if ((access_type == MMU_DATA_STORE) && !tlb_d) { + return TLBRET_DIRTY; + } + + *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) | + (address & MAKE_64BIT_MASK(0, tlb_ps)); + *prot = PAGE_READ; + if (tlb_d) { + *prot |= PAGE_WRITE; + } + if (!tlb_nx) { + *prot |= PAGE_EXEC; + } + return TLBRET_MATCH; +} + +/* + * One tlb entry holds an adjacent odd/even pair, the vpn is the + * content of the virtual page number divided by 2. So the + * compare vpn is bit[47:15] for 16KiB page. while the vppn + * field in tlb entry contains bit[47:13], so need adjust. 
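+ * For example, with the default 16KiB STLB page size the compared vpn
+ * is vaddr >> 15 (VA[47:15]) while the stored vppn covers VA[47:13],
+ * so compare_shift below simply drops the two low vppn bits first.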
+ * virt_vpn = vaddr[47:13] + */ +bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, + int *index) +{ + LoongArchTLB *tlb; + uint16_t csr_asid, tlb_asid, stlb_idx; + uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps; + int i, compare_shift; + uint64_t vpn, tlb_vppn; + + csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); + stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1); + stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */ + compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + + /* Search STLB */ + for (i = 0; i < 8; ++i) { + tlb = &env->tlb[i * 256 + stlb_idx]; + tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (tlb_e) { + tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + + if ((tlb_g == 1 || tlb_asid == csr_asid) && + (vpn == (tlb_vppn >> compare_shift))) { + *index = i * 256 + stlb_idx; + return true; + } + } + } + + /* Search MTLB */ + for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) { + tlb = &env->tlb[i]; + tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (tlb_e) { + tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1); + if ((tlb_g == 1 || tlb_asid == csr_asid) && + (vpn == (tlb_vppn >> compare_shift))) { + *index = i; + return true; + } + } + } + return false; +} + +static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType access_type, int mmu_idx) +{ + int index, match; + + match = loongarch_tlb_search(env, address, &index); + if (match) { + return loongarch_map_tlb_entry(env, physical, prot, + address, access_type, index, mmu_idx); + } + + return TLBRET_NOMATCH; +} + +static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va, + target_ulong dmw) +{ + if (is_la64(env)) { + return va & TARGET_VIRT_MASK; + } else { + uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG); + return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) | \ + (pseg << R_CSR_DMW_32_VSEG_SHIFT); + } +} + +int get_physical_address(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType access_type, int mmu_idx) +{ + int user_mode = mmu_idx == MMU_USER_IDX; + int kernel_mode = mmu_idx == MMU_KERNEL_IDX; + uint32_t plv, base_c, base_v; + int64_t addr_high; + uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA); + uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG); + + /* Check PG and DA */ + if (da & !pg) { + *physical = address & TARGET_PHYS_MASK; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TLBRET_MATCH; + } + + plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT); + if (is_la64(env)) { + base_v = address >> R_CSR_DMW_64_VSEG_SHIFT; + } else { + base_v = address >> R_CSR_DMW_32_VSEG_SHIFT; + } + /* Check direct map window */ + for (int i = 0; i < 4; i++) { + if (is_la64(env)) { + base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG); + } else { + base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG); + } + if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) { + *physical = dmw_va2pa(env, address, env->CSR_DMW[i]); + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TLBRET_MATCH; + } + } + + /* Check valid 
extension */ + addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16); + if (!(addr_high == 0 || addr_high == -1)) { + return TLBRET_BADADDR; + } + + /* Mapped address */ + return loongarch_map_address(env, physical, prot, address, + access_type, mmu_idx); +} + +hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + CPULoongArchState *env = cpu_env(cs); + hwaddr phys_addr; + int prot; + + if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD, + cpu_mmu_index(cs, false)) != 0) { + return -1; + } + return phys_addr; +} diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c index 2040f3e44db..63989a6282d 100644 --- a/target/loongarch/disas.c +++ b/target/loongarch/disas.c @@ -120,10 +120,15 @@ static const char *get_csr_name(unsigned num) csr_names[num] : "Undefined CSR"; } -#define output(C, INSN, FMT, ...) \ -{ \ - (C)->info->fprintf_func((C)->info->stream, "%08x %-9s\t" FMT, \ - (C)->insn, INSN, ##__VA_ARGS__); \ +#define output(C, INSN, FMT, ...) \ + { \ + if ((C)->info->show_opcodes) { \ + (C)->info->fprintf_func((C)->info->stream, "%08x %-9s\t" FMT,\ + (C)->insn, INSN, ##__VA_ARGS__); \ + } else { \ + (C)->info->fprintf_func((C)->info->stream, "%-9s\t" FMT, \ + INSN, ##__VA_ARGS__); \ + } \ } #include "decode-insns.c.inc" diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c index 5fc2f19e965..a0e1439bd02 100644 --- a/target/loongarch/gdbstub.c +++ b/target/loongarch/gdbstub.c @@ -33,8 +33,7 @@ void write_fcc(CPULoongArchState *env, uint64_t val) int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; + CPULoongArchState *env = cpu_env(cs); uint64_t val; if (0 <= n && n < 32) { @@ -60,8 +59,7 @@ int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; + CPULoongArchState *env = cpu_env(cs); target_ulong tmp; int read_length; int length = 0; @@ -84,9 +82,11 @@ int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return length; } -static int loongarch_gdb_get_fpu(CPULoongArchState *env, - GByteArray *mem_buf, int n) +static int loongarch_gdb_get_fpu(CPUState *cs, GByteArray *mem_buf, int n) { + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + if (0 <= n && n < 32) { return gdb_get_reg64(mem_buf, env->fpr[n].vreg.D(0)); } else if (32 <= n && n < 40) { @@ -97,9 +97,10 @@ static int loongarch_gdb_get_fpu(CPULoongArchState *env, return 0; } -static int loongarch_gdb_set_fpu(CPULoongArchState *env, - uint8_t *mem_buf, int n) +static int loongarch_gdb_set_fpu(CPUState *cs, uint8_t *mem_buf, int n) { + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; int length = 0; if (0 <= n && n < 32) { @@ -118,5 +119,5 @@ static int loongarch_gdb_set_fpu(CPULoongArchState *env, void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs) { gdb_register_coprocessor(cs, loongarch_gdb_get_fpu, loongarch_gdb_set_fpu, - 41, "loongarch-fpu.xml", 0); + gdb_find_static_feature("loongarch-fpu.xml"), 0); } diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h index c492863cc5d..944153b180e 100644 --- a/target/loongarch/internals.h +++ b/target/loongarch/internals.h @@ -16,11 +16,6 @@ #define TARGET_PHYS_MASK MAKE_64BIT_MASK(0, TARGET_PHYS_ADDR_SPACE_BITS) #define 
TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS) -/* Global bit used for lddir/ldpte */ -#define LOONGARCH_PAGE_HUGE_SHIFT 6 -/* Global bit for huge page */ -#define LOONGARCH_HGLOBAL_SHIFT 12 - void loongarch_translate_init(void); void loongarch_cpu_dump_state(CPUState *cpu, FILE *f, int flags); @@ -31,10 +26,23 @@ void G_NORETURN do_raise_exception(CPULoongArchState *env, const char *loongarch_exception_name(int32_t exception); +#ifdef CONFIG_TCG int ieee_ex_to_loongarch(int xcpt); void restore_fp_status(CPULoongArchState *env); +#endif #ifndef CONFIG_USER_ONLY +enum { + TLBRET_MATCH = 0, + TLBRET_BADADDR = 1, + TLBRET_NOMATCH = 2, + TLBRET_INVALID = 3, + TLBRET_DIRTY = 4, + TLBRET_RI = 5, + TLBRET_XI = 6, + TLBRET_PE = 7, +}; + extern const VMStateDescription vmstate_loongarch_cpu; void loongarch_cpu_set_irq(void *opaque, int irq, int level); @@ -44,12 +52,18 @@ uint64_t cpu_loongarch_get_constant_timer_counter(LoongArchCPU *cpu); uint64_t cpu_loongarch_get_constant_timer_ticks(LoongArchCPU *cpu); void cpu_loongarch_store_constant_timer_config(LoongArchCPU *cpu, uint64_t value); +bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, + int *index); +int get_physical_address(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType access_type, int mmu_idx); +hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +#ifdef CONFIG_TCG bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); - -hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +#endif #endif /* !CONFIG_USER_ONLY */ uint64_t read_fcc(CPULoongArchState *env); diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c new file mode 100644 index 00000000000..d630cc39cb2 --- /dev/null +++ b/target/loongarch/kvm/kvm.c @@ -0,0 +1,784 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch KVM + * + * Copyright (c) 2023 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include <sys/ioctl.h> +#include <linux/kvm.h> + +#include "qemu/timer.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "sysemu/sysemu.h" +#include "sysemu/kvm.h" +#include "sysemu/kvm_int.h" +#include "hw/pci/pci.h" +#include "exec/memattrs.h" +#include "exec/address-spaces.h" +#include "hw/boards.h" +#include "hw/irq.h" +#include "qemu/log.h" +#include "hw/loader.h" +#include "sysemu/runstate.h" +#include "cpu-csr.h" +#include "kvm_loongarch.h" +#include "trace.h" + +static bool cap_has_mp_state; +const KVMCapabilityInfo kvm_arch_required_capabilities[] = { + KVM_CAP_LAST_INFO +}; + +static int kvm_loongarch_get_regs_core(CPUState *cs) +{ + int ret = 0; + int i; + struct kvm_regs regs; + CPULoongArchState *env = cpu_env(cs); + + /* Get the current register set as KVM sees it */ + ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs); + if (ret < 0) { + trace_kvm_failed_get_regs_core(strerror(errno)); + return ret; + } + /* gpr[0] value is always 0 */ + env->gpr[0] = 0; + for (i = 1; i < 32; i++) { + env->gpr[i] = regs.gpr[i]; + } + + env->pc = regs.pc; + return ret; +} + +static int kvm_loongarch_put_regs_core(CPUState *cs) +{ + int ret = 0; + int i; + struct kvm_regs regs; + CPULoongArchState *env = cpu_env(cs); + + /* Set the registers based on QEMU's view of things */ + for (i = 0; i < 32; i++) { + regs.gpr[i] = env->gpr[i]; + } + + regs.pc = env->pc; + ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs); + if (ret < 0) { +
trace_kvm_failed_put_regs_core(strerror(errno)); + } + + return ret; +} + +static int kvm_loongarch_get_csr(CPUState *cs) +{ + int ret = 0; + CPULoongArchState *env = cpu_env(cs); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD), + &env->CSR_CRMD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD), + &env->CSR_PRMD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN), + &env->CSR_EUEN); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC), + &env->CSR_MISC); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG), + &env->CSR_ECFG); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT), + &env->CSR_ESTAT); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA), + &env->CSR_ERA); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV), + &env->CSR_BADV); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI), + &env->CSR_BADI); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY), + &env->CSR_EENTRY); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX), + &env->CSR_TLBIDX); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI), + &env->CSR_TLBEHI); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0), + &env->CSR_TLBELO0); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1), + &env->CSR_TLBELO1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID), + &env->CSR_ASID); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL), + &env->CSR_PGDL); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH), + &env->CSR_PGDH); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD), + &env->CSR_PGD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL), + &env->CSR_PWCL); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH), + &env->CSR_PWCH); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS), + &env->CSR_STLBPS); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG), + &env->CSR_RVACFG); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID), + &env->CSR_CPUID); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1), + &env->CSR_PRCFG1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2), + &env->CSR_PRCFG2); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3), + &env->CSR_PRCFG3); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)), + &env->CSR_SAVE[0]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)), + &env->CSR_SAVE[1]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)), + &env->CSR_SAVE[2]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)), + &env->CSR_SAVE[3]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)), + &env->CSR_SAVE[4]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)), + &env->CSR_SAVE[5]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)), + &env->CSR_SAVE[6]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)), + &env->CSR_SAVE[7]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID), + &env->CSR_TID); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC), + &env->CSR_CNTC); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR), + &env->CSR_TICLR); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL), + &env->CSR_LLBCTL); + + ret |= kvm_get_one_reg(cs, 
KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1), + &env->CSR_IMPCTL1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2), + &env->CSR_IMPCTL2); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY), + &env->CSR_TLBRENTRY); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV), + &env->CSR_TLBRBADV); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA), + &env->CSR_TLBRERA); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE), + &env->CSR_TLBRSAVE); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0), + &env->CSR_TLBRELO0); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1), + &env->CSR_TLBRELO1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI), + &env->CSR_TLBREHI); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD), + &env->CSR_TLBRPRMD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)), + &env->CSR_DMW[0]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)), + &env->CSR_DMW[1]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)), + &env->CSR_DMW[2]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)), + &env->CSR_DMW[3]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL), + &env->CSR_TVAL); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG), + &env->CSR_TCFG); + + return ret; +} + +static int kvm_loongarch_put_csr(CPUState *cs, int level) +{ + int ret = 0; + CPULoongArchState *env = cpu_env(cs); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD), + &env->CSR_CRMD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD), + &env->CSR_PRMD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN), + &env->CSR_EUEN); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC), + &env->CSR_MISC); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG), + &env->CSR_ECFG); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT), + &env->CSR_ESTAT); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA), + &env->CSR_ERA); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV), + &env->CSR_BADV); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI), + &env->CSR_BADI); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY), + &env->CSR_EENTRY); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX), + &env->CSR_TLBIDX); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI), + &env->CSR_TLBEHI); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0), + &env->CSR_TLBELO0); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1), + &env->CSR_TLBELO1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID), + &env->CSR_ASID); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL), + &env->CSR_PGDL); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH), + &env->CSR_PGDH); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD), + &env->CSR_PGD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL), + &env->CSR_PWCL); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH), + &env->CSR_PWCH); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS), + &env->CSR_STLBPS); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG), + &env->CSR_RVACFG); + + /* CPUID is constant after poweron, it should be set only once */ + if (level >= 
KVM_PUT_FULL_STATE) { + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID), + &env->CSR_CPUID); + } + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1), + &env->CSR_PRCFG1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2), + &env->CSR_PRCFG2); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3), + &env->CSR_PRCFG3); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)), + &env->CSR_SAVE[0]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)), + &env->CSR_SAVE[1]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)), + &env->CSR_SAVE[2]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)), + &env->CSR_SAVE[3]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)), + &env->CSR_SAVE[4]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)), + &env->CSR_SAVE[5]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)), + &env->CSR_SAVE[6]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)), + &env->CSR_SAVE[7]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID), + &env->CSR_TID); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC), + &env->CSR_CNTC); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR), + &env->CSR_TICLR); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL), + &env->CSR_LLBCTL); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1), + &env->CSR_IMPCTL1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2), + &env->CSR_IMPCTL2); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY), + &env->CSR_TLBRENTRY); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV), + &env->CSR_TLBRBADV); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA), + &env->CSR_TLBRERA); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE), + &env->CSR_TLBRSAVE); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0), + &env->CSR_TLBRELO0); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1), + &env->CSR_TLBRELO1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI), + &env->CSR_TLBREHI); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD), + &env->CSR_TLBRPRMD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)), + &env->CSR_DMW[0]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)), + &env->CSR_DMW[1]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)), + &env->CSR_DMW[2]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)), + &env->CSR_DMW[3]); + /* + * timer cfg must be put at last since it is used to enable + * guest timer + */ + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL), + &env->CSR_TVAL); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG), + &env->CSR_TCFG); + return ret; +} + +static int kvm_loongarch_get_regs_fp(CPUState *cs) +{ + int ret, i; + struct kvm_fpu fpu; + CPULoongArchState *env = cpu_env(cs); + + ret = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu); + if (ret < 0) { + trace_kvm_failed_get_fpu(strerror(errno)); + return ret; + } + + env->fcsr0 = fpu.fcsr; + for (i = 0; i < 32; i++) { + env->fpr[i].vreg.UD[0] = fpu.fpr[i].val64[0]; + } + for (i = 0; i < 8; i++) { + env->cf[i] = fpu.fcc & 0xFF; + fpu.fcc = fpu.fcc >> 8; + } + + return ret; +} + +static int kvm_loongarch_put_regs_fp(CPUState *cs) +{ + int ret, i; + 
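+    /*
+     * kvm_fpu.fcc packs the eight cf[] condition flags one byte each,
+     * with cf[0] in the least significant byte; the packing loop below
+     * mirrors the unpacking in kvm_loongarch_get_regs_fp() above.
+     */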
struct kvm_fpu fpu; + CPULoongArchState *env = cpu_env(cs); + + fpu.fcsr = env->fcsr0; + fpu.fcc = 0; + for (i = 0; i < 32; i++) { + fpu.fpr[i].val64[0] = env->fpr[i].vreg.UD[0]; + } + + for (i = 0; i < 8; i++) { + fpu.fcc |= env->cf[i] << (8 * i); + } + + ret = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu); + if (ret < 0) { + trace_kvm_failed_put_fpu(strerror(errno)); + } + + return ret; +} + +void kvm_arch_reset_vcpu(CPULoongArchState *env) +{ + env->mp_state = KVM_MP_STATE_RUNNABLE; +} + +static int kvm_loongarch_get_mpstate(CPUState *cs) +{ + int ret = 0; + struct kvm_mp_state mp_state; + CPULoongArchState *env = cpu_env(cs); + + if (cap_has_mp_state) { + ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state); + if (ret) { + trace_kvm_failed_get_mpstate(strerror(errno)); + return ret; + } + env->mp_state = mp_state.mp_state; + } + + return ret; +} + +static int kvm_loongarch_put_mpstate(CPUState *cs) +{ + int ret = 0; + struct kvm_mp_state mp_state = { + .mp_state = cpu_env(cs)->mp_state + }; + + if (cap_has_mp_state) { + ret = kvm_vcpu_ioctl(cs, KVM_SET_MP_STATE, &mp_state); + if (ret) { + trace_kvm_failed_put_mpstate(strerror(errno)); + } + } + + return ret; +} + +static int kvm_loongarch_get_cpucfg(CPUState *cs) +{ + int i, ret = 0; + uint64_t val; + CPULoongArchState *env = cpu_env(cs); + + for (i = 0; i < 21; i++) { + ret = kvm_get_one_reg(cs, KVM_IOC_CPUCFG(i), &val); + if (ret < 0) { + trace_kvm_failed_get_cpucfg(strerror(errno)); + } + env->cpucfg[i] = (uint32_t)val; + } + return ret; +} + +static int kvm_check_cpucfg2(CPUState *cs) +{ + int ret; + uint64_t val; + struct kvm_device_attr attr = { + .group = KVM_LOONGARCH_VCPU_CPUCFG, + .attr = 2, + .addr = (uint64_t)&val, + }; + CPULoongArchState *env = cpu_env(cs); + + ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr); + + if (!ret) { + kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr); + env->cpucfg[2] &= val; + + if (FIELD_EX32(env->cpucfg[2], CPUCFG2, FP)) { + /* The FP minimal version is 1. */ + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, FP_VER, 1); + } + + if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LLFTP)) { + /* The LLFTP minimal version is 1. 
*/ + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LLFTP_VER, 1); + } + } + + return ret; +} + +static int kvm_loongarch_put_cpucfg(CPUState *cs) +{ + int i, ret = 0; + CPULoongArchState *env = cpu_env(cs); + uint64_t val; + + for (i = 0; i < 21; i++) { + if (i == 2) { + ret = kvm_check_cpucfg2(cs); + if (ret) { + return ret; + } + } + val = env->cpucfg[i]; + ret = kvm_set_one_reg(cs, KVM_IOC_CPUCFG(i), &val); + if (ret < 0) { + trace_kvm_failed_put_cpucfg(strerror(errno)); + } + } + return ret; +} + +int kvm_arch_get_registers(CPUState *cs) +{ + int ret; + + ret = kvm_loongarch_get_regs_core(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_get_csr(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_get_regs_fp(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_get_mpstate(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_get_cpucfg(cs); + return ret; +} + +int kvm_arch_put_registers(CPUState *cs, int level) +{ + int ret; + + ret = kvm_loongarch_put_regs_core(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_csr(cs, level); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_regs_fp(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_mpstate(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_cpucfg(cs); + return ret; +} + +static void kvm_loongarch_vm_stage_change(void *opaque, bool running, + RunState state) +{ + int ret; + CPUState *cs = opaque; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + + if (running) { + ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_COUNTER, + &cpu->kvm_state_counter); + if (ret < 0) { + trace_kvm_failed_put_counter(strerror(errno)); + } + } else { + ret = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_COUNTER, + &cpu->kvm_state_counter); + if (ret < 0) { + trace_kvm_failed_get_counter(strerror(errno)); + } + } +} + +int kvm_arch_init_vcpu(CPUState *cs) +{ + qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs); + return 0; +} + +int kvm_arch_destroy_vcpu(CPUState *cs) +{ + return 0; +} + +unsigned long kvm_arch_vcpu_id(CPUState *cs) +{ + return cs->cpu_index; +} + +int kvm_arch_release_virq_post(int virq) +{ + return 0; +} + +int kvm_arch_msi_data_to_gsi(uint32_t data) +{ + abort(); +} + +int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, + uint64_t address, uint32_t data, PCIDevice *dev) +{ + return 0; +} + +int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, + int vector, PCIDevice *dev) +{ + return 0; +} + +void kvm_arch_init_irq_routing(KVMState *s) +{ +} + +int kvm_arch_get_default_type(MachineState *ms) +{ + return 0; +} + +int kvm_arch_init(MachineState *ms, KVMState *s) +{ + cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE); + return 0; +} + +int kvm_arch_irqchip_create(KVMState *s) +{ + return 0; +} + +void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) +{ +} + +MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) +{ + return MEMTXATTRS_UNSPECIFIED; +} + +int kvm_arch_process_async_events(CPUState *cs) +{ + return cs->halted; +} + +bool kvm_arch_stop_on_emulation_error(CPUState *cs) +{ + return true; +} + +bool kvm_arch_cpu_check_are_resettable(void) +{ + return true; +} + +int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) +{ + int ret = 0; + CPULoongArchState *env = cpu_env(cs); + MemTxAttrs attrs = {}; + + attrs.requester_id = env_cpu(env)->cpu_index; + + trace_kvm_arch_handle_exit(run->exit_reason); + switch (run->exit_reason) { + case KVM_EXIT_LOONGARCH_IOCSR: + 
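+        /*
+         * The IOCSR access is forwarded to the vCPU's IOCSR address
+         * space; env->address_space_iocsr is now a plain pointer (see
+         * the cpu.h hunk above), presumably wired up by the machine
+         * model instead of being created per CPU as before.
+         */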
address_space_rw(env->address_space_iocsr, + run->iocsr_io.phys_addr, + attrs, + run->iocsr_io.data, + run->iocsr_io.len, + run->iocsr_io.is_write); + break; + default: + ret = -1; + warn_report("KVM: unknown exit reason %d", run->exit_reason); + break; + } + return ret; +} + +int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level) +{ + struct kvm_interrupt intr; + CPUState *cs = CPU(cpu); + + if (level) { + intr.irq = irq; + } else { + intr.irq = -irq; + } + + trace_kvm_set_intr(irq, level); + return kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr); +} + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ +} diff --git a/target/loongarch/kvm/kvm_loongarch.h b/target/loongarch/kvm/kvm_loongarch.h new file mode 100644 index 00000000000..d945b6bb822 --- /dev/null +++ b/target/loongarch/kvm/kvm_loongarch.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch kvm interface + * + * Copyright (c) 2023 Loongson Technology Corporation Limited + */ + +#include "cpu.h" + +#ifndef QEMU_KVM_LOONGARCH_H +#define QEMU_KVM_LOONGARCH_H + +int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level); +void kvm_arch_reset_vcpu(CPULoongArchState *env); + +#endif diff --git a/target/loongarch/kvm/meson.build b/target/loongarch/kvm/meson.build new file mode 100644 index 00000000000..2266de6ca97 --- /dev/null +++ b/target/loongarch/kvm/meson.build @@ -0,0 +1 @@ +loongarch_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c')) diff --git a/target/loongarch/loongarch-qmp-cmds.c b/target/loongarch/loongarch-qmp-cmds.c index 645672ff593..8721a5eb136 100644 --- a/target/loongarch/loongarch-qmp-cmds.c +++ b/target/loongarch/loongarch-qmp-cmds.c @@ -10,7 +10,6 @@ #include "qapi/error.h" #include "qapi/qapi-commands-machine-target.h" #include "cpu.h" -#include "qapi/qmp/qerror.h" #include "qapi/qmp/qdict.h" #include "qapi/qobject-input-visitor.h" #include "qom/qom-qobject.h" @@ -22,8 +21,7 @@ static void loongarch_cpu_add_definition(gpointer data, gpointer user_data) CpuDefinitionInfo *info = g_new0(CpuDefinitionInfo, 1); const char *typename = object_class_get_name(oc); - info->name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_LOONGARCH_CPU)); + info->name = cpu_model_from_type(typename); info->q_typename = g_strdup(typename); QAPI_LIST_PREPEND(*cpu_list, info); @@ -49,6 +47,8 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, CpuModelInfo *model, Error **errp) { + Visitor *visitor; + bool ok; CpuModelExpansionInfo *expansion_info; QDict *qdict_out; ObjectClass *oc; @@ -61,6 +61,21 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, return NULL; } + if (model->props) { + visitor = qobject_input_visitor_new(model->props); + if (!visit_start_struct(visitor, "model.props", NULL, 0, errp)) { + visit_free(visitor); + return NULL; + } + + ok = visit_check_struct(visitor, errp); + visit_end_struct(visitor, NULL); + visit_free(visitor); + if (!ok) { + return NULL; + } + } + oc = cpu_class_by_name(TYPE_LOONGARCH_CPU, model->name); if (!oc) { error_setg(errp, "The CPU type '%s' is not a recognized LoongArch CPU type", diff --git a/target/loongarch/machine.c b/target/loongarch/machine.c index 1c4e01d0769..c7029fb9b47 100644 --- a/target/loongarch/machine.c +++ b/target/loongarch/machine.c @@ -14,7 +14,7 @@ static const VMStateDescription vmstate_fpu_reg = { .name = "fpu_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
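+        /* Only the low 64 bits (UD(0)) of each vreg are scalar FP state;
+         * the high lanes are covered by the lsx/lasx subsections below. */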
VMSTATE_UINT64(UD(0), VReg), VMSTATE_END_OF_LIST() } @@ -36,7 +36,7 @@ static const VMStateDescription vmstate_fpu = { .version_id = 1, .minimum_version_id = 1, .needed = fpu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FPU_REGS(env.fpr, LoongArchCPU, 0), VMSTATE_UINT32(env.fcsr0, LoongArchCPU), VMSTATE_BOOL_ARRAY(env.cf, LoongArchCPU, 8), @@ -48,7 +48,7 @@ static const VMStateDescription vmstate_lsxh_reg = { .name = "lsxh_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(UD(1), VReg), VMSTATE_END_OF_LIST() } @@ -70,7 +70,7 @@ static const VMStateDescription vmstate_lsx = { .version_id = 1, .minimum_version_id = 1, .needed = lsx_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_LSXH_REGS(env.fpr, LoongArchCPU, 0), VMSTATE_END_OF_LIST() }, @@ -80,7 +80,7 @@ static const VMStateDescription vmstate_lasxh_reg = { .name = "lasxh_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(UD(2), VReg), VMSTATE_UINT64(UD(3), VReg), VMSTATE_END_OF_LIST() @@ -103,7 +103,7 @@ static const VMStateDescription vmstate_lasx = { .version_id = 1, .minimum_version_id = 1, .needed = lasx_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_LASXH_REGS(env.fpr, LoongArchCPU, 0), VMSTATE_END_OF_LIST() }, @@ -114,7 +114,7 @@ const VMStateDescription vmstate_tlb = { .name = "cpu/tlb", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tlb_misc, LoongArchTLB), VMSTATE_UINT64(tlb_entry0, LoongArchTLB), VMSTATE_UINT64(tlb_entry1, LoongArchTLB), @@ -127,7 +127,7 @@ const VMStateDescription vmstate_loongarch_cpu = { .name = "cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gpr, LoongArchCPU, 32), VMSTATE_UINTTL(env.pc, LoongArchCPU), @@ -193,7 +193,7 @@ const VMStateDescription vmstate_loongarch_cpu = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fpu, &vmstate_lsx, &vmstate_lasx, diff --git a/target/loongarch/meson.build b/target/loongarch/meson.build index 18e8191e2b6..e002e9aaf65 100644 --- a/target/loongarch/meson.build +++ b/target/loongarch/meson.build @@ -3,31 +3,20 @@ gen = decodetree.process('insns.decode') loongarch_ss = ss.source_set() loongarch_ss.add(files( 'cpu.c', -)) -loongarch_tcg_ss = ss.source_set() -loongarch_tcg_ss.add(gen) -loongarch_tcg_ss.add(files( - 'fpu_helper.c', - 'op_helper.c', - 'translate.c', 'gdbstub.c', - 'vec_helper.c', )) -loongarch_tcg_ss.add(zlib) loongarch_system_ss = ss.source_set() loongarch_system_ss.add(files( + 'cpu_helper.c', 'loongarch-qmp-cmds.c', 'machine.c', - 'tlb_helper.c', - 'constant_timer.c', - 'csr_helper.c', - 'iocsr_helper.c', )) common_ss.add(when: 'CONFIG_LOONGARCH_DIS', if_true: [files('disas.c'), gen]) -loongarch_ss.add_all(when: 'CONFIG_TCG', if_true: [loongarch_tcg_ss]) +subdir('tcg') target_arch += {'loongarch': loongarch_ss} target_system_arch += {'loongarch': loongarch_system_ss} +subdir('kvm') diff --git a/target/loongarch/constant_timer.c b/target/loongarch/tcg/constant_timer.c similarity index 100% rename from target/loongarch/constant_timer.c rename to target/loongarch/tcg/constant_timer.c diff --git a/target/loongarch/csr_helper.c 
b/target/loongarch/tcg/csr_helper.c similarity index 96% rename from target/loongarch/csr_helper.c rename to target/loongarch/tcg/csr_helper.c index 55341551a5c..15f94caefab 100644 --- a/target/loongarch/csr_helper.c +++ b/target/loongarch/tcg/csr_helper.c @@ -89,9 +89,9 @@ target_ulong helper_csrwr_ticlr(CPULoongArchState *env, target_ulong val) int64_t old_v = 0; if (val & 0x1) { - qemu_mutex_lock_iothread(); + bql_lock(); loongarch_cpu_set_irq(cpu, IRQ_TIMER, 0); - qemu_mutex_unlock_iothread(); + bql_unlock(); } return old_v; } diff --git a/target/loongarch/fpu_helper.c b/target/loongarch/tcg/fpu_helper.c similarity index 100% rename from target/loongarch/fpu_helper.c rename to target/loongarch/tcg/fpu_helper.c diff --git a/target/loongarch/insn_trans/trans_arith.c.inc b/target/loongarch/tcg/insn_trans/trans_arith.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_arith.c.inc rename to target/loongarch/tcg/insn_trans/trans_arith.c.inc diff --git a/target/loongarch/insn_trans/trans_atomic.c.inc b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc similarity index 95% rename from target/loongarch/insn_trans/trans_atomic.c.inc rename to target/loongarch/tcg/insn_trans/trans_atomic.c.inc index 80c2e286fd8..974bc2a70fe 100644 --- a/target/loongarch/insn_trans/trans_atomic.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc @@ -5,14 +5,14 @@ static bool gen_ll(DisasContext *ctx, arg_rr_i *a, MemOp mop) { - TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv t1 = tcg_temp_new(); TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); TCGv t0 = make_address_i(ctx, src1, a->imm); - tcg_gen_qemu_ld_i64(dest, t0, ctx->mem_idx, mop); + tcg_gen_qemu_ld_i64(t1, t0, ctx->mem_idx, mop); tcg_gen_st_tl(t0, tcg_env, offsetof(CPULoongArchState, lladdr)); - tcg_gen_st_tl(dest, tcg_env, offsetof(CPULoongArchState, llval)); - gen_set_gpr(a->rd, dest, EXT_NONE); + tcg_gen_st_tl(t1, tcg_env, offsetof(CPULoongArchState, llval)); + gen_set_gpr(a->rd, t1, EXT_NONE); return true; } diff --git a/target/loongarch/insn_trans/trans_bit.c.inc b/target/loongarch/tcg/insn_trans/trans_bit.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_bit.c.inc rename to target/loongarch/tcg/insn_trans/trans_bit.c.inc diff --git a/target/loongarch/insn_trans/trans_branch.c.inc b/target/loongarch/tcg/insn_trans/trans_branch.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_branch.c.inc rename to target/loongarch/tcg/insn_trans/trans_branch.c.inc diff --git a/target/loongarch/insn_trans/trans_extra.c.inc b/target/loongarch/tcg/insn_trans/trans_extra.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_extra.c.inc rename to target/loongarch/tcg/insn_trans/trans_extra.c.inc diff --git a/target/loongarch/insn_trans/trans_farith.c.inc b/target/loongarch/tcg/insn_trans/trans_farith.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_farith.c.inc rename to target/loongarch/tcg/insn_trans/trans_farith.c.inc diff --git a/target/loongarch/insn_trans/trans_fcmp.c.inc b/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fcmp.c.inc rename to target/loongarch/tcg/insn_trans/trans_fcmp.c.inc diff --git a/target/loongarch/insn_trans/trans_fcnv.c.inc b/target/loongarch/tcg/insn_trans/trans_fcnv.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fcnv.c.inc rename to target/loongarch/tcg/insn_trans/trans_fcnv.c.inc diff --git 
a/target/loongarch/insn_trans/trans_fmemory.c.inc b/target/loongarch/tcg/insn_trans/trans_fmemory.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fmemory.c.inc rename to target/loongarch/tcg/insn_trans/trans_fmemory.c.inc diff --git a/target/loongarch/insn_trans/trans_fmov.c.inc b/target/loongarch/tcg/insn_trans/trans_fmov.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fmov.c.inc rename to target/loongarch/tcg/insn_trans/trans_fmov.c.inc diff --git a/target/loongarch/insn_trans/trans_memory.c.inc b/target/loongarch/tcg/insn_trans/trans_memory.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_memory.c.inc rename to target/loongarch/tcg/insn_trans/trans_memory.c.inc diff --git a/target/loongarch/insn_trans/trans_privileged.c.inc b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc similarity index 99% rename from target/loongarch/insn_trans/trans_privileged.c.inc rename to target/loongarch/tcg/insn_trans/trans_privileged.c.inc index 01d457212b3..7e4ec93edb3 100644 --- a/target/loongarch/insn_trans/trans_privileged.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc @@ -323,7 +323,7 @@ TRANS(iocsrwr_d, IOCSR, gen_iocsrwr, gen_helper_iocsrwr_d) static void check_mmu_idx(DisasContext *ctx) { - if (ctx->mem_idx != MMU_IDX_DA) { + if (ctx->mem_idx != MMU_DA_IDX) { tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; } diff --git a/target/loongarch/insn_trans/trans_shift.c.inc b/target/loongarch/tcg/insn_trans/trans_shift.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_shift.c.inc rename to target/loongarch/tcg/insn_trans/trans_shift.c.inc diff --git a/target/loongarch/insn_trans/trans_vec.c.inc b/target/loongarch/tcg/insn_trans/trans_vec.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_vec.c.inc rename to target/loongarch/tcg/insn_trans/trans_vec.c.inc diff --git a/target/loongarch/iocsr_helper.c b/target/loongarch/tcg/iocsr_helper.c similarity index 76% rename from target/loongarch/iocsr_helper.c rename to target/loongarch/tcg/iocsr_helper.c index 6cd01d5f094..b6916f53d20 100644 --- a/target/loongarch/iocsr_helper.c +++ b/target/loongarch/tcg/iocsr_helper.c @@ -17,52 +17,52 @@ uint64_t helper_iocsrrd_b(CPULoongArchState *env, target_ulong r_addr) { - return address_space_ldub(&env->address_space_iocsr, r_addr, + return address_space_ldub(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } uint64_t helper_iocsrrd_h(CPULoongArchState *env, target_ulong r_addr) { - return address_space_lduw(&env->address_space_iocsr, r_addr, + return address_space_lduw(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } uint64_t helper_iocsrrd_w(CPULoongArchState *env, target_ulong r_addr) { - return address_space_ldl(&env->address_space_iocsr, r_addr, + return address_space_ldl(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } uint64_t helper_iocsrrd_d(CPULoongArchState *env, target_ulong r_addr) { - return address_space_ldq(&env->address_space_iocsr, r_addr, + return address_space_ldq(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_b(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stb(&env->address_space_iocsr, w_addr, + address_space_stb(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_h(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stw(&env->address_space_iocsr, 
w_addr, + address_space_stw(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_w(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stl(&env->address_space_iocsr, w_addr, + address_space_stl(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_d(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stq(&env->address_space_iocsr, w_addr, + address_space_stq(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } diff --git a/target/loongarch/tcg/meson.build b/target/loongarch/tcg/meson.build new file mode 100644 index 00000000000..bdf34f9673b --- /dev/null +++ b/target/loongarch/tcg/meson.build @@ -0,0 +1,19 @@ +if 'CONFIG_TCG' not in config_all_accel + subdir_done() +endif + +loongarch_ss.add([zlib, gen]) + +loongarch_ss.add(files( + 'fpu_helper.c', + 'op_helper.c', + 'translate.c', + 'vec_helper.c', +)) + +loongarch_system_ss.add(files( + 'constant_timer.c', + 'csr_helper.c', + 'iocsr_helper.c', + 'tlb_helper.c', +)) diff --git a/target/loongarch/op_helper.c b/target/loongarch/tcg/op_helper.c similarity index 100% rename from target/loongarch/op_helper.c rename to target/loongarch/tcg/op_helper.c diff --git a/target/loongarch/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c similarity index 67% rename from target/loongarch/tlb_helper.c rename to target/loongarch/tcg/tlb_helper.c index 449043c68be..57f53086324 100644 --- a/target/loongarch/tlb_helper.c +++ b/target/loongarch/tcg/tlb_helper.c @@ -17,234 +17,32 @@ #include "exec/log.h" #include "cpu-csr.h" -enum { - TLBRET_MATCH = 0, - TLBRET_BADADDR = 1, - TLBRET_NOMATCH = 2, - TLBRET_INVALID = 3, - TLBRET_DIRTY = 4, - TLBRET_RI = 5, - TLBRET_XI = 6, - TLBRET_PE = 7, -}; - -static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - int access_type, int index, int mmu_idx) +static void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base, + uint64_t *dir_width, target_ulong level) { - LoongArchTLB *tlb = &env->tlb[index]; - uint64_t plv = mmu_idx; - uint64_t tlb_entry, tlb_ppn; - uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv; - - if (index >= LOONGARCH_STLB) { - tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); - } else { - tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); - } - n = (address >> tlb_ps) & 0x1;/* Odd or even */ - - tlb_entry = n ? 
tlb->tlb_entry1 : tlb->tlb_entry0; - tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V); - tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D); - tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV); - if (is_la64(env)) { - tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN); - tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX); - tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR); - tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV); - } else { - tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN); - tlb_nx = 0; - tlb_nr = 0; - tlb_rplv = 0; - } - - /* Remove sw bit between bit12 -- bit PS*/ - tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) -1)); - - /* Check access rights */ - if (!tlb_v) { - return TLBRET_INVALID; - } - - if (access_type == MMU_INST_FETCH && tlb_nx) { - return TLBRET_XI; - } - - if (access_type == MMU_DATA_LOAD && tlb_nr) { - return TLBRET_RI; - } - - if (((tlb_rplv == 0) && (plv > tlb_plv)) || - ((tlb_rplv == 1) && (plv != tlb_plv))) { - return TLBRET_PE; - } - - if ((access_type == MMU_DATA_STORE) && !tlb_d) { - return TLBRET_DIRTY; - } - - *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) | - (address & MAKE_64BIT_MASK(0, tlb_ps)); - *prot = PAGE_READ; - if (tlb_d) { - *prot |= PAGE_WRITE; - } - if (!tlb_nx) { - *prot |= PAGE_EXEC; - } - return TLBRET_MATCH; -} - -/* - * One tlb entry holds an adjacent odd/even pair, the vpn is the - * content of the virtual page number divided by 2. So the - * compare vpn is bit[47:15] for 16KiB page. while the vppn - * field in tlb entry contains bit[47:13], so need adjust. - * virt_vpn = vaddr[47:13] - */ -static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, - int *index) -{ - LoongArchTLB *tlb; - uint16_t csr_asid, tlb_asid, stlb_idx; - uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps; - int i, compare_shift; - uint64_t vpn, tlb_vppn; - - csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); - stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); - vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1); - stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */ - compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; - - /* Search STLB */ - for (i = 0; i < 8; ++i) { - tlb = &env->tlb[i * 256 + stlb_idx]; - tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); - if (tlb_e) { - tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); - tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); - tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); - - if ((tlb_g == 1 || tlb_asid == csr_asid) && - (vpn == (tlb_vppn >> compare_shift))) { - *index = i * 256 + stlb_idx; - return true; - } - } - } - - /* Search MTLB */ - for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) { - tlb = &env->tlb[i]; - tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); - if (tlb_e) { - tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); - tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); - tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); - tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); - compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; - vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1); - if ((tlb_g == 1 || tlb_asid == csr_asid) && - (vpn == (tlb_vppn >> compare_shift))) { - *index = i; - return true; - } - } - } - return false; -} - -static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx) -{ - int index, match; - - match = loongarch_tlb_search(env, address, &index); - if (match) { - return loongarch_map_tlb_entry(env, physical, prot, - address, access_type, index, mmu_idx); - } - 
- return TLBRET_NOMATCH; -} - -static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va, - target_ulong dmw) -{ - if (is_la64(env)) { - return va & TARGET_VIRT_MASK; - } else { - uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG); - return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) | \ - (pseg << R_CSR_DMW_32_VSEG_SHIFT); - } -} - -static int get_physical_address(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx) -{ - int user_mode = mmu_idx == MMU_IDX_USER; - int kernel_mode = mmu_idx == MMU_IDX_KERNEL; - uint32_t plv, base_c, base_v; - int64_t addr_high; - uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA); - uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG); - - /* Check PG and DA */ - if (da & !pg) { - *physical = address & TARGET_PHYS_MASK; - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - return TLBRET_MATCH; - } - - plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT); - if (is_la64(env)) { - base_v = address >> R_CSR_DMW_64_VSEG_SHIFT; - } else { - base_v = address >> R_CSR_DMW_32_VSEG_SHIFT; - } - /* Check direct map window */ - for (int i = 0; i < 4; i++) { - if (is_la64(env)) { - base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG); - } else { - base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG); - } - if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) { - *physical = dmw_va2pa(env, address, env->CSR_DMW[i]); - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - return TLBRET_MATCH; - } - } - - /* Check valid extension */ - addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16); - if (!(addr_high == 0 || addr_high == -1)) { - return TLBRET_BADADDR; - } - - /* Mapped address */ - return loongarch_map_address(env, physical, prot, address, - access_type, mmu_idx); -} - -hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; - hwaddr phys_addr; - int prot; - - if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD, - cpu_mmu_index(env, false)) != 0) { - return -1; + switch (level) { + case 1: + *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE); + *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH); + break; + case 2: + *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE); + *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH); + break; + case 3: + *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE); + *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH); + break; + case 4: + *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE); + *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH); + break; + default: + /* level may be zero for ldpte */ + *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE); + *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH); + break; } - return phys_addr; } static void raise_mmu_exception(CPULoongArchState *env, target_ulong address, @@ -320,7 +118,7 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index) uint8_t tlb_ps; LoongArchTLB *tlb = &env->tlb[index]; - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = cpu_mmu_index(env_cpu(env), false); uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V); uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V); uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); @@ -679,8 +477,7 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t 
retaddr) { - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; + CPULoongArchState *env = cpu_env(cs); hwaddr physical; int prot; int ret; @@ -716,7 +513,25 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base, target_ulong badvaddr, index, phys, ret; int shift; uint64_t dir_base, dir_width; - bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1; + + if (unlikely((level == 0) || (level > 4))) { + qemu_log_mask(LOG_GUEST_ERROR, + "Attempted LDDIR with level %"PRId64"\n", level); + return base; + } + + if (FIELD_EX64(base, TLBENTRY, HUGE)) { + if (unlikely(level == 4)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Attempted use of level 4 huge page\n"); + } + + if (FIELD_EX64(base, TLBENTRY, LEVEL)) { + return base; + } else { + return FIELD_DP64(base, TLBENTRY, LEVEL, level); + } + } badvaddr = env->CSR_TLBRBADV; base = base & TARGET_PHYS_MASK; @@ -725,30 +540,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base, shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH); shift = (shift + 1) * 3; - if (huge) { - return base; - } - switch (level) { - case 1: - dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE); - dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH); - break; - case 2: - dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE); - dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH); - break; - case 3: - dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE); - dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH); - break; - case 4: - dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE); - dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH); - break; - default: - do_raise_exception(env, EXCCODE_INE, GETPC()); - return 0; - } + get_dir_base_width(env, &dir_base, &dir_width, level); index = (badvaddr >> dir_base) & ((1 << dir_width) - 1); phys = base | index << shift; ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK; @@ -761,20 +553,42 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd, CPUState *cs = env_cpu(env); target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv; int shift; - bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1; uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE); uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH); + uint64_t dir_base, dir_width; + /* + * The parameter "base" has only two types, + * one is the page table base address, + * whose bit 6 should be 0, + * and the other is the huge page entry, + * whose bit 6 should be 1. + */ base = base & TARGET_PHYS_MASK; + if (FIELD_EX64(base, TLBENTRY, HUGE)) { + /* + * Get the huge page level and the huge page size. + * Clear the huge page level information in the entry. + * Clear the huge page bit. + * Move the HGLOBAL bit to the GLOBAL bit. + */ + get_dir_base_width(env, &dir_base, &dir_width, + FIELD_EX64(base, TLBENTRY, LEVEL)); + + base = FIELD_DP64(base, TLBENTRY, LEVEL, 0); + base = FIELD_DP64(base, TLBENTRY, HUGE, 0); + if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) { + base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0); + base = FIELD_DP64(base, TLBENTRY, G, 1); + } - if (huge) { - /* Huge Page. 
base is paddr */ - tmp0 = base ^ (1 << LOONGARCH_PAGE_HUGE_SHIFT); - /* Move Global bit */ - tmp0 = ((tmp0 & (1 << LOONGARCH_HGLOBAL_SHIFT)) >> - LOONGARCH_HGLOBAL_SHIFT) << R_TLBENTRY_G_SHIFT | - (tmp0 & (~(1 << LOONGARCH_HGLOBAL_SHIFT))); - ps = ptbase + ptwidth - 1; + ps = dir_base + dir_width - 1; + /* + * Huge pages are evenly split into parity pages + * when loaded into the tlb, + * so the tlb page size needs to be divided by 2. + */ + tmp0 = base; if (odd) { tmp0 += MAKE_64BIT_MASK(ps, 1); } diff --git a/target/loongarch/translate.c b/target/loongarch/tcg/translate.c similarity index 97% rename from target/loongarch/translate.c rename to target/loongarch/tcg/translate.c index 21f4db6fbd2..75677126555 100644 --- a/target/loongarch/translate.c +++ b/target/loongarch/tcg/translate.c @@ -125,7 +125,7 @@ static void loongarch_tr_init_disas_context(DisasContextBase *dcbase, if (ctx->base.tb->flags & HW_FLAGS_CRMD_PG) { ctx->mem_idx = ctx->plv; } else { - ctx->mem_idx = MMU_IDX_DA; + ctx->mem_idx = MMU_DA_IDX; } /* Bound the number of insns to execute to those left on the page. */ @@ -282,10 +282,9 @@ static uint64_t make_address_pc(DisasContext *ctx, uint64_t addr) static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) { - CPULoongArchState *env = cpu_env(cs); DisasContext *ctx = container_of(dcbase, DisasContext, base); - ctx->opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next); + ctx->opcode = translator_ldl(cpu_env(cs), &ctx->base, ctx->base.pc_next); if (!decode(ctx, ctx->opcode)) { qemu_log_mask(LOG_UNIMP, "Error: unknown opcode. " @@ -343,7 +342,7 @@ static const TranslatorOps loongarch_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/loongarch/vec_helper.c b/target/loongarch/tcg/vec_helper.c similarity index 100% rename from target/loongarch/vec_helper.c rename to target/loongarch/tcg/vec_helper.c diff --git a/target/loongarch/trace-events b/target/loongarch/trace-events new file mode 100644 index 00000000000..dea11edc0f1 --- /dev/null +++ b/target/loongarch/trace-events @@ -0,0 +1,15 @@ +# See docs/devel/tracing.rst for syntax documentation. 
+ +#kvm.c +kvm_failed_get_regs_core(const char *msg) "Failed to get core regs from KVM: %s" +kvm_failed_put_regs_core(const char *msg) "Failed to put core regs into KVM: %s" +kvm_failed_get_fpu(const char *msg) "Failed to get fpu from KVM: %s" +kvm_failed_put_fpu(const char *msg) "Failed to put fpu into KVM: %s" +kvm_failed_get_mpstate(const char *msg) "Failed to get mp_state from KVM: %s" +kvm_failed_put_mpstate(const char *msg) "Failed to put mp_state into KVM: %s" +kvm_failed_get_counter(const char *msg) "Failed to get counter from KVM: %s" +kvm_failed_put_counter(const char *msg) "Failed to put counter into KVM: %s" +kvm_failed_get_cpucfg(const char *msg) "Failed to get cpucfg from KVM: %s" +kvm_failed_put_cpucfg(const char *msg) "Failed to put cpucfg into KVM: %s" +kvm_arch_handle_exit(int num) "kvm arch handle exit, the reason number: %d" +kvm_set_intr(int irq, int level) "kvm set interrupt, irq num: %d, level: %d" diff --git a/target/loongarch/trace.h b/target/loongarch/trace.h new file mode 100644 index 00000000000..c2ecb78f084 --- /dev/null +++ b/target/loongarch/trace.h @@ -0,0 +1 @@ +#include "trace/trace-target_loongarch.h" diff --git a/target/m68k/Kconfig b/target/m68k/Kconfig index 23debad519a..9eae71486ff 100644 --- a/target/m68k/Kconfig +++ b/target/m68k/Kconfig @@ -1,2 +1,3 @@ config M68K bool + select SEMIHOSTING diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c index 11c7e0a7902..df49ff1880c 100644 --- a/target/m68k/cpu.c +++ b/target/m68k/cpu.c @@ -56,6 +56,11 @@ static bool m68k_cpu_has_work(CPUState *cs) return cs->interrupt_request & CPU_INTERRUPT_HARD; } +static int m68k_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return cpu_env(cs)->sr & SR_S ? MMU_KERNEL_IDX : MMU_USER_IDX; +} + static void m68k_set_feature(CPUM68KState *env, int feature) { env->features |= BIT_ULL(feature); @@ -68,10 +73,9 @@ static void m68k_unset_feature(CPUM68KState *env, int feature) static void m68k_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(obj); - M68kCPU *cpu = M68K_CPU(s); - M68kCPUClass *mcc = M68K_CPU_GET_CLASS(cpu); - CPUM68KState *env = &cpu->env; + CPUState *cs = CPU(obj); + M68kCPUClass *mcc = M68K_CPU_GET_CLASS(obj); + CPUM68KState *env = cpu_env(cs); floatx80 nan = floatx80_default_nan(NULL); int i; @@ -111,16 +115,13 @@ static ObjectClass *m68k_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(M68K_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc != NULL && object_class_dynamic_cast(oc, TYPE_M68K_CPU) == NULL) { - return NULL; - } + return oc; } static void m5206_cpu_initfn(Object *obj) { - M68kCPU *cpu = M68K_CPU(obj); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(CPU(obj)); m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); m68k_set_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV); @@ -129,8 +130,7 @@ static void m5206_cpu_initfn(Object *obj) /* Base feature set, including isns. for m68k family */ static void m68000_cpu_initfn(Object *obj) { - M68kCPU *cpu = M68K_CPU(obj); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(CPU(obj)); m68k_set_feature(env, M68K_FEATURE_M68K); m68k_set_feature(env, M68K_FEATURE_USP); @@ -139,12 +139,12 @@ static void m68000_cpu_initfn(Object *obj) } /* - * Adds BKPT, MOVE-from-SR *now priv instr, and MOVEC, MOVES, RTD + * Adds BKPT, MOVE-from-SR *now priv instr, and MOVEC, MOVES, RTD, + * format+vector in exception frame. 
*/ static void m68010_cpu_initfn(Object *obj) { - M68kCPU *cpu = M68K_CPU(obj); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(CPU(obj)); m68000_cpu_initfn(obj); m68k_set_feature(env, M68K_FEATURE_M68010); @@ -152,6 +152,7 @@ static void m68010_cpu_initfn(Object *obj) m68k_set_feature(env, M68K_FEATURE_BKPT); m68k_set_feature(env, M68K_FEATURE_MOVEC); m68k_set_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV); + m68k_set_feature(env, M68K_FEATURE_EXCEPTION_FORMAT_VEC); } /* @@ -163,8 +164,7 @@ static void m68010_cpu_initfn(Object *obj) */ static void m68020_cpu_initfn(Object *obj) { - M68kCPU *cpu = M68K_CPU(obj); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(CPU(obj)); m68010_cpu_initfn(obj); m68k_unset_feature(env, M68K_FEATURE_M68010); @@ -194,8 +194,7 @@ static void m68020_cpu_initfn(Object *obj) */ static void m68030_cpu_initfn(Object *obj) { - M68kCPU *cpu = M68K_CPU(obj); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(CPU(obj)); m68020_cpu_initfn(obj); m68k_unset_feature(env, M68K_FEATURE_M68020); @@ -221,8 +220,7 @@ static void m68030_cpu_initfn(Object *obj) */ static void m68040_cpu_initfn(Object *obj) { - M68kCPU *cpu = M68K_CPU(obj); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(CPU(obj)); m68030_cpu_initfn(obj); m68k_unset_feature(env, M68K_FEATURE_M68030); @@ -242,8 +240,7 @@ static void m68040_cpu_initfn(Object *obj) */ static void m68060_cpu_initfn(Object *obj) { - M68kCPU *cpu = M68K_CPU(obj); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(CPU(obj)); m68040_cpu_initfn(obj); m68k_unset_feature(env, M68K_FEATURE_M68040); @@ -256,8 +253,7 @@ static void m68060_cpu_initfn(Object *obj) static void m5208_cpu_initfn(Object *obj) { - M68kCPU *cpu = M68K_CPU(obj); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(CPU(obj)); m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC); @@ -269,8 +265,7 @@ static void m5208_cpu_initfn(Object *obj) static void cfv4e_cpu_initfn(Object *obj) { - M68kCPU *cpu = M68K_CPU(obj); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(CPU(obj)); m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); m68k_set_feature(env, M68K_FEATURE_CF_ISA_B); @@ -283,8 +278,7 @@ static void cfv4e_cpu_initfn(Object *obj) static void any_cpu_initfn(Object *obj) { - M68kCPU *cpu = M68K_CPU(obj); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(CPU(obj)); m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); m68k_set_feature(env, M68K_FEATURE_CF_ISA_B); @@ -381,7 +375,7 @@ static const VMStateDescription vmstate_freg_tmp = { .name = "freg_tmp", .post_load = freg_post_load, .pre_save = freg_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tmp_mant, m68k_FPReg_tmp), VMSTATE_UINT16(tmp_exp, m68k_FPReg_tmp), VMSTATE_END_OF_LIST() @@ -390,18 +384,25 @@ static const VMStateDescription vmstate_freg_tmp = { static const VMStateDescription vmstate_freg = { .name = "freg", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP(FPReg, m68k_FPReg_tmp, vmstate_freg_tmp), VMSTATE_END_OF_LIST() } }; -static int fpu_post_load(void *opaque, int version) +static int fpu_pre_save(void *opaque) { M68kCPU *s = opaque; - cpu_m68k_restore_fp_status(&s->env); + s->env.fpsr = cpu_m68k_get_fpsr(&s->env); + return 0; +} + +static int fpu_post_load(void *opaque, int version) +{ + M68kCPU *s = opaque; + cpu_m68k_set_fpsr(&s->env, s->env.fpsr); return 0; } @@ -410,8 +411,9 @@ const 
VMStateDescription vmmstate_fpu = { .version_id = 1, .minimum_version_id = 1, .needed = fpu_needed, + .pre_save = fpu_pre_save, .post_load = fpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.fpcr, M68kCPU), VMSTATE_UINT32(env.fpsr, M68kCPU), VMSTATE_STRUCT_ARRAY(env.fregs, M68kCPU, 8, 0, vmstate_freg, FPReg), @@ -432,7 +434,7 @@ const VMStateDescription vmstate_cf_spregs = { .version_id = 1, .minimum_version_id = 1, .needed = cf_spregs_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.macc, M68kCPU, 4), VMSTATE_UINT32(env.macsr, M68kCPU), VMSTATE_UINT32(env.mac_mask, M68kCPU), @@ -454,7 +456,7 @@ const VMStateDescription vmstate_68040_mmu = { .version_id = 1, .minimum_version_id = 1, .needed = cpu_68040_mmu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.mmu.ar, M68kCPU), VMSTATE_UINT32(env.mmu.ssw, M68kCPU), VMSTATE_UINT16(env.mmu.tcr, M68kCPU), @@ -479,7 +481,7 @@ const VMStateDescription vmstate_68040_spregs = { .version_id = 1, .minimum_version_id = 1, .needed = cpu_68040_spregs_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.vbr, M68kCPU), VMSTATE_UINT32(env.cacr, M68kCPU), VMSTATE_UINT32(env.sfc, M68kCPU), @@ -492,7 +494,7 @@ static const VMStateDescription vmstate_m68k_cpu = { .name = "cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(env.dregs, M68kCPU, 8), VMSTATE_UINT32_ARRAY(env.aregs, M68kCPU, 8), VMSTATE_UINT32(env.pc, M68kCPU), @@ -509,7 +511,7 @@ static const VMStateDescription vmstate_m68k_cpu = { VMSTATE_INT32(env.pending_level, M68kCPU), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmmstate_fpu, &vmstate_cf_spregs, &vmstate_68040_mmu, @@ -527,7 +529,7 @@ static const struct SysemuCPUOps m68k_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps m68k_tcg_ops = { +static const TCGCPUOps m68k_tcg_ops = { .initialize = m68k_tcg_init, .restore_state_to_opc = m68k_restore_state_to_opc, @@ -553,6 +555,7 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data) cc->class_by_name = m68k_cpu_class_by_name; cc->has_work = m68k_cpu_has_work; + cc->mmu_index = m68k_cpu_mmu_index; cc->dump_state = m68k_cpu_dump_state; cc->set_pc = m68k_cpu_set_pc; cc->get_pc = m68k_cpu_get_pc; @@ -564,7 +567,6 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data) #endif cc->disas_set_info = m68k_cpu_disas_set_info; - cc->gdb_num_core_regs = 18; cc->tcg_ops = &m68k_tcg_ops; } diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h index 6cfc696d2ba..e184239a810 100644 --- a/target/m68k/cpu.h +++ b/target/m68k/cpu.h @@ -199,7 +199,8 @@ void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t); void cpu_m68k_set_sr(CPUM68KState *env, uint32_t); void cpu_m68k_restore_fp_status(CPUM68KState *env); void cpu_m68k_set_fpcr(CPUM68KState *env, uint32_t val); - +uint32_t cpu_m68k_get_fpsr(CPUM68KState *env); +void cpu_m68k_set_fpsr(CPUM68KState *env, uint32_t val); /* * Instead of computing the condition codes after each m68k instruction, @@ -478,10 +479,11 @@ void do_m68k_semihosting(CPUM68KState *env, int nr); * The 68000 family is defined in six main CPU classes, the 680[012346]0. * Generally each successive CPU adds enhanced data/stack/instructions. * However, some features are only common to one, or a few classes. 
- * The features covers those subsets of instructons. + * The features cover those subsets of instructions. * - * CPU32/32+ are basically 680010 compatible with some 68020 class instructons, - * and some additional CPU32 instructions. Mostly Supervisor state differences. + * CPU32/32+ are basically 680010 compatible with some 68020 class + * instructions, and some additional CPU32 instructions. Mostly Supervisor + * state differences. * * The ColdFire core ISA is a RISC-style reduction of the 68000 series cpu. * There are 4 ColdFire core ISA revisions: A, A+, B and C. @@ -549,6 +551,8 @@ enum m68k_features { M68K_FEATURE_TRAPCC, /* MOVE from SR privileged (from 68010) */ M68K_FEATURE_MOVEFROMSR_PRIV, + /* Exception frame with format+vector (from 68010) */ + M68K_FEATURE_EXCEPTION_FORMAT_VEC, }; static inline bool m68k_feature(CPUM68KState *env, int feature) @@ -556,8 +560,6 @@ static inline bool m68k_feature(CPUM68KState *env, int feature) return (env->features & BIT_ULL(feature)) != 0; } -void m68k_cpu_list(void); - void register_m68k_insns (CPUM68KState *env); enum { @@ -576,15 +578,9 @@ enum { #define CPU_RESOLVING_TYPE TYPE_M68K_CPU -#define cpu_list m68k_cpu_list - /* MMU modes definitions */ #define MMU_KERNEL_IDX 0 #define MMU_USER_IDX 1 -static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch) -{ - return (env->sr & SR_S) == 0 ? 1 : 0; -} bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, diff --git a/target/m68k/fpu_helper.c b/target/m68k/fpu_helper.c index ab120b5f59b..8314791f504 100644 --- a/target/m68k/fpu_helper.c +++ b/target/m68k/fpu_helper.c @@ -164,6 +164,78 @@ void HELPER(set_fpcr)(CPUM68KState *env, uint32_t val) cpu_m68k_set_fpcr(env, val); } +/* Convert host exception flags to cpu_m68k form. */ +static int cpu_m68k_exceptbits_from_host(int host_bits) +{ + int target_bits = 0; + + if (host_bits & float_flag_invalid) { + target_bits |= 0x80; + } + if (host_bits & float_flag_overflow) { + target_bits |= 0x40; + } + if (host_bits & (float_flag_underflow | float_flag_output_denormal)) { + target_bits |= 0x20; + } + if (host_bits & float_flag_divbyzero) { + target_bits |= 0x10; + } + if (host_bits & float_flag_inexact) { + target_bits |= 0x08; + } + return target_bits; +} + +/* Convert cpu_m68k exception flags to host form. 
*/ +static int cpu_m68k_exceptbits_to_host(int target_bits) +{ + int host_bits = 0; + + if (target_bits & 0x80) { + host_bits |= float_flag_invalid; + } + if (target_bits & 0x40) { + host_bits |= float_flag_overflow; + } + if (target_bits & 0x20) { + host_bits |= float_flag_underflow; + } + if (target_bits & 0x10) { + host_bits |= float_flag_divbyzero; + } + if (target_bits & 0x08) { + host_bits |= float_flag_inexact; + } + return host_bits; +} + +uint32_t cpu_m68k_get_fpsr(CPUM68KState *env) +{ + int host_flags = get_float_exception_flags(&env->fp_status); + int target_flags = cpu_m68k_exceptbits_from_host(host_flags); + int except = (env->fpsr & ~(0xf8)) | target_flags; + return except; +} + +uint32_t HELPER(get_fpsr)(CPUM68KState *env) +{ + return cpu_m68k_get_fpsr(env); +} + +void cpu_m68k_set_fpsr(CPUM68KState *env, uint32_t val) +{ + env->fpsr = val; + + int host_flags = cpu_m68k_exceptbits_to_host((int) env->fpsr); + set_float_exception_flags(host_flags, &env->fp_status); +} + +void HELPER(set_fpsr)(CPUM68KState *env, uint32_t val) +{ + cpu_m68k_set_fpsr(env, val); +} + #define PREC_BEGIN(prec) \ do { \ FloatX80RoundPrec old = \ diff --git a/target/m68k/gdbstub.c b/target/m68k/gdbstub.c index 1e5f033a12b..15547e2313c 100644 --- a/target/m68k/gdbstub.c +++ b/target/m68k/gdbstub.c @@ -23,8 +23,7 @@ int m68k_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - M68kCPU *cpu = M68K_CPU(cs); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(cs); if (n < 8) { /* D0-D7 */ @@ -50,8 +49,7 @@ int m68k_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int m68k_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - M68kCPU *cpu = M68K_CPU(cs); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(cs); uint32_t tmp; tmp = ldl_p(mem_buf); diff --git a/target/m68k/helper.c b/target/m68k/helper.c index 0a1544cd68d..7a91f33b172 100644 --- a/target/m68k/helper.c +++ b/target/m68k/helper.c @@ -29,48 +29,11 @@ #define SIGNBIT (1u << 31) -/* Sort alphabetically, except for "any". 
*/ -static gint m68k_cpu_list_compare(gconstpointer a, gconstpointer b) +static int cf_fpu_gdb_get_reg(CPUState *cs, GByteArray *mem_buf, int n) { - ObjectClass *class_a = (ObjectClass *)a; - ObjectClass *class_b = (ObjectClass *)b; - const char *name_a, *name_b; - - name_a = object_class_get_name(class_a); - name_b = object_class_get_name(class_b); - if (strcmp(name_a, "any-" TYPE_M68K_CPU) == 0) { - return 1; - } else if (strcmp(name_b, "any-" TYPE_M68K_CPU) == 0) { - return -1; - } else { - return strcasecmp(name_a, name_b); - } -} - -static void m68k_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *c = data; - const char *typename; - char *name; - - typename = object_class_get_name(c); - name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_M68K_CPU)); - qemu_printf("%s\n", name); - g_free(name); -} - -void m68k_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list(TYPE_M68K_CPU, false); - list = g_slist_sort(list, m68k_cpu_list_compare); - g_slist_foreach(list, m68k_cpu_list_entry, NULL); - g_slist_free(list); -} + M68kCPU *cpu = M68K_CPU(cs); + CPUM68KState *env = &cpu->env; -static int cf_fpu_gdb_get_reg(CPUM68KState *env, GByteArray *mem_buf, int n) -{ if (n < 8) { float_status s; return gdb_get_reg64(mem_buf, floatx80_to_float64(env->fregs[n].d, &s)); @@ -86,8 +49,11 @@ static int cf_fpu_gdb_get_reg(CPUM68KState *env, GByteArray *mem_buf, int n) return 0; } -static int cf_fpu_gdb_set_reg(CPUM68KState *env, uint8_t *mem_buf, int n) +static int cf_fpu_gdb_set_reg(CPUState *cs, uint8_t *mem_buf, int n) { + M68kCPU *cpu = M68K_CPU(cs); + CPUM68KState *env = &cpu->env; + if (n < 8) { float_status s; env->fregs[n].d = float64_to_floatx80(ldq_p(mem_buf), &s); @@ -106,8 +72,11 @@ static int cf_fpu_gdb_set_reg(CPUM68KState *env, uint8_t *mem_buf, int n) return 0; } -static int m68k_fpu_gdb_get_reg(CPUM68KState *env, GByteArray *mem_buf, int n) +static int m68k_fpu_gdb_get_reg(CPUState *cs, GByteArray *mem_buf, int n) { + M68kCPU *cpu = M68K_CPU(cs); + CPUM68KState *env = &cpu->env; + if (n < 8) { int len = gdb_get_reg16(mem_buf, env->fregs[n].l.upper); len += gdb_get_reg16(mem_buf, 0); @@ -118,15 +87,18 @@ static int m68k_fpu_gdb_get_reg(CPUM68KState *env, GByteArray *mem_buf, int n) case 8: /* fpcontrol */ return gdb_get_reg32(mem_buf, env->fpcr); case 9: /* fpstatus */ - return gdb_get_reg32(mem_buf, env->fpsr); + return gdb_get_reg32(mem_buf, cpu_m68k_get_fpsr(env)); case 10: /* fpiar, not implemented */ return gdb_get_reg32(mem_buf, 0); } return 0; } -static int m68k_fpu_gdb_set_reg(CPUM68KState *env, uint8_t *mem_buf, int n) +static int m68k_fpu_gdb_set_reg(CPUState *cs, uint8_t *mem_buf, int n) { + M68kCPU *cpu = M68K_CPU(cs); + CPUM68KState *env = &cpu->env; + if (n < 8) { env->fregs[n].l.upper = lduw_be_p(mem_buf); env->fregs[n].l.lower = ldq_be_p(mem_buf + 4); @@ -137,7 +109,7 @@ static int m68k_fpu_gdb_set_reg(CPUM68KState *env, uint8_t *mem_buf, int n) cpu_m68k_set_fpcr(env, ldl_p(mem_buf)); return 4; case 9: /* fpstatus */ - env->fpsr = ldl_p(mem_buf); + cpu_m68k_set_fpsr(env, ldl_p(mem_buf)); return 4; case 10: /* fpiar, not implemented */ return 4; @@ -152,10 +124,10 @@ void m68k_cpu_init_gdb(M68kCPU *cpu) if (m68k_feature(env, M68K_FEATURE_CF_FPU)) { gdb_register_coprocessor(cs, cf_fpu_gdb_get_reg, cf_fpu_gdb_set_reg, - 11, "cf-fp.xml", 18); + gdb_find_static_feature("cf-fp.xml"), 18); } else if (m68k_feature(env, M68K_FEATURE_FPU)) { - gdb_register_coprocessor(cs, m68k_fpu_gdb_get_reg, - m68k_fpu_gdb_set_reg, 11, "m68k-fp.xml", 18); + 
gdb_register_coprocessor(cs, m68k_fpu_gdb_get_reg, m68k_fpu_gdb_set_reg, + gdb_find_static_feature("m68k-fp.xml"), 18); } /* TODO: Add [E]MAC registers. */ } @@ -934,8 +906,7 @@ static int get_physical_address(CPUM68KState *env, hwaddr *physical, hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { - M68kCPU *cpu = M68K_CPU(cs); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(cs); hwaddr phys_addr; int prot; int access_type; @@ -983,8 +954,7 @@ bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType qemu_access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - M68kCPU *cpu = M68K_CPU(cs); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(cs); hwaddr physical; int prot; int access_type; @@ -1012,7 +982,7 @@ bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size, access_type |= ACCESS_SUPER; } - ret = get_physical_address(&cpu->env, &physical, &prot, + ret = get_physical_address(env, &physical, &prot, address, access_type, &page_size); if (likely(ret == 0)) { tlb_set_page(cs, address & TARGET_PAGE_MASK, diff --git a/target/m68k/helper.h b/target/m68k/helper.h index 2bbe0dc0325..95aa5e53bb9 100644 --- a/target/m68k/helper.h +++ b/target/m68k/helper.h @@ -54,6 +54,8 @@ DEF_HELPER_4(fsdiv, void, env, fp, fp, fp) DEF_HELPER_4(fddiv, void, env, fp, fp, fp) DEF_HELPER_4(fsgldiv, void, env, fp, fp, fp) DEF_HELPER_FLAGS_3(fcmp, TCG_CALL_NO_RWG, void, env, fp, fp) +DEF_HELPER_2(set_fpsr, void, env, i32) +DEF_HELPER_1(get_fpsr, i32, env) DEF_HELPER_FLAGS_2(set_fpcr, TCG_CALL_NO_RWG, void, env, i32) DEF_HELPER_FLAGS_2(ftst, TCG_CALL_NO_RWG, void, env, fp) DEF_HELPER_3(fconst, void, env, fp, i32) diff --git a/target/m68k/m68k-semi.c b/target/m68k/m68k-semi.c index b4ffb70f8b7..546cff22469 100644 --- a/target/m68k/m68k-semi.c +++ b/target/m68k/m68k-semi.c @@ -77,8 +77,7 @@ static int host_to_gdb_errno(int err) static void m68k_semi_u32_cb(CPUState *cs, uint64_t ret, int err) { - M68kCPU *cpu = M68K_CPU(cs); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(cs); target_ulong args = env->dregs[1]; if (put_user_u32(ret, args) || @@ -95,8 +94,7 @@ static void m68k_semi_u32_cb(CPUState *cs, uint64_t ret, int err) static void m68k_semi_u64_cb(CPUState *cs, uint64_t ret, int err) { - M68kCPU *cpu = M68K_CPU(cs); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(cs); target_ulong args = env->dregs[1]; if (put_user_u32(ret >> 32, args) || diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c index 1ce850bbc59..125f6c1b082 100644 --- a/target/m68k/op_helper.c +++ b/target/m68k/op_helper.c @@ -52,7 +52,7 @@ static void m68k_rte(CPUM68KState *env) sp += 2; env->pc = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0); sp += 4; - if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) { + if (m68k_feature(env, M68K_FEATURE_EXCEPTION_FORMAT_VEC)) { /* all except 68000 */ fmt = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0); sp += 2; @@ -256,7 +256,7 @@ static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp, uint16_t format, uint16_t sr, uint32_t addr, uint32_t retaddr) { - if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) { + if (m68k_feature(env, M68K_FEATURE_EXCEPTION_FORMAT_VEC)) { /* all except 68000 */ CPUState *cs = env_cpu(env); switch (format) { @@ -441,10 +441,7 @@ static void do_interrupt_all(CPUM68KState *env, int is_hw) void m68k_cpu_do_interrupt(CPUState *cs) { - M68kCPU *cpu = M68K_CPU(cs); - CPUM68KState *env = &cpu->env; - - do_interrupt_all(env, 0); + do_interrupt_all(cpu_env(cs), 0); } static inline 
void do_interrupt_m68k_hardirq(CPUM68KState *env) @@ -457,8 +454,7 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr) { - M68kCPU *cpu = M68K_CPU(cs); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(cs); cpu_restore_state(cs, retaddr); @@ -511,8 +507,7 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - M68kCPU *cpu = M68K_CPU(cs); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(cs); if (interrupt_request & CPU_INTERRUPT_HARD && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) { @@ -811,7 +806,7 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2, uint32_t l1, l2; uintptr_t ra = GETPC(); #if defined(CONFIG_ATOMIC64) - int mmu_idx = cpu_mmu_index(env, 0); + int mmu_idx = cpu_mmu_index(env_cpu(env), 0); MemOpIdx oi = make_memop_idx(MO_BEUQ, mmu_idx); #endif diff --git a/target/m68k/translate.c b/target/m68k/translate.c index 4a0b0b27036..8a194f2f213 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -1457,7 +1457,7 @@ DISAS_INSN(undef) * for the 680x0 series, as well as those that are implemented * but actually illegal for CPU32 or pre-68020. */ - qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n", + qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %" VADDR_PRIx "\n", insn, s->base.pc_next); gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); } @@ -4686,7 +4686,7 @@ static void gen_load_fcr(DisasContext *s, TCGv res, int reg) tcg_gen_movi_i32(res, 0); break; case M68K_FPSR: - tcg_gen_ld_i32(res, tcg_env, offsetof(CPUM68KState, fpsr)); + gen_helper_get_fpsr(res, tcg_env); break; case M68K_FPCR: tcg_gen_ld_i32(res, tcg_env, offsetof(CPUM68KState, fpcr)); @@ -4700,7 +4700,7 @@ static void gen_store_fcr(DisasContext *s, TCGv val, int reg) case M68K_FPIAR: break; case M68K_FPSR: - tcg_gen_st_i32(val, tcg_env, offsetof(CPUM68KState, fpsr)); + gen_helper_set_fpsr(tcg_env, val); break; case M68K_FPCR: gen_helper_set_fpcr(tcg_env, val); @@ -5129,46 +5129,44 @@ DISAS_INSN(fpu) static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) { TCGv fpsr; + int imm = 0; - c->v2 = tcg_constant_i32(0); /* TODO: Raise BSUN exception. 
*/ fpsr = tcg_temp_new(); gen_load_fcr(s, fpsr, M68K_FPSR); + c->v1 = fpsr; + switch (cond) { case 0: /* False */ case 16: /* Signaling False */ - c->v1 = c->v2; c->tcond = TCG_COND_NEVER; break; case 1: /* EQual Z */ case 17: /* Signaling EQual Z */ - c->v1 = tcg_temp_new(); - tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); - c->tcond = TCG_COND_NE; + imm = FPSR_CC_Z; + c->tcond = TCG_COND_TSTNE; break; case 2: /* Ordered Greater Than !(A || Z || N) */ case 18: /* Greater Than !(A || Z || N) */ - c->v1 = tcg_temp_new(); - tcg_gen_andi_i32(c->v1, fpsr, - FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); - c->tcond = TCG_COND_EQ; + imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N; + c->tcond = TCG_COND_TSTEQ; break; case 3: /* Ordered Greater than or Equal Z || !(A || N) */ case 19: /* Greater than or Equal Z || !(A || N) */ c->v1 = tcg_temp_new(); tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); - tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N); tcg_gen_or_i32(c->v1, c->v1, fpsr); tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N); - c->tcond = TCG_COND_NE; + imm = FPSR_CC_Z | FPSR_CC_N; + c->tcond = TCG_COND_TSTNE; break; case 4: /* Ordered Less Than !(!N || A || Z); */ case 20: /* Less Than !(!N || A || Z); */ c->v1 = tcg_temp_new(); tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N); - tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z); - c->tcond = TCG_COND_EQ; + imm = FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z; + c->tcond = TCG_COND_TSTEQ; break; case 5: /* Ordered Less than or Equal Z || (N && !A) */ case 21: /* Less than or Equal Z || (N && !A) */ @@ -5176,49 +5174,45 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); tcg_gen_andc_i32(c->v1, fpsr, c->v1); - tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N); - c->tcond = TCG_COND_NE; + imm = FPSR_CC_Z | FPSR_CC_N; + c->tcond = TCG_COND_TSTNE; break; case 6: /* Ordered Greater or Less than !(A || Z) */ case 22: /* Greater or Less than !(A || Z) */ - c->v1 = tcg_temp_new(); - tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); - c->tcond = TCG_COND_EQ; + imm = FPSR_CC_A | FPSR_CC_Z; + c->tcond = TCG_COND_TSTEQ; break; case 7: /* Ordered !A */ case 23: /* Greater, Less or Equal !A */ - c->v1 = tcg_temp_new(); - tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); - c->tcond = TCG_COND_EQ; + imm = FPSR_CC_A; + c->tcond = TCG_COND_TSTEQ; break; case 8: /* Unordered A */ case 24: /* Not Greater, Less or Equal A */ - c->v1 = tcg_temp_new(); - tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); - c->tcond = TCG_COND_NE; + imm = FPSR_CC_A; + c->tcond = TCG_COND_TSTNE; break; case 9: /* Unordered or Equal A || Z */ case 25: /* Not Greater or Less then A || Z */ - c->v1 = tcg_temp_new(); - tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); - c->tcond = TCG_COND_NE; + imm = FPSR_CC_A | FPSR_CC_Z; + c->tcond = TCG_COND_TSTNE; break; case 10: /* Unordered or Greater Than A || !(N || Z)) */ case 26: /* Not Less or Equal A || !(N || Z)) */ c->v1 = tcg_temp_new(); tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z)); - tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N); tcg_gen_or_i32(c->v1, c->v1, fpsr); tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N); - c->tcond = TCG_COND_NE; + imm = FPSR_CC_A | FPSR_CC_N; + c->tcond = TCG_COND_TSTNE; break; case 11: /* Unordered or Greater or Equal A || Z || !N */ case 27: /* Not Less Than A || Z || !N */ c->v1 = 
tcg_temp_new(); - tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); - tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N); - c->tcond = TCG_COND_NE; + tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N); + imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N; + c->tcond = TCG_COND_TSTNE; break; case 12: /* Unordered or Less Than A || (N && !Z) */ case 28: /* Not Greater than or Equal A || (N && !Z) */ @@ -5226,27 +5220,25 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z)); tcg_gen_andc_i32(c->v1, fpsr, c->v1); - tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N); - c->tcond = TCG_COND_NE; + imm = FPSR_CC_A | FPSR_CC_N; + c->tcond = TCG_COND_TSTNE; break; case 13: /* Unordered or Less or Equal A || Z || N */ case 29: /* Not Greater Than A || Z || N */ - c->v1 = tcg_temp_new(); - tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); - c->tcond = TCG_COND_NE; + imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N; + c->tcond = TCG_COND_TSTNE; break; case 14: /* Not Equal !Z */ case 30: /* Signaling Not Equal !Z */ - c->v1 = tcg_temp_new(); - tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); - c->tcond = TCG_COND_EQ; + imm = FPSR_CC_Z; + c->tcond = TCG_COND_TSTEQ; break; case 15: /* True */ case 31: /* Signaling True */ - c->v1 = c->v2; c->tcond = TCG_COND_ALWAYS; break; } + c->v2 = tcg_constant_i32(imm); } static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1) @@ -6088,7 +6080,7 @@ static const TranslatorOps m68k_tr_ops = { }; void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc; translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base); @@ -6108,8 +6100,7 @@ static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low) void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - M68kCPU *cpu = M68K_CPU(cs); - CPUM68KState *env = &cpu->env; + CPUM68KState *env = cpu_env(cs); int i; uint16_t sr; for (i = 0; i < 8; i++) { diff --git a/target/meson.build b/target/meson.build index a53a60486fc..dee2ac47e02 100644 --- a/target/meson.build +++ b/target/meson.build @@ -19,3 +19,5 @@ subdir('sh4') subdir('sparc') subdir('tricore') subdir('xtensa') + +specific_ss.add(files('target-common.c')) diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c index bbb3335cadd..96c2b71f7f7 100644 --- a/target/microblaze/cpu.c +++ b/target/microblaze/cpu.c @@ -118,6 +118,22 @@ static bool mb_cpu_has_work(CPUState *cs) return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); } +static int mb_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + CPUMBState *env = cpu_env(cs); + MicroBlazeCPU *cpu = env_archcpu(env); + + /* Are we in nommu mode?. 
*/ + if (!(env->msr & MSR_VM) || !cpu->cfg.use_mmu) { + return MMU_NOMMU_IDX; + } + + if (env->msr & MSR_UM) { + return MMU_USER_IDX; + } + return MMU_KERNEL_IDX; +} + #ifndef CONFIG_USER_ONLY static void mb_cpu_ns_axi_dp(void *opaque, int irq, int level) { @@ -167,9 +183,9 @@ static void microblaze_cpu_set_irq(void *opaque, int irq, int level) static void mb_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(obj); - MicroBlazeCPU *cpu = MICROBLAZE_CPU(s); - MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_GET_CLASS(cpu); + CPUState *cs = CPU(obj); + MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); + MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_GET_CLASS(obj); CPUMBState *env = &cpu->env; if (mcc->parent_phases.hold) { @@ -297,8 +313,9 @@ static void mb_cpu_initfn(Object *obj) CPUMBState *env = &cpu->env; gdb_register_coprocessor(CPU(cpu), mb_cpu_gdb_read_stack_protect, - mb_cpu_gdb_write_stack_protect, 2, - "microblaze-stack-protect.xml", 0); + mb_cpu_gdb_write_stack_protect, + gdb_find_static_feature("microblaze-stack-protect.xml"), + 0); set_float_rounding_mode(float_round_nearest_even, &env->fp_status); @@ -387,7 +404,7 @@ static const struct SysemuCPUOps mb_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps mb_tcg_ops = { +static const TCGCPUOps mb_tcg_ops = { .initialize = mb_tcg_init, .synchronize_from_tb = mb_cpu_synchronize_from_tb, .restore_state_to_opc = mb_restore_state_to_opc, @@ -415,7 +432,7 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = mb_cpu_class_by_name; cc->has_work = mb_cpu_has_work; - + cc->mmu_index = mb_cpu_mmu_index; cc->dump_state = mb_cpu_dump_state; cc->set_pc = mb_cpu_set_pc; cc->get_pc = mb_cpu_get_pc; @@ -427,7 +444,6 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data) cc->sysemu_ops = &mb_sysemu_ops; #endif device_class_set_props(dc, mb_properties); - cc->gdb_num_core_regs = 32 + 25; cc->gdb_core_xml_file = "microblaze-core.xml"; cc->disas_set_info = mb_disas_set_info; diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h index b5374365f5f..c0c7574dbd5 100644 --- a/target/microblaze/cpu.h +++ b/target/microblaze/cpu.h @@ -381,8 +381,8 @@ G_NORETURN void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, void mb_cpu_dump_state(CPUState *cpu, FILE *f, int flags); int mb_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int mb_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -int mb_cpu_gdb_read_stack_protect(CPUArchState *cpu, GByteArray *buf, int reg); -int mb_cpu_gdb_write_stack_protect(CPUArchState *cpu, uint8_t *buf, int reg); +int mb_cpu_gdb_read_stack_protect(CPUState *cs, GByteArray *buf, int reg); +int mb_cpu_gdb_write_stack_protect(CPUState *cs, uint8_t *buf, int reg); static inline uint32_t mb_cpu_read_msr(const CPUMBState *env) { @@ -434,21 +434,6 @@ void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, MemTxResult response, uintptr_t retaddr); #endif -static inline int cpu_mmu_index(CPUMBState *env, bool ifetch) -{ - MicroBlazeCPU *cpu = env_archcpu(env); - - /* Are we in nommu mode?. 
*/ - if (!(env->msr & MSR_VM) || !cpu->cfg.use_mmu) { - return MMU_NOMMU_IDX; - } - - if (env->msr & MSR_UM) { - return MMU_USER_IDX; - } - return MMU_KERNEL_IDX; -} - #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_mb_cpu; #endif diff --git a/target/microblaze/gdbstub.c b/target/microblaze/gdbstub.c index 29ac6e9c0f7..09d74e164d0 100644 --- a/target/microblaze/gdbstub.c +++ b/target/microblaze/gdbstub.c @@ -49,14 +49,9 @@ enum { int mb_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); - CPUClass *cc = CPU_GET_CLASS(cs); CPUMBState *env = &cpu->env; uint32_t val; - if (n > cc->gdb_num_core_regs) { - return 0; - } - switch (n) { case 1 ... 31: val = env->regs[n]; @@ -94,8 +89,10 @@ int mb_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) return gdb_get_reg32(mem_buf, val); } -int mb_cpu_gdb_read_stack_protect(CPUMBState *env, GByteArray *mem_buf, int n) +int mb_cpu_gdb_read_stack_protect(CPUState *cs, GByteArray *mem_buf, int n) { + MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); + CPUMBState *env = &cpu->env; uint32_t val; switch (n) { @@ -113,9 +110,8 @@ int mb_cpu_gdb_read_stack_protect(CPUMBState *env, GByteArray *mem_buf, int n) int mb_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); CPUClass *cc = CPU_GET_CLASS(cs); - CPUMBState *env = &cpu->env; + CPUMBState *env = cpu_env(cs); uint32_t tmp; if (n > cc->gdb_num_core_regs) { @@ -153,8 +149,11 @@ int mb_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return 4; } -int mb_cpu_gdb_write_stack_protect(CPUMBState *env, uint8_t *mem_buf, int n) +int mb_cpu_gdb_write_stack_protect(CPUState *cs, uint8_t *mem_buf, int n) { + MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); + CPUMBState *env = &cpu->env; + switch (n) { case GDB_SP_SHL: env->slr = ldl_p(mem_buf); diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c index 98bdb82de87..d25c9eb4d3e 100644 --- a/target/microblaze/helper.c +++ b/target/microblaze/helper.c @@ -228,10 +228,9 @@ hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, MemTxAttrs *attrs) { MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); - CPUMBState *env = &cpu->env; target_ulong vaddr, paddr = 0; MicroBlazeMMULookup lu; - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = cpu_mmu_index(cs, false); unsigned int hit; /* Caller doesn't initialize */ @@ -253,8 +252,7 @@ hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); - CPUMBState *env = &cpu->env; + CPUMBState *env = cpu_env(cs); if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->msr & MSR_IE) diff --git a/target/microblaze/machine.c b/target/microblaze/machine.c index d24def3992e..51705e4f5c9 100644 --- a/target/microblaze/machine.c +++ b/target/microblaze/machine.c @@ -22,7 +22,7 @@ #include "migration/cpu.h" -static VMStateField vmstate_mmu_fields[] = { +static const VMStateField vmstate_mmu_fields[] = { VMSTATE_UINT64_2DARRAY(rams, MicroBlazeMMU, 2, TLB_ENTRIES), VMSTATE_UINT8_ARRAY(tids, MicroBlazeMMU, TLB_ENTRIES), VMSTATE_UINT32_ARRAY(regs, MicroBlazeMMU, 3), @@ -60,7 +60,7 @@ static const VMStateInfo vmstate_msr = { .put = put_msr, }; -static VMStateField vmstate_env_fields[] = { +static const VMStateField vmstate_env_fields[] = { VMSTATE_UINT32_ARRAY(regs, CPUMBState, 32), VMSTATE_UINT32(pc, CPUMBState), @@ -92,7 +92,7 @@ static const VMStateDescription vmstate_env = { 
.fields = vmstate_env_fields, }; -static VMStateField vmstate_cpu_fields[] = { +static const VMStateField vmstate_cpu_fields[] = { VMSTATE_CPU(), VMSTATE_STRUCT(env, MicroBlazeCPU, 1, vmstate_env, CPUMBState), VMSTATE_END_OF_LIST() diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c index 75651979a99..234006634e4 100644 --- a/target/microblaze/mmu.c +++ b/target/microblaze/mmu.c @@ -305,7 +305,7 @@ void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v) } hit = mmu_translate(cpu, &lu, v & TLB_EPN_MASK, - 0, cpu_mmu_index(env, false)); + 0, cpu_mmu_index(env_cpu(env), false)); if (hit) { env->mmu.regs[MMU_R_TLBX] = lu.idx; } else { diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index 49bfb4a0eaa..fc451befae6 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -62,9 +62,6 @@ typedef struct DisasContext { DisasContextBase base; const MicroBlazeCPUConfig *cfg; - /* TCG op of the current insn_start. */ - TCGOp *insn_start; - TCGv_i32 r0; bool r0_set; @@ -699,14 +696,14 @@ static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb) static void record_unaligned_ess(DisasContext *dc, int rd, MemOp size, bool store) { - uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1); + uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1); iflags |= ESR_ESS_FLAG; iflags |= rd << 5; iflags |= store * ESR_S; iflags |= (size == MO_32) * ESR_W; - tcg_set_insn_start_param(dc->insn_start, 1, iflags); + tcg_set_insn_start_param(dc->base.insn_start, 1, iflags); } #endif @@ -1607,7 +1604,7 @@ static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs) dc->ext_imm = dc->base.tb->cs_base; dc->r0 = NULL; dc->r0_set = false; - dc->mem_index = cpu_mmu_index(&cpu->env, false); + dc->mem_index = cpu_mmu_index(cs, false); dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER; dc->jmp_dest = -1; @@ -1624,13 +1621,11 @@ static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs) DisasContext *dc = container_of(dcb, DisasContext, base); tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK); - dc->insn_start = tcg_last_op(); } static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs) { DisasContext *dc = container_of(dcb, DisasContext, base); - CPUMBState *env = cpu_env(cs); uint32_t ir; /* TODO: This should raise an exception, not terminate qemu. 
*/ @@ -1641,7 +1636,7 @@ static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs) dc->tb_flags_to_set = 0; - ir = cpu_ldl_code(env, dc->base.pc_next); + ir = cpu_ldl_code(cpu_env(cs), dc->base.pc_next); if (!decode(dc, ir)) { trap_illegal(dc, true); } @@ -1792,7 +1787,7 @@ static const TranslatorOps mb_tr_ops = { }; void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc; translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base); @@ -1800,8 +1795,7 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); - CPUMBState *env = &cpu->env; + CPUMBState *env = cpu_env(cs); uint32_t iflags; int i; diff --git a/target/mips/Kconfig b/target/mips/Kconfig index 6adf1453548..eb19c94c7d4 100644 --- a/target/mips/Kconfig +++ b/target/mips/Kconfig @@ -1,5 +1,6 @@ config MIPS bool + select SEMIHOSTING config MIPS64 bool diff --git a/target/mips/cpu-defs.c.inc b/target/mips/cpu-defs.c.inc index c0c389c59a1..fbf787d8ce1 100644 --- a/target/mips/cpu-defs.c.inc +++ b/target/mips/cpu-defs.c.inc @@ -1018,15 +1018,6 @@ const mips_def_t mips_defs[] = }; const int mips_defs_number = ARRAY_SIZE(mips_defs); -void mips_cpu_list(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(mips_defs); i++) { - qemu_printf("MIPS '%s'\n", mips_defs[i].name); - } -} - static void fpu_init (CPUMIPSState *env, const mips_def_t *def) { int i; diff --git a/target/mips/cpu.c b/target/mips/cpu.c index bda31751cd7..f4b54ed3059 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -89,8 +89,7 @@ static void fpu_dump_state(CPUMIPSState *env, FILE *f, int flags) static void mips_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); int i; qemu_fprintf(f, "pc=0x" TARGET_FMT_lx " HI=0x" TARGET_FMT_lx @@ -132,9 +131,7 @@ void cpu_set_exception_base(int vp_index, target_ulong address) static void mips_cpu_set_pc(CPUState *cs, vaddr value) { - MIPSCPU *cpu = MIPS_CPU(cs); - - mips_env_set_pc(&cpu->env, value); + mips_env_set_pc(cpu_env(cs), value); } static vaddr mips_cpu_get_pc(CPUState *cs) @@ -146,8 +143,7 @@ static vaddr mips_cpu_get_pc(CPUState *cs) static bool mips_cpu_has_work(CPUState *cs) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); bool has_work = false; /* @@ -191,13 +187,18 @@ static bool mips_cpu_has_work(CPUState *cs) return has_work; } +static int mips_cpu_mmu_index(CPUState *cs, bool ifunc) +{ + return mips_env_mmu_index(cpu_env(cs)); +} + #include "cpu-defs.c.inc" static void mips_cpu_reset_hold(Object *obj) { CPUState *cs = CPU(obj); MIPSCPU *cpu = MIPS_CPU(cs); - MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(cpu); + MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(obj); CPUMIPSState *env = &cpu->env; if (mcc->parent_phases.hold) { @@ -437,10 +438,7 @@ static void mips_cpu_reset_hold(Object *obj) static void mips_cpu_disas_set_info(CPUState *s, disassemble_info *info) { - MIPSCPU *cpu = MIPS_CPU(s); - CPUMIPSState *env = &cpu->env; - - if (!(env->insn_flags & ISA_NANOMIPS32)) { + if (!(cpu_env(s)->insn_flags & ISA_NANOMIPS32)) { #if TARGET_BIG_ENDIAN info->print_insn = print_insn_big_mips; #else @@ -560,7 +558,7 @@ static const struct SysemuCPUOps mips_sysemu_ops = { * NB: cannot be const, as some elements are changed for specific * mips 
hardware (see hw/mips/jazz.c). */ -static const struct TCGCPUOps mips_tcg_ops = { +static const TCGCPUOps mips_tcg_ops = { .initialize = mips_tcg_init, .synchronize_from_tb = mips_cpu_synchronize_from_tb, .restore_state_to_opc = mips_restore_state_to_opc, @@ -590,6 +588,7 @@ static void mips_cpu_class_init(ObjectClass *c, void *data) cc->class_by_name = mips_cpu_class_by_name; cc->has_work = mips_cpu_has_work; + cc->mmu_index = mips_cpu_mmu_index; cc->dump_state = mips_cpu_dump_state; cc->set_pc = mips_cpu_set_pc; cc->get_pc = mips_cpu_get_pc; diff --git a/target/mips/cpu.h b/target/mips/cpu.h index 98407d97348..41a09a67bbc 100644 --- a/target/mips/cpu.h +++ b/target/mips/cpu.h @@ -747,9 +747,7 @@ typedef struct CPUArchState { * CP0 Register 9 */ int32_t CP0_Count; - uint32_t CP0_SAARI; #define CP0SAARI_TARGET 0 /* 5..0 */ - uint64_t CP0_SAAR[2]; #define CP0SAAR_BASE 12 /* 43..12 */ #define CP0SAAR_SIZE 1 /* 5..1 */ #define CP0SAAR_EN 0 @@ -1174,7 +1172,6 @@ typedef struct CPUArchState { uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */ uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */ uint64_t insn_flags; /* Supported instruction set */ - int saarp; /* Fields up to this point are cleared by a CPU reset */ struct {} end_reset_fields; @@ -1183,8 +1180,7 @@ typedef struct CPUArchState { CPUMIPSMVPContext *mvp; #if !defined(CONFIG_USER_ONLY) CPUMIPSTLBContext *tlb; - void *irq[8]; - struct MIPSITUState *itu; + qemu_irq irq[8]; MemoryRegion *itc_tag; /* ITC Configuration Tags */ /* Loongson IOCSR memory */ @@ -1238,10 +1234,6 @@ struct MIPSCPUClass { bool no_data_aborts; }; -void mips_cpu_list(void); - -#define cpu_list mips_cpu_list - void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env); uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env); @@ -1249,18 +1241,20 @@ uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env); * MMU modes definitions. We carefully match the indices with our * hflags layout. 
*/ +#define MMU_KERNEL_IDX 0 #define MMU_USER_IDX 2 +#define MMU_ERL_IDX 3 static inline int hflags_mmu_index(uint32_t hflags) { if (hflags & MIPS_HFLAG_ERL) { - return 3; /* ERL */ + return MMU_ERL_IDX; } else { return hflags & MIPS_HFLAG_KSU; } } -static inline int cpu_mmu_index(CPUMIPSState *env, bool ifetch) +static inline int mips_env_mmu_index(CPUMIPSState *env) { return hflags_mmu_index(env->hflags); } diff --git a/target/mips/gdbstub.c b/target/mips/gdbstub.c index 62d7b72407e..169d47416a6 100644 --- a/target/mips/gdbstub.c +++ b/target/mips/gdbstub.c @@ -25,8 +25,7 @@ int mips_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); if (n < 32) { return gdb_get_regl(mem_buf, env->active_tc.gpr[n]); @@ -78,8 +77,7 @@ int mips_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int mips_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); target_ulong tmp; tmp = ldtul_p(mem_buf); diff --git a/target/mips/internal.h b/target/mips/internal.h index 1d0c026c7d0..a9a22ea00ec 100644 --- a/target/mips/internal.h +++ b/target/mips/internal.h @@ -83,7 +83,6 @@ struct mips_def_t { uint32_t lcsr_cpucfg2; uint64_t insn_flags; enum mips_mmu_types mmu_type; - int32_t SAARP; }; extern const char regnames[32][3]; diff --git a/target/mips/kvm.c b/target/mips/kvm.c index e22e24ed974..6c52e59f55d 100644 --- a/target/mips/kvm.c +++ b/target/mips/kvm.c @@ -63,8 +63,7 @@ int kvm_arch_irqchip_create(KVMState *s) int kvm_arch_init_vcpu(CPUState *cs) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); int ret = 0; qemu_add_vm_change_state_handler(kvm_mips_update_state, cs); @@ -138,7 +137,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) int r; struct kvm_mips_interrupt intr; - qemu_mutex_lock_iothread(); + bql_lock(); if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && cpu_mips_io_interrupts_pending(cpu)) { @@ -151,7 +150,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) @@ -460,8 +459,7 @@ static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id, */ static int kvm_mips_save_count(CPUState *cs) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); uint64_t count_ctl; int err, ret = 0; @@ -502,8 +500,7 @@ static int kvm_mips_save_count(CPUState *cs) */ static int kvm_mips_restore_count(CPUState *cs) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); uint64_t count_ctl; int err_dc, err, ret = 0; @@ -590,8 +587,7 @@ static void kvm_mips_update_state(void *opaque, bool running, RunState state) static int kvm_mips_put_fpu_registers(CPUState *cs, int level) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); int err, ret = 0; unsigned int i; @@ -670,8 +666,7 @@ static int kvm_mips_put_fpu_registers(CPUState *cs, int level) static int kvm_mips_get_fpu_registers(CPUState *cs) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); int err, ret = 0; unsigned int i; @@ -751,8 +746,7 @@ static int kvm_mips_get_fpu_registers(CPUState *cs) static int kvm_mips_put_cp0_registers(CPUState *cs, int level) { 
- MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); int err, ret = 0; (void)level; @@ -974,8 +968,7 @@ static int kvm_mips_put_cp0_registers(CPUState *cs, int level) static int kvm_mips_get_cp0_registers(CPUState *cs) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); int err, ret = 0; err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index); @@ -1181,8 +1174,7 @@ static int kvm_mips_get_cp0_registers(CPUState *cs) int kvm_arch_put_registers(CPUState *cs, int level) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); struct kvm_regs regs; int ret; int i; @@ -1217,8 +1209,7 @@ int kvm_arch_put_registers(CPUState *cs, int level) int kvm_arch_get_registers(CPUState *cs) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); int ret = 0; struct kvm_regs regs; int i; diff --git a/target/mips/meson.build b/target/mips/meson.build index e57ef24ecf4..a26d1e1f792 100644 --- a/target/mips/meson.build +++ b/target/mips/meson.build @@ -12,7 +12,7 @@ if have_system subdir('sysemu') endif -if 'CONFIG_TCG' in config_all +if 'CONFIG_TCG' in config_all_accel subdir('tcg') endif diff --git a/target/mips/sysemu/machine.c b/target/mips/sysemu/machine.c index 80d37f9c2fc..213fd637fcb 100644 --- a/target/mips/sysemu/machine.c +++ b/target/mips/sysemu/machine.c @@ -44,7 +44,7 @@ static int put_fpr(QEMUFile *f, void *pv, size_t size, return 0; } -const VMStateInfo vmstate_info_fpr = { +static const VMStateInfo vmstate_info_fpr = { .name = "fpr", .get = get_fpr, .put = put_fpr, @@ -56,21 +56,21 @@ const VMStateInfo vmstate_info_fpr = { #define VMSTATE_FPR_ARRAY(_f, _s, _n) \ VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0) -static VMStateField vmstate_fpu_fields[] = { +static const VMStateField vmstate_fpu_fields[] = { VMSTATE_FPR_ARRAY(fpr, CPUMIPSFPUContext, 32), VMSTATE_UINT32(fcr0, CPUMIPSFPUContext), VMSTATE_UINT32(fcr31, CPUMIPSFPUContext), VMSTATE_END_OF_LIST() }; -const VMStateDescription vmstate_fpu = { +static const VMStateDescription vmstate_fpu = { .name = "cpu/fpu", .version_id = 1, .minimum_version_id = 1, .fields = vmstate_fpu_fields }; -const VMStateDescription vmstate_inactive_fpu = { +static const VMStateDescription vmstate_inactive_fpu = { .name = "cpu/inactive_fpu", .version_id = 1, .minimum_version_id = 1, @@ -79,7 +79,7 @@ const VMStateDescription vmstate_inactive_fpu = { /* TC state */ -static VMStateField vmstate_tc_fields[] = { +static const VMStateField vmstate_tc_fields[] = { VMSTATE_UINTTL_ARRAY(gpr, TCState, 32), #if defined(TARGET_MIPS64) VMSTATE_UINT64_ARRAY(gpr_hi, TCState, 32), @@ -103,14 +103,14 @@ static VMStateField vmstate_tc_fields[] = { VMSTATE_END_OF_LIST() }; -const VMStateDescription vmstate_tc = { +static const VMStateDescription vmstate_tc = { .name = "cpu/tc", .version_id = 2, .minimum_version_id = 2, .fields = vmstate_tc_fields }; -const VMStateDescription vmstate_inactive_tc = { +static const VMStateDescription vmstate_inactive_tc = { .name = "cpu/inactive_tc", .version_id = 2, .minimum_version_id = 2, @@ -119,11 +119,11 @@ const VMStateDescription vmstate_inactive_tc = { /* MVP state */ -const VMStateDescription vmstate_mvp = { +static const VMStateDescription vmstate_mvp = { .name = "cpu/mvp", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(CP0_MVPControl, CPUMIPSMVPContext), 
VMSTATE_INT32(CP0_MVPConf0, CPUMIPSMVPContext), VMSTATE_INT32(CP0_MVPConf1, CPUMIPSMVPContext), @@ -190,7 +190,7 @@ static int put_tlb(QEMUFile *f, void *pv, size_t size, return 0; } -const VMStateInfo vmstate_info_tlb = { +static const VMStateInfo vmstate_info_tlb = { .name = "tlb_entry", .get = get_tlb, .put = put_tlb, @@ -202,11 +202,11 @@ const VMStateInfo vmstate_info_tlb = { #define VMSTATE_TLB_ARRAY(_f, _s, _n) \ VMSTATE_TLB_ARRAY_V(_f, _s, _n, 0) -const VMStateDescription vmstate_tlb = { +static const VMStateDescription vmstate_tlb = { .name = "cpu/tlb", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(nb_tlb, CPUMIPSTLBContext), VMSTATE_UINT32(tlb_in_use, CPUMIPSTLBContext), VMSTATE_TLB_ARRAY(mmu.r4k.tlb, CPUMIPSTLBContext, MIPS_TLB_MAX), @@ -221,7 +221,7 @@ const VMStateDescription vmstate_mips_cpu = { .version_id = 21, .minimum_version_id = 21, .post_load = cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Active TC */ VMSTATE_STRUCT(env.active_tc, MIPSCPU, 1, vmstate_tc, TCState), @@ -281,8 +281,8 @@ const VMStateDescription vmstate_mips_cpu = { VMSTATE_UINT32(env.CP0_BadInstrP, MIPSCPU), VMSTATE_UINT32(env.CP0_BadInstrX, MIPSCPU), VMSTATE_INT32(env.CP0_Count, MIPSCPU), - VMSTATE_UINT32(env.CP0_SAARI, MIPSCPU), - VMSTATE_UINT64_ARRAY(env.CP0_SAAR, MIPSCPU, 2), + VMSTATE_UNUSED(sizeof(uint32_t)), /* was CP0_SAARI */ + VMSTATE_UNUSED(2 * sizeof(uint64_t)), /* was CP0_SAAR[2] */ VMSTATE_UINTTL(env.CP0_EntryHi, MIPSCPU), VMSTATE_INT32(env.CP0_Compare, MIPSCPU), VMSTATE_INT32(env.CP0_Status, MIPSCPU), diff --git a/target/mips/sysemu/mips-qmp-cmds.c b/target/mips/sysemu/mips-qmp-cmds.c index 6db4626412c..7340ac70ba0 100644 --- a/target/mips/sysemu/mips-qmp-cmds.c +++ b/target/mips/sysemu/mips-qmp-cmds.c @@ -19,8 +19,7 @@ static void mips_cpu_add_definition(gpointer data, gpointer user_data) typename = object_class_get_name(oc); info = g_malloc0(sizeof(*info)); - info->name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_MIPS_CPU)); + info->name = cpu_model_from_type(typename); info->q_typename = g_strdup(typename); QAPI_LIST_PREPEND(*cpu_list, info); diff --git a/target/mips/sysemu/physaddr.c b/target/mips/sysemu/physaddr.c index 05990aa5bb3..5c5184e136f 100644 --- a/target/mips/sysemu/physaddr.c +++ b/target/mips/sysemu/physaddr.c @@ -230,13 +230,12 @@ int get_physical_address(CPUMIPSState *env, hwaddr *physical, hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); hwaddr phys_addr; int prot; if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD, - cpu_mmu_index(env, false)) != 0) { + mips_env_mmu_index(env)) != 0) { return -1; } return phys_addr; diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c index da49a93912a..13275d1ded8 100644 --- a/target/mips/tcg/exception.c +++ b/target/mips/tcg/exception.c @@ -79,8 +79,7 @@ void helper_wait(CPUMIPSState *env) void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); env->active_tc.PC = tb->pc; diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c index 7a8dbada5d9..d2181763e72 100644 --- a/target/mips/tcg/msa_helper.c +++ b/target/mips/tcg/msa_helper.c @@ -8214,7 +8214,7 @@ void 
helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, #if !defined(CONFIG_USER_ONLY) #define MEMOP_IDX(DF) \ MemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \ - cpu_mmu_index(env, false)); + mips_env_mmu_index(env)); #else #define MEMOP_IDX(DF) #endif @@ -8323,7 +8323,7 @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = mips_env_mmu_index(env); uintptr_t ra = GETPC(); ensure_writable_pages(env, addr, mmu_idx, ra); @@ -8337,7 +8337,7 @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = mips_env_mmu_index(env); uintptr_t ra = GETPC(); uint64_t d0, d1; @@ -8358,7 +8358,7 @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = mips_env_mmu_index(env); uintptr_t ra = GETPC(); uint64_t d0, d1; @@ -8379,7 +8379,7 @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = mips_env_mmu_index(env); uintptr_t ra = GETPC(); ensure_writable_pages(env, addr, mmu_idx, GETPC()); diff --git a/target/mips/tcg/op_helper.c b/target/mips/tcg/op_helper.c index 98935b5e641..65403f1a87b 100644 --- a/target/mips/tcg/op_helper.c +++ b/target/mips/tcg/op_helper.c @@ -279,8 +279,7 @@ void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); int error_code = 0; int excp; @@ -306,9 +305,8 @@ void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr) { - MIPSCPU *cpu = MIPS_CPU(cs); - MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(cpu); - CPUMIPSState *env = &cpu->env; + MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(cs); + CPUMIPSState *env = cpu_env(cs); if (access_type == MMU_INST_FETCH) { do_raise_exception(env, EXCP_IBE, retaddr); diff --git a/target/mips/tcg/sysemu/cp0_helper.c b/target/mips/tcg/sysemu/cp0_helper.c index d3495487431..ded6c78e9a3 100644 --- a/target/mips/tcg/sysemu/cp0_helper.c +++ b/target/mips/tcg/sysemu/cp0_helper.c @@ -59,9 +59,9 @@ static inline void mips_vpe_wake(MIPSCPU *c) * because there might be other conditions that state that c should * be sleeping. 
*/ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE); - qemu_mutex_unlock_iothread(); + bql_unlock(); } static inline void mips_vpe_sleep(MIPSCPU *cpu) @@ -371,22 +371,6 @@ target_ulong helper_mfc0_count(CPUMIPSState *env) return (int32_t)cpu_mips_get_count(env); } -target_ulong helper_mfc0_saar(CPUMIPSState *env) -{ - if ((env->CP0_SAARI & 0x3f) < 2) { - return (int32_t) env->CP0_SAAR[env->CP0_SAARI & 0x3f]; - } - return 0; -} - -target_ulong helper_mfhc0_saar(CPUMIPSState *env) -{ - if ((env->CP0_SAARI & 0x3f) < 2) { - return env->CP0_SAAR[env->CP0_SAARI & 0x3f] >> 32; - } - return 0; -} - target_ulong helper_mftc0_entryhi(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); @@ -514,13 +498,6 @@ target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel) return env->CP0_WatchHi[sel]; } -target_ulong helper_dmfc0_saar(CPUMIPSState *env) -{ - if ((env->CP0_SAARI & 0x3f) < 2) { - return env->CP0_SAAR[env->CP0_SAARI & 0x3f]; - } - return 0; -} #endif /* TARGET_MIPS64 */ void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1) @@ -1100,46 +1077,6 @@ void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1) cpu_mips_store_count(env, arg1); } -void helper_mtc0_saari(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t target = arg1 & 0x3f; - if (target <= 1) { - env->CP0_SAARI = target; - } -} - -void helper_mtc0_saar(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t target = env->CP0_SAARI & 0x3f; - if (target < 2) { - env->CP0_SAAR[target] = arg1 & 0x00000ffffffff03fULL; - switch (target) { - case 0: - if (env->itu) { - itc_reconfigure(env->itu); - } - break; - } - } -} - -void helper_mthc0_saar(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t target = env->CP0_SAARI & 0x3f; - if (target < 2) { - env->CP0_SAAR[target] = - (((uint64_t) arg1 << 32) & 0x00000fff00000000ULL) | - (env->CP0_SAAR[target] & 0x00000000ffffffffULL); - switch (target) { - case 0: - if (env->itu) { - itc_reconfigure(env->itu); - } - break; - } - } -} - void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1) { target_ulong old, val, mask; @@ -1202,7 +1139,7 @@ void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1) old, old & env->CP0_Cause & CP0Ca_IP_mask, val, val & env->CP0_Cause & CP0Ca_IP_mask, env->CP0_Cause); - switch (cpu_mmu_index(env, false)) { + switch (mips_env_mmu_index(env)) { case 3: qemu_log(", ERL\n"); break; diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/sysemu/special_helper.c index 93276f789d7..5baa25348e1 100644 --- a/target/mips/tcg/sysemu/special_helper.c +++ b/target/mips/tcg/sysemu/special_helper.c @@ -68,7 +68,7 @@ static void debug_post_eret(CPUMIPSState *env) if (env->hflags & MIPS_HFLAG_DM) { qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC); } - switch (cpu_mmu_index(env, false)) { + switch (mips_env_mmu_index(env)) { case 3: qemu_log(", ERL\n"); break; @@ -90,8 +90,7 @@ static void debug_post_eret(CPUMIPSState *env) bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && !(cs->tcg_cflags & CF_PCREL) && env->active_tc.PC != tb->pc) { diff --git a/target/mips/tcg/sysemu/tlb_helper.c b/target/mips/tcg/sysemu/tlb_helper.c index 4ede9048003..119eae771e6 100644 --- a/target/mips/tcg/sysemu/tlb_helper.c +++ b/target/mips/tcg/sysemu/tlb_helper.c @@ -623,7 +623,7 @@ static uint64_t get_tlb_entry_layout(CPUMIPSState 
*env, uint64_t entry, static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, int directory_index, bool *huge_page, bool *hgpg_directory_hit, uint64_t *pw_entrylo0, uint64_t *pw_entrylo1, - unsigned directory_shift, unsigned leaf_shift) + unsigned directory_shift, unsigned leaf_shift, int ptw_mmu_idx) { int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1; int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F; @@ -638,8 +638,7 @@ static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, uint64_t w = 0; if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD, - cpu_mmu_index(env, false)) != - TLBRET_MATCH) { + ptw_mmu_idx) != TLBRET_MATCH) { /* wrong base address */ return 0; } @@ -666,8 +665,7 @@ static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, *pw_entrylo0 = entry; } if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD, - cpu_mmu_index(env, false)) != - TLBRET_MATCH) { + ptw_mmu_idx) != TLBRET_MATCH) { return 0; } if (!get_pte(env, vaddr2, leafentry_size, &entry)) { @@ -690,7 +688,7 @@ static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, } static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, - int mmu_idx) + int ptw_mmu_idx) { int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F; int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F; @@ -776,7 +774,7 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, vaddr |= goffset; switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit, &pw_entrylo0, &pw_entrylo1, - directory_shift, leaf_shift)) + directory_shift, leaf_shift, ptw_mmu_idx)) { case 0: return false; @@ -793,7 +791,7 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, vaddr |= uoffset; switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit, &pw_entrylo0, &pw_entrylo1, - directory_shift, leaf_shift)) + directory_shift, leaf_shift, ptw_mmu_idx)) { case 0: return false; @@ -810,7 +808,7 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, vaddr |= moffset; switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit, &pw_entrylo0, &pw_entrylo1, - directory_shift, leaf_shift)) + directory_shift, leaf_shift, ptw_mmu_idx)) { case 0: return false; @@ -825,8 +823,7 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, /* Leaf Level Page Table - First half of PTE pair */ vaddr |= ptoffset0; if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD, - cpu_mmu_index(env, false)) != - TLBRET_MATCH) { + ptw_mmu_idx) != TLBRET_MATCH) { return false; } if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) { @@ -838,8 +835,7 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, /* Leaf Level Page Table - Second half of PTE pair */ vaddr |= ptoffset1; if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD, - cpu_mmu_index(env, false)) != - TLBRET_MATCH) { + ptw_mmu_idx) != TLBRET_MATCH) { return false; } if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) { @@ -910,8 +906,7 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); hwaddr physical; int prot; int ret = TLBRET_BADADDR; @@ -944,12 +939,10 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, * Memory reads during hardware page table walking are performed * as if they were kernel-mode load instructions. 
*/ - int mode = (env->hflags & MIPS_HFLAG_KSU); - bool ret_walker; - env->hflags &= ~MIPS_HFLAG_KSU; - ret_walker = page_table_walk_refill(env, address, mmu_idx); - env->hflags |= mode; - if (ret_walker) { + int ptw_mmu_idx = (env->hflags & MIPS_HFLAG_ERL ? + MMU_ERL_IDX : MMU_KERNEL_IDX); + + if (page_table_walk_refill(env, address, ptw_mmu_idx)) { ret = get_physical_address(env, &physical, &prot, address, access_type, mmu_idx); if (ret == TLBRET_MATCH) { @@ -979,7 +972,7 @@ hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, /* data access */ ret = get_physical_address(env, &physical, &prot, address, access_type, - cpu_mmu_index(env, false)); + mips_env_mmu_index(env)); if (ret == TLBRET_MATCH) { return physical; } @@ -1346,8 +1339,7 @@ void mips_cpu_do_interrupt(CPUState *cs) bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { if (interrupt_request & CPU_INTERRUPT_HARD) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); if (cpu_mips_hw_interrupts_enabled(env) && cpu_mips_hw_interrupts_pending(env)) { diff --git a/target/mips/tcg/sysemu_helper.h.inc b/target/mips/tcg/sysemu_helper.h.inc index f163af1eac7..1861d538de1 100644 --- a/target/mips/tcg/sysemu_helper.h.inc +++ b/target/mips/tcg/sysemu_helper.h.inc @@ -31,8 +31,6 @@ DEF_HELPER_1(mftc0_tcschedule, tl, env) DEF_HELPER_1(mfc0_tcschefback, tl, env) DEF_HELPER_1(mftc0_tcschefback, tl, env) DEF_HELPER_1(mfc0_count, tl, env) -DEF_HELPER_1(mfc0_saar, tl, env) -DEF_HELPER_1(mfhc0_saar, tl, env) DEF_HELPER_1(mftc0_entryhi, tl, env) DEF_HELPER_1(mftc0_status, tl, env) DEF_HELPER_1(mftc0_cause, tl, env) @@ -57,7 +55,6 @@ DEF_HELPER_1(dmfc0_lladdr, tl, env) DEF_HELPER_1(dmfc0_maar, tl, env) DEF_HELPER_2(dmfc0_watchlo, tl, env, i32) DEF_HELPER_2(dmfc0_watchhi, tl, env, i32) -DEF_HELPER_1(dmfc0_saar, tl, env) #endif /* TARGET_MIPS64 */ DEF_HELPER_2(mtc0_index, void, env, tl) @@ -103,9 +100,6 @@ DEF_HELPER_2(mtc0_srsconf4, void, env, tl) DEF_HELPER_2(mtc0_hwrena, void, env, tl) DEF_HELPER_2(mtc0_pwctl, void, env, tl) DEF_HELPER_2(mtc0_count, void, env, tl) -DEF_HELPER_2(mtc0_saari, void, env, tl) -DEF_HELPER_2(mtc0_saar, void, env, tl) -DEF_HELPER_2(mthc0_saar, void, env, tl) DEF_HELPER_2(mtc0_entryhi, void, env, tl) DEF_HELPER_2(mttc0_entryhi, void, env, tl) DEF_HELPER_2(mtc0_compare, void, env, tl) diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c index 13e43fa3b6a..06c108cc9c3 100644 --- a/target/mips/tcg/translate.c +++ b/target/mips/tcg/translate.c @@ -4585,8 +4585,8 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, if (ctx->hflags & MIPS_HFLAG_BMASK) { #ifdef MIPS_DEBUG_DISAS - LOG_DISAS("Branch in delay / forbidden slot at PC 0x" - TARGET_FMT_lx "\n", ctx->base.pc_next); + LOG_DISAS("Branch in delay / forbidden slot at PC 0x%016" + VADDR_PRIx "\n", ctx->base.pc_next); #endif gen_reserved_instruction(ctx); goto out; @@ -5151,17 +5151,6 @@ static void gen_mfhc0(DisasContext *ctx, TCGv arg, int reg, int sel) goto cp0_unimplemented; } break; - case CP0_REGISTER_09: - switch (sel) { - case CP0_REG09__SAAR: - CP0_CHECK(ctx->saar); - gen_helper_mfhc0_saar(arg, tcg_env); - register_name = "SAAR"; - break; - default: - goto cp0_unimplemented; - } - break; case CP0_REGISTER_17: switch (sel) { case CP0_REG17__LLADDR: @@ -5252,17 +5241,6 @@ static void gen_mthc0(DisasContext *ctx, TCGv arg, int reg, int sel) goto cp0_unimplemented; } break; - case CP0_REGISTER_09: - switch (sel) { - case CP0_REG09__SAAR: - 
CP0_CHECK(ctx->saar); - gen_helper_mthc0_saar(tcg_env, arg); - register_name = "SAAR"; - break; - default: - goto cp0_unimplemented; - } - break; case CP0_REGISTER_17: switch (sel) { case CP0_REG17__LLADDR: @@ -5675,16 +5653,6 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) ctx->base.is_jmp = DISAS_EXIT; register_name = "Count"; break; - case CP0_REG09__SAARI: - CP0_CHECK(ctx->saar); - gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SAARI)); - register_name = "SAARI"; - break; - case CP0_REG09__SAAR: - CP0_CHECK(ctx->saar); - gen_helper_mfc0_saar(arg, tcg_env); - register_name = "SAAR"; - break; default: goto cp0_unimplemented; } @@ -6401,16 +6369,6 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_helper_mtc0_count(tcg_env, arg); register_name = "Count"; break; - case CP0_REG09__SAARI: - CP0_CHECK(ctx->saar); - gen_helper_mtc0_saari(tcg_env, arg); - register_name = "SAARI"; - break; - case CP0_REG09__SAAR: - CP0_CHECK(ctx->saar); - gen_helper_mtc0_saar(tcg_env, arg); - register_name = "SAAR"; - break; default: goto cp0_unimplemented; } @@ -7175,16 +7133,6 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) ctx->base.is_jmp = DISAS_EXIT; register_name = "Count"; break; - case CP0_REG09__SAARI: - CP0_CHECK(ctx->saar); - gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SAARI)); - register_name = "SAARI"; - break; - case CP0_REG09__SAAR: - CP0_CHECK(ctx->saar); - gen_helper_dmfc0_saar(arg, tcg_env); - register_name = "SAAR"; - break; default: goto cp0_unimplemented; } @@ -7887,16 +7835,6 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_helper_mtc0_count(tcg_env, arg); register_name = "Count"; break; - case CP0_REG09__SAARI: - CP0_CHECK(ctx->saar); - gen_helper_mtc0_saari(tcg_env, arg); - register_name = "SAARI"; - break; - case CP0_REG09__SAAR: - CP0_CHECK(ctx->saar); - gen_helper_mtc0_saar(tcg_env, arg); - register_name = "SAAR"; - break; default: goto cp0_unimplemented; } @@ -9061,8 +8999,8 @@ static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op, if (ctx->hflags & MIPS_HFLAG_BMASK) { #ifdef MIPS_DEBUG_DISAS - LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx - "\n", ctx->base.pc_next); + LOG_DISAS("Branch in delay / forbidden slot at PC 0x%016" + VADDR_PRIx "\n", ctx->base.pc_next); #endif gen_reserved_instruction(ctx); return; @@ -11274,8 +11212,8 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, if (ctx->hflags & MIPS_HFLAG_BMASK) { #ifdef MIPS_DEBUG_DISAS - LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx - "\n", ctx->base.pc_next); + LOG_DISAS("Branch in delay / forbidden slot at PC 0x%016" + VADDR_PRIx "\n", ctx->base.pc_next); #endif gen_reserved_instruction(ctx); return; @@ -15554,7 +15492,7 @@ static const TranslatorOps mips_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext ctx; @@ -15628,8 +15566,7 @@ void mips_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) { - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; + CPUMIPSState *env = cpu_env(cs); env->active_tc.PC = data[0]; env->hflags &= ~MIPS_HFLAG_BMASK; diff --git a/target/mips/tcg/translate.h b/target/mips/tcg/translate.h index cffcfeab8cd..2b6646b339b 100644 --- a/target/mips/tcg/translate.h +++ b/target/mips/tcg/translate.h @@ -49,7 +49,6 @@ typedef struct DisasContext { bool mrp; bool nan2008; 
bool abs2008; - bool saar; bool mi; int gi; } DisasContext; @@ -202,7 +201,8 @@ extern TCGv bcond; do { \ if (MIPS_DEBUG_DISAS) { \ qemu_log_mask(CPU_LOG_TB_IN_ASM, \ - TARGET_FMT_lx ": %08x Invalid %s %03x %03x %03x\n", \ + "%016" VADDR_PRIx \ + ": %08x Invalid %s %03x %03x %03x\n", \ ctx->base.pc_next, ctx->opcode, op, \ ctx->opcode >> 26, ctx->opcode & 0x3F, \ ((ctx->opcode >> 16) & 0x1F)); \ diff --git a/target/nios2/Kconfig b/target/nios2/Kconfig index 1529ab8950d..c65550c861a 100644 --- a/target/nios2/Kconfig +++ b/target/nios2/Kconfig @@ -1,2 +1,3 @@ config NIOS2 bool + select SEMIHOSTING diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c index a27732bf2b2..679aff5730f 100644 --- a/target/nios2/cpu.c +++ b/target/nios2/cpu.c @@ -28,28 +28,19 @@ static void nios2_cpu_set_pc(CPUState *cs, vaddr value) { - Nios2CPU *cpu = NIOS2_CPU(cs); - CPUNios2State *env = &cpu->env; - - env->pc = value; + cpu_env(cs)->pc = value; } static vaddr nios2_cpu_get_pc(CPUState *cs) { - Nios2CPU *cpu = NIOS2_CPU(cs); - CPUNios2State *env = &cpu->env; - - return env->pc; + return cpu_env(cs)->pc; } static void nios2_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) { - Nios2CPU *cpu = NIOS2_CPU(cs); - CPUNios2State *env = &cpu->env; - - env->pc = data[0]; + cpu_env(cs)->pc = data[0]; } static bool nios2_cpu_has_work(CPUState *cs) @@ -57,11 +48,17 @@ static bool nios2_cpu_has_work(CPUState *cs) return cs->interrupt_request & CPU_INTERRUPT_HARD; } +static int nios2_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return (cpu_env(cs)->ctrl[CR_STATUS] & CR_STATUS_U + ? MMU_USER_IDX : MMU_SUPERVISOR_IDX); +} + static void nios2_cpu_reset_hold(Object *obj) { CPUState *cs = CPU(obj); Nios2CPU *cpu = NIOS2_CPU(cs); - Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(cpu); + Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(obj); CPUNios2State *env = &cpu->env; if (ncc->parent_phases.hold) { @@ -354,7 +351,7 @@ static const struct SysemuCPUOps nios2_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps nios2_tcg_ops = { +static const TCGCPUOps nios2_tcg_ops = { .initialize = nios2_tcg_init, .restore_state_to_opc = nios2_restore_state_to_opc, @@ -381,6 +378,7 @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = nios2_cpu_class_by_name; cc->has_work = nios2_cpu_has_work; + cc->mmu_index = nios2_cpu_mmu_index; cc->dump_state = nios2_cpu_dump_state; cc->set_pc = nios2_cpu_set_pc; cc->get_pc = nios2_cpu_get_pc; diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h index 2d79b5b298a..4164a3432eb 100644 --- a/target/nios2/cpu.h +++ b/target/nios2/cpu.h @@ -270,12 +270,6 @@ void do_nios2_semihosting(CPUNios2State *env); #define MMU_SUPERVISOR_IDX 0 #define MMU_USER_IDX 1 -static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch) -{ - return (env->ctrl[CR_STATUS] & CR_STATUS_U) ? 
MMU_USER_IDX : - MMU_SUPERVISOR_IDX; -} - #ifndef CONFIG_USER_ONLY hwaddr nios2_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size, diff --git a/target/nios2/helper.c b/target/nios2/helper.c index bb3b09e5a77..ac57121afca 100644 --- a/target/nios2/helper.c +++ b/target/nios2/helper.c @@ -287,8 +287,7 @@ void nios2_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { - Nios2CPU *cpu = NIOS2_CPU(cs); - CPUNios2State *env = &cpu->env; + CPUNios2State *env = cpu_env(cs); env->ctrl[CR_BADADDR] = addr; cs->exception_index = EXCP_UNALIGN; diff --git a/target/nios2/nios2-semi.c b/target/nios2/nios2-semi.c index 0b84fcb6b62..420702e293e 100644 --- a/target/nios2/nios2-semi.c +++ b/target/nios2/nios2-semi.c @@ -75,8 +75,7 @@ static int host_to_gdb_errno(int err) static void nios2_semi_u32_cb(CPUState *cs, uint64_t ret, int err) { - Nios2CPU *cpu = NIOS2_CPU(cs); - CPUNios2State *env = &cpu->env; + CPUNios2State *env = cpu_env(cs); target_ulong args = env->regs[R_ARG1]; if (put_user_u32(ret, args) || @@ -93,8 +92,7 @@ static void nios2_semi_u32_cb(CPUState *cs, uint64_t ret, int err) static void nios2_semi_u64_cb(CPUState *cs, uint64_t ret, int err) { - Nios2CPU *cpu = NIOS2_CPU(cs); - CPUNios2State *env = &cpu->env; + CPUNios2State *env = cpu_env(cs); target_ulong args = env->regs[R_ARG1]; if (put_user_u32(ret >> 32, args) || diff --git a/target/nios2/translate.c b/target/nios2/translate.c index e8066235946..7ddc6ac1a24 100644 --- a/target/nios2/translate.c +++ b/target/nios2/translate.c @@ -948,7 +948,7 @@ static void nios2_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) Nios2CPU *cpu = env_archcpu(env); int page_insns; - dc->mem_idx = cpu_mmu_index(env, false); + dc->mem_idx = cpu_mmu_index(cs, false); dc->cr_state = cpu->cr_state; dc->tb_flags = dc->base.tb->flags; dc->eic_present = cpu->eic_present; @@ -970,7 +970,6 @@ static void nios2_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); - CPUNios2State *env = cpu_env(cs); const Nios2Instruction *instr; uint32_t code, pc; uint8_t op; @@ -980,7 +979,7 @@ static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) dc->base.pc_next = pc + 4; /* Decode an instruction */ - code = cpu_ldl_code(env, pc); + code = cpu_ldl_code(cpu_env(cs), pc); op = get_opcode(code); if (unlikely(op >= ARRAY_SIZE(i_type_instructions))) { @@ -1036,7 +1035,7 @@ static const TranslatorOps nios2_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc; translator_loop(cs, tb, max_insns, pc, host_pc, &nios2_tr_ops, &dc.base); diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index 1173260017a..33c45dbf04e 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -68,6 +68,18 @@ static bool openrisc_cpu_has_work(CPUState *cs) CPU_INTERRUPT_TIMER); } +static int openrisc_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + CPUOpenRISCState *env = cpu_env(cs); + + if (env->sr & (ifetch ? SR_IME : SR_DME)) { + /* The mmu is enabled; test supervisor state. */ + return env->sr & SR_SM ? 
MMU_SUPERVISOR_IDX : MMU_USER_IDX; + } + + return MMU_NOMMU_IDX; /* mmu is disabled */ +} + static void openrisc_disas_set_info(CPUState *cpu, disassemble_info *info) { info->print_insn = print_insn_or1k; @@ -75,9 +87,9 @@ static void openrisc_disas_set_info(CPUState *cpu, disassemble_info *info) static void openrisc_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(obj); - OpenRISCCPU *cpu = OPENRISC_CPU(s); - OpenRISCCPUClass *occ = OPENRISC_CPU_GET_CLASS(cpu); + CPUState *cs = CPU(obj); + OpenRISCCPU *cpu = OPENRISC_CPU(cs); + OpenRISCCPUClass *occ = OPENRISC_CPU_GET_CLASS(obj); if (occ->parent_phases.hold) { occ->parent_phases.hold(obj); @@ -88,7 +100,7 @@ static void openrisc_cpu_reset_hold(Object *obj) cpu->env.pc = 0x100; cpu->env.sr = SR_FO | SR_SM; cpu->env.lock_addr = -1; - s->exception_index = -1; + cs->exception_index = -1; cpu_set_fpcsr(&cpu->env, 0); set_float_detect_tininess(float_tininess_before_rounding, @@ -164,9 +176,7 @@ static ObjectClass *openrisc_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(OPENRISC_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc != NULL && !object_class_dynamic_cast(oc, TYPE_OPENRISC_CPU)) { - return NULL; - } + return oc; } @@ -215,7 +225,7 @@ static const struct SysemuCPUOps openrisc_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps openrisc_tcg_ops = { +static const TCGCPUOps openrisc_tcg_ops = { .initialize = openrisc_translate_init, .synchronize_from_tb = openrisc_cpu_synchronize_from_tb, .restore_state_to_opc = openrisc_restore_state_to_opc, @@ -241,6 +251,7 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = openrisc_cpu_class_by_name; cc->has_work = openrisc_cpu_has_work; + cc->mmu_index = openrisc_cpu_mmu_index; cc->dump_state = openrisc_cpu_dump_state; cc->set_pc = openrisc_cpu_set_pc; cc->get_pc = openrisc_cpu_get_pc; @@ -255,48 +266,6 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data) cc->tcg_ops = &openrisc_tcg_ops; } -/* Sort alphabetically by type name, except for "any". 
*/ -static gint openrisc_cpu_list_compare(gconstpointer a, gconstpointer b) -{ - ObjectClass *class_a = (ObjectClass *)a; - ObjectClass *class_b = (ObjectClass *)b; - const char *name_a, *name_b; - - name_a = object_class_get_name(class_a); - name_b = object_class_get_name(class_b); - if (strcmp(name_a, "any-" TYPE_OPENRISC_CPU) == 0) { - return 1; - } else if (strcmp(name_b, "any-" TYPE_OPENRISC_CPU) == 0) { - return -1; - } else { - return strcmp(name_a, name_b); - } -} - -static void openrisc_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - const char *typename; - char *name; - - typename = object_class_get_name(oc); - name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_OPENRISC_CPU)); - qemu_printf(" %s\n", name); - g_free(name); -} - -void cpu_openrisc_list(void) -{ - GSList *list; - - list = object_class_get_list(TYPE_OPENRISC_CPU, false); - list = g_slist_sort(list, openrisc_cpu_list_compare); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, openrisc_cpu_list_entry, NULL); - g_slist_free(list); -} - #define DEFINE_OPENRISC_CPU_TYPE(cpu_model, initfn) \ { \ .parent = TYPE_OPENRISC_CPU, \ diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h index dedeb89f8e9..b1b7db5cbd9 100644 --- a/target/openrisc/cpu.h +++ b/target/openrisc/cpu.h @@ -299,15 +299,12 @@ struct ArchCPU { CPUOpenRISCState env; }; -void cpu_openrisc_list(void); void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags); int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void openrisc_translate_init(void); int print_insn_or1k(bfd_vma addr, disassemble_info *info); -#define cpu_list cpu_openrisc_list - #ifndef CONFIG_USER_ONLY hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); @@ -364,18 +361,6 @@ static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env, vaddr *pc, | (env->sr & (SR_SM | SR_DME | SR_IME | SR_OVE)); } -static inline int cpu_mmu_index(CPUOpenRISCState *env, bool ifetch) -{ - int ret = MMU_NOMMU_IDX; /* mmu is disabled */ - - if (env->sr & (ifetch ? SR_IME : SR_DME)) { - /* The mmu is enabled; test supervisor state. */ - ret = env->sr & SR_SM ? 
MMU_SUPERVISOR_IDX : MMU_USER_IDX; - } - - return ret; -} - static inline uint32_t cpu_get_sr(const CPUOpenRISCState *env) { return (env->sr diff --git a/target/openrisc/gdbstub.c b/target/openrisc/gdbstub.c index d1074a05811..c2a77d5d4d5 100644 --- a/target/openrisc/gdbstub.c +++ b/target/openrisc/gdbstub.c @@ -23,8 +23,7 @@ int openrisc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - OpenRISCCPU *cpu = OPENRISC_CPU(cs); - CPUOpenRISCState *env = &cpu->env; + CPUOpenRISCState *env = cpu_env(cs); if (n < 32) { return gdb_get_reg32(mem_buf, cpu_get_gpr(env, n)); @@ -48,9 +47,8 @@ int openrisc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int openrisc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - OpenRISCCPU *cpu = OPENRISC_CPU(cs); CPUClass *cc = CPU_GET_CLASS(cs); - CPUOpenRISCState *env = &cpu->env; + CPUOpenRISCState *env = cpu_env(cs); uint32_t tmp; if (n > cc->gdb_num_core_regs) { diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c index d4fdb8ce8e9..b3b5b405779 100644 --- a/target/openrisc/interrupt.c +++ b/target/openrisc/interrupt.c @@ -29,8 +29,7 @@ void openrisc_cpu_do_interrupt(CPUState *cs) { - OpenRISCCPU *cpu = OPENRISC_CPU(cs); - CPUOpenRISCState *env = &cpu->env; + CPUOpenRISCState *env = cpu_env(cs); int exception = cs->exception_index; env->epcr = env->pc; @@ -105,8 +104,7 @@ void openrisc_cpu_do_interrupt(CPUState *cs) bool openrisc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - OpenRISCCPU *cpu = OPENRISC_CPU(cs); - CPUOpenRISCState *env = &cpu->env; + CPUOpenRISCState *env = cpu_env(cs); int idx = -1; if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->sr & SR_IEE)) { diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c index b7d73886401..3574e571cb2 100644 --- a/target/openrisc/machine.c +++ b/target/openrisc/machine.c @@ -25,7 +25,7 @@ static const VMStateDescription vmstate_tlb_entry = { .name = "tlb_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(mr, OpenRISCTLBEntry), VMSTATE_UINTTL(tr, OpenRISCTLBEntry), VMSTATE_END_OF_LIST() @@ -36,7 +36,7 @@ static const VMStateDescription vmstate_cpu_tlb = { .name = "cpu_tlb", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(itlb, CPUOpenRISCTLBContext, TLB_SIZE, 0, vmstate_tlb_entry, OpenRISCTLBEntry), VMSTATE_STRUCT_ARRAY(dtlb, CPUOpenRISCTLBContext, TLB_SIZE, 0, @@ -71,7 +71,7 @@ static const VMStateDescription vmstate_env = { .name = "env", .version_id = 6, .minimum_version_id = 6, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_2DARRAY(shadow_gpr, CPUOpenRISCState, 16, 32), VMSTATE_UINTTL(pc, CPUOpenRISCState), VMSTATE_UINTTL(ppc, CPUOpenRISCState), @@ -135,7 +135,7 @@ const VMStateDescription vmstate_openrisc_cpu = { .version_id = 1, .minimum_version_id = 1, .post_load = cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CPU(), VMSTATE_STRUCT(env, OpenRISCCPU, 1, vmstate_env, CPUOpenRISCState), VMSTATE_END_OF_LIST() diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c index 782a5751b75..77567afba47 100644 --- a/target/openrisc/sys_helper.c +++ b/target/openrisc/sys_helper.c @@ -160,20 +160,20 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) break; case TO_SPR(9, 0): /* PICMR */ env->picmr = rb; - qemu_mutex_lock_iothread(); + 
bql_lock(); if (env->picsr & env->picmr) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case TO_SPR(9, 2): /* PICSR */ env->picsr &= ~rb; break; case TO_SPR(10, 0): /* TTMR */ { - qemu_mutex_lock_iothread(); + bql_lock(); if ((env->ttmr & TTMR_M) ^ (rb & TTMR_M)) { switch (rb & TTMR_M) { case TIMER_NONE: @@ -198,15 +198,15 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) cs->interrupt_request &= ~CPU_INTERRUPT_TIMER; } cpu_openrisc_timer_update(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); } break; case TO_SPR(10, 1): /* TTCR */ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_openrisc_count_set(cpu, rb); cpu_openrisc_timer_update(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; } #endif @@ -347,9 +347,9 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd, return env->ttmr; case TO_SPR(10, 1): /* TTCR */ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_openrisc_count_update(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); return cpu_openrisc_count_get(cpu); } #endif diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c index ecff4412b7a..23fff460846 100644 --- a/target/openrisc/translate.c +++ b/target/openrisc/translate.c @@ -1528,7 +1528,7 @@ static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs) CPUOpenRISCState *env = cpu_env(cs); int bound; - dc->mem_idx = cpu_mmu_index(env, false); + dc->mem_idx = cpu_mmu_index(cs, false); dc->tb_flags = dc->base.tb->flags; dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0; dc->cpucfgr = env->cpucfgr; @@ -1564,8 +1564,7 @@ static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); - OpenRISCCPU *cpu = OPENRISC_CPU(cs); - uint32_t insn = translator_ldl(&cpu->env, &dc->base, dc->base.pc_next); + uint32_t insn = translator_ldl(cpu_env(cs), &dc->base, dc->base.pc_next); if (!decode(dc, insn)) { gen_illegal_exception(dc); @@ -1658,7 +1657,7 @@ static const TranslatorOps openrisc_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext ctx; @@ -1668,8 +1667,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - OpenRISCCPU *cpu = OPENRISC_CPU(cs); - CPUOpenRISCState *env = &cpu->env; + CPUOpenRISCState *env = cpu_env(cs); int i; qemu_fprintf(f, "PC=%08x\n", env->pc); diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c index 7dbb47de645..f2301b43f78 100644 --- a/target/ppc/cpu-models.c +++ b/target/ppc/cpu-models.c @@ -716,11 +716,11 @@ "PowerPC 970MP v1.0") POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970, "PowerPC 970MP v1.1") - POWERPC_DEF("power5+_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P, + POWERPC_DEF("power5p_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P, "POWER5+ v2.1") POWERPC_DEF("power7_v2.3", CPU_POWERPC_POWER7_v23, POWER7, "POWER7 v2.3") - POWERPC_DEF("power7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7, + POWERPC_DEF("power7p_v2.1", CPU_POWERPC_POWER7P_v21, POWER7, "POWER7+ v2.1") POWERPC_DEF("power8e_v2.1", CPU_POWERPC_POWER8E_v21, POWER8, "POWER8E v2.1") @@ -728,14 +728,10 @@ "POWER8 v2.0") POWERPC_DEF("power8nvl_v1.0", CPU_POWERPC_POWER8NVL_v10, POWER8, 
"POWER8NVL v1.0") - POWERPC_DEF("power9_v1.0", CPU_POWERPC_POWER9_DD1, POWER9, - "POWER9 v1.0") POWERPC_DEF("power9_v2.0", CPU_POWERPC_POWER9_DD20, POWER9, "POWER9 v2.0") POWERPC_DEF("power9_v2.2", CPU_POWERPC_POWER9_DD22, POWER9, "POWER9 v2.2") - POWERPC_DEF("power10_v1.0", CPU_POWERPC_POWER10_DD1, POWER10, - "POWER10 v1.0") POWERPC_DEF("power10_v2.0", CPU_POWERPC_POWER10_DD20, POWER10, "POWER10 v2.0") #endif /* defined (TARGET_PPC64) */ @@ -902,10 +898,12 @@ PowerPCCPUAlias ppc_cpu_aliases[] = { { "970", "970_v2.2" }, { "970fx", "970fx_v3.1" }, { "970mp", "970mp_v1.1" }, - { "power5+", "power5+_v2.1" }, + { "power5+", "power5p_v2.1" }, + { "power5+_v2.1", "power5p_v2.1" }, { "power5gs", "power5+_v2.1" }, { "power7", "power7_v2.3" }, - { "power7+", "power7+_v2.1" }, + { "power7+", "power7p_v2.1" }, + { "power7+_v2.1", "power7p_v2.1" }, { "power8e", "power8e_v2.1" }, { "power8", "power8_v2.0" }, { "power8nvl", "power8nvl_v1.0" }, diff --git a/target/ppc/cpu-param.h b/target/ppc/cpu-param.h index 0a0416e0a8c..b7ad52de039 100644 --- a/target/ppc/cpu-param.h +++ b/target/ppc/cpu-param.h @@ -31,6 +31,13 @@ # define TARGET_PHYS_ADDR_SPACE_BITS 36 # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif -#define TARGET_PAGE_BITS 12 + +#ifdef CONFIG_USER_ONLY +/* Allow user-only to vary page size from 4k */ +# define TARGET_PAGE_BITS_VARY +# define TARGET_PAGE_BITS_MIN 12 +#else +# define TARGET_PAGE_BITS 12 +#endif #endif diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h index 0241609efef..8247fa23367 100644 --- a/target/ppc/cpu-qom.h +++ b/target/ppc/cpu-qom.h @@ -20,6 +20,7 @@ #ifndef QEMU_PPC_CPU_QOM_H #define QEMU_PPC_CPU_QOM_H +#include "exec/gdbstub.h" #include "hw/core/cpu.h" #ifdef TARGET_PPC64 diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index f8101ffa296..67e6b2effd6 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -1183,6 +1183,21 @@ DEXCR_ASPECT(SRAPD, 4) DEXCR_ASPECT(NPHIE, 5) DEXCR_ASPECT(PHIE, 6) +/*****************************************************************************/ +/* PowerNV ChipTOD and TimeBase State Machine */ +struct pnv_tod_tbst { + int tb_ready_for_tod; /* core TB ready to receive TOD from chiptod */ + int tod_sent_to_tb; /* chiptod sent TOD to the core TB */ + + /* + * "Timers" for async TBST events are simulated by mfTFAC because TFAC + * is polled for such events. These are just used to ensure firmware + * performs the polling at least a few times. + */ + int tb_state_timer; + int tb_sync_pulse_timer; +}; + /*****************************************************************************/ /* The whole PowerPC CPU context */ @@ -1258,6 +1273,12 @@ struct CPUArchState { uint32_t tlb_need_flush; /* Delayed flush needed */ #define TLB_NEED_LOCAL_FLUSH 0x1 #define TLB_NEED_GLOBAL_FLUSH 0x2 + +#if defined(TARGET_PPC64) + /* PowerNV chiptod / timebase facility state. 
*/ + /* Would be nice to put these into PnvCore */ + struct pnv_tod_tbst pnv_tod_tbst; +#endif #endif /* Other registers */ @@ -1471,8 +1492,7 @@ struct PowerPCCPUClass { int bfd_mach; uint32_t l1_dcache_size, l1_icache_size; #ifndef CONFIG_USER_ONLY - unsigned int gdb_num_sprs; - const char *gdb_spr_xml; + GDBFeature gdb_spr; #endif const PPCHash64Options *hash64_opts; struct ppc_radix_page_info *radix_page_info; @@ -1525,8 +1545,6 @@ int ppc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); int ppc_cpu_gdb_write_register_apple(CPUState *cpu, uint8_t *buf, int reg); #ifndef CONFIG_USER_ONLY hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); -void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu); -const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name); #endif int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, int cpuid, DumpState *s); @@ -1566,6 +1584,8 @@ uint64_t cpu_ppc_load_atbl(CPUPPCState *env); uint32_t cpu_ppc_load_atbu(CPUPPCState *env); void cpu_ppc_store_atbl(CPUPPCState *env, uint32_t value); void cpu_ppc_store_atbu(CPUPPCState *env, uint32_t value); +void cpu_ppc_increase_tb_by_offset(CPUPPCState *env, int64_t offset); +void cpu_ppc_decrease_tb_by_offset(CPUPPCState *env, int64_t offset); uint64_t cpu_ppc_load_vtb(CPUPPCState *env); void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value); bool ppc_decr_clear_on_delivery(CPUPPCState *env); @@ -1624,7 +1644,7 @@ int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val); /* MMU modes definitions */ #define MMU_USER_IDX 0 -static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch) +static inline int ppc_env_mmu_index(CPUPPCState *env, bool ifetch) { #ifdef CONFIG_USER_ONLY return MMU_USER_IDX; @@ -1737,9 +1757,11 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_PSPB (0x09F) #define SPR_DPDES (0x0B0) #define SPR_DAWR0 (0x0B4) +#define SPR_DAWR1 (0x0B5) #define SPR_RPR (0x0BA) #define SPR_CIABR (0x0BB) #define SPR_DAWRX0 (0x0BC) +#define SPR_DAWRX1 (0x0BD) #define SPR_HFSCR (0x0BE) #define SPR_VRSAVE (0x100) #define SPR_USPRG0 (0x100) @@ -1750,8 +1772,8 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_USPRG5 (0x105) #define SPR_USPRG6 (0x106) #define SPR_USPRG7 (0x107) -#define SPR_VTBL (0x10C) -#define SPR_VTBU (0x10D) +#define SPR_TBL (0x10C) +#define SPR_TBU (0x10D) #define SPR_SPRG0 (0x110) #define SPR_SPRG1 (0x111) #define SPR_SPRG2 (0x112) @@ -1764,8 +1786,8 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_SPRG7 (0x117) #define SPR_ASR (0x118) #define SPR_EAR (0x11A) -#define SPR_TBL (0x11C) -#define SPR_TBU (0x11D) +#define SPR_WR_TBL (0x11C) +#define SPR_WR_TBU (0x11D) #define SPR_TBU40 (0x11E) #define SPR_SVR (0x11E) #define SPR_BOOKE_PIR (0x11E) @@ -1933,6 +1955,12 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_BOOKE_TLB2CFG (0x2B2) #define SPR_BOOKE_TLB3CFG (0x2B3) #define SPR_BOOKE_EPR (0x2BE) +#define SPR_POWER_USIER2 (0x2E0) +#define SPR_POWER_USIER3 (0x2E1) +#define SPR_POWER_UMMCR3 (0x2E2) +#define SPR_POWER_SIER2 (0x2F0) +#define SPR_POWER_SIER3 (0x2F1) +#define SPR_POWER_MMCR3 (0x2F2) #define SPR_PERF0 (0x300) #define SPR_RCPU_MI_RBA0 (0x300) #define SPR_MPC_MI_CTR (0x300) @@ -2648,6 +2676,34 @@ enum { HMER_XSCOM_STATUS_MASK = PPC_BITMASK(21, 23), }; +/* TFMR */ +enum { + TFMR_CONTROL_MASK = PPC_BITMASK(0, 24), + TFMR_MASK_HMI = PPC_BIT(10), + TFMR_TB_ECLIPZ = PPC_BIT(14), + TFMR_LOAD_TOD_MOD = PPC_BIT(16), + TFMR_MOVE_CHIP_TOD_TO_TB = PPC_BIT(18), + 
TFMR_CLEAR_TB_ERRORS = PPC_BIT(24), + TFMR_STATUS_MASK = PPC_BITMASK(25, 63), + TFMR_TBST_ENCODED = PPC_BITMASK(28, 31), /* TBST = TB State */ + TFMR_TBST_LAST = PPC_BITMASK(32, 35), /* Previous TBST */ + TFMR_TB_ENABLED = PPC_BIT(40), + TFMR_TB_VALID = PPC_BIT(41), + TFMR_TB_SYNC_OCCURED = PPC_BIT(42), + TFMR_FIRMWARE_CONTROL_ERROR = PPC_BIT(46), +}; + +/* TFMR TBST (Time Base State Machine). */ +enum { + TBST_RESET = 0x0, + TBST_SEND_TOD_MOD = 0x1, + TBST_NOT_SET = 0x2, + TBST_SYNC_WAIT = 0x6, + TBST_GET_TOD = 0x7, + TBST_TB_RUNNING = 0x8, + TBST_TB_ERROR = 0x9, +}; + /*****************************************************************************/ #define is_isa300(ctx) (!!(ctx->insns_flags2 & PPC2_ISA300)) @@ -2902,6 +2958,7 @@ static inline bool ppc_has_spr(PowerPCCPU *cpu, int spr) } #if !defined(CONFIG_USER_ONLY) +/* Sort out endianness of interrupt. Depends on the CPU, HV mode, etc. */ static inline bool ppc_interrupts_little_endian(PowerPCCPU *cpu, bool hv) { PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index 40fe14a6c25..6241de62ce8 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -1642,7 +1642,7 @@ static void register_8xx_sprs(CPUPPCState *env) /*****************************************************************************/ /* Exception vectors models */ -static void init_excp_4xx_softmmu(CPUPPCState *env) +static void init_excp_4xx(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100; @@ -2120,7 +2120,7 @@ static void init_proc_405(CPUPPCState *env) env->id_tlbs = 0; env->tlb_type = TLB_EMB; #endif - init_excp_4xx_softmmu(env); + init_excp_4xx(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ @@ -5062,7 +5062,7 @@ static void register_970_hid_sprs(CPUPPCState *env) static void register_970_hior_sprs(CPUPPCState *env) { - spr_register(env, SPR_HIOR, "SPR_HIOR", + spr_register(env, SPR_HIOR, "HIOR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_hior, &spr_write_hior, 0x00000000); @@ -5070,11 +5070,11 @@ static void register_970_hior_sprs(CPUPPCState *env) static void register_book3s_ctrl_sprs(CPUPPCState *env) { - spr_register(env, SPR_CTRL, "SPR_CTRL", + spr_register(env, SPR_CTRL, "CTRL", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_write_CTRL, 0x00000000); - spr_register(env, SPR_UCTRL, "SPR_UCTRL", + spr_register(env, SPR_UCTRL, "UCTRL", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); @@ -5308,6 +5308,38 @@ static void register_power8_pmu_user_sprs(CPUPPCState *env) 0x00000000); } +static void register_power10_pmu_sup_sprs(CPUPPCState *env) +{ + spr_register_kvm(env, SPR_POWER_MMCR3, "MMCR3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_MMCR3, 0x00000000); + spr_register_kvm(env, SPR_POWER_SIER2, "SIER2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_SIER2, 0x00000000); + spr_register_kvm(env, SPR_POWER_SIER3, "SIER3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_SIER3, 0x00000000); +} + +static void register_power10_pmu_user_sprs(CPUPPCState *env) +{ + spr_register(env, SPR_POWER_UMMCR3, "UMMCR3", + &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_POWER_USIER2, "USIER2", + &spr_read_generic, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, 
SPR_POWER_USIER3, "USIER3", + &spr_read_generic, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + static void register_power5p_ear_sprs(CPUPPCState *env) { /* External access control */ @@ -5465,7 +5497,7 @@ static void register_book3s_purr_sprs(CPUPPCState *env) static void register_power6_dbg_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) - spr_register(env, SPR_CFAR, "SPR_CFAR", + spr_register(env, SPR_CFAR, "CFAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_cfar, &spr_write_cfar, 0x00000000); @@ -5483,7 +5515,7 @@ static void register_power5p_common_sprs(CPUPPCState *env) static void register_power6_common_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) - spr_register_kvm(env, SPR_DSCR, "SPR_DSCR", + spr_register_kvm(env, SPR_DSCR, "DSCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_DSCR, 0x00000000); @@ -5695,7 +5727,7 @@ static void register_power8_book4_sprs(CPUPPCState *env) &spr_read_generic, &spr_write_generic, KVM_REG_PPC_ACOP, 0); /* PID is only in BookE in ISA v2.07 */ - spr_register_kvm(env, SPR_BOOKS_PID, "PID", + spr_register_kvm(env, SPR_BOOKS_PID, "PIDR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pidr, KVM_REG_PPC_PID, 0); @@ -5716,7 +5748,7 @@ static void register_power7_book4_sprs(CPUPPCState *env) &spr_read_generic, &spr_write_generic, KVM_REG_PPC_ACOP, 0); /* PID is only in BookE in ISA v2.06 */ - spr_register_kvm(env, SPR_BOOKS_PID, "PID", + spr_register_kvm(env, SPR_BOOKS_PID, "PIDR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic32, KVM_REG_PPC_PID, 0); @@ -5750,7 +5782,7 @@ static void register_power9_mmu_sprs(CPUPPCState *env) &spr_read_generic, &spr_write_generic, 0x0000000000000000); /* PID is part of the BookS ISA from v3.0 */ - spr_register_kvm(env, SPR_BOOKS_PID, "PID", + spr_register_kvm(env, SPR_BOOKS_PID, "PIDR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pidr, KVM_REG_PPC_PID, 0); @@ -5788,10 +5820,10 @@ static void register_power10_dexcr_sprs(CPUPPCState *env) { spr_register(env, SPR_DEXCR, "DEXCR", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic32, + &spr_read_generic, &spr_write_generic, 0); - spr_register(env, SPR_UDEXCR, "DEXCR", + spr_register(env, SPR_UDEXCR, "UDEXCR", &spr_read_dexcr_ureg, SPR_NOACCESS, &spr_read_dexcr_ureg, SPR_NOACCESS, 0); @@ -5799,10 +5831,10 @@ static void register_power10_dexcr_sprs(CPUPPCState *env) spr_register_hv(env, SPR_HDEXCR, "HDEXCR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic32, + &spr_read_generic, &spr_write_generic, 0); - spr_register(env, SPR_UHDEXCR, "HDEXCR", + spr_register(env, SPR_UHDEXCR, "UHDEXCR", &spr_read_dexcr_ureg, SPR_NOACCESS, &spr_read_dexcr_ureg, SPR_NOACCESS, 0); @@ -6350,10 +6382,7 @@ static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr, bool best) return false; } - if ((pvr & 0x0f00) == 0x100) { - /* DD1.x always matches power9_v1.0 */ - return true; - } else if ((pvr & 0x0f00) == 0x200) { + if ((pvr & 0x0f00) == 0x200) { if ((pvr & 0xf) < 2) { /* DD2.0, DD2.1 match power9_v2.0 */ if ((pcc->pvr & 0xf) == 0) { @@ -6505,6 +6534,8 @@ static void init_proc_POWER10(CPUPPCState *env) register_power9_mmu_sprs(env); register_power10_hash_sprs(env); register_power10_dexcr_sprs(env); + register_power10_pmu_sup_sprs(env); + register_power10_pmu_user_sprs(env); /* FIXME: Filter fields properly based on privilege level */ spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, @@ 
-6536,7 +6567,7 @@ static bool ppc_pvr_match_power10(PowerPCCPUClass *pcc, uint32_t pvr, bool best) } if ((pvr & 0x0f00) == (pcc->pvr & 0x0f00)) { - /* Major DD version matches to power10_v1.0 and power10_v2.0 */ + /* Major DD version matches power10_v2.0 */ return true; } @@ -6576,11 +6607,10 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | - PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310 | + PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310 | PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_HV) | - (1ull << MSR_TM) | (1ull << MSR_VR) | (1ull << MSR_VSX) | (1ull << MSR_EE) | @@ -6620,7 +6650,7 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | - POWERPC_FLAG_VSX | POWERPC_FLAG_TM | POWERPC_FLAG_SCV; + POWERPC_FLAG_VSX | POWERPC_FLAG_SCV; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; } @@ -6682,10 +6712,6 @@ static void init_ppc_proc(PowerPCCPU *cpu) /* PowerPC implementation specific initialisations (SPRs, timers, ...) */ (*pcc->init_proc)(env); -#if !defined(CONFIG_USER_ONLY) - ppc_gdb_gen_spr_xml(cpu); -#endif - /* MSR bits & flags consistency checks */ if (env->msr_mask & (1 << 25)) { switch (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) { @@ -7036,8 +7062,7 @@ static void ppc_cpu_list_entry(gpointer data, gpointer user_data) return; } - name = g_strndup(typename, - strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX)); + name = cpu_model_from_type(typename); qemu_printf("PowerPC %-16s PVR %08x\n", name, pcc->pvr); for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) { PowerPCCPUAlias *alias = &ppc_cpu_aliases[i]; @@ -7106,11 +7131,16 @@ static bool ppc_cpu_has_work(CPUState *cs) return cs->interrupt_request & CPU_INTERRUPT_HARD; } +static int ppc_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return ppc_env_mmu_index(cpu_env(cs), ifetch); +} + static void ppc_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(obj); - PowerPCCPU *cpu = POWERPC_CPU(s); - PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + CPUState *cs = CPU(obj); + PowerPCCPU *cpu = POWERPC_CPU(cs); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(obj); CPUPPCState *env = &cpu->env; target_ulong msr; int i; @@ -7159,8 +7189,8 @@ static void ppc_cpu_reset_hold(Object *obj) env->nip = env->hreset_vector | env->excp_prefix; if (tcg_enabled()) { - cpu_breakpoint_remove_all(s, BP_CPU); - cpu_watchpoint_remove_all(s, BP_CPU); + cpu_breakpoint_remove_all(cs, BP_CPU); + cpu_watchpoint_remove_all(cs, BP_CPU); if (env->mmu_model != POWERPC_MMU_REAL) { ppc_tlb_invalidate_all(env); } @@ -7174,7 +7204,7 @@ static void ppc_cpu_reset_hold(Object *obj) env->reserve_addr = (target_ulong)-1ULL; /* Be sure no exception or interrupt is pending */ env->pending_interrupts = 0; - s->exception_index = POWERPC_EXCP_NONE; + cs->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; ppc_irq_reset(cpu); @@ -7196,12 +7226,9 @@ static void ppc_cpu_reset_hold(Object *obj) static bool ppc_cpu_is_big_endian(CPUState *cs) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; - cpu_synchronize_state(cs); - return !FIELD_EX64(env->msr, MSR, LE); + return !FIELD_EX64(cpu_env(cs)->msr, MSR, LE); } static bool ppc_get_irq_stats(InterruptStatsProvider *obj, @@ -7288,8 +7315,7 @@ static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, 
uint32_t pvr, bool best) static void ppc_disas_set_info(CPUState *cs, disassemble_info *info) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); if ((env->hflags >> MSR_LE) & 1) { info->endian = BFD_ENDIAN_LITTLE; @@ -7333,7 +7359,7 @@ static const struct SysemuCPUOps ppc_sysemu_ops = { #ifdef CONFIG_TCG #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps ppc_tcg_ops = { +static const TCGCPUOps ppc_tcg_ops = { .initialize = ppc_translate_init, .restore_state_to_opc = ppc_restore_state_to_opc, @@ -7373,6 +7399,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = ppc_cpu_class_by_name; cc->has_work = ppc_cpu_has_work; + cc->mmu_index = ppc_cpu_mmu_index; cc->dump_state = ppc_cpu_dump_state; cc->set_pc = ppc_cpu_set_pc; cc->get_pc = ppc_cpu_get_pc; @@ -7384,9 +7411,6 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data) #endif cc->gdb_num_core_regs = 71; -#ifndef CONFIG_USER_ONLY - cc->gdb_get_dynamic_xml = ppc_gdb_get_dynamic_xml; -#endif #ifdef USE_APPLE_GDB cc->gdb_read_register = ppc_cpu_gdb_read_register_apple; cc->gdb_write_register = ppc_cpu_gdb_write_register_apple; @@ -7447,8 +7471,7 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags) #define RGPL 4 #define RFPL 4 - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); int i; qemu_fprintf(f, "NIP " TARGET_FMT_lx " LR " TARGET_FMT_lx " CTR " @@ -7458,7 +7481,7 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags) qemu_fprintf(f, "MSR " TARGET_FMT_lx " HID0 " TARGET_FMT_lx " HF " "%08x iidx %d didx %d\n", env->msr, env->spr[SPR_HID0], env->hflags, - cpu_mmu_index(env, true), cpu_mmu_index(env, false)); + ppc_env_mmu_index(env, true), ppc_env_mmu_index(env, false)); #if !defined(CONFIG_USER_ONLY) if (env->tb_env) { qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64 diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 9b8fd69b85e..674c05a2ce7 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -35,7 +35,7 @@ /*****************************************************************************/ /* Exception processing */ -#if !defined(CONFIG_USER_ONLY) +#ifndef CONFIG_USER_ONLY static const char *powerpc_excp_name(int excp) { @@ -186,7 +186,7 @@ static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp) env->error_code); } -#if defined(TARGET_PPC64) +#ifdef TARGET_PPC64 static int powerpc_reset_wakeup(CPUPPCState *env, int excp, target_ulong *msr) { /* We no longer are in a PM state */ @@ -380,7 +380,7 @@ static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr, } } } -#endif +#endif /* TARGET_PPC64 */ static void powerpc_reset_excp_state(PowerPCCPU *cpu) { @@ -403,9 +403,8 @@ static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector, * We don't use hreg_store_msr here as already have treated any * special case that could occur. Just store MSR and update hflags * - * Note: We *MUST* not use hreg_store_msr() as-is anyway because it - * will prevent setting of the HV bit which some exceptions might need - * to do. + * Note: We *MUST* not use hreg_store_msr() as-is anyway because it will + * prevent setting of the HV bit which some exceptions might need to do. 
*/ env->nip = vector; env->msr = msr; @@ -445,38 +444,26 @@ static void powerpc_mcheck_checkstop(CPUPPCState *env) static void powerpc_excp_40x(PowerPCCPU *cpu, int excp) { - CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; target_ulong msr, new_msr, vector; - int srr0, srr1; + int srr0 = SPR_SRR0, srr1 = SPR_SRR1; /* new srr1 value excluding must-be-zero bits */ msr = env->msr & ~0x783f0000ULL; - /* - * new interrupt handler msr preserves existing ME unless - * explicitly overridden. - */ + /* new interrupt handler msr preserves ME unless explicitly overridden */ new_msr = env->msr & (((target_ulong)1 << MSR_ME)); - /* target registers */ - srr0 = SPR_SRR0; - srr1 = SPR_SRR1; - - /* - * Hypervisor emulation assistance interrupt only exists on server - * arch 2.05 server or later. - */ + /* HV emu assistance interrupt only exists on server arch 2.05 or later */ if (excp == POWERPC_EXCP_HV_EMU) { excp = POWERPC_EXCP_PROGRAM; } vector = env->excp_vectors[excp]; if (vector == (target_ulong)-1ULL) { - cpu_abort(cs, "Raised an exception without defined vector %d\n", - excp); + cpu_abort(env_cpu(env), + "Raised an exception without defined vector %d\n", excp); } - vector |= env->excp_prefix; switch (excp) { @@ -488,7 +475,6 @@ static void powerpc_excp_40x(PowerPCCPU *cpu, int excp) powerpc_mcheck_checkstop(env); /* machine check exceptions don't have ME set */ new_msr &= ~((target_ulong)1 << MSR_ME); - srr0 = SPR_40x_SRR2; srr1 = SPR_40x_SRR3; break; @@ -523,7 +509,7 @@ static void powerpc_excp_40x(PowerPCCPU *cpu, int excp) env->spr[SPR_40x_ESR] = ESR_PTR; break; default: - cpu_abort(cs, "Invalid program exception %d. Aborting\n", + cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", env->error_code); break; } @@ -550,52 +536,41 @@ static void powerpc_excp_40x(PowerPCCPU *cpu, int excp) trace_ppc_excp_print("PIT"); break; case POWERPC_EXCP_DEBUG: /* Debug interrupt */ - cpu_abort(cs, "%s exception not implemented\n", + cpu_abort(env_cpu(env), "%s exception not implemented\n", powerpc_excp_name(excp)); break; default: - cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); + cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", + excp); break; } - /* Save PC */ env->spr[srr0] = env->nip; - - /* Save MSR */ env->spr[srr1] = msr; - powerpc_set_excp_state(cpu, vector, new_msr); } static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp) { - CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; target_ulong msr, new_msr, vector; /* new srr1 value excluding must-be-zero bits */ msr = env->msr & ~0x783f0000ULL; - /* - * new interrupt handler msr preserves existing ME unless - * explicitly overridden - */ + /* new interrupt handler msr preserves ME unless explicitly overridden */ new_msr = env->msr & ((target_ulong)1 << MSR_ME); - /* - * Hypervisor emulation assistance interrupt only exists on server - * arch 2.05 server or later. 
- */ + /* HV emu assistance interrupt only exists on server arch 2.05 or later */ if (excp == POWERPC_EXCP_HV_EMU) { excp = POWERPC_EXCP_PROGRAM; } vector = env->excp_vectors[excp]; if (vector == (target_ulong)-1ULL) { - cpu_abort(cs, "Raised an exception without defined vector %d\n", - excp); + cpu_abort(env_cpu(env), + "Raised an exception without defined vector %d\n", excp); } - vector |= env->excp_prefix; switch (excp) { @@ -605,7 +580,6 @@ static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp) powerpc_mcheck_checkstop(env); /* machine check exceptions don't have ME set */ new_msr &= ~((target_ulong)1 << MSR_ME); - break; case POWERPC_EXCP_DSI: /* Data storage exception */ trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); @@ -633,11 +607,9 @@ static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp) powerpc_reset_excp_state(cpu); return; } - /* - * FP exceptions always have NIP pointing to the faulting - * instruction, so always use store_next and claim we are - * precise in the MSR. + * NIP always points to the faulting instruction for FP exceptions, + * so always use store_next and claim we are precise in the MSR. */ msr |= 0x00100000; break; @@ -653,7 +625,7 @@ static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp) break; default: /* Should never occur */ - cpu_abort(cs, "Invalid program exception %d. Aborting\n", + cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", env->error_code); break; } @@ -675,8 +647,9 @@ static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp) break; case POWERPC_EXCP_RESET: /* System reset exception */ if (FIELD_EX64(env->msr, MSR, POW)) { - cpu_abort(cs, "Trying to deliver power-saving system reset " - "exception %d with no HV support\n", excp); + cpu_abort(env_cpu(env), + "Trying to deliver power-saving system reset exception " + "%d with no HV support\n", excp); } break; case POWERPC_EXCP_TRACE: /* Trace exception */ @@ -703,60 +676,44 @@ static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp) case POWERPC_EXCP_SMI: /* System management interrupt */ case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ - cpu_abort(cs, "%s exception not implemented\n", + cpu_abort(env_cpu(env), "%s exception not implemented\n", powerpc_excp_name(excp)); break; default: - cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); + cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", + excp); break; } - /* - * Sort out endianness of interrupt, this differs depending on the - * CPU, the HV mode, etc... - */ if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { new_msr |= (target_ulong)1 << MSR_LE; } - - /* Save PC */ env->spr[SPR_SRR0] = env->nip; - - /* Save MSR */ env->spr[SPR_SRR1] = msr; - powerpc_set_excp_state(cpu, vector, new_msr); } static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp) { - CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; target_ulong msr, new_msr, vector; /* new srr1 value excluding must-be-zero bits */ msr = env->msr & ~0x783f0000ULL; - /* - * new interrupt handler msr preserves existing ME unless - * explicitly overridden - */ + /* new interrupt handler msr preserves ME unless explicitly overridden */ new_msr = env->msr & ((target_ulong)1 << MSR_ME); - /* - * Hypervisor emulation assistance interrupt only exists on server - * arch 2.05 server or later. 
- */ + /* HV emu assistance interrupt only exists on server arch 2.05 or later */ if (excp == POWERPC_EXCP_HV_EMU) { excp = POWERPC_EXCP_PROGRAM; } vector = env->excp_vectors[excp]; if (vector == (target_ulong)-1ULL) { - cpu_abort(cs, "Raised an exception without defined vector %d\n", - excp); + cpu_abort(env_cpu(env), + "Raised an exception without defined vector %d\n", excp); } - vector |= env->excp_prefix; switch (excp) { @@ -764,7 +721,6 @@ static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp) powerpc_mcheck_checkstop(env); /* machine check exceptions don't have ME set */ new_msr &= ~((target_ulong)1 << MSR_ME); - break; case POWERPC_EXCP_DSI: /* Data storage exception */ trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); @@ -792,11 +748,9 @@ static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp) powerpc_reset_excp_state(cpu); return; } - /* - * FP exceptions always have NIP pointing to the faulting - * instruction, so always use store_next and claim we are - * precise in the MSR. + * NIP always points to the faulting instruction for FP exceptions, + * so always use store_next and claim we are precise in the MSR. */ msr |= 0x00100000; break; @@ -812,7 +766,7 @@ static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp) break; default: /* Should never occur */ - cpu_abort(cs, "Invalid program exception %d. Aborting\n", + cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", env->error_code); break; } @@ -854,8 +808,9 @@ static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp) break; case POWERPC_EXCP_RESET: /* System reset exception */ if (FIELD_EX64(env->msr, MSR, POW)) { - cpu_abort(cs, "Trying to deliver power-saving system reset " - "exception %d with no HV support\n", excp); + cpu_abort(env_cpu(env), + "Trying to deliver power-saving system reset exception " + "%d with no HV support\n", excp); } break; case POWERPC_EXCP_TRACE: /* Trace exception */ @@ -864,71 +819,53 @@ static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp) case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ ppc_excp_debug_sw_tlb(env, excp); - msr |= env->crf[0] << 28; msr |= env->error_code; /* key, D/I, S/L bits */ /* Set way using a LRU mechanism */ msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; - break; case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ case POWERPC_EXCP_SMI: /* System management interrupt */ case POWERPC_EXCP_THERM: /* Thermal interrupt */ case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ - cpu_abort(cs, "%s exception not implemented\n", + cpu_abort(env_cpu(env), "%s exception not implemented\n", powerpc_excp_name(excp)); break; default: - cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); + cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", + excp); break; } - /* - * Sort out endianness of interrupt, this differs depending on the - * CPU, the HV mode, etc... 
- */ if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { new_msr |= (target_ulong)1 << MSR_LE; } - - /* Save PC */ env->spr[SPR_SRR0] = env->nip; - - /* Save MSR */ env->spr[SPR_SRR1] = msr; - powerpc_set_excp_state(cpu, vector, new_msr); } static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp) { - CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; target_ulong msr, new_msr, vector; /* new srr1 value excluding must-be-zero bits */ msr = env->msr & ~0x783f0000ULL; - /* - * new interrupt handler msr preserves existing ME unless - * explicitly overridden - */ + /* new interrupt handler msr preserves ME unless explicitly overridden */ new_msr = env->msr & ((target_ulong)1 << MSR_ME); - /* - * Hypervisor emulation assistance interrupt only exists on server - * arch 2.05 server or later. - */ + /* HV emu assistance interrupt only exists on server arch 2.05 or later */ if (excp == POWERPC_EXCP_HV_EMU) { excp = POWERPC_EXCP_PROGRAM; } vector = env->excp_vectors[excp]; if (vector == (target_ulong)-1ULL) { - cpu_abort(cs, "Raised an exception without defined vector %d\n", - excp); + cpu_abort(env_cpu(env), + "Raised an exception without defined vector %d\n", excp); } - vector |= env->excp_prefix; switch (excp) { @@ -936,7 +873,6 @@ static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp) powerpc_mcheck_checkstop(env); /* machine check exceptions don't have ME set */ new_msr &= ~((target_ulong)1 << MSR_ME); - break; case POWERPC_EXCP_DSI: /* Data storage exception */ trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); @@ -964,11 +900,9 @@ static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp) powerpc_reset_excp_state(cpu); return; } - /* - * FP exceptions always have NIP pointing to the faulting - * instruction, so always use store_next and claim we are - * precise in the MSR. + * NIP always points to the faulting instruction for FP exceptions, + * so always use store_next and claim we are precise in the MSR. */ msr |= 0x00100000; break; @@ -984,7 +918,7 @@ static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp) break; default: /* Should never occur */ - cpu_abort(cs, "Invalid program exception %d. Aborting\n", + cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", env->error_code); break; } @@ -1026,7 +960,8 @@ static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp) break; case POWERPC_EXCP_RESET: /* System reset exception */ if (FIELD_EX64(env->msr, MSR, POW)) { - cpu_abort(cs, "Trying to deliver power-saving system reset " + cpu_abort(env_cpu(env), + "Trying to deliver power-saving system reset " "exception %d with no HV support\n", excp); } break; @@ -1039,54 +974,39 @@ static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp) case POWERPC_EXCP_THERM: /* Thermal interrupt */ case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ case POWERPC_EXCP_VPUA: /* Vector assist exception */ - cpu_abort(cs, "%s exception not implemented\n", + cpu_abort(env_cpu(env), "%s exception not implemented\n", powerpc_excp_name(excp)); break; default: - cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); + cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", + excp); break; } - /* - * Sort out endianness of interrupt, this differs depending on the - * CPU, the HV mode, etc... 
- */ if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { new_msr |= (target_ulong)1 << MSR_LE; } - - /* Save PC */ env->spr[SPR_SRR0] = env->nip; - - /* Save MSR */ env->spr[SPR_SRR1] = msr; - powerpc_set_excp_state(cpu, vector, new_msr); } static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) { - CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; target_ulong msr, new_msr, vector; - int srr0, srr1; - - msr = env->msr; + int srr0 = SPR_SRR0, srr1 = SPR_SRR1; /* - * new interrupt handler msr preserves existing ME unless - * explicitly overridden + * Book E does not play games with certain bits of xSRR1 being MSR save + * bits and others being error status. xSRR1 is the old MSR, period. */ - new_msr = env->msr & ((target_ulong)1 << MSR_ME); + msr = env->msr; - /* target registers */ - srr0 = SPR_SRR0; - srr1 = SPR_SRR1; + /* new interrupt handler msr preserves ME unless explicitly overridden */ + new_msr = env->msr & ((target_ulong)1 << MSR_ME); - /* - * Hypervisor emulation assistance interrupt only exists on server - * arch 2.05 server or later. - */ + /* HV emu assistance interrupt only exists on server arch 2.05 or later */ if (excp == POWERPC_EXCP_HV_EMU) { excp = POWERPC_EXCP_PROGRAM; } @@ -1103,10 +1023,9 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) vector = env->excp_vectors[excp]; if (vector == (target_ulong)-1ULL) { - cpu_abort(cs, "Raised an exception without defined vector %d\n", - excp); + cpu_abort(env_cpu(env), + "Raised an exception without defined vector %d\n", excp); } - vector |= env->excp_prefix; switch (excp) { @@ -1135,6 +1054,7 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) break; case POWERPC_EXCP_EXTERNAL: /* External input */ if (env->mpic_proxy) { + CPUState *cs = env_cpu(env); /* IACK the IRQ on delivery */ env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack); } @@ -1149,11 +1069,9 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) powerpc_reset_excp_state(cpu); return; } - /* - * FP exceptions always have NIP pointing to the faulting - * instruction, so always use store_next and claim we are - * precise in the MSR. + * NIP always points to the faulting instruction for FP exceptions, + * so always use store_next and claim we are precise in the MSR. */ msr |= 0x00100000; env->spr[SPR_BOOKE_ESR] = ESR_FP; @@ -1173,7 +1091,7 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) break; default: /* Should never occur */ - cpu_abort(cs, "Invalid program exception %d. Aborting\n", + cpu_abort(env_cpu(env), "Invalid program exception %d. 
Aborting\n", env->error_code); break; } @@ -1214,7 +1132,8 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) /* DBSR already modified by caller */ } else { - cpu_abort(cs, "Debug exception triggered on unsupported model\n"); + cpu_abort(env_cpu(env), + "Debug exception triggered on unsupported model\n"); } break; case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable/VPU */ @@ -1228,21 +1147,23 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) break; case POWERPC_EXCP_RESET: /* System reset exception */ if (FIELD_EX64(env->msr, MSR, POW)) { - cpu_abort(cs, "Trying to deliver power-saving system reset " + cpu_abort(env_cpu(env), + "Trying to deliver power-saving system reset " "exception %d with no HV support\n", excp); } break; case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ - cpu_abort(cs, "%s exception not implemented\n", + cpu_abort(env_cpu(env), "%s exception not implemented\n", powerpc_excp_name(excp)); break; default: - cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); + cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", + excp); break; } -#if defined(TARGET_PPC64) +#ifdef TARGET_PPC64 if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) { /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */ new_msr |= (target_ulong)1 << MSR_CM; @@ -1251,12 +1172,8 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) } #endif - /* Save PC */ env->spr[srr0] = env->nip; - - /* Save MSR */ env->spr[srr1] = msr; - powerpc_set_excp_state(cpu, vector, new_msr); } @@ -1376,24 +1293,19 @@ static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp) static void powerpc_excp_books(PowerPCCPU *cpu, int excp) { - CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; target_ulong msr, new_msr, vector; - int srr0, srr1, lev = -1; + int srr0 = SPR_SRR0, srr1 = SPR_SRR1, lev = -1; /* new srr1 value excluding must-be-zero bits */ msr = env->msr & ~0x783f0000ULL; /* - * new interrupt handler msr preserves existing HV and ME unless - * explicitly overridden + * new interrupt handler msr preserves HV and ME unless explicitly + * overridden */ new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB); - /* target registers */ - srr0 = SPR_SRR0; - srr1 = SPR_SRR1; - /* * check for special resume at 0x100 from doze/nap/sleep/winkle on * P7/P8/P9 @@ -1415,10 +1327,9 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) vector = env->excp_vectors[excp]; if (vector == (target_ulong)-1ULL) { - cpu_abort(cs, "Raised an exception without defined vector %d\n", - excp); + cpu_abort(env_cpu(env), + "Raised an exception without defined vector %d\n", excp); } - vector |= env->excp_prefix; if (is_prefix_insn_excp(cpu, excp)) { @@ -1434,10 +1345,10 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) * clear (e.g., see FWNMI in PAPR). */ new_msr |= (target_ulong)MSR_HVB; - } - /* machine check exceptions don't have ME set */ - new_msr &= ~((target_ulong)1 << MSR_ME); + /* HV machine check exceptions don't have ME set */ + new_msr &= ~((target_ulong)1 << MSR_ME); + } msr |= env->error_code; break; @@ -1453,23 +1364,17 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) { bool lpes0; - /* - * LPES0 is only taken into consideration if we support HV - * mode for this CPU. 
- */ + /* LPES0 is only taken into consideration if we support HV mode */ if (!env->has_hv_mode) { break; } - lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); - if (!lpes0) { new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; } - break; } case POWERPC_EXCP_ALIGN: /* Alignment exception */ @@ -1492,11 +1397,9 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) powerpc_reset_excp_state(cpu); return; } - /* - * FP exceptions always have NIP pointing to the faulting - * instruction, so always use store_next and claim we are - * precise in the MSR. + * NIP always points to the faulting instruction for FP exceptions, + * so always use store_next and claim we are precise in the MSR. */ msr |= 0x00100000; break; @@ -1512,7 +1415,7 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) break; default: /* Should never occur */ - cpu_abort(cs, "Invalid program exception %d. Aborting\n", + cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", env->error_code); break; } @@ -1578,7 +1481,8 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) new_msr |= (target_ulong)MSR_HVB; } else { if (FIELD_EX64(env->msr, MSR, POW)) { - cpu_abort(cs, "Trying to deliver power-saving system reset " + cpu_abort(env_cpu(env), + "Trying to deliver power-saving system reset " "exception %d with no HV support\n", excp); } } @@ -1650,29 +1554,22 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) case POWERPC_EXCP_VPUA: /* Vector assist exception */ case POWERPC_EXCP_MAINT: /* Maintenance exception */ case POWERPC_EXCP_HV_MAINT: /* Hypervisor Maintenance exception */ - cpu_abort(cs, "%s exception not implemented\n", + cpu_abort(env_cpu(env), "%s exception not implemented\n", powerpc_excp_name(excp)); break; default: - cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); + cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n", + excp); break; } - /* - * Sort out endianness of interrupt, this differs depending on the - * CPU, the HV mode, etc... - */ if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) { new_msr |= (target_ulong)1 << MSR_LE; } - new_msr |= (target_ulong)1 << MSR_SF; if (excp != POWERPC_EXCP_SYSCALL_VECTORED) { - /* Save PC */ env->spr[srr0] = env->nip; - - /* Save MSR */ env->spr[srr1] = msr; } @@ -1681,19 +1578,15 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */ vhc->deliver_hv_excp(cpu, excp); - powerpc_reset_excp_state(cpu); - } else { /* Sanity check */ if (!(env->msr_mask & MSR_HVB) && srr0 == SPR_HSRR0) { - cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with " - "no HV support\n", excp); + cpu_abort(env_cpu(env), "Trying to deliver HV exception (HSRR) %d " + "with no HV support\n", excp); } - /* This can update new_msr and vector if AIL applies */ ppc_excp_apply_ail(cpu, excp, msr, &new_msr, &vector); - powerpc_set_excp_state(cpu, vector, new_msr); } } @@ -1702,15 +1595,15 @@ static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp) { g_assert_not_reached(); } -#endif +#endif /* TARGET_PPC64 */ static void powerpc_excp(PowerPCCPU *cpu, int excp) { - CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) { - cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); + cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. 
Aborting\n", + excp); } qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx @@ -1753,7 +1646,7 @@ void ppc_cpu_do_interrupt(CPUState *cs) powerpc_excp(cpu, cs->exception_index); } -#if defined(TARGET_PPC64) +#ifdef TARGET_PPC64 #define P7_UNUSED_INTERRUPTS \ (PPC_INTERRUPT_RESET | PPC_INTERRUPT_HVIRT | PPC_INTERRUPT_CEXT | \ PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \ @@ -2084,10 +1977,23 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env) return 0; } -#endif +#endif /* TARGET_PPC64 */ -static int ppc_next_unmasked_interrupt_generic(CPUPPCState *env) +static int ppc_next_unmasked_interrupt(CPUPPCState *env) { +#ifdef TARGET_PPC64 + switch (env->excp_model) { + case POWERPC_EXCP_POWER7: + return p7_next_unmasked_interrupt(env); + case POWERPC_EXCP_POWER8: + return p8_next_unmasked_interrupt(env); + case POWERPC_EXCP_POWER9: + case POWERPC_EXCP_POWER10: + return p9_next_unmasked_interrupt(env); + default: + break; + } +#endif bool async_deliver; /* External reset */ @@ -2198,23 +2104,6 @@ static int ppc_next_unmasked_interrupt_generic(CPUPPCState *env) return 0; } -static int ppc_next_unmasked_interrupt(CPUPPCState *env) -{ - switch (env->excp_model) { -#if defined(TARGET_PPC64) - case POWERPC_EXCP_POWER7: - return p7_next_unmasked_interrupt(env); - case POWERPC_EXCP_POWER8: - return p8_next_unmasked_interrupt(env); - case POWERPC_EXCP_POWER9: - case POWERPC_EXCP_POWER10: - return p9_next_unmasked_interrupt(env); -#endif - default: - return ppc_next_unmasked_interrupt_generic(env); - } -} - /* * Sets CPU_INTERRUPT_HARD if there is at least one unmasked interrupt to be * delivered and clears CPU_INTERRUPT_HARD otherwise. @@ -2231,7 +2120,7 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env) void ppc_maybe_interrupt(CPUPPCState *env) { CPUState *cs = env_cpu(env); - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); if (ppc_next_unmasked_interrupt(env)) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); @@ -2240,11 +2129,10 @@ void ppc_maybe_interrupt(CPUPPCState *env) } } -#if defined(TARGET_PPC64) +#ifdef TARGET_PPC64 static void p7_deliver_interrupt(CPUPPCState *env, int interrupt) { PowerPCCPU *cpu = env_archcpu(env); - CPUState *cs = env_cpu(env); switch (interrupt) { case PPC_INTERRUPT_MCK: /* Machine check exception */ @@ -2288,14 +2176,14 @@ static void p7_deliver_interrupt(CPUPPCState *env, int interrupt) assert(!env->resume_as_sreset); break; default: - cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt); + cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n", + interrupt); } } static void p8_deliver_interrupt(CPUPPCState *env, int interrupt) { PowerPCCPU *cpu = env_archcpu(env); - CPUState *cs = env_cpu(env); switch (interrupt) { case PPC_INTERRUPT_MCK: /* Machine check exception */ @@ -2359,7 +2247,8 @@ static void p8_deliver_interrupt(CPUPPCState *env, int interrupt) assert(!env->resume_as_sreset); break; default: - cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt); + cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n", + interrupt); } } @@ -2438,15 +2327,28 @@ static void p9_deliver_interrupt(CPUPPCState *env, int interrupt) assert(!env->resume_as_sreset); break; default: - cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt); + cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. 
Aborting\n", + interrupt); } } -#endif +#endif /* TARGET_PPC64 */ -static void ppc_deliver_interrupt_generic(CPUPPCState *env, int interrupt) +static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt) { +#ifdef TARGET_PPC64 + switch (env->excp_model) { + case POWERPC_EXCP_POWER7: + return p7_deliver_interrupt(env, interrupt); + case POWERPC_EXCP_POWER8: + return p8_deliver_interrupt(env, interrupt); + case POWERPC_EXCP_POWER9: + case POWERPC_EXCP_POWER10: + return p9_deliver_interrupt(env, interrupt); + default: + break; + } +#endif PowerPCCPU *cpu = env_archcpu(env); - CPUState *cs = env_cpu(env); switch (interrupt) { case PPC_INTERRUPT_RESET: /* External reset */ @@ -2543,27 +2445,8 @@ static void ppc_deliver_interrupt_generic(CPUPPCState *env, int interrupt) assert(!env->resume_as_sreset); break; default: - cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt); - } -} - -static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt) -{ - switch (env->excp_model) { -#if defined(TARGET_PPC64) - case POWERPC_EXCP_POWER7: - p7_deliver_interrupt(env, interrupt); - break; - case POWERPC_EXCP_POWER8: - p8_deliver_interrupt(env, interrupt); - break; - case POWERPC_EXCP_POWER9: - case POWERPC_EXCP_POWER10: - p9_deliver_interrupt(env, interrupt); - break; -#endif - default: - ppc_deliver_interrupt_generic(env, interrupt); + cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n", + interrupt); } } @@ -2597,8 +2480,7 @@ void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector) bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); int interrupt; if ((interrupt_request & CPU_INTERRUPT_HARD) == 0) { @@ -2660,10 +2542,8 @@ void helper_raise_exception(CPUPPCState *env, uint32_t exception) { raise_exception_err_ra(env, exception, 0, 0); } -#endif -#if !defined(CONFIG_USER_ONLY) -#ifdef CONFIG_TCG +#ifndef CONFIG_USER_ONLY void helper_store_msr(CPUPPCState *env, target_ulong val) { uint32_t excp = hreg_store_msr(env, val, 0); @@ -2679,7 +2559,7 @@ void helper_ppc_maybe_interrupt(CPUPPCState *env) ppc_maybe_interrupt(env); } -#if defined(TARGET_PPC64) +#ifdef TARGET_PPC64 void helper_scv(CPUPPCState *env, uint32_t lev) { if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) { @@ -2707,7 +2587,7 @@ void helper_pminsn(CPUPPCState *env, uint32_t insn) ppc_maybe_interrupt(env); } -#endif /* defined(TARGET_PPC64) */ +#endif /* TARGET_PPC64 */ static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) { @@ -2718,7 +2598,7 @@ static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) if (env->flags & POWERPC_FLAG_TGPR) msr &= ~(1ULL << MSR_TGPR); -#if defined(TARGET_PPC64) +#ifdef TARGET_PPC64 /* Switching to 32-bit ? 
Crop the nip */ if (!msr_is_64bit(env, msr)) { nip = (uint32_t)nip; @@ -2747,7 +2627,7 @@ void helper_rfi(CPUPPCState *env) do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful); } -#if defined(TARGET_PPC64) +#ifdef TARGET_PPC64 void helper_rfid(CPUPPCState *env) { /* @@ -2768,9 +2648,7 @@ void helper_hrfid(CPUPPCState *env) { do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); } -#endif -#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) void helper_rfebb(CPUPPCState *env, target_ulong s) { target_ulong msr = env->msr; @@ -2845,7 +2723,7 @@ void raise_ebb_perfm_exception(CPUPPCState *env) do_ebb(env, POWERPC_EXCP_PERFM_EBB); } -#endif +#endif /* TARGET_PPC64 */ /*****************************************************************************/ /* Embedded PowerPC specific helpers */ @@ -2870,10 +2748,8 @@ void helper_rfmci(CPUPPCState *env) /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */ do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); } -#endif /* CONFIG_TCG */ -#endif /* !defined(CONFIG_USER_ONLY) */ +#endif /* !CONFIG_USER_ONLY */ -#ifdef CONFIG_TCG void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, uint32_t flags) { @@ -2887,7 +2763,7 @@ void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, } } -#if defined(TARGET_PPC64) +#ifdef TARGET_PPC64 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, uint32_t flags) { @@ -2900,10 +2776,8 @@ void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, POWERPC_EXCP_TRAP, GETPC()); } } -#endif -#endif +#endif /* TARGET_PPC64 */ -#ifdef CONFIG_TCG static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane) { const uint16_t c = 0xfffc; @@ -3014,12 +2888,8 @@ HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE) HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE) HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE) HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE) -#endif /* CONFIG_TCG */ - -#if !defined(CONFIG_USER_ONLY) - -#ifdef CONFIG_TCG +#ifndef CONFIG_USER_ONLY /* Embedded.Processor Control */ static int dbell2irq(target_ulong rb) { @@ -3065,7 +2935,7 @@ void helper_msgsnd(target_ulong rb) return; } - qemu_mutex_lock_iothread(); + bql_lock(); CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *cenv = &cpu->env; @@ -3074,7 +2944,7 @@ void helper_msgsnd(target_ulong rb) ppc_set_irq(cpu, irq, 1); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* Server Processor Control */ @@ -3102,7 +2972,7 @@ static void book3s_msgsnd_common(int pir, int irq) { CPUState *cs; - qemu_mutex_lock_iothread(); + bql_lock(); CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *cenv = &cpu->env; @@ -3112,7 +2982,7 @@ static void book3s_msgsnd_common(int pir, int irq) ppc_set_irq(cpu, irq, 1); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } void helper_book3s_msgsnd(target_ulong rb) @@ -3126,7 +2996,7 @@ void helper_book3s_msgsnd(target_ulong rb) book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL); } -#if defined(TARGET_PPC64) +#ifdef TARGET_PPC64 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb) { helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP); @@ -3166,14 +3036,14 @@ void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) } /* Does iothread need to be locked for walking CPU list? 
*/ - qemu_mutex_lock_iothread(); + bql_lock(); THREAD_SIBLING_FOREACH(cs, ccs) { PowerPCCPU *ccpu = POWERPC_CPU(ccs); uint32_t thread_id = ppc_cpu_tir(ccpu); if (ttir == thread_id) { ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1); - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } } @@ -3366,5 +3236,5 @@ bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) return false; } -#endif /* CONFIG_TCG */ #endif /* !CONFIG_USER_ONLY */ +#endif /* CONFIG_TCG */ diff --git a/target/ppc/gdbstub.c b/target/ppc/gdbstub.c index ec5731e5d67..3b28d4e21c7 100644 --- a/target/ppc/gdbstub.c +++ b/target/ppc/gdbstub.c @@ -108,8 +108,7 @@ void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len) int ppc_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); uint8_t *mem_buf; int r = ppc_gdb_register_len(n); @@ -152,8 +151,7 @@ int ppc_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n) int ppc_cpu_gdb_read_register_apple(CPUState *cs, GByteArray *buf, int n) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); uint8_t *mem_buf; int r = ppc_gdb_register_len_apple(n); @@ -206,8 +204,7 @@ int ppc_cpu_gdb_read_register_apple(CPUState *cs, GByteArray *buf, int n) int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); int r = ppc_gdb_register_len(n); if (!r) { @@ -253,8 +250,7 @@ int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) } int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); int r = ppc_gdb_register_len_apple(n); if (!r) { @@ -300,12 +296,12 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n) } #ifndef CONFIG_USER_ONLY -void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu) +static void gdb_gen_spr_feature(CPUState *cs) { - PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); + PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; - GString *xml; - char *spr_name; + GDBFeatureBuilder builder; unsigned int num_regs = 0; int i; @@ -328,13 +324,13 @@ void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu) num_regs++; } - if (pcc->gdb_spr_xml) { + if (pcc->gdb_spr.xml) { return; } - xml = g_string_new(""); - g_string_append(xml, ""); - g_string_append(xml, ""); + gdb_feature_builder_init(&builder, &pcc->gdb_spr, + "org.qemu.power.spr", "power-spr.xml", + cs->gdb_num_regs); for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) { ppc_spr_t *spr = &env->spr_cb[i]; @@ -343,28 +339,12 @@ void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu) continue; } - spr_name = g_ascii_strdown(spr->name, -1); - g_string_append_printf(xml, ""); + gdb_feature_builder_append_reg(&builder, g_ascii_strdown(spr->name, -1), + TARGET_LONG_BITS, spr->gdb_id, + "int", "spr"); } - g_string_append(xml, ""); - - pcc->gdb_num_sprs = num_regs; - pcc->gdb_spr_xml = g_string_free(xml, false); -} - -const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name) -{ - PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); - - if (strcmp(xml_name, "power-spr.xml") == 0) { - return pcc->gdb_spr_xml; - } - return NULL; + gdb_feature_builder_end(&builder); } #endif @@ -383,8 +363,10 @@ static int gdb_find_spr_idx(CPUPPCState *env, int n) 
return -1; } -static int gdb_get_spr_reg(CPUPPCState *env, GByteArray *buf, int n) +static int gdb_get_spr_reg(CPUState *cs, GByteArray *buf, int n) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; int reg; int len; @@ -394,13 +376,40 @@ static int gdb_get_spr_reg(CPUPPCState *env, GByteArray *buf, int n) } len = TARGET_LONG_SIZE; - gdb_get_regl(buf, env->spr[reg]); + + /* Handle those SPRs that are not part of the env->spr[] array */ + target_ulong val; + switch (reg) { +#if defined(TARGET_PPC64) + case SPR_CFAR: + val = env->cfar; + break; +#endif + case SPR_HDEC: + val = cpu_ppc_load_hdecr(env); + break; + case SPR_TBL: + val = cpu_ppc_load_tbl(env); + break; + case SPR_TBU: + val = cpu_ppc_load_tbu(env); + break; + case SPR_DECR: + val = cpu_ppc_load_decr(env); + break; + default: + val = env->spr[reg]; + } + gdb_get_regl(buf, val); + ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, len), len); return len; } -static int gdb_set_spr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +static int gdb_set_spr_reg(CPUState *cs, uint8_t *mem_buf, int n) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; int reg; int len; @@ -411,14 +420,27 @@ static int gdb_set_spr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) len = TARGET_LONG_SIZE; ppc_maybe_bswap_register(env, mem_buf, len); - env->spr[reg] = ldn_p(mem_buf, len); + + /* Handle those SPRs that are not part of the env->spr[] array */ + target_ulong val = ldn_p(mem_buf, len); + switch (reg) { +#if defined(TARGET_PPC64) + case SPR_CFAR: + env->cfar = val; + break; +#endif + default: + env->spr[reg] = val; + } return len; } #endif -static int gdb_get_float_reg(CPUPPCState *env, GByteArray *buf, int n) +static int gdb_get_float_reg(CPUState *cs, GByteArray *buf, int n) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; uint8_t *mem_buf; if (n < 32) { gdb_get_reg64(buf, *cpu_fpr_ptr(env, n)); @@ -435,8 +457,11 @@ static int gdb_get_float_reg(CPUPPCState *env, GByteArray *buf, int n) return 0; } -static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +static int gdb_set_float_reg(CPUState *cs, uint8_t *mem_buf, int n) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + if (n < 32) { ppc_maybe_bswap_register(env, mem_buf, 8); *cpu_fpr_ptr(env, n) = ldq_p(mem_buf); @@ -450,8 +475,10 @@ static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n) return 0; } -static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n) +static int gdb_get_avr_reg(CPUState *cs, GByteArray *buf, int n) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; uint8_t *mem_buf; if (n < 32) { @@ -476,8 +503,11 @@ static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n) return 0; } -static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +static int gdb_set_avr_reg(CPUState *cs, uint8_t *mem_buf, int n) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + if (n < 32) { ppc_avr_t *avr = cpu_avr_ptr(env, n); ppc_maybe_bswap_register(env, mem_buf, 16); @@ -498,8 +528,11 @@ static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) return 0; } -static int gdb_get_spe_reg(CPUPPCState *env, GByteArray *buf, int n) +static int gdb_get_spe_reg(CPUState *cs, GByteArray *buf, int n) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + if (n < 32) { #if defined(TARGET_PPC64) gdb_get_reg32(buf, env->gpr[n] >> 32); @@ -522,8 +555,11 @@ static int gdb_get_spe_reg(CPUPPCState *env, 
GByteArray *buf, int n) return 0; } -static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +static int gdb_set_spe_reg(CPUState *cs, uint8_t *mem_buf, int n) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + if (n < 32) { #if defined(TARGET_PPC64) target_ulong lo = (uint32_t)env->gpr[n]; @@ -551,8 +587,11 @@ static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n) return 0; } -static int gdb_get_vsx_reg(CPUPPCState *env, GByteArray *buf, int n) +static int gdb_get_vsx_reg(CPUState *cs, GByteArray *buf, int n) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + if (n < 32) { gdb_get_reg64(buf, *cpu_vsrl_ptr(env, n)); ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8); @@ -561,8 +600,11 @@ static int gdb_get_vsx_reg(CPUPPCState *env, GByteArray *buf, int n) return 0; } -static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +static int gdb_set_vsx_reg(CPUState *cs, uint8_t *mem_buf, int n) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + if (n < 32) { ppc_maybe_bswap_register(env, mem_buf, 8); *cpu_vsrl_ptr(env, n) = ldq_p(mem_buf); @@ -584,22 +626,24 @@ void ppc_gdb_init(CPUState *cs, PowerPCCPUClass *pcc) { if (pcc->insns_flags & PPC_FLOAT) { gdb_register_coprocessor(cs, gdb_get_float_reg, gdb_set_float_reg, - 33, "power-fpu.xml", 0); + gdb_find_static_feature("power-fpu.xml"), 0); } if (pcc->insns_flags & PPC_ALTIVEC) { gdb_register_coprocessor(cs, gdb_get_avr_reg, gdb_set_avr_reg, - 34, "power-altivec.xml", 0); + gdb_find_static_feature("power-altivec.xml"), + 0); } if (pcc->insns_flags & PPC_SPE) { gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg, - 34, "power-spe.xml", 0); + gdb_find_static_feature("power-spe.xml"), 0); } if (pcc->insns_flags2 & PPC2_VSX) { gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg, - 32, "power-vsx.xml", 0); + gdb_find_static_feature("power-vsx.xml"), 0); } #ifndef CONFIG_USER_ONLY + gdb_gen_spr_feature(cs); gdb_register_coprocessor(cs, gdb_get_spr_reg, gdb_set_spr_reg, - pcc->gdb_num_sprs, "power-spr.xml", 0); + &pcc->gdb_spr, 0); #endif } diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c index f380342d4dd..25258986e36 100644 --- a/target/ppc/helper_regs.c +++ b/target/ppc/helper_regs.c @@ -244,7 +244,7 @@ void cpu_interrupt_exittb(CPUState *cs) * unless running with TCG. 
*/ if (tcg_enabled()) { - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); } } @@ -264,6 +264,11 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv) value &= ~MSR_HVB; value |= env->msr & MSR_HVB; } + /* Attempt to modify MSR[ME] in guest state is ignored */ + if (is_book3s_arch2x(env) && !(env->msr & MSR_HVB)) { + value &= ~(1 << MSR_ME); + value |= env->msr & (1 << MSR_ME); + } if ((value ^ env->msr) & (R_MSR_IR_MASK | R_MSR_DR_MASK)) { cpu_interrupt_exittb(cs); } @@ -460,22 +465,41 @@ void register_generic_sprs(PowerPCCPU *cpu) } /* Time base */ - spr_register(env, SPR_VTBL, "TBL", - &spr_read_tbl, SPR_NOACCESS, +#if defined(TARGET_PPC64) + spr_register(env, SPR_TBL, "TB", +#else + spr_register(env, SPR_TBL, "TBL", +#endif &spr_read_tbl, SPR_NOACCESS, - 0x00000000); - spr_register(env, SPR_TBL, "TBL", &spr_read_tbl, SPR_NOACCESS, - &spr_read_tbl, &spr_write_tbl, 0x00000000); - spr_register(env, SPR_VTBU, "TBU", + spr_register(env, SPR_TBU, "TBU", &spr_read_tbu, SPR_NOACCESS, &spr_read_tbu, SPR_NOACCESS, 0x00000000); - spr_register(env, SPR_TBU, "TBU", - &spr_read_tbu, SPR_NOACCESS, - &spr_read_tbu, &spr_write_tbu, - 0x00000000); +#ifndef CONFIG_USER_ONLY + if (env->has_hv_mode) { + spr_register_hv(env, SPR_WR_TBL, "TBL", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, &spr_write_tbl, + 0x00000000); + spr_register_hv(env, SPR_WR_TBU, "TBU", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, &spr_write_tbu, + 0x00000000); + } else { + spr_register(env, SPR_WR_TBL, "TBL", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, &spr_write_tbl, + 0x00000000); + spr_register(env, SPR_WR_TBU, "TBU", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, &spr_write_tbu, + 0x00000000); + } +#endif } void register_non_embedded_sprs(CPUPPCState *env) @@ -490,7 +514,7 @@ void register_non_embedded_sprs(CPUPPCState *env) &spr_read_generic, &spr_write_generic, KVM_REG_PPC_DAR, 0x00000000); /* Timer */ - spr_register(env, SPR_DECR, "DECR", + spr_register(env, SPR_DECR, "DEC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_decr, &spr_write_decr, 0x00000000); diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode index 4fcf3af8d0d..eada59f59f4 100644 --- a/target/ppc/insn32.decode +++ b/target/ppc/insn32.decode @@ -187,6 +187,12 @@ &X_a ra @X_a ...... ra:3 .. ..... ..... .......... . &X_a +&XO rt ra rb oe:bool rc:bool +@XO ...... rt:5 ra:5 rb:5 oe:1 ......... rc:1 &XO + +&XO_ta rt ra oe:bool rc:bool +@XO_ta ...... rt:5 ra:5 ..... oe:1 ......... rc:1 &XO_ta + %xx_xt 0:1 21:5 %xx_xb 1:1 11:5 %xx_xa 2:1 16:5 @@ -322,10 +328,30 @@ CMPLI 001010 ... - . ..... ................ @D_bfu ### Fixed-Point Arithmetic Instructions +ADD 011111 ..... ..... ..... . 100001010 . @XO +ADDC 011111 ..... ..... ..... . 000001010 . @XO +ADDE 011111 ..... ..... ..... . 010001010 . @XO + +# ADDEX is Z23-form, with CY=0; all other values for CY are reserved. +# This works out the same as X-form. +ADDEX 011111 ..... ..... ..... 00 10101010 - @X + ADDI 001110 ..... ..... ................ @D ADDIS 001111 ..... ..... ................ @D +ADDIC 001100 ..... ..... ................ @D +ADDIC_ 001101 ..... ..... ................ @D ADDPCIS 010011 ..... ..... .......... 00010 . @DX +ADDME 011111 ..... ..... ----- . 011101010 . @XO_ta +ADDZE 011111 ..... ..... ----- . 011001010 . @XO_ta + +SUBF 011111 ..... ..... ..... . 000101000 . @XO +SUBFIC 001000 ..... ..... ................ @D +SUBFC 011111 ..... ..... ..... . 000001000 . 
@XO +SUBFE 011111 ..... ..... ..... . 010001000 . @XO + +SUBFME 011111 ..... ..... ----- . 011101000 . @XO_ta +SUBFZE 011111 ..... ..... ----- . 011001000 . @XO_ta ## Fixed-Point Logical Instructions diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index 9b1abe2fc41..8231feb2d45 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -546,8 +546,7 @@ static void kvm_sw_tlb_put(PowerPCCPU *cpu) static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); /* Init 'val' to avoid "uninitialised value" Valgrind warnings */ union { uint32_t u32; @@ -581,8 +580,7 @@ static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr) static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); union { uint32_t u32; uint64_t u64; @@ -615,8 +613,7 @@ static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr) static int kvm_put_fp(CPUState *cs) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); struct kvm_one_reg reg; int i; int ret; @@ -635,8 +632,8 @@ static int kvm_put_fp(CPUState *cs) for (i = 0; i < 32; i++) { uint64_t vsr[2]; - uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i); - uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i); + uint64_t *fpr = cpu_fpr_ptr(env, i); + uint64_t *vsrl = cpu_vsrl_ptr(env, i); #if HOST_BIG_ENDIAN vsr[0] = float64_val(*fpr); @@ -682,8 +679,7 @@ static int kvm_put_fp(CPUState *cs) static int kvm_get_fp(CPUState *cs) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); struct kvm_one_reg reg; int i; int ret; @@ -704,8 +700,8 @@ static int kvm_get_fp(CPUState *cs) for (i = 0; i < 32; i++) { uint64_t vsr[2]; - uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i); - uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i); + uint64_t *fpr = cpu_fpr_ptr(env, i); + uint64_t *vsrl = cpu_vsrl_ptr(env, i); reg.addr = (uintptr_t) &vsr; reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i); @@ -1656,7 +1652,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) CPUPPCState *env = &cpu->env; int ret; - qemu_mutex_lock_iothread(); + bql_lock(); switch (run->exit_reason) { case KVM_EXIT_DCR: @@ -1715,7 +1711,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) break; } - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -2369,17 +2365,6 @@ static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data) #if defined(TARGET_PPC64) pcc->radix_page_info = kvmppc_get_radix_page_info(); - - if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) { - /* - * POWER9 DD1 has some bugs which make it not really ISA 3.00 - * compliant. More importantly, advertising ISA 3.00 - * architected mode may prevent guests from activating - * necessary DD1 workarounds. - */ - pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07 - | PCR_COMPAT_2_06 | PCR_COMPAT_2_05); - } #endif /* defined(TARGET_PPC64) */ } @@ -2688,7 +2673,7 @@ int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp) int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns) { int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); - uint8_t buf[bufsize]; + g_autofree uint8_t *buf = g_malloc(bufsize); ssize_t rc; do { @@ -2770,9 +2755,9 @@ void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n) while (i < n) { struct kvm_get_htab_header *hdr; int m = n < HPTES_PER_GROUP ? 
n : HPTES_PER_GROUP; - char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64]; + char buf[sizeof(*hdr) + HPTES_PER_GROUP * HASH_PTE_SIZE_64]; - rc = read(fd, buf, sizeof(buf)); + rc = read(fd, buf, sizeof(*hdr) + m * HASH_PTE_SIZE_64); if (rc < 0) { hw_error("kvmppc_read_hptes: Unable to read HPTEs"); } diff --git a/target/ppc/machine.c b/target/ppc/machine.c index 68cbdffecd4..203fe28e014 100644 --- a/target/ppc/machine.c +++ b/target/ppc/machine.c @@ -351,7 +351,7 @@ static const VMStateDescription vmstate_fpu = { .version_id = 1, .minimum_version_id = 1, .needed = fpu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32), VMSTATE_UINTTL(env.fpscr, PowerPCCPU), VMSTATE_END_OF_LIST() @@ -392,7 +392,7 @@ static const VMStateDescription vmstate_altivec = { .version_id = 1, .minimum_version_id = 1, .needed = altivec_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32), /* * Save the architecture value of the vscr, not the internally @@ -425,7 +425,7 @@ static const VMStateDescription vmstate_vsx = { .version_id = 1, .minimum_version_id = 1, .needed = vsx_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32), VMSTATE_END_OF_LIST() }, @@ -445,7 +445,7 @@ static const VMStateDescription vmstate_tm = { .version_id = 1, .minimum_version_id = 1, .needed = tm_needed, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32), VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64), VMSTATE_UINT64(env.tm_cr, PowerPCCPU), @@ -479,7 +479,7 @@ static const VMStateDescription vmstate_sr = { .version_id = 1, .minimum_version_id = 1, .needed = sr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32), VMSTATE_END_OF_LIST() }, @@ -553,7 +553,7 @@ static const VMStateDescription vmstate_slb = { .minimum_version_id = 1, .needed = slb_needed, .post_load = slb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration), VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES), VMSTATE_END_OF_LIST() @@ -565,7 +565,7 @@ static const VMStateDescription vmstate_tlb6xx_entry = { .name = "cpu/tlb6xx_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(pte0, ppc6xx_tlb_t), VMSTATE_UINTTL(pte1, ppc6xx_tlb_t), VMSTATE_UINTTL(EPN, ppc6xx_tlb_t), @@ -586,7 +586,7 @@ static const VMStateDescription vmstate_tlb6xx = { .version_id = 1, .minimum_version_id = 1, .needed = tlb6xx_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU, env.nb_tlb, @@ -601,7 +601,7 @@ static const VMStateDescription vmstate_tlbemb_entry = { .name = "cpu/tlbemb_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(RPN, ppcemb_tlb_t), VMSTATE_UINTTL(EPN, ppcemb_tlb_t), VMSTATE_UINTTL(PID, ppcemb_tlb_t), @@ -625,7 +625,7 @@ static const VMStateDescription vmstate_tlbemb = { .version_id = 1, .minimum_version_id = 1, .needed = tlbemb_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), 
VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU, env.nb_tlb, @@ -639,7 +639,7 @@ static const VMStateDescription vmstate_tlbmas_entry = { .name = "cpu/tlbmas_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(mas8, ppcmas_tlb_t), VMSTATE_UINT32(mas1, ppcmas_tlb_t), VMSTATE_UINT64(mas2, ppcmas_tlb_t), @@ -661,7 +661,7 @@ static const VMStateDescription vmstate_tlbmas = { .version_id = 1, .minimum_version_id = 1, .needed = tlbmas_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU, env.nb_tlb, @@ -684,7 +684,7 @@ static const VMStateDescription vmstate_compat = { .version_id = 1, .minimum_version_id = 1, .needed = compat_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(compat_pvr, PowerPCCPU), VMSTATE_END_OF_LIST() } @@ -700,7 +700,7 @@ static const VMStateDescription vmstate_reservation = { .version_id = 1, .minimum_version_id = 1, .needed = reservation_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU), VMSTATE_UINTTL(env.reserve_length, PowerPCCPU), VMSTATE_UINTTL(env.reserve_val, PowerPCCPU), @@ -717,7 +717,7 @@ const VMStateDescription vmstate_ppc_cpu = { .minimum_version_id = 5, .pre_save = cpu_pre_save, .post_load = cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */ /* User mode architected state */ @@ -748,7 +748,7 @@ const VMStateDescription vmstate_ppc_cpu = { VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fpu, &vmstate_altivec, &vmstate_vsx, diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c index c7535481d67..ea7e8443a8b 100644 --- a/target/ppc/mem_helper.c +++ b/target/ppc/mem_helper.c @@ -83,7 +83,7 @@ static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb, void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg) { uintptr_t raddr = GETPC(); - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = ppc_env_mmu_index(env, false); void *host = probe_contiguous(env, addr, (32 - reg) * 4, MMU_DATA_LOAD, mmu_idx, raddr); @@ -105,7 +105,7 @@ void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg) void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg) { uintptr_t raddr = GETPC(); - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = ppc_env_mmu_index(env, false); void *host = probe_contiguous(env, addr, (32 - reg) * 4, MMU_DATA_STORE, mmu_idx, raddr); @@ -135,7 +135,7 @@ static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, return; } - mmu_idx = cpu_mmu_index(env, false); + mmu_idx = ppc_env_mmu_index(env, false); host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr); if (likely(host)) { @@ -224,7 +224,7 @@ void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb, return; } - mmu_idx = cpu_mmu_index(env, false); + mmu_idx = ppc_env_mmu_index(env, false); host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr); if (likely(host)) { @@ -276,7 +276,7 @@ static void dcbz_common(CPUPPCState *env, target_ulong addr, target_ulong mask, dcbz_size = 
env->dcache_line_size; uint32_t i; void *haddr; - int mmu_idx = epid ? PPC_TLB_EPID_STORE : cpu_mmu_index(env, false); + int mmu_idx = epid ? PPC_TLB_EPID_STORE : ppc_env_mmu_index(env, false); #if defined(TARGET_PPC64) /* Check for dcbz vs dcbzl on 970 */ diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c index a05bdf78c98..58e808dc96b 100644 --- a/target/ppc/misc_helper.c +++ b/target/ppc/misc_helper.c @@ -49,9 +49,6 @@ void helper_spr_core_write_generic(CPUPPCState *env, uint32_t sprn, CPUState *cs = env_cpu(env); CPUState *ccs; uint32_t nr_threads = cs->nr_threads; - uint32_t core_id = env->spr[SPR_PIR] & ~(nr_threads - 1); - - assert(core_id == env->spr[SPR_PIR] - env->spr[SPR_TIR]); if (nr_threads == 1) { env->spr[sprn] = val; @@ -238,7 +235,7 @@ target_ulong helper_load_dpdes(CPUPPCState *env) return dpdes; } - qemu_mutex_lock_iothread(); + bql_lock(); THREAD_SIBLING_FOREACH(cs, ccs) { PowerPCCPU *ccpu = POWERPC_CPU(ccs); CPUPPCState *cenv = &ccpu->env; @@ -248,7 +245,7 @@ target_ulong helper_load_dpdes(CPUPPCState *env) dpdes |= (0x1 << thread_id); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); return dpdes; } @@ -278,14 +275,14 @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val) } /* Does iothread need to be locked for walking CPU list? */ - qemu_mutex_lock_iothread(); + bql_lock(); THREAD_SIBLING_FOREACH(cs, ccs) { PowerPCCPU *ccpu = POWERPC_CPU(ccs); uint32_t thread_id = ppc_cpu_tir(ccpu); ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id)); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif /* defined(TARGET_PPC64) */ diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c index 5823e039e64..690dff7a49b 100644 --- a/target/ppc/mmu-radix64.c +++ b/target/ppc/mmu-radix64.c @@ -300,8 +300,8 @@ static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr, if (nlb & mask) { qemu_log_mask(LOG_GUEST_ERROR, - "%s: misaligned page dir/table base: 0x"TARGET_FMT_lx - " page dir size: 0x"TARGET_FMT_lx"\n", + "%s: misaligned page dir/table base: 0x%" PRIx64 + " page dir size: 0x%" PRIx64 "\n", __func__, nlb, mask + 1); nlb &= ~mask; } @@ -324,8 +324,8 @@ static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr, if (base_addr & mask) { qemu_log_mask(LOG_GUEST_ERROR, - "%s: misaligned page dir base: 0x"TARGET_FMT_lx - " page dir size: 0x"TARGET_FMT_lx"\n", + "%s: misaligned page dir base: 0x%" PRIx64 + " page dir size: 0x%" PRIx64 "\n", __func__, base_addr, mask + 1); base_addr &= ~mask; } diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c index 6ca5d122070..751403f1c88 100644 --- a/target/ppc/mmu_common.c +++ b/target/ppc/mmu_common.c @@ -1561,9 +1561,9 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) * mapped by code TLBs, so we also try a MMU_INST_FETCH. 
*/ if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p, - cpu_mmu_index(&cpu->env, false), false) || + ppc_env_mmu_index(&cpu->env, false), false) || ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p, - cpu_mmu_index(&cpu->env, true), false)) { + ppc_env_mmu_index(&cpu->env, true), false)) { return raddr & TARGET_PAGE_MASK; } return -1; diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c index f87d35379a2..c071b4d5e21 100644 --- a/target/ppc/mmu_helper.c +++ b/target/ppc/mmu_helper.c @@ -749,12 +749,29 @@ target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry) return ret; } +static void ppcemb_tlb_flush(CPUState *cs, ppcemb_tlb_t *tlb) +{ + unsigned mmu_idx = 0; + + if (tlb->prot & 0xf) { + mmu_idx |= 0x1; + } + if ((tlb->prot >> 4) & 0xf) { + mmu_idx |= 0x2; + } + if (tlb->attr & 1) { + mmu_idx <<= 2; + } + + tlb_flush_range_by_mmuidx(cs, tlb->EPN, tlb->size, mmu_idx, + TARGET_LONG_BITS); +} + void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry, target_ulong val) { CPUState *cs = env_cpu(env); ppcemb_tlb_t *tlb; - target_ulong page, end; qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry, @@ -762,14 +779,11 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry, entry &= PPC4XX_TLB_ENTRY_MASK; tlb = &env->tlb.tlbe[entry]; /* Invalidate previous TLB (if it's valid) */ - if (tlb->prot & PAGE_VALID) { - end = tlb->EPN + tlb->size; + if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) { qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start " TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__, - (int)entry, tlb->EPN, end); - for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { - tlb_flush_page(cs, page); - } + (int)entry, tlb->EPN, tlb->EPN + tlb->size); + ppcemb_tlb_flush(cs, tlb); } tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT) & PPC4XX_TLBHI_SIZE_MASK); @@ -803,27 +817,25 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry, tlb->prot & PAGE_WRITE ? 'w' : '-', tlb->prot & PAGE_EXEC ? 'x' : '-', tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID); - /* Invalidate new TLB (if valid) */ - if (tlb->prot & PAGE_VALID) { - end = tlb->EPN + tlb->size; - qemu_log_mask(CPU_LOG_MMU, "%s: invalidate TLB %d start " - TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__, - (int)entry, tlb->EPN, end); - for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { - tlb_flush_page(cs, page); - } - } } void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry, target_ulong val) { + CPUState *cs = env_cpu(env); ppcemb_tlb_t *tlb; qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry, val); entry &= PPC4XX_TLB_ENTRY_MASK; tlb = &env->tlb.tlbe[entry]; + /* Invalidate previous TLB (if it's valid) */ + if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) { + qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start " + TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__, + (int)entry, tlb->EPN, tlb->EPN + tlb->size); + ppcemb_tlb_flush(cs, tlb); + } tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK; tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK; tlb->prot = PAGE_READ; @@ -841,8 +853,6 @@ void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry, tlb->prot & PAGE_WRITE ? 'w' : '-', tlb->prot & PAGE_EXEC ? 'x' : '-', tlb->prot & PAGE_VALID ? 
'v' : '-', (int)tlb->PID); - - env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; } target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address) @@ -850,54 +860,61 @@ target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address) return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]); } +static bool mmubooke_pid_match(CPUPPCState *env, ppcemb_tlb_t *tlb) +{ + if (tlb->PID == env->spr[SPR_BOOKE_PID]) { + return true; + } + if (!env->nb_pids) { + return false; + } + + if (env->spr[SPR_BOOKE_PID1] && tlb->PID == env->spr[SPR_BOOKE_PID1]) { + return true; + } + if (env->spr[SPR_BOOKE_PID2] && tlb->PID == env->spr[SPR_BOOKE_PID2]) { + return true; + } + + return false; +} + /* PowerPC 440 TLB management */ void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry, target_ulong value) { ppcemb_tlb_t *tlb; - target_ulong EPN, RPN, size; - int do_flush_tlbs; qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n", __func__, word, (int)entry, value); - do_flush_tlbs = 0; entry &= 0x3F; tlb = &env->tlb.tlbe[entry]; + + /* Invalidate previous TLB (if it's valid) */ + if ((tlb->prot & PAGE_VALID) && mmubooke_pid_match(env, tlb)) { + qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start " + TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__, + (int)entry, tlb->EPN, tlb->EPN + tlb->size); + ppcemb_tlb_flush(env_cpu(env), tlb); + } + switch (word) { default: /* Just here to please gcc */ case 0: - EPN = value & 0xFFFFFC00; - if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) { - do_flush_tlbs = 1; - } - tlb->EPN = EPN; - size = booke_tlb_to_page_size((value >> 4) & 0xF); - if ((tlb->prot & PAGE_VALID) && tlb->size < size) { - do_flush_tlbs = 1; - } - tlb->size = size; + tlb->EPN = value & 0xFFFFFC00; + tlb->size = booke_tlb_to_page_size((value >> 4) & 0xF); tlb->attr &= ~0x1; tlb->attr |= (value >> 8) & 1; if (value & 0x200) { tlb->prot |= PAGE_VALID; } else { - if (tlb->prot & PAGE_VALID) { - tlb->prot &= ~PAGE_VALID; - do_flush_tlbs = 1; - } + tlb->prot &= ~PAGE_VALID; } tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF; - if (do_flush_tlbs) { - tlb_flush(env_cpu(env)); - } break; case 1: - RPN = value & 0xFFFFFC0F; - if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) { - tlb_flush(env_cpu(env)); - } - tlb->RPN = RPN; + tlb->RPN = value & 0xFFFFFC0F; break; case 2: tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00); diff --git a/target/ppc/ppc-qmp-cmds.c b/target/ppc/ppc-qmp-cmds.c index f9acc210562..a25d86a8d19 100644 --- a/target/ppc/ppc-qmp-cmds.c +++ b/target/ppc/ppc-qmp-cmds.c @@ -103,7 +103,11 @@ const MonitorDef monitor_defs[] = { { "xer", 0, &monitor_get_xer }, { "msr", offsetof(CPUPPCState, msr) }, { "tbu", 0, &monitor_get_tbu, }, +#if defined(TARGET_PPC64) + { "tb", 0, &monitor_get_tbl, }, +#else { "tbl", 0, &monitor_get_tbl, }, +#endif { NULL }, }; @@ -133,8 +137,7 @@ static int ppc_cpu_get_reg_num(const char *numstr, int maxnum, int *pregnum) int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval) { int i, regnum; - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); /* General purpose registers */ if ((qemu_tolower(name[0]) == 'r') && @@ -181,8 +184,7 @@ static void ppc_cpu_defs_entry(gpointer data, gpointer user_data) typename = object_class_get_name(oc); info = g_malloc0(sizeof(*info)); - info->name = g_strndup(typename, - strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX)); + info->name = cpu_model_from_type(typename); QAPI_LIST_PREPEND(*first, info); } diff --git 
a/target/ppc/tcg-stub.c b/target/ppc/tcg-stub.c index aadcf59d261..740d796b98c 100644 --- a/target/ppc/tcg-stub.c +++ b/target/ppc/tcg-stub.c @@ -28,18 +28,3 @@ void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp) void destroy_ppc_opcodes(PowerPCCPU *cpu) { } - -target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu, - SpaprMachineState *spapr, - target_ulong shift) -{ - g_assert_not_reached(); -} - -target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu, - SpaprMachineState *spapr, - target_ulong flags, - target_ulong shift) -{ - g_assert_not_reached(); -} diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c index 08a6b47ee08..39d397416ee 100644 --- a/target/ppc/timebase_helper.c +++ b/target/ppc/timebase_helper.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" +#include "hw/ppc/ppc.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "qemu/log.h" @@ -59,19 +60,55 @@ target_ulong helper_load_purr(CPUPPCState *env) void helper_store_purr(CPUPPCState *env, target_ulong val) { - cpu_ppc_store_purr(env, val); + CPUState *cs = env_cpu(env); + CPUState *ccs; + uint32_t nr_threads = cs->nr_threads; + + if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) { + cpu_ppc_store_purr(env, val); + return; + } + + THREAD_SIBLING_FOREACH(cs, ccs) { + CPUPPCState *cenv = &POWERPC_CPU(ccs)->env; + cpu_ppc_store_purr(cenv, val); + } } #endif #if !defined(CONFIG_USER_ONLY) void helper_store_tbl(CPUPPCState *env, target_ulong val) { - cpu_ppc_store_tbl(env, val); + CPUState *cs = env_cpu(env); + CPUState *ccs; + uint32_t nr_threads = cs->nr_threads; + + if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) { + cpu_ppc_store_tbl(env, val); + return; + } + + THREAD_SIBLING_FOREACH(cs, ccs) { + CPUPPCState *cenv = &POWERPC_CPU(ccs)->env; + cpu_ppc_store_tbl(cenv, val); + } } void helper_store_tbu(CPUPPCState *env, target_ulong val) { - cpu_ppc_store_tbu(env, val); + CPUState *cs = env_cpu(env); + CPUState *ccs; + uint32_t nr_threads = cs->nr_threads; + + if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) { + cpu_ppc_store_tbu(env, val); + return; + } + + THREAD_SIBLING_FOREACH(cs, ccs) { + CPUPPCState *cenv = &POWERPC_CPU(ccs)->env; + cpu_ppc_store_tbu(cenv, val); + } } void helper_store_atbl(CPUPPCState *env, target_ulong val) @@ -101,17 +138,53 @@ target_ulong helper_load_hdecr(CPUPPCState *env) void helper_store_hdecr(CPUPPCState *env, target_ulong val) { - cpu_ppc_store_hdecr(env, val); + CPUState *cs = env_cpu(env); + CPUState *ccs; + uint32_t nr_threads = cs->nr_threads; + + if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) { + cpu_ppc_store_hdecr(env, val); + return; + } + + THREAD_SIBLING_FOREACH(cs, ccs) { + CPUPPCState *cenv = &POWERPC_CPU(ccs)->env; + cpu_ppc_store_hdecr(cenv, val); + } } void helper_store_vtb(CPUPPCState *env, target_ulong val) { - cpu_ppc_store_vtb(env, val); + CPUState *cs = env_cpu(env); + CPUState *ccs; + uint32_t nr_threads = cs->nr_threads; + + if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) { + cpu_ppc_store_vtb(env, val); + return; + } + + THREAD_SIBLING_FOREACH(cs, ccs) { + CPUPPCState *cenv = &POWERPC_CPU(ccs)->env; + cpu_ppc_store_vtb(cenv, val); + } } void helper_store_tbu40(CPUPPCState *env, target_ulong val) { - cpu_ppc_store_tbu40(env, val); + CPUState *cs = env_cpu(env); + CPUState *ccs; + uint32_t nr_threads = cs->nr_threads; + + if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) { + cpu_ppc_store_tbu40(env, val); + return; + } + + 
THREAD_SIBLING_FOREACH(cs, ccs) {
+        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+        cpu_ppc_store_tbu40(cenv, val);
+    }
 }
 target_ulong helper_load_40x_pit(CPUPPCState *env)
@@ -145,15 +218,233 @@ void helper_store_booke_tsr(CPUPPCState *env, target_ulong val)
 {
 }
 #if defined(TARGET_PPC64)
-/* POWER processor Timebase Facility */
+/*
+ * POWER processor Timebase Facility
+ */
+
+/*
+ * The TBST is the timebase state machine, which is a per-core machine that
+ * is used to synchronize the core TB with the ChipTOD. States 3,4,5 are
+ * not used in POWER8/9/10.
+ *
+ * The state machine gets driven by writes to TFMR SPR from the core, and
+ * by signals from the ChipTOD. The state machine table for common
+ * transitions is as follows (according to hardware specs, not necessarily
+ * this implementation):
+ *
+ * | Cur            | Event                            | New |
+ * +----------------+----------------------------------+-----+
+ * | 0 RESET        | TFMR |= LOAD_TOD_MOD             | 1   |
+ * | 1 SEND_TOD_MOD | "immediate transition"           | 2   |
+ * | 2 NOT_SET      | mttbu/mttbu40/mttbl              | 2   |
+ * | 2 NOT_SET      | TFMR |= MOVE_CHIP_TOD_TO_TB      | 6   |
+ * | 6 SYNC_WAIT    | "sync pulse from ChipTOD"        | 7   |
+ * | 7 GET_TOD      | ChipTOD xscom MOVE_TOD_TO_TB_REG | 8   |
+ * | 8 TB_RUNNING   | mttbu/mttbu40                    | 8   |
+ * | 8 TB_RUNNING   | TFMR |= LOAD_TOD_MOD             | 1   |
+ * | 8 TB_RUNNING   | mttbl                            | 9   |
+ * | 9 TB_ERROR     | TFMR |= CLEAR_TB_ERRORS          | 0   |
+ *
+ * - LOAD_TOD_MOD will also move states 2,6 to state 1, omitted from table
+ *   because it's not a typical init flow.
+ *
+ * - The ERROR state can be entered from most/all other states on invalid
+ *   states (e.g., if some TFMR control bit is set from a state where it's
+ *   not listed to cause a transition away from), omitted to avoid clutter.
+ *
+ * Note: mttbl causes a timebase error because this inevitably causes
+ * ticks to be lost and TB to become unsynchronized, whereas TB can be
+ * adjusted using mttbu* without losing ticks. mttbl behaviour is not
+ * modelled.
+ *
+ * Note: the TB state machine does not actually cause any real TB adjustment!
+ * TB starts out synchronized across all vCPUs (hardware threads) in
+ * QEMU, so for now the purpose of the TBST and ChipTOD model is simply
+ * to step through firmware initialisation sequences.
+ */ +static unsigned int tfmr_get_tb_state(uint64_t tfmr) +{ + return (tfmr & TFMR_TBST_ENCODED) >> (63 - 31); +} + +static uint64_t tfmr_new_tb_state(uint64_t tfmr, unsigned int tbst) +{ + tfmr &= ~TFMR_TBST_LAST; + tfmr |= (tfmr & TFMR_TBST_ENCODED) >> 4; /* move state to last state */ + tfmr &= ~TFMR_TBST_ENCODED; + tfmr |= (uint64_t)tbst << (63 - 31); /* move new state to state */ + + if (tbst == TBST_TB_RUNNING) { + tfmr |= TFMR_TB_VALID; + } else { + tfmr &= ~TFMR_TB_VALID; + } + + return tfmr; +} + +static void write_tfmr(CPUPPCState *env, target_ulong val) +{ + CPUState *cs = env_cpu(env); + + if (cs->nr_threads == 1) { + env->spr[SPR_TFMR] = val; + } else { + CPUState *ccs; + THREAD_SIBLING_FOREACH(cs, ccs) { + CPUPPCState *cenv = &POWERPC_CPU(ccs)->env; + cenv->spr[SPR_TFMR] = val; + } + } +} + +static void tb_state_machine_step(CPUPPCState *env) +{ + uint64_t tfmr = env->spr[SPR_TFMR]; + unsigned int tbst = tfmr_get_tb_state(tfmr); + + if (!(tfmr & TFMR_TB_ECLIPZ) || tbst == TBST_TB_ERROR) { + return; + } + + if (env->pnv_tod_tbst.tb_sync_pulse_timer) { + env->pnv_tod_tbst.tb_sync_pulse_timer--; + } else { + tfmr |= TFMR_TB_SYNC_OCCURED; + write_tfmr(env, tfmr); + } + + if (env->pnv_tod_tbst.tb_state_timer) { + env->pnv_tod_tbst.tb_state_timer--; + return; + } + + if (tfmr & TFMR_LOAD_TOD_MOD) { + tfmr &= ~TFMR_LOAD_TOD_MOD; + if (tbst == TBST_GET_TOD) { + tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR); + tfmr |= TFMR_FIRMWARE_CONTROL_ERROR; + } else { + tfmr = tfmr_new_tb_state(tfmr, TBST_SEND_TOD_MOD); + /* State seems to transition immediately */ + tfmr = tfmr_new_tb_state(tfmr, TBST_NOT_SET); + } + } else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) { + if (tbst == TBST_SYNC_WAIT) { + tfmr = tfmr_new_tb_state(tfmr, TBST_GET_TOD); + env->pnv_tod_tbst.tb_state_timer = 3; + } else if (tbst == TBST_GET_TOD) { + if (env->pnv_tod_tbst.tod_sent_to_tb) { + tfmr = tfmr_new_tb_state(tfmr, TBST_TB_RUNNING); + tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB; + env->pnv_tod_tbst.tb_ready_for_tod = 0; + env->pnv_tod_tbst.tod_sent_to_tb = 0; + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB " + "state machine in invalid state 0x%x\n", tbst); + tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR); + tfmr |= TFMR_FIRMWARE_CONTROL_ERROR; + env->pnv_tod_tbst.tb_ready_for_tod = 0; + } + } + + write_tfmr(env, tfmr); +} + target_ulong helper_load_tfmr(CPUPPCState *env) { - return env->spr[SPR_TFMR]; + tb_state_machine_step(env); + + return env->spr[SPR_TFMR] | TFMR_TB_ECLIPZ; } void helper_store_tfmr(CPUPPCState *env, target_ulong val) { - env->spr[SPR_TFMR] = val; + uint64_t tfmr = env->spr[SPR_TFMR]; + uint64_t clear_on_write; + unsigned int tbst = tfmr_get_tb_state(tfmr); + + if (!(val & TFMR_TB_ECLIPZ)) { + qemu_log_mask(LOG_UNIMP, "TFMR non-ECLIPZ mode not implemented\n"); + tfmr &= ~TFMR_TBST_ENCODED; + tfmr &= ~TFMR_TBST_LAST; + goto out; + } + + /* Update control bits */ + tfmr = (tfmr & ~TFMR_CONTROL_MASK) | (val & TFMR_CONTROL_MASK); + + /* Several bits are clear-on-write, only one is implemented so far */ + clear_on_write = val & TFMR_FIRMWARE_CONTROL_ERROR; + tfmr &= ~clear_on_write; + + /* + * mtspr always clears this. The sync pulse timer makes it come back + * after the second mfspr. 
+ */ + tfmr &= ~TFMR_TB_SYNC_OCCURED; + env->pnv_tod_tbst.tb_sync_pulse_timer = 1; + + if (ppc_cpu_tir(env_archcpu(env)) != 0 && + (val & (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB))) { + qemu_log_mask(LOG_UNIMP, "TFMR timebase state machine can only be " + "driven by thread 0\n"); + goto out; + } + + if (((tfmr | val) & (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) == + (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) { + qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: LOAD_TOD_MOD and " + "MOVE_CHIP_TOD_TO_TB both set\n"); + tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR); + tfmr |= TFMR_FIRMWARE_CONTROL_ERROR; + env->pnv_tod_tbst.tb_ready_for_tod = 0; + goto out; + } + + if (tfmr & TFMR_CLEAR_TB_ERRORS) { + /* + * Workbook says TFMR_CLEAR_TB_ERRORS should be written twice. + * This is not simulated/required here. + */ + tfmr = tfmr_new_tb_state(tfmr, TBST_RESET); + tfmr &= ~TFMR_CLEAR_TB_ERRORS; + tfmr &= ~TFMR_LOAD_TOD_MOD; + tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB; + tfmr &= ~TFMR_FIRMWARE_CONTROL_ERROR; /* XXX: should this be cleared? */ + env->pnv_tod_tbst.tb_ready_for_tod = 0; + env->pnv_tod_tbst.tod_sent_to_tb = 0; + goto out; + } + + if (tbst == TBST_TB_ERROR) { + qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: mtspr TFMR in TB_ERROR" + " state\n"); + tfmr |= TFMR_FIRMWARE_CONTROL_ERROR; + return; + } + + if (tfmr & TFMR_LOAD_TOD_MOD) { + /* Wait for an arbitrary 3 mfspr until the next state transition. */ + env->pnv_tod_tbst.tb_state_timer = 3; + } else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) { + if (tbst == TBST_NOT_SET) { + tfmr = tfmr_new_tb_state(tfmr, TBST_SYNC_WAIT); + env->pnv_tod_tbst.tb_ready_for_tod = 1; + env->pnv_tod_tbst.tb_state_timer = 3; /* arbitrary */ + } else { + qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB " + "not in TB not set state 0x%x\n", + tbst); + tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR); + tfmr |= TFMR_FIRMWARE_CONTROL_ERROR; + env->pnv_tod_tbst.tb_ready_for_tod = 0; + } + } + +out: + write_tfmr(env, tfmr); } #endif @@ -173,9 +464,9 @@ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn) } else { int ret; - qemu_mutex_lock_iothread(); + bql_lock(); ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (unlikely(ret != 0)) { qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); @@ -196,9 +487,9 @@ void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val) POWERPC_EXCP_INVAL_INVAL, GETPC()); } else { int ret; - qemu_mutex_lock_iothread(); + bql_lock(); ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (unlikely(ret != 0)) { qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 329da4d518e..93ffec787c8 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -247,13 +247,24 @@ static inline bool gen_serialize(DisasContext *ctx) return true; } -#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) +#if !defined(CONFIG_USER_ONLY) +#if defined(TARGET_PPC64) +static inline bool gen_serialize_core(DisasContext *ctx) +{ + if (ctx->flags & POWERPC_FLAG_SMT) { + return gen_serialize(ctx); + } + return true; +} +#endif + static inline bool gen_serialize_core_lpar(DisasContext *ctx) { +#if defined(TARGET_PPC64) if (ctx->flags & POWERPC_FLAG_SMT_1LPAR) { return gen_serialize(ctx); } - +#endif return true; } #endif @@ -285,33 +296,26 @@ static inline void 
gen_update_nip(DisasContext *ctx, target_ulong nip) tcg_gen_movi_tl(cpu_nip, nip); } -static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error) +static void gen_exception_err_nip(DisasContext *ctx, uint32_t excp, + uint32_t error, target_ulong nip) { TCGv_i32 t0, t1; - /* - * These are all synchronous exceptions, we set the PC back to the - * faulting instruction - */ - gen_update_nip(ctx, ctx->cia); + gen_update_nip(ctx, nip); t0 = tcg_constant_i32(excp); t1 = tcg_constant_i32(error); gen_helper_raise_exception_err(tcg_env, t0, t1); ctx->base.is_jmp = DISAS_NORETURN; } -static void gen_exception(DisasContext *ctx, uint32_t excp) +static inline void gen_exception_err(DisasContext *ctx, uint32_t excp, + uint32_t error) { - TCGv_i32 t0; - /* * These are all synchronous exceptions, we set the PC back to the * faulting instruction */ - gen_update_nip(ctx, ctx->cia); - t0 = tcg_constant_i32(excp); - gen_helper_raise_exception(tcg_env, t0); - ctx->base.is_jmp = DISAS_NORETURN; + gen_exception_err_nip(ctx, excp, error, ctx->cia); } static void gen_exception_nip(DisasContext *ctx, uint32_t excp, @@ -325,6 +329,15 @@ static void gen_exception_nip(DisasContext *ctx, uint32_t excp, ctx->base.is_jmp = DISAS_NORETURN; } +static inline void gen_exception(DisasContext *ctx, uint32_t excp) +{ + /* + * These are all synchronous exceptions, we set the PC back to the + * faulting instruction + */ + gen_exception_nip(ctx, excp, ctx->cia); +} + #if !defined(CONFIG_USER_ONLY) static void gen_ppc_maybe_interrupt(DisasContext *ctx) { @@ -667,12 +680,20 @@ void spr_read_atbu(DisasContext *ctx, int gprn, int sprn) #if !defined(CONFIG_USER_ONLY) void spr_write_tbl(DisasContext *ctx, int sprn, int gprn) { + if (!gen_serialize_core_lpar(ctx)) { + return; + } + translator_io_start(&ctx->base); gen_helper_store_tbl(tcg_env, cpu_gpr[gprn]); } void spr_write_tbu(DisasContext *ctx, int sprn, int gprn) { + if (!gen_serialize_core_lpar(ctx)) { + return; + } + translator_io_start(&ctx->base); gen_helper_store_tbu(tcg_env, cpu_gpr[gprn]); } @@ -696,6 +717,9 @@ void spr_read_purr(DisasContext *ctx, int gprn, int sprn) void spr_write_purr(DisasContext *ctx, int sprn, int gprn) { + if (!gen_serialize_core_lpar(ctx)) { + return; + } translator_io_start(&ctx->base); gen_helper_store_purr(tcg_env, cpu_gpr[gprn]); } @@ -709,6 +733,9 @@ void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn) void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn) { + if (!gen_serialize_core_lpar(ctx)) { + return; + } translator_io_start(&ctx->base); gen_helper_store_hdecr(tcg_env, cpu_gpr[gprn]); } @@ -721,12 +748,18 @@ void spr_read_vtb(DisasContext *ctx, int gprn, int sprn) void spr_write_vtb(DisasContext *ctx, int sprn, int gprn) { + if (!gen_serialize_core_lpar(ctx)) { + return; + } translator_io_start(&ctx->base); gen_helper_store_vtb(tcg_env, cpu_gpr[gprn]); } void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn) { + if (!gen_serialize_core_lpar(ctx)) { + return; + } translator_io_start(&ctx->base); gen_helper_store_tbu40(tcg_env, cpu_gpr[gprn]); } @@ -1220,11 +1253,18 @@ void spr_write_hmer(DisasContext *ctx, int sprn, int gprn) void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn) { + /* Reading TFMR can cause it to be updated, so serialize threads here too */ + if (!gen_serialize_core(ctx)) { + return; + } gen_helper_load_tfmr(cpu_gpr[gprn], tcg_env); } void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn) { + if (!gen_serialize_core(ctx)) { + return; + } gen_helper_store_tfmr(tcg_env, 
cpu_gpr[gprn]); } @@ -1697,61 +1737,6 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_mov_tl(ret, t0); } } -/* Add functions with two operands */ -#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ - cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ - ca, glue(ca, 32), \ - add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ -} -/* Add functions with one operand and one immediate */ -#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca, \ - add_ca, compute_ca, compute_ov) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - TCGv t0 = tcg_constant_tl(const_val); \ - gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ - cpu_gpr[rA(ctx->opcode)], t0, \ - ca, glue(ca, 32), \ - add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ -} - -/* add add. addo addo. */ -GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0) -GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1) -/* addc addc. addco addco. */ -GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0) -GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1) -/* adde adde. addeo addeo. */ -GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0) -GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1) -/* addme addme. addmeo addmeo. */ -GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0) -GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1) -/* addex */ -GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0); -/* addze addze. addzeo addzeo.*/ -GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0) -GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1) -/* addic addic.*/ -static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0) -{ - TCGv c = tcg_constant_tl(SIMM(ctx->opcode)); - gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], - c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0); -} - -static void gen_addic(DisasContext *ctx) -{ - gen_op_addic(ctx, 0); -} - -static void gen_addic_(DisasContext *ctx) -{ - gen_op_addic(ctx, 1); -} static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, int sign, int compute_ov) @@ -2172,47 +2157,6 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_mov_tl(ret, t0); } } -/* Sub functions with Two operands functions */ -#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ - cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ - add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ -} -/* Sub functions with one operand and one immediate */ -#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \ - add_ca, compute_ca, compute_ov) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - TCGv t0 = tcg_constant_tl(const_val); \ - gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ - cpu_gpr[rA(ctx->opcode)], t0, \ - add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ -} -/* subf subf. subfo subfo. */ -GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0) -GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1) -/* subfc subfc. subfco subfco. */ -GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0) -GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1) -/* subfe subfe. subfeo subfo. */ -GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0) -GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1) -/* subfme subfme. subfmeo subfmeo. 
*/ -GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0) -GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1) -/* subfze subfze. subfzeo subfzeo.*/ -GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0) -GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1) - -/* subfic */ -static void gen_subfic(DisasContext *ctx) -{ - TCGv c = tcg_constant_tl(SIMM(ctx->opcode)); - gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], - c, 0, 1, 0, 0); -} /* neg neg. nego nego. */ static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov) @@ -6486,8 +6430,6 @@ GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205), GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300), GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL), -GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), -GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER), GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER), GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER), @@ -6498,7 +6440,6 @@ GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B), #endif GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER), GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER), -GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER), @@ -6709,25 +6650,6 @@ GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE, GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300), #endif -#undef GEN_INT_ARITH_ADD -#undef GEN_INT_ARITH_ADD_CONST -#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \ -GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER), -#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \ - add_ca, compute_ca, compute_ov) \ -GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER), -GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0) -GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1) -GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0) -GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1) -GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0) -GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1) -GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0) -GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1) -GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300), -GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0) -GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1) - #undef GEN_INT_ARITH_DIVW #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER) @@ -6766,24 +6688,6 @@ GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02), GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17), #endif -#undef GEN_INT_ARITH_SUBF -#undef GEN_INT_ARITH_SUBF_CONST -#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \ -GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER), -#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \ - add_ca, compute_ca, compute_ov) \ -GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER), -GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0) -GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1) -GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0) -GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1) -GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 
1, 0) -GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1) -GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0) -GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1) -GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0) -GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1) - #undef GEN_LOGICAL1 #undef GEN_LOGICAL2 #define GEN_LOGICAL2(name, tcg_op, opc, type) \ @@ -7518,7 +7422,7 @@ static const TranslatorOps ppc_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc index 51c6fa73300..0c66465d967 100644 --- a/target/ppc/translate/fixedpoint-impl.c.inc +++ b/target/ppc/translate/fixedpoint-impl.c.inc @@ -325,6 +325,76 @@ static bool trans_ADDPCIS(DisasContext *ctx, arg_DX *a) return true; } +static bool trans_ADDEX(DisasContext *ctx, arg_X *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb], + cpu_ov, cpu_ov32, true, true, false, false); + return true; +} + +static bool do_add_D(DisasContext *ctx, arg_D *a, bool add_ca, bool compute_ca, + bool compute_ov, bool compute_rc0) +{ + gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], + tcg_constant_tl(a->si), cpu_ca, cpu_ca32, + add_ca, compute_ca, compute_ov, compute_rc0); + return true; +} + +static bool do_add_XO(DisasContext *ctx, arg_XO *a, bool add_ca, + bool compute_ca) +{ + gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb], + cpu_ca, cpu_ca32, add_ca, compute_ca, a->oe, a->rc); + return true; +} + +static bool do_add_const_XO(DisasContext *ctx, arg_XO_ta *a, TCGv const_val, + bool add_ca, bool compute_ca) +{ + gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], const_val, + cpu_ca, cpu_ca32, add_ca, compute_ca, a->oe, a->rc); + return true; +} + +TRANS(ADD, do_add_XO, false, false); +TRANS(ADDC, do_add_XO, false, true); +TRANS(ADDE, do_add_XO, true, true); +TRANS(ADDME, do_add_const_XO, tcg_constant_tl(-1LL), true, true); +TRANS(ADDZE, do_add_const_XO, tcg_constant_tl(0), true, true); +TRANS(ADDIC, do_add_D, false, true, false, false); +TRANS(ADDIC_, do_add_D, false, true, false, true); + +static bool trans_SUBFIC(DisasContext *ctx, arg_D *a) +{ + gen_op_arith_subf(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], + tcg_constant_tl(a->si), false, true, false, false); + return true; +} + +static bool do_subf_XO(DisasContext *ctx, arg_XO *a, bool add_ca, + bool compute_ca) +{ + gen_op_arith_subf(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb], + add_ca, compute_ca, a->oe, a->rc); + return true; +} + +static bool do_subf_const_XO(DisasContext *ctx, arg_XO_ta *a, TCGv const_val, + bool add_ca, bool compute_ca) +{ + gen_op_arith_subf(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], const_val, + add_ca, compute_ca, a->oe, a->rc); + return true; +} + +TRANS(SUBF, do_subf_XO, false, false) +TRANS(SUBFC, do_subf_XO, false, true) +TRANS(SUBFE, do_subf_XO, true, true) +TRANS(SUBFME, do_subf_const_XO, tcg_constant_tl(-1LL), true, true) +TRANS(SUBFZE, do_subf_const_XO, tcg_constant_tl(0), true, true) + static bool trans_INVALID(DisasContext *ctx, arg_INVALID *a) { gen_invalid(ctx); diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc index 4b91c3489d4..b56e615c247 100644 --- a/target/ppc/translate/vmx-impl.c.inc +++ b/target/ppc/translate/vmx-impl.c.inc @@ -1183,7 +1183,7 @@ static void glue(gen_, name)(DisasContext *ctx) \ /* * Support for 
Altivec instructions that use bit 31 (Rc) as an opcode - * bit but also use bit 21 as an actual Rc bit. In general, thse pairs + * bit but also use bit 21 as an actual Rc bit. In general, these pairs * come from different versions of the ISA, so we must also support a * pair of flags for each instruction. */ diff --git a/target/ppc/user_only_helper.c b/target/ppc/user_only_helper.c index 7ff76f7a061..a4d07a0d0dd 100644 --- a/target/ppc/user_only_helper.c +++ b/target/ppc/user_only_helper.c @@ -27,8 +27,7 @@ void ppc_cpu_record_sigsegv(CPUState *cs, vaddr address, MMUAccessType access_type, bool maperr, uintptr_t retaddr) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *env = &cpu->env; + CPUPPCState *env = cpu_env(cs); int exception, error_code; /* diff --git a/target/riscv/Kconfig b/target/riscv/Kconfig index b9e5932f13f..adb7de3f37d 100644 --- a/target/riscv/Kconfig +++ b/target/riscv/Kconfig @@ -1,5 +1,7 @@ config RISCV32 bool + select ARM_COMPATIBLE_SEMIHOSTING # for do_common_semihosting() config RISCV64 bool + select ARM_COMPATIBLE_SEMIHOSTING # for do_common_semihosting() diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h index 91b3361decc..3670cfe6d9a 100644 --- a/target/riscv/cpu-qom.h +++ b/target/riscv/cpu-qom.h @@ -23,6 +23,8 @@ #define TYPE_RISCV_CPU "riscv-cpu" #define TYPE_RISCV_DYNAMIC_CPU "riscv-dynamic-cpu" +#define TYPE_RISCV_VENDOR_CPU "riscv-vendor-cpu" +#define TYPE_RISCV_BARE_CPU "riscv-bare-cpu" #define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU #define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX) @@ -32,6 +34,12 @@ #define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32") #define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64") #define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128") +#define TYPE_RISCV_CPU_RV32I RISCV_CPU_TYPE_NAME("rv32i") +#define TYPE_RISCV_CPU_RV32E RISCV_CPU_TYPE_NAME("rv32e") +#define TYPE_RISCV_CPU_RV64I RISCV_CPU_TYPE_NAME("rv64i") +#define TYPE_RISCV_CPU_RV64E RISCV_CPU_TYPE_NAME("rv64e") +#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64") +#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64") #define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex") #define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c") #define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31") diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index d5fa0f33150..0519e69c736 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -29,8 +29,10 @@ #include "qapi/visitor.h" #include "qemu/error-report.h" #include "hw/qdev-properties.h" +#include "hw/core/qdev-prop-internal.h" #include "migration/vmstate.h" #include "fpu/softfloat-helpers.h" +#include "sysemu/device_tree.h" #include "sysemu/kvm.h" #include "sysemu/tcg.h" #include "kvm/kvm_riscv.h" @@ -41,9 +43,9 @@ #include "RuntimeCommon.h" /* RISC-V CPU definitions */ -static const char riscv_single_letter_exts[] = "IEMAFDQCPVH"; +static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH"; const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, - RVC, RVS, RVU, RVH, RVJ, RVG, 0}; + RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0}; /* * From vector_helper.c @@ -56,6 +58,25 @@ const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, #define BYTE(x) (x) #endif +bool riscv_cpu_is_32bit(RISCVCPU *cpu) +{ + return riscv_cpu_mxl(&cpu->env) == MXL_RV32; +} + +/* Hash that stores general user set numeric options */ +static GHashTable *general_user_opts; + +static void cpu_option_add_user_setting(const char *optname, uint32_t value) +{ + 
g_hash_table_insert(general_user_opts, (gpointer)optname, + GUINT_TO_POINTER(value)); +} + +bool riscv_cpu_option_set(const char *optname) +{ + return g_hash_table_contains(general_user_opts, optname); +} + #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} @@ -80,8 +101,14 @@ const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, * instead. */ const RISCVIsaExtData isa_edata_arr[] = { + ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b), ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom), + ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop), ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz), + ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11), + ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11), + ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11), + ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11), ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), @@ -90,6 +117,10 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), + ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11), + ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), + ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), + ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), @@ -121,6 +152,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), + ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso), ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), @@ -150,8 +182,13 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp), ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), + ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11), ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), + ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), + ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12), + ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), + ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade), ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), @@ -186,6 +223,11 @@ void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en) *ext_enabled = en; } +bool riscv_cpu_is_vendor(Object *cpu_obj) +{ + return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL; +} + const char * const riscv_int_regnames[] = { "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", "x7/t2", "x8/s0", "x9/s1", 
"x10/a0", "x11/a1", "x12/a2", "x13/a3", @@ -277,12 +319,16 @@ const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) } } -void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) +void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) { - env->misa_mxl_max = env->misa_mxl = mxl; env->misa_ext_mask = env->misa_ext = ext; } +int riscv_cpu_max_xlen(RISCVCPUClass *mcc) +{ + return 16 << mcc->misa_mxl_max; +} + #ifndef CONFIG_USER_ONLY static uint8_t satp_mode_from_str(const char *satp_mode_str) { @@ -373,6 +419,17 @@ static void set_satp_mode_max_supported(RISCVCPU *cpu, /* Set the satp mode to the max supported */ static void set_satp_mode_default_map(RISCVCPU *cpu) { + /* + * Bare CPUs do not default to the max available. + * Users must set a valid satp_mode in the command + * line. + */ + if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { + warn_report("No satp mode set. Defaulting to 'bare'"); + cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); + return; + } + cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; } #endif @@ -381,11 +438,7 @@ static void riscv_any_cpu_init(Object *obj) { RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; -#if defined(TARGET_RISCV32) - riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); -#elif defined(TARGET_RISCV64) - riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); -#endif + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU); #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(RISCV_CPU(obj), @@ -406,25 +459,29 @@ static void riscv_max_cpu_init(Object *obj) { RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; - RISCVMXL mlx = MXL_RV64; -#ifdef TARGET_RISCV32 - mlx = MXL_RV32; -#endif - riscv_cpu_set_misa(env, mlx, 0); + cpu->cfg.mmu = true; + cpu->cfg.pmp = true; + env->priv_ver = PRIV_VERSION_LATEST; #ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ? 
- VM_1_10_SV32 : VM_1_10_SV57); +#ifdef TARGET_RISCV32 + set_satp_mode_max_supported(cpu, VM_1_10_SV32); +#else + set_satp_mode_max_supported(cpu, VM_1_10_SV57); +#endif #endif } #if defined(TARGET_RISCV64) static void rv64_base_cpu_init(Object *obj) { - CPURISCVState *env = &RISCV_CPU(obj)->env; - /* We set this in the realise function */ - riscv_cpu_set_misa(env, MXL_RV64, 0); + RISCVCPU *cpu = RISCV_CPU(obj); + CPURISCVState *env = &cpu->env; + + cpu->cfg.mmu = true; + cpu->cfg.pmp = true; + /* Set latest version of privileged specification */ env->priv_ver = PRIV_VERSION_LATEST; #ifndef CONFIG_USER_ONLY @@ -436,8 +493,7 @@ static void rv64_sifive_u_cpu_init(Object *obj) { RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; - riscv_cpu_set_misa(env, MXL_RV64, - RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); env->priv_ver = PRIV_VERSION_1_10_0; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); @@ -455,7 +511,7 @@ static void rv64_sifive_e_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); env->priv_ver = PRIV_VERSION_1_10_0; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(cpu, VM_1_10_MBARE); @@ -472,7 +528,7 @@ static void rv64_thead_c906_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU); + riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU); env->priv_ver = PRIV_VERSION_1_11_0; cpu->cfg.ext_zfa = true; @@ -503,7 +559,7 @@ static void rv64_veyron_v1_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH); + riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH); env->priv_ver = PRIV_VERSION_1_12_0; /* Enable ISA extensions */ @@ -540,27 +596,46 @@ static void rv64_veyron_v1_cpu_init(Object *obj) static void rv128_base_cpu_init(Object *obj) { + RISCVCPU *cpu = RISCV_CPU(obj); + CPURISCVState *env = &cpu->env; + if (qemu_tcg_mttcg_enabled()) { /* Missing 128-bit aligned atomics */ error_report("128-bit RISC-V currently does not work with Multi " "Threaded TCG. 
Please use: -accel tcg,thread=single"); exit(EXIT_FAILURE); } - CPURISCVState *env = &RISCV_CPU(obj)->env; - /* We set this in the realise function */ - riscv_cpu_set_misa(env, MXL_RV128, 0); + + cpu->cfg.mmu = true; + cpu->cfg.pmp = true; + /* Set latest version of privileged specification */ env->priv_ver = PRIV_VERSION_LATEST; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); #endif } + +static void rv64i_bare_cpu_init(Object *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + riscv_cpu_set_misa_ext(env, RVI); +} + +static void rv64e_bare_cpu_init(Object *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + riscv_cpu_set_misa_ext(env, RVE); +} #else static void rv32_base_cpu_init(Object *obj) { - CPURISCVState *env = &RISCV_CPU(obj)->env; - /* We set this in the realise function */ - riscv_cpu_set_misa(env, MXL_RV32, 0); + RISCVCPU *cpu = RISCV_CPU(obj); + CPURISCVState *env = &cpu->env; + + cpu->cfg.mmu = true; + cpu->cfg.pmp = true; + /* Set latest version of privileged specification */ env->priv_ver = PRIV_VERSION_LATEST; #ifndef CONFIG_USER_ONLY @@ -572,8 +647,7 @@ static void rv32_sifive_u_cpu_init(Object *obj) { RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; - riscv_cpu_set_misa(env, MXL_RV32, - RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); env->priv_ver = PRIV_VERSION_1_10_0; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); @@ -591,7 +665,7 @@ static void rv32_sifive_e_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); env->priv_ver = PRIV_VERSION_1_10_0; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(cpu, VM_1_10_MBARE); @@ -608,7 +682,7 @@ static void rv32_ibex_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU); env->priv_ver = PRIV_VERSION_1_12_0; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(cpu, VM_1_10_MBARE); @@ -625,7 +699,7 @@ static void rv32_imafcu_nommu_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU); env->priv_ver = PRIV_VERSION_1_10_0; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(cpu, VM_1_10_MBARE); @@ -636,6 +710,18 @@ static void rv32_imafcu_nommu_cpu_init(Object *obj) cpu->cfg.ext_zicsr = true; cpu->cfg.pmp = true; } + +static void rv32i_bare_cpu_init(Object *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + riscv_cpu_set_misa_ext(env, RVI); +} + +static void rv32e_bare_cpu_init(Object *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + riscv_cpu_set_misa_ext(env, RVE); +} #endif static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) @@ -649,9 +735,7 @@ static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(typename); g_strfreev(cpuname); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU)) { - return NULL; - } + return oc; } @@ -662,8 +746,7 @@ char *riscv_cpu_get_name(RISCVCPU *cpu) g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); - return g_strndup(typename, - 
strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX)); + return cpu_model_from_type(typename); } static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) @@ -779,7 +862,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) csr_ops[csrno].name, val); } } - uint16_t vlenb = cpu->cfg.vlen >> 3; + uint16_t vlenb = cpu->cfg.vlenb; for (i = 0; i < 32; i++) { qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); @@ -833,6 +916,11 @@ static bool riscv_cpu_has_work(CPUState *cs) #endif } +static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return riscv_env_mmu_index(cpu_env(cs), ifetch); +} + static void riscv_cpu_reset_hold(Object *obj) { #ifndef CONFIG_USER_ONLY @@ -841,14 +929,14 @@ static void riscv_cpu_reset_hold(Object *obj) #endif CPUState *cs = CPU(obj); RISCVCPU *cpu = RISCV_CPU(cs); - RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); CPURISCVState *env = &cpu->env; if (mcc->parent_phases.hold) { mcc->parent_phases.hold(obj); } #ifndef CONFIG_USER_ONLY - env->misa_mxl = env->misa_mxl_max; + env->misa_mxl = mcc->misa_mxl_max; env->priv = PRV_M; env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); if (env->misa_mxl > MXL_RV32) { @@ -876,9 +964,9 @@ static void riscv_cpu_reset_hold(Object *obj) env->two_stage_lookup = false; env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | - (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0); - env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | - (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0); + (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ? + MENVCFG_ADUE : 0); + env->henvcfg = 0; /* Initialized default priorities of local interrupts. */ for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { @@ -897,6 +985,14 @@ static void riscv_cpu_reset_hold(Object *obj) /* mmte is supposed to have pm.current hardwired to 1 */ env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); + /* + * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor + * extension is enabled. + */ + if (riscv_has_ext(env, RVH)) { + env->mideleg |= HS_MODE_INTERRUPTS; + } + /* * Clear mseccfg and unlock all the PMP entries upon reset. * This is allowed as per the priv and smepmp specifications @@ -949,7 +1045,7 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) #ifndef CONFIG_USER_ONLY static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) { - bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; + bool rv32 = riscv_cpu_is_32bit(cpu); uint8_t satp_mode_map_max, satp_mode_supported_max; /* The CPU wants the OS to decide which satp mode to use */ @@ -1025,19 +1121,6 @@ void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) { Error *local_err = NULL; - /* - * KVM accel does not have a specialized finalize() - * callback because its extensions are validated - * in the get()/set() callbacks of each property. 
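The reset path above now reads the maximum MXL from the CPU class (mcc->misa_mxl_max) rather than from env, and riscv_cpu_max_xlen() earlier in this patch derives XLEN as 16 << mxl. A minimal standalone sketch of that relationship, assuming the usual misa.MXL encoding (1 = RV32, 2 = RV64, 3 = RV128):

    /* Standalone sketch: XLEN from the misa.MXL field (assumed 1/2/3). */
    #include <stdio.h>

    enum { MXL_RV32 = 1, MXL_RV64 = 2, MXL_RV128 = 3 };

    static int max_xlen(int misa_mxl_max)
    {
        return 16 << misa_mxl_max;          /* 32, 64 or 128 */
    }

    int main(void)
    {
        printf("rv32=%d rv64=%d rv128=%d\n",
               max_xlen(MXL_RV32), max_xlen(MXL_RV64), max_xlen(MXL_RV128));
        return 0;
    }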
- */ - if (tcg_enabled()) { - riscv_tcg_cpu_finalize_features(cpu, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return; - } - } - #ifndef CONFIG_USER_ONLY riscv_cpu_satp_mode_finalize(cpu, &local_err); if (local_err != NULL) { @@ -1045,6 +1128,20 @@ void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) return; } #endif + + if (tcg_enabled()) { + riscv_tcg_cpu_finalize_features(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + } else if (kvm_enabled()) { + riscv_kvm_cpu_finalize_features(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + } } static void riscv_cpu_realize(DeviceState *dev, Error **errp) @@ -1216,15 +1313,21 @@ static void riscv_cpu_post_init(Object *obj) static void riscv_cpu_init(Object *obj) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); RISCVCPU *cpu = RISCV_CPU(obj); + CPURISCVState *env = &cpu->env; + + env->misa_mxl = mcc->misa_mxl_max; + memset(cpu->env_exprs, 0, sizeof(cpu->env_exprs)); _sym_register_expression_region(cpu->env_exprs, sizeof(cpu->env_exprs)); - #ifndef CONFIG_USER_ONLY qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); #endif /* CONFIG_USER_ONLY */ + general_user_opts = g_hash_table_new(g_str_hash, g_str_equal); + /* * The timer and performance counters extensions were supported * in QEMU before they were added as discrete extensions in the @@ -1234,6 +1337,43 @@ static void riscv_cpu_init(Object *obj) */ RISCV_CPU(obj)->cfg.ext_zicntr = true; RISCV_CPU(obj)->cfg.ext_zihpm = true; + + /* Default values for non-bool cpu properties */ + cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16); + cpu->cfg.vlenb = 128 >> 3; + cpu->cfg.elen = 64; + cpu->cfg.cbom_blocksize = 64; + cpu->cfg.cbop_blocksize = 64; + cpu->cfg.cboz_blocksize = 64; + cpu->env.vext_ver = VEXT_VERSION_1_00_0; +} + +static void riscv_bare_cpu_init(Object *obj) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + + /* + * Bare CPUs do not inherit the timer and performance + * counters from the parent class (see riscv_cpu_init() + * for info on why the parent enables them). + * + * Users have to explicitly enable these counters for + * bare CPUs. + */ + cpu->cfg.ext_zicntr = false; + cpu->cfg.ext_zihpm = false; + + /* Set to QEMU's first supported priv version */ + cpu->env.priv_ver = PRIV_VERSION_1_10_0; + + /* + * Support all available satp_mode settings. The default + * value will be set to MBARE if the user doesn't set + * satp_mode manually (see set_satp_mode_default()). + */ +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_SV64); +#endif } typedef struct misa_ext_info { @@ -1261,8 +1401,29 @@ static const MISAExtInfo misa_ext_info_arr[] = { MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), MISA_EXT_INFO(RVV, "v", "Vector operations"), MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), + MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)") }; +static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc) +{ + CPUClass *cc = CPU_CLASS(mcc); + + /* Validate that MISA_MXL is set properly. 
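riscv_cpu_init() above now allocates a GLib hash table (general_user_opts) keyed by option name, which the property setters later in this patch feed via cpu_option_add_user_setting() so the code can tell which options the user set explicitly. A minimal sketch of that record-and-query pattern, with illustrative helper names rather than the ones in the tree:

    /* Sketch of the "remember what the user set" pattern with GLib.
     * Helper names are illustrative, not the ones used in the patch. */
    #include <glib.h>
    #include <stdio.h>

    static GHashTable *user_opts;

    static void record_user_setting(const char *name)
    {
        g_hash_table_insert(user_opts, g_strdup(name), NULL);
    }

    static gboolean option_user_set(const char *name)
    {
        return g_hash_table_contains(user_opts, name);
    }

    int main(void)
    {
        user_opts = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL);
        record_user_setting("vlen");
        printf("vlen set by user: %d, elen set by user: %d\n",
               option_user_set("vlen"), option_user_set("elen"));
        g_hash_table_destroy(user_opts);
        return 0;
    }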
*/ + switch (mcc->misa_mxl_max) { +#ifdef TARGET_RISCV64 + case MXL_RV64: + case MXL_RV128: + cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; + break; +#endif + case MXL_RV32: + cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; + break; + default: + g_assert_not_reached(); + } +} + static int riscv_validate_misa_info_idx(uint32_t bit) { int idx; @@ -1307,17 +1468,28 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), + MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), + MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), + MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true), + MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false), MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false), MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false), MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false), MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false), MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false), + MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false), + MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false), + MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false), + MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false), MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), + MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false), MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), + MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false), + MULTI_EXT_CFG_BOOL("svade", ext_svade, false), MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), @@ -1343,6 +1515,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), + MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), @@ -1350,6 +1523,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), + MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), @@ -1403,15 +1577,18 @@ const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { /* These are experimental so mark with 'x-' */ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { - MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false), - MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false), - - MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false), - MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false), + DEFINE_PROP_END_OF_LIST(), +}; - MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false), - MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false), - MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false), +/* + * 'Named features' is the name we give to extensions that we + * don't want to expose to users. They are either immutable + * (always enabled/disable) or they'll vary depending on + * the resulting CPU state. They have riscv,isa strings + * and priv_ver like regular extensions. 
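The RV*() constants passed to riscv_cpu_set_misa_ext() and listed in misa_ext_info_arr (including the new RVB entry) are single bits keyed to the extension letter. A standalone sketch, assuming the conventional 1 << (letter - 'A') layout:

    /* Sketch: building a misa extension mask from single-letter names,
     * assuming the conventional bit layout 1 << (letter - 'A'). */
    #include <stdint.h>
    #include <stdio.h>

    #define MISA_BIT(c) ((uint32_t)1 << ((c) - 'A'))

    static uint32_t misa_from_string(const char *s)
    {
        uint32_t ext = 0;
        for (; *s; s++) {
            ext |= MISA_BIT(*s);
        }
        return ext;
    }

    int main(void)
    {
        /* e.g. the set used by the sifive-u init functions above */
        printf("IMAFDCSU -> 0x%08x\n", misa_from_string("IMAFDCSU"));
        return 0;
    }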
+ */ +const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { + MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true), DEFINE_PROP_END_OF_LIST(), }; @@ -1433,26 +1610,46 @@ const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { DEFINE_PROP_END_OF_LIST(), }; +static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, + Error **errp) +{ + g_autofree char *cpuname = riscv_cpu_get_name(cpu); + error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", + cpuname, propname); +} + static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { RISCVCPU *cpu = RISCV_CPU(obj); - uint8_t pmu_num; + uint8_t pmu_num, curr_pmu_num; + uint32_t pmu_mask; visit_type_uint8(v, name, &pmu_num, errp); + curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); + + if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, curr_pmu_num); + return; + } + if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { error_setg(errp, "Number of counters exceeds maximum available"); return; } if (pmu_num == 0) { - cpu->cfg.pmu_mask = 0; + pmu_mask = 0; } else { - cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, pmu_num); + pmu_mask = MAKE_64BIT_MASK(3, pmu_num); } warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); + cpu->cfg.pmu_mask = pmu_mask; + cpu_option_add_user_setting("pmu-mask", pmu_mask); } static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, @@ -1464,100 +1661,403 @@ static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, visit_type_uint8(v, name, &pmu_num, errp); } -const PropertyInfo prop_pmu_num = { +static const PropertyInfo prop_pmu_num = { .name = "pmu-num", .get = prop_pmu_num_get, .set = prop_pmu_num_set, }; -Property riscv_cpu_options[] = { - DEFINE_PROP_UINT32("pmu-mask", RISCVCPU, cfg.pmu_mask, MAKE_64BIT_MASK(3, 16)), - {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ +static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint32_t value; + uint8_t pmu_num; - DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), - DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), + visit_type_uint32(v, name, &value, errp); - DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), - DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), + if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %x\n", + name, cpu->cfg.pmu_mask); + return; + } - DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), - DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), + pmu_num = ctpop32(value); - DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), - DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), + if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { + error_setg(errp, "Number of counters exceeds maximum available"); + return; + } - DEFINE_PROP_END_OF_LIST(), + cpu_option_add_user_setting(name, value); + cpu->cfg.pmu_mask = value; +} + +static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; + + visit_type_uint8(v, name, &pmu_mask, errp); +} + +static const PropertyInfo prop_pmu_mask = { + .name = "pmu-mask", + .get = prop_pmu_mask_get, + .set = prop_pmu_mask_set, }; -static Property riscv_cpu_properties[] = { - DEFINE_PROP_BOOL("debug", RISCVCPU, 
cfg.debug, true), +static void prop_mmu_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + bool value; -#ifndef CONFIG_USER_ONLY - DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), -#endif + visit_type_bool(v, name, &value, errp); - DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), + if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, "mmu", errp); + return; + } - DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), - DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), + cpu_option_add_user_setting(name, value); + cpu->cfg.mmu = value; +} - /* - * write_misa() is marked as experimental for now so mark - * it with -x and default to 'false'. - */ - DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), - DEFINE_PROP_END_OF_LIST(), +static void prop_mmu_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + bool value = RISCV_CPU(obj)->cfg.mmu; + + visit_type_bool(v, name, &value, errp); +} + +static const PropertyInfo prop_mmu = { + .name = "mmu", + .get = prop_mmu_get, + .set = prop_mmu_set, }; -static const gchar *riscv_gdb_arch_name(CPUState *cs) +static void prop_pmp_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { - RISCVCPU *cpu = RISCV_CPU(cs); - CPURISCVState *env = &cpu->env; + RISCVCPU *cpu = RISCV_CPU(obj); + bool value; - switch (riscv_cpu_mxl(env)) { - case MXL_RV32: - return "riscv:rv32"; - case MXL_RV64: - case MXL_RV128: - return "riscv:rv64"; + visit_type_bool(v, name, &value, errp); + + if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.pmp = value; +} + +static void prop_pmp_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + bool value = RISCV_CPU(obj)->cfg.pmp; + + visit_type_bool(v, name, &value, errp); +} + +static const PropertyInfo prop_pmp = { + .name = "pmp", + .get = prop_pmp_get, + .set = prop_pmp_set, +}; + +static int priv_spec_from_str(const char *priv_spec_str) +{ + int priv_version = -1; + + if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { + priv_version = PRIV_VERSION_1_12_0; + } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { + priv_version = PRIV_VERSION_1_11_0; + } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { + priv_version = PRIV_VERSION_1_10_0; + } + + return priv_version; +} + +static const char *priv_spec_to_str(int priv_version) +{ + switch (priv_version) { + case PRIV_VERSION_1_10_0: + return PRIV_VER_1_10_0_STR; + case PRIV_VERSION_1_11_0: + return PRIV_VER_1_11_0_STR; + case PRIV_VERSION_1_12_0: + return PRIV_VER_1_12_0_STR; default: - g_assert_not_reached(); + return NULL; } } -static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) +static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = RISCV_CPU(obj); + g_autofree char *value = NULL; + int priv_version = -1; + + visit_type_str(v, name, &value, errp); - if (strcmp(xmlname, "riscv-csr.xml") == 0) { - return cpu->dyn_csr_xml; - } else if (strcmp(xmlname, "riscv-vector.xml") == 0) { - return cpu->dyn_vreg_xml; + priv_version = priv_spec_from_str(value); + if (priv_version < 0) { + error_setg(errp, "Unsupported privilege spec version '%s'", value); + return; + } + + 
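The deprecated "pmu-num" setter above converts the counter count into a mask with MAKE_64BIT_MASK(3, n), and the new "pmu-mask" setter recovers the count with ctpop32(). A standalone round-trip sketch, with the mask written out by hand under the assumption that MAKE_64BIT_MASK(shift, len) means "len set bits starting at bit shift":

    /* Sketch: pmu-num <-> pmu-mask round trip. The shift of 3 skips the
     * fixed cycle/time/instret counters. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pmu_mask_from_num(unsigned n)
    {
        return n ? (uint32_t)(((1ull << n) - 1) << 3) : 0;
    }

    static unsigned pmu_num_from_mask(uint32_t mask)
    {
        return (unsigned)__builtin_popcount(mask);   /* ctpop32() equivalent */
    }

    int main(void)
    {
        uint32_t mask = pmu_mask_from_num(16);       /* the default: 16 counters */
        printf("mask=0x%08x num=%u\n", mask, pmu_num_from_mask(mask));
        return 0;
    }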
if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %s\n", name, + object_property_get_str(obj, name, NULL)); + return; } - return NULL; + cpu_option_add_user_setting(name, priv_version); + cpu->env.priv_ver = priv_version; } -#ifndef CONFIG_USER_ONLY -static int64_t riscv_get_arch_id(CPUState *cs) +static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = RISCV_CPU(obj); + const char *value = priv_spec_to_str(cpu->env.priv_ver); - return cpu->env.mhartid; + visit_type_str(v, name, (char **)&value, errp); } -#include "hw/core/sysemu-cpu-ops.h" +static const PropertyInfo prop_priv_spec = { + .name = "priv_spec", + .get = prop_priv_spec_get, + .set = prop_priv_spec_set, +}; -static const struct SysemuCPUOps riscv_sysemu_ops = { - .get_phys_page_debug = riscv_cpu_get_phys_page_debug, - .write_elf64_note = riscv_cpu_write_elf64_note, - .write_elf32_note = riscv_cpu_write_elf32_note, - .legacy_vmsd = &vmstate_riscv_cpu, +static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + g_autofree char *value = NULL; + + visit_type_str(v, name, &value, errp); + + if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { + error_setg(errp, "Unsupported vector spec version '%s'", value); + return; + } + + cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); + cpu->env.vext_ver = VEXT_VERSION_1_00_0; +} + +static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + const char *value = VEXT_VER_1_00_0_STR; + + visit_type_str(v, name, (char **)&value, errp); +} + +static const PropertyInfo prop_vext_spec = { + .name = "vext_spec", + .get = prop_vext_spec_get, + .set = prop_vext_spec_set, +}; + +static void prop_vlen_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint16_t value; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + if (!is_power_of_2(value)) { + error_setg(errp, "Vector extension VLEN must be power of 2"); + return; + } + + if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, cpu->cfg.vlenb << 3); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.vlenb = value >> 3; +} + +static void prop_vlen_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; + + visit_type_uint16(v, name, &value, errp); +} + +static const PropertyInfo prop_vlen = { + .name = "vlen", + .get = prop_vlen_get, + .set = prop_vlen_set, +}; + +static void prop_elen_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint16_t value; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + if (!is_power_of_2(value)) { + error_setg(errp, "Vector extension ELEN must be power of 2"); + return; + } + + if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, cpu->cfg.elen); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.elen = value; +} + +static void prop_elen_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error 
**errp) +{ + uint16_t value = RISCV_CPU(obj)->cfg.elen; + + visit_type_uint16(v, name, &value, errp); +} + +static const PropertyInfo prop_elen = { + .name = "elen", + .get = prop_elen_get, + .set = prop_elen_set, +}; + +static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint16_t value; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, cpu->cfg.cbom_blocksize); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.cbom_blocksize = value; +} + +static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; + + visit_type_uint16(v, name, &value, errp); +} + +static const PropertyInfo prop_cbom_blksize = { + .name = "cbom_blocksize", + .get = prop_cbom_blksize_get, + .set = prop_cbom_blksize_set, +}; + +static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint16_t value; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, cpu->cfg.cbop_blocksize); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.cbop_blocksize = value; +} + +static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; + + visit_type_uint16(v, name, &value, errp); +} + +static const PropertyInfo prop_cbop_blksize = { + .name = "cbop_blocksize", + .get = prop_cbop_blksize_get, + .set = prop_cbop_blksize_set, }; -#endif -static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) +static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint16_t value; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, cpu->cfg.cboz_blocksize); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.cboz_blocksize = value; +} + +static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; + + visit_type_uint16(v, name, &value, errp); +} + +static const PropertyInfo prop_cboz_blksize = { + .name = "cboz_blocksize", + .get = prop_cboz_blksize_get, + .set = prop_cboz_blksize_set, +}; + +static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { bool dynamic_cpu = riscv_cpu_is_dynamic(obj); RISCVCPU *cpu = RISCV_CPU(obj); @@ -1577,16 +2077,22 @@ static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, cpu->cfg.mvendorid = value; } -static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) +static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { - bool value = 
RISCV_CPU(obj)->cfg.mvendorid; + uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; - visit_type_bool(v, name, &value, errp); + visit_type_uint32(v, name, &value, errp); } -static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) +static const PropertyInfo prop_mvendorid = { + .name = "mvendorid", + .get = prop_mvendorid_get, + .set = prop_mvendorid_set, +}; + +static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { bool dynamic_cpu = riscv_cpu_is_dynamic(obj); RISCVCPU *cpu = RISCV_CPU(obj); @@ -1606,16 +2112,22 @@ static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, cpu->cfg.mimpid = value; } -static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) +static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { - bool value = RISCV_CPU(obj)->cfg.mimpid; + uint64_t value = RISCV_CPU(obj)->cfg.mimpid; - visit_type_bool(v, name, &value, errp); + visit_type_uint64(v, name, &value, errp); } -static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) +static const PropertyInfo prop_mimpid = { + .name = "mimpid", + .get = prop_mimpid_get, + .set = prop_mimpid_set, +}; + +static void prop_marchid_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { bool dynamic_cpu = riscv_cpu_is_dynamic(obj); RISCVCPU *cpu = RISCV_CPU(obj); @@ -1656,15 +2168,170 @@ static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, cpu->cfg.marchid = value; } -static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) +static void prop_marchid_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { - bool value = RISCV_CPU(obj)->cfg.marchid; + uint64_t value = RISCV_CPU(obj)->cfg.marchid; - visit_type_bool(v, name, &value, errp); + visit_type_uint64(v, name, &value, errp); } -static void riscv_cpu_class_init(ObjectClass *c, void *data) +static const PropertyInfo prop_marchid = { + .name = "marchid", + .get = prop_marchid_get, + .set = prop_marchid_set, +}; + +/* + * RVA22U64 defines some 'named features' that are cache + * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa + * and Zicclsm. They are always implemented in TCG and + * doesn't need to be manually enabled by the profile. + */ +static RISCVCPUProfile RVA22U64 = { + .parent = NULL, + .name = "rva22u64", + .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU, + .priv_spec = RISCV_PROFILE_ATTR_UNUSED, + .satp_mode = RISCV_PROFILE_ATTR_UNUSED, + .ext_offsets = { + CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause), + CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb), + CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin), + CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr), + CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom), + CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz), + + /* mandatory named features for this profile */ + CPU_CFG_OFFSET(ext_zic64b), + + RISCV_PROFILE_EXT_LIST_END + } +}; + +/* + * As with RVA22U64, RVA22S64 also defines 'named features'. + * + * Cache related features that we consider enabled since we don't + * implement cache: Ssccptr + * + * Other named features that we already implement: Sstvecd, Sstvala, + * Sscounterenw + * + * The remaining features/extensions comes from RVA22U64. 
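The RVA22U64/RVA22S64 profiles here describe their extensions as byte offsets into RISCVCPUConfig (CPU_CFG_OFFSET is offsetof), terminated by RISCV_PROFILE_EXT_LIST_END. A standalone sketch of how such an offset list can be walked to enable the corresponding bool fields, assuming the enable helper simply writes the bool through the offset, as isa_ext_update_enabled() (declared in the cpu.h hunk below) suggests; struct and names are illustrative:

    /* Sketch: enabling extensions listed as offsetof() values into a
     * config struct. Struct, fields and helper are illustrative. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct cfg {
        bool ext_zicsr;
        bool ext_zba;
        bool ext_zbb;
    };

    #define CFG_OFFSET(f) ((int)offsetof(struct cfg, f))
    #define EXT_LIST_END  (-1)

    static void ext_set_enabled(struct cfg *c, int offset, bool en)
    {
        *(bool *)((char *)c + offset) = en;
    }

    int main(void)
    {
        static const int profile_exts[] = {
            CFG_OFFSET(ext_zicsr), CFG_OFFSET(ext_zba), CFG_OFFSET(ext_zbb),
            EXT_LIST_END
        };
        struct cfg c = { 0 };

        for (int i = 0; profile_exts[i] != EXT_LIST_END; i++) {
            ext_set_enabled(&c, profile_exts[i], true);
        }
        printf("zicsr=%d zba=%d zbb=%d\n", c.ext_zicsr, c.ext_zba, c.ext_zbb);
        return 0;
    }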
+ */ +static RISCVCPUProfile RVA22S64 = { + .parent = &RVA22U64, + .name = "rva22s64", + .misa_ext = RVS, + .priv_spec = PRIV_VERSION_1_12_0, + .satp_mode = VM_1_10_SV39, + .ext_offsets = { + /* rva22s64 exts */ + CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt), + CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade), + + RISCV_PROFILE_EXT_LIST_END + } +}; + +RISCVCPUProfile *riscv_profiles[] = { + &RVA22U64, + &RVA22S64, + NULL, +}; + +static Property riscv_cpu_properties[] = { + DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), + + {.name = "pmu-mask", .info = &prop_pmu_mask}, + {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ + + {.name = "mmu", .info = &prop_mmu}, + {.name = "pmp", .info = &prop_pmp}, + + {.name = "priv_spec", .info = &prop_priv_spec}, + {.name = "vext_spec", .info = &prop_vext_spec}, + + {.name = "vlen", .info = &prop_vlen}, + {.name = "elen", .info = &prop_elen}, + + {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, + {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, + {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, + + {.name = "mvendorid", .info = &prop_mvendorid}, + {.name = "mimpid", .info = &prop_mimpid}, + {.name = "marchid", .info = &prop_marchid}, + +#ifndef CONFIG_USER_ONLY + DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), +#endif + + DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), + + DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), + DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), + + /* + * write_misa() is marked as experimental for now so mark + * it with -x and default to 'false'. + */ + DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), + DEFINE_PROP_END_OF_LIST(), +}; + +#if defined(TARGET_RISCV64) +static void rva22u64_profile_cpu_init(Object *obj) +{ + rv64i_bare_cpu_init(obj); + + RVA22U64.enabled = true; +} + +static void rva22s64_profile_cpu_init(Object *obj) +{ + rv64i_bare_cpu_init(obj); + + RVA22S64.enabled = true; +} +#endif + +static const gchar *riscv_gdb_arch_name(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + + switch (riscv_cpu_mxl(env)) { + case MXL_RV32: + return "riscv:rv32"; + case MXL_RV64: + case MXL_RV128: + return "riscv:rv64"; + default: + g_assert_not_reached(); + } +} + +#ifndef CONFIG_USER_ONLY +static int64_t riscv_get_arch_id(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + + return cpu->env.mhartid; +} + +#include "hw/core/sysemu-cpu-ops.h" + +static const struct SysemuCPUOps riscv_sysemu_ops = { + .get_phys_page_debug = riscv_cpu_get_phys_page_debug, + .write_elf64_note = riscv_cpu_write_elf64_note, + .write_elf32_note = riscv_cpu_write_elf32_note, + .legacy_vmsd = &vmstate_riscv_cpu, +}; +#endif + +static void riscv_cpu_common_class_init(ObjectClass *c, void *data) { RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); @@ -1679,12 +2346,12 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data) cc->class_by_name = riscv_cpu_class_by_name; cc->has_work = riscv_cpu_has_work; + cc->mmu_index = riscv_cpu_mmu_index; cc->dump_state = riscv_cpu_dump_state; cc->set_pc = riscv_cpu_set_pc; cc->get_pc = riscv_cpu_get_pc; cc->gdb_read_register = riscv_cpu_gdb_read_register; cc->gdb_write_register = riscv_cpu_gdb_write_register; - cc->gdb_num_core_regs = 33; cc->gdb_stop_before_watchpoint = true; cc->disas_set_info = riscv_cpu_disas_set_info; #ifndef CONFIG_USER_ONLY @@ -1692,18 +2359,16 @@ static void 
riscv_cpu_class_init(ObjectClass *c, void *data) cc->get_arch_id = riscv_get_arch_id; #endif cc->gdb_arch_name = riscv_gdb_arch_name; - cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml; - - object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid, - cpu_set_mvendorid, NULL, NULL); - object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid, - cpu_set_mimpid, NULL, NULL); + device_class_set_props(dc, riscv_cpu_properties); +} - object_class_property_add(c, "marchid", "uint64", cpu_get_marchid, - cpu_set_marchid, NULL, NULL); +static void riscv_cpu_class_init(ObjectClass *c, void *data) +{ + RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); - device_class_set_props(dc, riscv_cpu_properties); + mcc->misa_mxl_max = (uint32_t)(uintptr_t)data; + riscv_cpu_validate_misa_mxl(mcc); } static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, @@ -1726,10 +2391,13 @@ static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, char *riscv_isa_string(RISCVCPU *cpu) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); int i; const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); char *isa_str = g_new(char, maxlen); - char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS); + int xlen = riscv_cpu_max_xlen(mcc); + char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); + for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { *p++ = qemu_tolower(riscv_single_letter_exts[i]); @@ -1742,47 +2410,102 @@ char *riscv_isa_string(RISCVCPU *cpu) return isa_str; } -static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b) +#ifndef CONFIG_USER_ONLY +static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) { - ObjectClass *class_a = (ObjectClass *)a; - ObjectClass *class_b = (ObjectClass *)b; - const char *name_a, *name_b; + int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); + char **extensions = g_new(char *, maxlen); - name_a = object_class_get_name(class_a); - name_b = object_class_get_name(class_b); - return strcmp(name_a, name_b); -} + for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { + if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { + extensions[*count] = g_new(char, 2); + snprintf(extensions[*count], 2, "%c", + qemu_tolower(riscv_single_letter_exts[i])); + (*count)++; + } + } -static void riscv_cpu_list_entry(gpointer data, gpointer user_data) -{ - const char *typename = object_class_get_name(OBJECT_CLASS(data)); - int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX); + for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { + if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { + extensions[*count] = g_strdup(edata->name); + (*count)++; + } + } - qemu_printf("%.*s\n", len, typename); + return extensions; } -void riscv_cpu_list(void) +void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) { - GSList *list; + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); + const size_t maxlen = sizeof("rv128i"); + g_autofree char *isa_base = g_new(char, maxlen); + g_autofree char *riscv_isa; + char **isa_extensions; + int count = 0; + int xlen = riscv_cpu_max_xlen(mcc); + + riscv_isa = riscv_isa_string(cpu); + qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); + + snprintf(isa_base, maxlen, "rv%di", xlen); + qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); + + isa_extensions = riscv_isa_extensions_list(cpu, &count); + qemu_fdt_setprop_string_array(fdt, 
nodename, "riscv,isa-extensions", + isa_extensions, count); + + for (int i = 0; i < count; i++) { + g_free(isa_extensions[i]); + } - list = object_class_get_list(TYPE_RISCV_CPU, false); - list = g_slist_sort(list, riscv_cpu_list_compare); - g_slist_foreach(list, riscv_cpu_list_entry, NULL); - g_slist_free(list); + g_free(isa_extensions); } +#endif + +#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \ + { \ + .name = (type_name), \ + .parent = TYPE_RISCV_CPU, \ + .instance_init = (initfn), \ + .class_init = riscv_cpu_class_init, \ + .class_data = (void *)(misa_mxl_max) \ + } + +#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ + { \ + .name = (type_name), \ + .parent = TYPE_RISCV_DYNAMIC_CPU, \ + .instance_init = (initfn), \ + .class_init = riscv_cpu_class_init, \ + .class_data = (void *)(misa_mxl_max) \ + } + +#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ + { \ + .name = (type_name), \ + .parent = TYPE_RISCV_VENDOR_CPU, \ + .instance_init = (initfn), \ + .class_init = riscv_cpu_class_init, \ + .class_data = (void *)(misa_mxl_max) \ + } -#define DEFINE_CPU(type_name, initfn) \ - { \ - .name = type_name, \ - .parent = TYPE_RISCV_CPU, \ - .instance_init = initfn \ +#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ + { \ + .name = (type_name), \ + .parent = TYPE_RISCV_BARE_CPU, \ + .instance_init = (initfn), \ + .class_init = riscv_cpu_class_init, \ + .class_data = (void *)(misa_mxl_max) \ } -#define DEFINE_DYNAMIC_CPU(type_name, initfn) \ - { \ - .name = type_name, \ - .parent = TYPE_RISCV_DYNAMIC_CPU, \ - .instance_init = initfn \ +#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ + { \ + .name = (type_name), \ + .parent = TYPE_RISCV_BARE_CPU, \ + .instance_init = (initfn), \ + .class_init = riscv_cpu_class_init, \ + .class_data = (void *)(misa_mxl_max) \ } static const TypeInfo riscv_cpu_type_infos[] = { @@ -1795,29 +2518,48 @@ static const TypeInfo riscv_cpu_type_infos[] = { .instance_post_init = riscv_cpu_post_init, .abstract = true, .class_size = sizeof(RISCVCPUClass), - .class_init = riscv_cpu_class_init, + .class_init = riscv_cpu_common_class_init, }, { .name = TYPE_RISCV_DYNAMIC_CPU, .parent = TYPE_RISCV_CPU, .abstract = true, }, - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init), + { + .name = TYPE_RISCV_VENDOR_CPU, + .parent = TYPE_RISCV_CPU, + .abstract = true, + }, + { + .name = TYPE_RISCV_BARE_CPU, + .parent = TYPE_RISCV_CPU, + .instance_init = riscv_bare_cpu_init, + .abstract = true, + }, #if defined(TARGET_RISCV32) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), + DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), + 
DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), #elif defined(TARGET_RISCV64) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), + DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), + DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), #endif }; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index cc7fa18c4b6..a67b86f6b7f 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -24,6 +24,7 @@ #include "hw/registerfields.h" #include "hw/qdev-properties.h" #include "exec/cpu-defs.h" +#include "exec/gdbstub.h" #include "qemu/cpu-float.h" #include "qom/object.h" #include "qemu/int128.h" @@ -69,6 +70,7 @@ typedef struct CPUArchState CPURISCVState; #define RVH RV('H') #define RVJ RV('J') #define RVG RV('G') +#define RVB RV('B') extern const uint32_t misa_bits[]; const char *riscv_get_misa_ext_name(uint32_t bit); @@ -76,7 +78,26 @@ const char *riscv_get_misa_ext_description(uint32_t bit); #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop) +typedef struct riscv_cpu_profile { + struct riscv_cpu_profile *parent; + const char *name; + uint32_t misa_ext; + bool enabled; + bool user_set; + int priv_spec; + int satp_mode; + const int32_t ext_offsets[]; +} RISCVCPUProfile; + +#define RISCV_PROFILE_EXT_LIST_END -1 +#define RISCV_PROFILE_ATTR_UNUSED -1 + +extern RISCVCPUProfile *riscv_profiles[]; + /* Privileged specification version */ +#define PRIV_VER_1_10_0_STR "v1.10.0" +#define PRIV_VER_1_11_0_STR "v1.11.0" +#define PRIV_VER_1_12_0_STR "v1.12.0" enum { PRIV_VERSION_1_10_0 = 0, PRIV_VERSION_1_11_0, @@ -86,6 +107,7 @@ enum { }; #define VEXT_VERSION_1_00_0 0x00010000 +#define VEXT_VER_1_00_0_STR "v1.0" enum { TRANSLATE_SUCCESS, @@ -164,12 +186,10 @@ struct CPUArchState { target_ulong guest_phys_fault_addr; target_ulong priv_ver; - target_ulong bext_ver; target_ulong vext_ver; /* RISCVMXL, but uint32_t for vmstate migration */ uint32_t misa_mxl; /* current mxl */ - uint32_t misa_mxl_max; /* max mxl for this cpu */ uint32_t misa_ext; /* current extensions */ uint32_t misa_ext_mask; /* max ext for this cpu */ uint32_t xl; /* current xlen */ @@ -251,7 +271,7 @@ struct CPUArchState { target_ulong hstatus; target_ulong hedeleg; uint64_t hideleg; - target_ulong 
hcounteren; + uint32_t hcounteren; target_ulong htval; target_ulong htinst; target_ulong hgatp; @@ -314,10 +334,10 @@ struct CPUArchState { */ bool two_stage_indirect_lookup; - target_ulong scounteren; - target_ulong mcounteren; + uint32_t scounteren; + uint32_t mcounteren; - target_ulong mcountinhibit; + uint32_t mcountinhibit; /* PMU counter state */ PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS]; @@ -345,6 +365,7 @@ struct CPUArchState { target_ulong tdata1[RV_MAX_TRIGGERS]; target_ulong tdata2[RV_MAX_TRIGGERS]; target_ulong tdata3[RV_MAX_TRIGGERS]; + target_ulong mcontext; struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS]; struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS]; QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS]; @@ -427,8 +448,8 @@ struct ArchCPU { /* space for symbolic expressions corresponding to env */ void *env_exprs[512 + 1]; /* TCG_MAX_TEMPS + 1 (for NULL) */ - char *dyn_csr_xml; - char *dyn_vreg_xml; + GDBFeature dyn_csr_feature; + GDBFeature dyn_vreg_feature; /* Configuration Settings */ RISCVCPUConfig cfg; @@ -452,6 +473,7 @@ struct RISCVCPUClass { DeviceRealize parent_realize; ResettablePhases parent_phases; + uint32_t misa_mxl_max; /* max mxl for this cpu */ }; static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) @@ -484,7 +506,7 @@ target_ulong riscv_cpu_get_geilen(CPURISCVState *env); void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen); bool riscv_cpu_vector_enabled(CPURISCVState *env); void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable); -int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch); +int riscv_env_mmu_index(CPURISCVState *env, bool ifetch); G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); @@ -492,12 +514,11 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); char *riscv_isa_string(RISCVCPU *cpu); -void riscv_cpu_list(void); - -#define cpu_list riscv_cpu_list -#define cpu_mmu_index riscv_cpu_mmu_index +int riscv_cpu_max_xlen(RISCVCPUClass *mcc); +bool riscv_cpu_option_set(const char *optname); #ifndef CONFIG_USER_ONLY +void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename); void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, @@ -672,17 +693,24 @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env) * = 256 >> 7 * = 2 */ -static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype) +static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew, + int8_t lmul) { - uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW); - int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3); - return cpu->cfg.vlen >> (sew + 3 - lmul); + uint32_t vlen = vlenb << 3; + + /* + * We need to use 'vlen' instead of 'vlenb' to + * preserve the '+ 3' in the formula. Otherwise + * we risk a negative shift if vsew < lmul. 
+ */ + return vlen >> (vsew + 3 - lmul); } void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, uint64_t *cs_base, uint32_t *pflags); void riscv_cpu_update_mask(CPURISCVState *env); +bool riscv_cpu_is_32bit(RISCVCPU *cpu); RISCVException riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, @@ -758,7 +786,8 @@ enum riscv_pmu_event_idx { /* used by tcg/tcg-cpu.c*/ void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en); bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset); -void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext); +void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext); +bool riscv_cpu_is_vendor(Object *cpu_obj); typedef struct RISCVCPUMultiExtConfig { const char *name; @@ -769,8 +798,8 @@ typedef struct RISCVCPUMultiExtConfig { extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[]; extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[]; extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[]; +extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[]; extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[]; -extern Property riscv_cpu_options[]; typedef struct isa_ext_data { const char *name; diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index ebd7917d490..fc2068ee4dc 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -32,14 +32,6 @@ #define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT) #define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA) -/* Vector Fixed-Point round model */ -#define FSR_VXRM_SHIFT 9 -#define FSR_VXRM (0x3 << FSR_VXRM_SHIFT) - -/* Vector Fixed-Point saturation flag */ -#define FSR_VXSAT_SHIFT 8 -#define FSR_VXSAT (0x1 << FSR_VXSAT_SHIFT) - /* Control and Status Registers */ /* User Trap Setup */ @@ -361,6 +353,7 @@ #define CSR_TDATA2 0x7a2 #define CSR_TDATA3 0x7a3 #define CSR_TINFO 0x7a4 +#define CSR_MCONTEXT 0x7a8 /* Debug Mode Registers */ #define CSR_DCSR 0x7b0 @@ -905,4 +898,10 @@ typedef enum RISCVException { /* JVT CSR bits */ #define JVT_MODE 0x3F #define JVT_BASE (~0x3F) + +/* Debug Sdtrig CSR masks */ +#define MCONTEXT32 0x0000003F +#define MCONTEXT64 0x0000000000001FFFULL +#define MCONTEXT32_HCONTEXT 0x0000007F +#define MCONTEXT64_HCONTEXT 0x0000000000003FFFULL #endif diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index f4605fb190b..cb750154bd7 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -65,11 +65,13 @@ struct RISCVCPUConfig { bool ext_zicntr; bool ext_zicsr; bool ext_zicbom; + bool ext_zicbop; bool ext_zicboz; bool ext_zicond; bool ext_zihintntl; bool ext_zihintpause; bool ext_zihpm; + bool ext_ztso; bool ext_smstateen; bool ext_sstc; bool ext_svadu; @@ -77,6 +79,9 @@ struct RISCVCPUConfig { bool ext_svnapot; bool ext_svpbmt; bool ext_zdinx; + bool ext_zaamo; + bool ext_zacas; + bool ext_zalrsc; bool ext_zawrs; bool ext_zfa; bool ext_zfbfmin; @@ -120,6 +125,18 @@ struct RISCVCPUConfig { uint64_t marchid; uint64_t mimpid; + /* Named features */ + bool ext_svade; + bool ext_zic64b; + + /* + * Always 'true' booleans for named features + * TCG always implement/can't be user disabled, + * based on spec version. 
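With cfg.vlen replaced by cfg.vlenb (VLEN in bytes), vext_get_vlmax() above now takes vlenb, vsew and the signed 3-bit LMUL exponent. A standalone check of the shift formula for two SEW/LMUL combinations (plain ints used here for clarity; this is a sketch, not the in-tree signature):

    /* Sketch: VLMAX = (VLEN / SEW) * LMUL via the shift form above.
     * vsew is 0..3 for SEW = 8..64; lmul is the signed LMUL exponent. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t vlmax(uint32_t vlenb, int vsew, int lmul)
    {
        uint32_t vlen = vlenb << 3;
        return vlen >> (vsew + 3 - lmul);
    }

    int main(void)
    {
        /* VLEN = 128 bits, i.e. vlenb = 16 */
        printf("SEW=32,LMUL=1   -> %u\n", vlmax(16, 2, 0));   /* 128/32 * 1   = 4 */
        printf("SEW=16,LMUL=1/2 -> %u\n", vlmax(16, 1, -1));  /* 128/16 * 1/2 = 4 */
        return 0;
    }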
+ */ + bool has_priv_1_12; + bool has_priv_1_11; + /* Vendor-specific custom extensions */ bool ext_xtheadba; bool ext_xtheadbb; @@ -135,13 +152,10 @@ struct RISCVCPUConfig { bool ext_XVentanaCondOps; uint32_t pmu_mask; - char *priv_spec; - char *user_spec; - char *bext_spec; - char *vext_spec; - uint16_t vlen; + uint16_t vlenb; uint16_t elen; uint16_t cbom_blocksize; + uint16_t cbop_blocksize; uint16_t cboz_blocksize; bool mmu; bool pmp; diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index e7e23b34f45..fc090d729a1 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -33,7 +33,7 @@ #include "debug.h" #include "tcg/oversized-guest.h" -int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch) +int riscv_env_mmu_index(CPURISCVState *env, bool ifetch) { #ifdef CONFIG_USER_ONLY return 0; @@ -81,13 +81,16 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, * which is not supported by GVEC. So we set vl_eq_vlmax flag to true * only when maxsz >= 8 bytes. */ - uint32_t vlmax = vext_get_vlmax(cpu, env->vtype); - uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW); - uint32_t maxsz = vlmax << sew; + + /* lmul encoded as in DisasContext::lmul */ + int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3); + uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW); + uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul); + uint32_t maxsz = vlmax << vsew; bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) && (maxsz >= 8); flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill); - flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew); + flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew); flags = FIELD_DP32(flags, TB_FLAGS, LMUL, FIELD_EX64(env->vtype, VTYPE, VLMUL)); flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); @@ -106,7 +109,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, #else flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv); - flags |= cpu_mmu_index(env, 0); + flags |= riscv_env_mmu_index(env, 0); fs = get_field(env->mstatus, MSTATUS_FS); vs = get_field(env->mstatus, MSTATUS_VS); @@ -655,7 +658,7 @@ void riscv_cpu_interrupt(CPURISCVState *env) uint64_t gein, vsgein = 0, vstip = 0, irqf = 0; CPUState *cs = env_cpu(env); - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); if (env->virt_enabled) { gein = get_field(env->hstatus, HSTATUS_VGEIN); @@ -681,7 +684,7 @@ uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value) /* No need to update mip for VSTIP */ mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask; - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); env->mip = (env->mip & ~mask) | (value & mask); @@ -904,7 +907,9 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, } bool pbmte = env->menvcfg & MENVCFG_PBMTE; - bool adue = env->menvcfg & MENVCFG_ADUE; + bool svade = riscv_cpu_cfg(env)->ext_svade; + bool svadu = riscv_cpu_cfg(env)->ext_svadu; + bool adue = svadu ? env->menvcfg & MENVCFG_ADUE : !svade; if (first_stage && two_stage && env->virt_enabled) { pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE); @@ -1079,9 +1084,18 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, return TRANSLATE_FAIL; } - /* If necessary, set accessed and dirty bits. */ - target_ulong updated_pte = pte | PTE_A | - (access_type == MMU_DATA_STORE ? PTE_D : 0); + target_ulong updated_pte = pte; + + /* + * If ADUE is enabled, set accessed and dirty bits. + * Otherwise raise an exception if necessary. 
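Around this hunk, adue is menvcfg.ADUE when Svadu is implemented and simply !Svade otherwise, and hardware A/D updating is only performed when it is set; otherwise the walk must fail so software can maintain the bits. A standalone sketch of that decision, with PTE_A/PTE_D taken from the standard RISC-V PTE layout (bits 6 and 7):

    /* Sketch: accessed/dirty handling during the page-table walk.
     * Returns false when the access must fault so software updates A/D. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PTE_A (1u << 6)
    #define PTE_D (1u << 7)

    static bool hw_or_sw_ad(uint32_t *pte, bool is_store, bool adue)
    {
        if (adue) {
            *pte |= PTE_A | (is_store ? PTE_D : 0);
            return true;
        }
        return (*pte & PTE_A) && (!is_store || (*pte & PTE_D));
    }

    int main(void)
    {
        uint32_t pte = 0;                       /* A and D clear */
        printf("adue store ok=%d (A=%d D=%d)\n", hw_or_sw_ad(&pte, true, true),
               !!(pte & PTE_A), !!(pte & PTE_D));
        pte = PTE_A;                            /* A set, D clear */
        printf("sw-managed store ok=%d\n", hw_or_sw_ad(&pte, true, false));
        return 0;
    }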
+ */ + if (adue) { + updated_pte |= PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0); + } else if (!(pte & PTE_A) || + (access_type == MMU_DATA_STORE && !(pte & PTE_D))) { + return TRANSLATE_FAIL; + } /* Page table updates need to be atomic with MTTCG enabled */ if (updated_pte != pte && !is_debug) { @@ -1200,7 +1214,7 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) CPURISCVState *env = &cpu->env; hwaddr phys_addr; int prot; - int mmu_idx = cpu_mmu_index(&cpu->env, false); + int mmu_idx = riscv_env_mmu_index(&cpu->env, false); if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx, true, env->virt_enabled, true)) { @@ -1209,7 +1223,7 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) if (env->virt_enabled) { if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL, - 0, mmu_idx, false, true, true)) { + 0, MMUIdx_U, false, true, true)) { return -1; } } @@ -1301,7 +1315,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, bool two_stage_lookup = mmuidx_2stage(mmu_idx); bool two_stage_indirect_error = false; int ret = TRANSLATE_FAIL; - int mode = mmu_idx; + int mode = mmuidx_priv(mmu_idx); /* default TLB page size */ target_ulong tlb_size = TARGET_PAGE_SIZE; @@ -1749,8 +1763,8 @@ void riscv_cpu_do_interrupt(CPUState *cs) * See if we need to adjust cause. Yes if its VS mode interrupt * no if hypervisor has delegated one of hs mode's interrupt */ - if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT || - cause == IRQ_VS_EXT) { + if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT || + cause == IRQ_VS_EXT)) { cause = cause - 1; } write_gva = false; diff --git a/target/riscv/csr.c b/target/riscv/csr.c index c50a33397c5..726096444fa 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -195,8 +195,11 @@ static RISCVException mctr(CPURISCVState *env, int csrno) if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) { /* Offset for RV32 mhpmcounternh counters */ - base_csrno += 0x80; + csrno -= 0x80; } + + g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31); + ctr_index = csrno - base_csrno; if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) { /* The PMU is not enabled or counter is out of range */ @@ -239,7 +242,7 @@ static RISCVException any32(CPURISCVState *env, int csrno) } -static int aia_any(CPURISCVState *env, int csrno) +static RISCVException aia_any(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_smaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -248,7 +251,7 @@ static int aia_any(CPURISCVState *env, int csrno) return any(env, csrno); } -static int aia_any32(CPURISCVState *env, int csrno) +static RISCVException aia_any32(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_smaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -266,7 +269,7 @@ static RISCVException smode(CPURISCVState *env, int csrno) return RISCV_EXCP_ILLEGAL_INST; } -static int smode32(CPURISCVState *env, int csrno) +static RISCVException smode32(CPURISCVState *env, int csrno) { if (riscv_cpu_mxl(env) != MXL_RV32) { return RISCV_EXCP_ILLEGAL_INST; @@ -275,7 +278,7 @@ static int smode32(CPURISCVState *env, int csrno) return smode(env, csrno); } -static int aia_smode(CPURISCVState *env, int csrno) +static RISCVException aia_smode(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -284,7 +287,7 @@ static int aia_smode(CPURISCVState *env, int csrno) return smode(env, csrno); } -static int aia_smode32(CPURISCVState *env, int csrno) +static RISCVException 
aia_smode32(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -493,7 +496,7 @@ static RISCVException pointer_masking(CPURISCVState *env, int csrno) return RISCV_EXCP_ILLEGAL_INST; } -static int aia_hmode(CPURISCVState *env, int csrno) +static RISCVException aia_hmode(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -502,7 +505,7 @@ static int aia_hmode(CPURISCVState *env, int csrno) return hmode(env, csrno); } -static int aia_hmode32(CPURISCVState *env, int csrno) +static RISCVException aia_hmode32(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -678,9 +681,10 @@ static RISCVException read_vl(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vlenb(CPURISCVState *env, int csrno, + target_ulong *val) { - *val = riscv_cpu_cfg(env)->vlen >> 3; + *val = riscv_cpu_cfg(env)->vlenb; return RISCV_EXCP_NONE; } @@ -735,17 +739,19 @@ static RISCVException write_vstart(CPURISCVState *env, int csrno, * The vstart CSR is defined to have only enough writable bits * to hold the largest element index, i.e. lg2(VLEN) bits. */ - env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlen)); + env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3)); return RISCV_EXCP_NONE; } -static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vcsr(CPURISCVState *env, int csrno, + target_ulong *val) { *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT); return RISCV_EXCP_NONE; } -static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vcsr(CPURISCVState *env, int csrno, + target_ulong val) { #if !defined(CONFIG_USER_ONLY) env->mstatus |= MSTATUS_VS; @@ -795,13 +801,15 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hpmcounter(CPURISCVState *env, int csrno, + target_ulong *val) { *val = get_ticks(false); return RISCV_EXCP_NONE; } -static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno, + target_ulong *val) { *val = get_ticks(true); return RISCV_EXCP_NONE; @@ -809,7 +817,8 @@ static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val) #else /* CONFIG_USER_ONLY */ -static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mhpmevent(CPURISCVState *env, int csrno, + target_ulong *val) { int evt_index = csrno - CSR_MCOUNTINHIBIT; @@ -818,7 +827,8 @@ static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val) return RISCV_EXCP_NONE; } -static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mhpmevent(CPURISCVState *env, int csrno, + target_ulong val) { int evt_index = csrno - CSR_MCOUNTINHIBIT; uint64_t mhpmevt_val = val; @@ -834,7 +844,8 @@ static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val) return RISCV_EXCP_NONE; } -static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno, + target_ulong *val) { int evt_index = csrno - CSR_MHPMEVENT3H + 3; @@ -843,7 +854,8 
@@ static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val) return RISCV_EXCP_NONE; } -static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno, + target_ulong val) { int evt_index = csrno - CSR_MHPMEVENT3H + 3; uint64_t mhpmevth_val = val; @@ -857,7 +869,8 @@ static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val) return RISCV_EXCP_NONE; } -static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno, + target_ulong val) { int ctr_idx = csrno - CSR_MCYCLE; PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; @@ -882,7 +895,8 @@ static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val) return RISCV_EXCP_NONE; } -static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno, + target_ulong val) { int ctr_idx = csrno - CSR_MCYCLEH; PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; @@ -942,7 +956,8 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, return RISCV_EXCP_NONE; } -static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hpmcounter(CPURISCVState *env, int csrno, + target_ulong *val) { uint16_t ctr_index; @@ -957,7 +972,8 @@ static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val) return riscv_pmu_read_ctr(env, val, false, ctr_index); } -static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno, + target_ulong *val) { uint16_t ctr_index; @@ -972,7 +988,8 @@ static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val) return riscv_pmu_read_ctr(env, val, true, ctr_index); } -static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_scountovf(CPURISCVState *env, int csrno, + target_ulong *val) { int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT; int i; @@ -1278,8 +1295,34 @@ static RISCVException read_mstatus(CPURISCVState *env, int csrno, static bool validate_vm(CPURISCVState *env, target_ulong vm) { - return (vm & 0xf) <= - satp_mode_max_from_map(riscv_cpu_cfg(env)->satp_mode.map); + uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map; + return get_field(mode_supported, (1 << vm)); +} + +static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp, + target_ulong val) +{ + target_ulong mask; + bool vm; + if (riscv_cpu_mxl(env) == MXL_RV32) { + vm = validate_vm(env, get_field(val, SATP32_MODE)); + mask = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN); + } else { + vm = validate_vm(env, get_field(val, SATP64_MODE)); + mask = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN); + } + + if (vm && mask) { + /* + * The ISA defines SATP.MODE=Bare as "no translation", but we still + * pass these through QEMU's TLB emulation as it improves + * performance. Flushing the TLB on SATP writes with paging + * enabled avoids leaking those invalid cached mappings. 
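+     * This path is shared: the write_satp(), write_vsatp() and write_hgatp()
+     * handlers below all route their new value through legalize_xatp().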
+ */ + tlb_flush(env_cpu(env)); + return val; + } + return old_xatp; } static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp, @@ -1328,11 +1371,14 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE | MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR | - MSTATUS_TW | MSTATUS_VS; + MSTATUS_TW; if (riscv_has_ext(env, RVF)) { mask |= MSTATUS_FS; } + if (riscv_has_ext(env, RVV)) { + mask |= MSTATUS_VS; + } if (xl != MXL_RV32 || env->debugger) { if (riscv_has_ext(env, RVH)) { @@ -1632,7 +1678,8 @@ static RISCVException rmw_mvienh(CPURISCVState *env, int csrno, return ret; } -static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mtopi(CPURISCVState *env, int csrno, + target_ulong *val) { int irq; uint8_t iprio; @@ -1672,8 +1719,9 @@ static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno) }; } -static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val, - target_ulong new_val, target_ulong wr_mask) +static RISCVException rmw_xiselect(CPURISCVState *env, int csrno, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) { target_ulong *iselect; @@ -1752,8 +1800,9 @@ static int rmw_iprio(target_ulong xlen, return 0; } -static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val, - target_ulong new_val, target_ulong wr_mask) +static RISCVException rmw_xireg(CPURISCVState *env, int csrno, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) { bool virt, isel_reserved; uint8_t *iprio; @@ -1827,8 +1876,9 @@ static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val, return RISCV_EXCP_NONE; } -static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val, - target_ulong new_val, target_ulong wr_mask) +static RISCVException rmw_xtopei(CPURISCVState *env, int csrno, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) { bool virt; int ret = -EINVAL; @@ -2109,7 +2159,7 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno, /* * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0 * henvcfg.stce is read_only 0 when menvcfg.stce = 0 - * henvcfg.hade is read_only 0 when menvcfg.hade = 0 + * henvcfg.adue is read_only 0 when menvcfg.adue = 0 */ *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) | env->menvcfg); @@ -2997,35 +3047,16 @@ static RISCVException read_satp(CPURISCVState *env, int csrno, static RISCVException write_satp(CPURISCVState *env, int csrno, target_ulong val) { - target_ulong mask; - bool vm; - if (!riscv_cpu_cfg(env)->mmu) { return RISCV_EXCP_NONE; } - if (riscv_cpu_mxl(env) == MXL_RV32) { - vm = validate_vm(env, get_field(val, SATP32_MODE)); - mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN); - } else { - vm = validate_vm(env, get_field(val, SATP64_MODE)); - mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN); - } - - if (vm && mask) { - /* - * The ISA defines SATP.MODE=Bare as "no translation", but we still - * pass these through QEMU's TLB emulation as it improves - * performance. Flushing the TLB on SATP writes with paging - * enabled avoids leaking those invalid cached mappings. 
- */ - tlb_flush(env_cpu(env)); - env->satp = val; - } + env->satp = legalize_xatp(env, env->satp, val); return RISCV_EXCP_NONE; } -static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vstopi(CPURISCVState *env, int csrno, + target_ulong *val) { int irq, ret; target_ulong topei; @@ -3114,7 +3145,8 @@ static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val) return RISCV_EXCP_NONE; } -static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_stopi(CPURISCVState *env, int csrno, + target_ulong *val) { int irq; uint8_t iprio; @@ -3506,7 +3538,7 @@ static RISCVException read_hgatp(CPURISCVState *env, int csrno, static RISCVException write_hgatp(CPURISCVState *env, int csrno, target_ulong val) { - env->hgatp = val; + env->hgatp = legalize_xatp(env, env->hgatp, val); return RISCV_EXCP_NONE; } @@ -3570,19 +3602,21 @@ static RISCVException write_htimedeltah(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hvictl(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->hvictl; return RISCV_EXCP_NONE; } -static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hvictl(CPURISCVState *env, int csrno, + target_ulong val) { env->hvictl = val & HVICTL_VALID_MASK; return RISCV_EXCP_NONE; } -static int read_hvipriox(CPURISCVState *env, int first_index, +static RISCVException read_hvipriox(CPURISCVState *env, int first_index, uint8_t *iprio, target_ulong *val) { int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32); @@ -3608,7 +3642,7 @@ static int read_hvipriox(CPURISCVState *env, int first_index, return RISCV_EXCP_NONE; } -static int write_hvipriox(CPURISCVState *env, int first_index, +static RISCVException write_hvipriox(CPURISCVState *env, int first_index, uint8_t *iprio, target_ulong val) { int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32); @@ -3634,42 +3668,50 @@ static int write_hvipriox(CPURISCVState *env, int first_index, return RISCV_EXCP_NONE; } -static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hviprio1(CPURISCVState *env, int csrno, + target_ulong *val) { return read_hvipriox(env, 0, env->hviprio, val); } -static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hviprio1(CPURISCVState *env, int csrno, + target_ulong val) { return write_hvipriox(env, 0, env->hviprio, val); } -static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hviprio1h(CPURISCVState *env, int csrno, + target_ulong *val) { return read_hvipriox(env, 4, env->hviprio, val); } -static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hviprio1h(CPURISCVState *env, int csrno, + target_ulong val) { return write_hvipriox(env, 4, env->hviprio, val); } -static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hviprio2(CPURISCVState *env, int csrno, + target_ulong *val) { return read_hvipriox(env, 8, env->hviprio, val); } -static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hviprio2(CPURISCVState *env, int csrno, + target_ulong val) { return write_hvipriox(env, 8, env->hviprio, val); } -static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong 
*val) +static RISCVException read_hviprio2h(CPURISCVState *env, int csrno, + target_ulong *val) { return read_hvipriox(env, 12, env->hviprio, val); } -static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hviprio2h(CPURISCVState *env, int csrno, + target_ulong val) { return write_hvipriox(env, 12, env->hviprio, val); } @@ -3693,7 +3735,8 @@ static RISCVException write_vsstatus(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vstvec(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vstvec; return RISCV_EXCP_NONE; @@ -3772,7 +3815,7 @@ static RISCVException read_vsatp(CPURISCVState *env, int csrno, static RISCVException write_vsatp(CPURISCVState *env, int csrno, target_ulong val) { - env->vsatp = val; + env->vsatp = legalize_xatp(env, env->vsatp, val); return RISCV_EXCP_NONE; } @@ -3900,6 +3943,31 @@ static RISCVException read_tinfo(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException read_mcontext(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mcontext; + return RISCV_EXCP_NONE; +} + +static RISCVException write_mcontext(CPURISCVState *env, int csrno, + target_ulong val) +{ + bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false; + int32_t mask; + + if (riscv_has_ext(env, RVH)) { + /* Spec suggest 7-bit for RV32 and 14-bit for RV64 w/ H extension */ + mask = rv32 ? MCONTEXT32_HCONTEXT : MCONTEXT64_HCONTEXT; + } else { + /* Spec suggest 6-bit for RV32 and 13-bit for RV64 w/o H extension */ + mask = rv32 ? MCONTEXT32 : MCONTEXT64; + } + + env->mcontext = val & mask; + return RISCV_EXCP_NONE; +} + /* * Functions to access Pointer Masking feature registers * We have to check if current priv lvl could modify @@ -4794,11 +4862,12 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr }, /* Debug CSRs */ - [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect }, - [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata }, - [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata }, - [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata }, - [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore }, + [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect }, + [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata }, + [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata }, + [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata }, + [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore }, + [CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext }, /* User Pointer Masking */ [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte }, diff --git a/target/riscv/debug.c b/target/riscv/debug.c index 4945d1a1f25..e30d99cc2f0 100644 --- a/target/riscv/debug.c +++ b/target/riscv/debug.c @@ -940,4 +940,6 @@ void riscv_trigger_reset_hold(CPURISCVState *env) env->cpu_watchpoint[i] = NULL; timer_del(env->itrigger_timer[i]); } + + env->mcontext = 0; } diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c index 58b3ace0fe9..be7a02cd903 100644 --- a/target/riscv/gdbstub.c +++ b/target/riscv/gdbstub.c @@ -49,6 +49,7 @@ static const struct TypeSize vec_lanes[] = { int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs); RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; 
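+    /*
+     * misa_mxl_max is read via the CPU class (mcc) rather than env in the
+     * switch below: the maximum MXL is a property of the CPU model, not
+     * mutable runtime state.
+     */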
target_ulong tmp; @@ -61,7 +62,7 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) return 0; } - switch (env->misa_mxl_max) { + switch (mcc->misa_mxl_max) { case MXL_RV32: return gdb_get_reg32(mem_buf, tmp); case MXL_RV64: @@ -75,12 +76,13 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs); RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; int length = 0; target_ulong tmp; - switch (env->misa_mxl_max) { + switch (mcc->misa_mxl_max) { case MXL_RV32: tmp = (int32_t)ldl_p(mem_buf); length = 4; @@ -106,8 +108,11 @@ int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return length; } -static int riscv_gdb_get_fpu(CPURISCVState *env, GByteArray *buf, int n) +static int riscv_gdb_get_fpu(CPUState *cs, GByteArray *buf, int n) { + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + if (n < 32) { if (env->misa_ext & RVD) { return gdb_get_reg64(buf, env->fpr[n]); @@ -119,8 +124,11 @@ static int riscv_gdb_get_fpu(CPURISCVState *env, GByteArray *buf, int n) return 0; } -static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n) +static int riscv_gdb_set_fpu(CPUState *cs, uint8_t *mem_buf, int n) { + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + if (n < 32) { env->fpr[n] = ldq_p(mem_buf); /* always 64-bit */ return sizeof(uint64_t); @@ -128,9 +136,11 @@ static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n) return 0; } -static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n) +static int riscv_gdb_get_vector(CPUState *cs, GByteArray *buf, int n) { - uint16_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + uint16_t vlenb = cpu->cfg.vlenb; if (n < 32) { int i; int cnt = 0; @@ -144,9 +154,11 @@ static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n) return 0; } -static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n) +static int riscv_gdb_set_vector(CPUState *cs, uint8_t *mem_buf, int n) { - uint16_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + uint16_t vlenb = cpu->cfg.vlenb; if (n < 32) { int i; for (i = 0; i < vlenb; i += 8) { @@ -158,8 +170,11 @@ static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n) return 0; } -static int riscv_gdb_get_csr(CPURISCVState *env, GByteArray *buf, int n) +static int riscv_gdb_get_csr(CPUState *cs, GByteArray *buf, int n) { + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + if (n < CSR_TABLE_SIZE) { target_ulong val = 0; int result; @@ -172,8 +187,11 @@ static int riscv_gdb_get_csr(CPURISCVState *env, GByteArray *buf, int n) return 0; } -static int riscv_gdb_set_csr(CPURISCVState *env, uint8_t *mem_buf, int n) +static int riscv_gdb_set_csr(CPUState *cs, uint8_t *mem_buf, int n) { + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + if (n < CSR_TABLE_SIZE) { target_ulong val = ldtul_p(mem_buf); int result; @@ -186,25 +204,31 @@ static int riscv_gdb_set_csr(CPURISCVState *env, uint8_t *mem_buf, int n) return 0; } -static int riscv_gdb_get_virtual(CPURISCVState *cs, GByteArray *buf, int n) +static int riscv_gdb_get_virtual(CPUState *cs, GByteArray *buf, int n) { if (n == 0) { #ifdef CONFIG_USER_ONLY return gdb_get_regl(buf, 0); #else - return gdb_get_regl(buf, 
cs->priv); + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + + return gdb_get_regl(buf, env->priv); #endif } return 0; } -static int riscv_gdb_set_virtual(CPURISCVState *cs, uint8_t *mem_buf, int n) +static int riscv_gdb_set_virtual(CPUState *cs, uint8_t *mem_buf, int n) { if (n == 0) { #ifndef CONFIG_USER_ONLY - cs->priv = ldtul_p(mem_buf) & 0x3; - if (cs->priv == PRV_RESERVED) { - cs->priv = PRV_S; + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + + env->priv = ldtul_p(mem_buf) & 0x3; + if (env->priv == PRV_RESERVED) { + env->priv = PRV_S; } #endif return sizeof(target_ulong); @@ -212,13 +236,15 @@ static int riscv_gdb_set_virtual(CPURISCVState *cs, uint8_t *mem_buf, int n) return 0; } -static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) +static GDBFeature *riscv_gen_dynamic_csr_feature(CPUState *cs, int base_reg) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs); RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - GString *s = g_string_new(NULL); + GDBFeatureBuilder builder; riscv_csr_predicate_fn predicate; - int bitsize = 16 << env->misa_mxl_max; + int bitsize = riscv_cpu_max_xlen(mcc); + const char *name; int i; #if !defined(CONFIG_USER_ONLY) @@ -230,9 +256,9 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) bitsize = 64; } - g_string_printf(s, ""); - g_string_append_printf(s, ""); - g_string_append_printf(s, ""); + gdb_feature_builder_init(&builder, &cpu->dyn_csr_feature, + "org.gnu.gdb.riscv.csr", "riscv-csr.xml", + base_reg); for (i = 0; i < CSR_TABLE_SIZE; i++) { if (env->priv_ver < csr_ops[i].min_priv_ver) { @@ -240,112 +266,105 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) } predicate = csr_ops[i].predicate; if (predicate && (predicate(env, i) == RISCV_EXCP_NONE)) { - if (csr_ops[i].name) { - g_string_append_printf(s, "", base_reg + i); + + gdb_feature_builder_append_reg(&builder, name, bitsize, i, + "int", NULL); } } - g_string_append_printf(s, ""); - - cpu->dyn_csr_xml = g_string_free(s, false); + gdb_feature_builder_end(&builder); #if !defined(CONFIG_USER_ONLY) env->debugger = false; #endif - return CSR_TABLE_SIZE; + return &cpu->dyn_csr_feature; } -static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg) +static GDBFeature *ricsv_gen_dynamic_vector_feature(CPUState *cs, int base_reg) { RISCVCPU *cpu = RISCV_CPU(cs); - GString *s = g_string_new(NULL); - g_autoptr(GString) ts = g_string_new(""); - int reg_width = cpu->cfg.vlen; - int num_regs = 0; + int reg_width = cpu->cfg.vlenb; + GDBFeatureBuilder builder; int i; - g_string_printf(s, ""); - g_string_append_printf(s, ""); - g_string_append_printf(s, ""); + gdb_feature_builder_init(&builder, &cpu->dyn_vreg_feature, + "org.gnu.gdb.riscv.vector", "riscv-vector.xml", + base_reg); /* First define types and totals in a whole VL */ for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) { int count = reg_width / vec_lanes[i].size; - g_string_printf(ts, "%s", vec_lanes[i].id); - g_string_append_printf(s, - "", - ts->str, vec_lanes[i].gdb_type, count); + gdb_feature_builder_append_tag( + &builder, "", + vec_lanes[i].id, vec_lanes[i].gdb_type, count); } /* Define unions */ - g_string_append_printf(s, ""); + gdb_feature_builder_append_tag(&builder, ""); for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) { - g_string_append_printf(s, "", - vec_lanes[i].suffix, - vec_lanes[i].id); + gdb_feature_builder_append_tag(&builder, + "", + vec_lanes[i].suffix, vec_lanes[i].id); } - g_string_append(s, ""); + gdb_feature_builder_append_tag(&builder, ""); 
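+    /*
+     * The builder now holds the per-lane vector types and the union combining
+     * them; the 32 architectural vector registers appended below are described
+     * in terms of that "riscv_vector" union type.
+     */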
/* Define vector registers */ for (i = 0; i < 32; i++) { - g_string_append_printf(s, - "", - i, reg_width, base_reg++); - num_regs++; + gdb_feature_builder_append_reg(&builder, g_strdup_printf("v%d", i), + reg_width, i, "riscv_vector", "vector"); } - g_string_append_printf(s, ""); + gdb_feature_builder_end(&builder); - cpu->dyn_vreg_xml = g_string_free(s, false); - return num_regs; + return &cpu->dyn_vreg_feature; } void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs); RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; if (env->misa_ext & RVD) { gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu, - 32, "riscv-64bit-fpu.xml", 0); + gdb_find_static_feature("riscv-64bit-fpu.xml"), + 0); } else if (env->misa_ext & RVF) { gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu, - 32, "riscv-32bit-fpu.xml", 0); + gdb_find_static_feature("riscv-32bit-fpu.xml"), + 0); } if (env->misa_ext & RVV) { - int base_reg = cs->gdb_num_regs; gdb_register_coprocessor(cs, riscv_gdb_get_vector, riscv_gdb_set_vector, - ricsv_gen_dynamic_vector_xml(cs, base_reg), - "riscv-vector.xml", 0); + ricsv_gen_dynamic_vector_feature(cs, cs->gdb_num_regs), + 0); } - switch (env->misa_mxl_max) { + switch (mcc->misa_mxl_max) { case MXL_RV32: gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual, - 1, "riscv-32bit-virtual.xml", 0); + gdb_find_static_feature("riscv-32bit-virtual.xml"), + 0); break; case MXL_RV64: case MXL_RV128: gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual, - 1, "riscv-64bit-virtual.xml", 0); + gdb_find_static_feature("riscv-64bit-virtual.xml"), + 0); break; default: g_assert_not_reached(); } if (cpu->cfg.ext_zicsr) { - int base_reg = cs->gdb_num_regs; gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr, - riscv_gen_dynamic_csr_xml(cs, base_reg), - "riscv-csr.xml", 0); + riscv_gen_dynamic_csr_feature(cs, cs->gdb_num_regs), + 0); } } diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index 33597fe2bb1..f22df04cfd1 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -1004,3 +1004,9 @@ vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1 vsm4k_vi 100001 1 ..... ..... 010 ..... 1110111 @r_vm_1 vsm4r_vv 101000 1 ..... 10000 010 ..... 1110111 @r2_vm_1 vsm4r_vs 101001 1 ..... 10000 010 ..... 1110111 @r2_vm_1 + +# *** RV32 Zacas Standard Extension *** +amocas_w 00101 . . ..... ..... 010 ..... 0101111 @atom_st +amocas_d 00101 . . ..... ..... 011 ..... 0101111 @atom_st +# *** RV64 Zacas Standard Extension *** +amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc index 5f194a447bb..4a9e4591d14 100644 --- a/target/riscv/insn_trans/trans_rva.c.inc +++ b/target/riscv/insn_trans/trans_rva.c.inc @@ -18,6 +18,18 @@ * this program. If not, see . 
*/ +#define REQUIRE_A_OR_ZAAMO(ctx) do { \ + if (!ctx->cfg_ptr->ext_zaamo && !has_ext(ctx, RVA)) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_A_OR_ZALRSC(ctx) do { \ + if (!ctx->cfg_ptr->ext_zalrsc && !has_ext(ctx, RVA)) { \ + return false; \ + } \ +} while (0) + static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) { TCGv src1; @@ -28,7 +40,11 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); } tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop); - if (a->aq) { + /* + * TSO defines AMOs as acquire+release-RCsc, but does not define LR/SC as + * AMOs. Instead treat them like loads. + */ + if (a->aq || ctx->ztso) { tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); } @@ -64,9 +80,10 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) gen_set_label(l1); /* * Address comparison failure. However, we still need to - * provide the memory barrier implied by AQ/RL. + * provide the memory barrier implied by AQ/RL/TSO. */ - tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL); + TCGBar bar_strl = (ctx->ztso || a->rl) ? TCG_BAR_STRL : 0; + tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + bar_strl); gen_set_gpr(ctx, a->rd, tcg_constant_tl(1)); gen_set_label(l2); @@ -96,132 +113,143 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZALRSC(ctx); return gen_lr(ctx, a, (MO_ALIGN | MO_TESL)); } static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZALRSC(ctx); return gen_sc(ctx, a, (MO_ALIGN | MO_TESL)); } static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL)); } static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZALRSC(ctx); return gen_lr(ctx, a, MO_ALIGN | 
MO_TEUQ); } static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZALRSC(ctx); return gen_sc(ctx, a, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ)); } diff --git a/target/riscv/insn_trans/trans_rvbf16.c.inc b/target/riscv/insn_trans/trans_rvbf16.c.inc index 4e39c00884c..0a9cd1ec315 100644 --- a/target/riscv/insn_trans/trans_rvbf16.c.inc +++ b/target/riscv/insn_trans/trans_rvbf16.c.inc @@ -71,11 +71,8 @@ static bool trans_vfncvtbf16_f_f_w(DisasContext *ctx, arg_vfncvtbf16_f_f_w *a) if (opfv_narrow_check(ctx, a) && (ctx->sew == MO_16)) { uint32_t data = 0; - TCGLabel *over = gen_new_label(); gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN); - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, ctx->lmul); @@ -83,11 +80,10 @@ static bool trans_vfncvtbf16_f_f_w(DisasContext *ctx, arg_vfncvtbf16_f_f_w *a) data = FIELD_DP32(data, VDATA, VMA, ctx->vma); tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0), vreg_ofs(ctx, a->rs2), tcg_env, - ctx->cfg_ptr->vlen / 8, - ctx->cfg_ptr->vlen / 8, data, + ctx->cfg_ptr->vlenb, + ctx->cfg_ptr->vlenb, data, gen_helper_vfncvtbf16_f_f_w); - mark_vs_dirty(ctx); - gen_set_label(over); + finalize_rvv_inst(ctx); return true; } return false; @@ -100,11 +96,8 @@ static bool trans_vfwcvtbf16_f_f_v(DisasContext *ctx, arg_vfwcvtbf16_f_f_v *a) if (opfv_widen_check(ctx, a) && (ctx->sew == MO_16)) { uint32_t data = 0; - TCGLabel *over = gen_new_label(); gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN); - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, ctx->lmul); @@ -112,11 +105,10 @@ static bool 
trans_vfwcvtbf16_f_f_v(DisasContext *ctx, arg_vfwcvtbf16_f_f_v *a) data = FIELD_DP32(data, VDATA, VMA, ctx->vma); tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0), vreg_ofs(ctx, a->rs2), tcg_env, - ctx->cfg_ptr->vlen / 8, - ctx->cfg_ptr->vlen / 8, data, + ctx->cfg_ptr->vlenb, + ctx->cfg_ptr->vlenb, data, gen_helper_vfwcvtbf16_f_f_v); - mark_vs_dirty(ctx); - gen_set_label(over); + finalize_rvv_inst(ctx); return true; } return false; @@ -130,11 +122,8 @@ static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a) if (require_rvv(ctx) && vext_check_isa_ill(ctx) && (ctx->sew == MO_16) && vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm)) { uint32_t data = 0; - TCGLabel *over = gen_new_label(); gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN); - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, ctx->lmul); @@ -143,11 +132,10 @@ static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a) tcg_gen_gvec_4_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0), vreg_ofs(ctx, a->rs1), vreg_ofs(ctx, a->rs2), tcg_env, - ctx->cfg_ptr->vlen / 8, - ctx->cfg_ptr->vlen / 8, data, + ctx->cfg_ptr->vlenb, + ctx->cfg_ptr->vlenb, data, gen_helper_vfwmaccbf16_vv); - mark_vs_dirty(ctx); - gen_set_label(over); + finalize_rvv_inst(ctx); return true; } return false; diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index faf6d65064f..ad40d3e87f7 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -266,12 +266,20 @@ static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop) static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) { + bool out; + decode_save_opc(ctx); if (get_xl(ctx) == MXL_RV128) { - return gen_load_i128(ctx, a, memop); + out = gen_load_i128(ctx, a, memop); } else { - return gen_load_tl(ctx, a, memop); + out = gen_load_tl(ctx, a, memop); + } + + if (ctx->ztso) { + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); } + + return out; } static bool trans_lb(DisasContext *ctx, arg_lb *a) @@ -328,6 +336,10 @@ static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop) TCGv addr = get_address(ctx, a->rs1, a->imm); TCGv data = get_gpr(ctx, a->rs2, EXT_NONE); + if (ctx->ztso) { + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); + } + tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop); return true; } diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index 78bd363310d..7d84e7d8124 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -167,7 +167,7 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2) gen_helper_vsetvl(dst, tcg_env, s1, s2); gen_set_gpr(s, rd, dst); - mark_vs_dirty(s); + finalize_rvv_inst(s); gen_update_pc(s, s->cur_insn_len); lookup_and_goto_ptr(s); @@ -187,7 +187,7 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2) gen_helper_vsetvl(dst, tcg_env, s1, s2); gen_set_gpr(s, rd, dst); - mark_vs_dirty(s); + finalize_rvv_inst(s); gen_update_pc(s, s->cur_insn_len); lookup_and_goto_ptr(s); s->base.is_jmp = DISAS_NORETURN; @@ -217,7 +217,7 @@ static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a) /* vector register offset from env */ static uint32_t vreg_ofs(DisasContext *s, int reg) { - return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlen / 8; + return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlenb; } /* 
check functions */ @@ -616,9 +616,6 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data, TCGv base; TCGv_i32 desc; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); base = get_gpr(s, rs1, EXT_NONE); @@ -627,22 +624,40 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data, * As simd_desc supports at most 2048 bytes, and in this implementation, * the max vector group length is 4096 bytes. So split it into two parts. * - * The first part is vlen in bytes, encoded in maxsz of simd_desc. + * The first part is vlen in bytes (vlenb), encoded in maxsz of simd_desc. * The second part is lmul, encoded in data of simd_desc. */ - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); + /* + * According to the specification + * + * Additionally, if the Ztso extension is implemented, then vector memory + * instructions in the V extension and Zve family of extensions follow + * RVTSO at the instruction level. The Ztso extension does not + * strengthen the ordering of intra-instruction element accesses. + * + * as a result neither ordered nor unordered accesses from the V + * instructions need ordering within the loop but we do still need barriers + * around the loop. + */ + if (is_store && s->ztso) { + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); + } + + mark_vs_dirty(s); + fn(dest, mask, base, tcg_env, desc); - if (!is_store) { - mark_vs_dirty(s); + if (!is_store && s->ztso) { + tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); } - gen_set_label(over); + finalize_rvv_inst(s); return true; } @@ -778,32 +793,27 @@ typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv, static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, uint32_t data, gen_helper_ldst_stride *fn, - DisasContext *s, bool is_store) + DisasContext *s) { TCGv_ptr dest, mask; TCGv base, stride; TCGv_i32 desc; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); base = get_gpr(s, rs1, EXT_NONE); stride = get_gpr(s, rs2, EXT_NONE); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); - fn(dest, mask, base, stride, tcg_env, desc); + mark_vs_dirty(s); - if (!is_store) { - mark_vs_dirty(s); - } + fn(dest, mask, base, stride, tcg_env, desc); - gen_set_label(over); + finalize_rvv_inst(s); return true; } @@ -827,7 +837,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) data = FIELD_DP32(data, VDATA, NF, a->nf); data = FIELD_DP32(data, VDATA, VTA, s->vta); data = FIELD_DP32(data, VDATA, VMA, s->vma); - return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false); + return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s); } static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) @@ -861,7 +871,7 @@ static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) return false; } - return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true); + return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s); } static bool 
st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) @@ -884,33 +894,28 @@ typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv, static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t data, gen_helper_ldst_index *fn, - DisasContext *s, bool is_store) + DisasContext *s) { TCGv_ptr dest, mask, index; TCGv base; TCGv_i32 desc; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); index = tcg_temp_new_ptr(); base = get_gpr(s, rs1, EXT_NONE); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(index, tcg_env, vreg_ofs(s, vs2)); tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); - fn(dest, mask, base, index, tcg_env, desc); + mark_vs_dirty(s); - if (!is_store) { - mark_vs_dirty(s); - } + fn(dest, mask, base, index, tcg_env, desc); - gen_set_label(over); + finalize_rvv_inst(s); return true; } @@ -953,7 +958,7 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) data = FIELD_DP32(data, VDATA, NF, a->nf); data = FIELD_DP32(data, VDATA, VTA, s->vta); data = FIELD_DP32(data, VDATA, VMA, s->vma); - return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false); + return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s); } static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) @@ -1005,7 +1010,7 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, emul); data = FIELD_DP32(data, VDATA, NF, a->nf); - return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true); + return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s); } static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) @@ -1030,22 +1035,18 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data, TCGv base; TCGv_i32 desc; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); base = get_gpr(s, rs1, EXT_NONE); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); fn(dest, mask, base, tcg_env, desc); - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } @@ -1083,32 +1084,26 @@ GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check) typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32); static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, - uint32_t width, gen_helper_ldst_whole *fn, - DisasContext *s, bool is_store) + gen_helper_ldst_whole *fn, + DisasContext *s) { - uint32_t evl = (s->cfg_ptr->vlen / 8) * nf / width; - TCGLabel *over = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, evl, over); - TCGv_ptr dest; TCGv base; TCGv_i32 desc; uint32_t data = FIELD_DP32(0, VDATA, NF, nf); dest = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); base = get_gpr(s, rs1, EXT_NONE); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); - 
fn(dest, base, tcg_env, desc); + mark_vs_dirty(s); - if (!is_store) { - mark_vs_dirty(s); - } - gen_set_label(over); + fn(dest, base, tcg_env, desc); + finalize_rvv_inst(s); return true; } @@ -1116,42 +1111,42 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, * load and store whole register instructions ignore vtype and vl setting. * Thus, we don't need to check vill bit. (Section 7.9) */ -#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, WIDTH, IS_STORE) \ +#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF) \ static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ { \ if (require_rvv(s) && \ QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \ - return ldst_whole_trans(a->rd, a->rs1, ARG_NF, WIDTH, \ - gen_helper_##NAME, s, IS_STORE); \ + return ldst_whole_trans(a->rd, a->rs1, ARG_NF, \ + gen_helper_##NAME, s); \ } \ return false; \ } -GEN_LDST_WHOLE_TRANS(vl1re8_v, 1, 1, false) -GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, 2, false) -GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, 4, false) -GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, 8, false) -GEN_LDST_WHOLE_TRANS(vl2re8_v, 2, 1, false) -GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, 2, false) -GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, 4, false) -GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, 8, false) -GEN_LDST_WHOLE_TRANS(vl4re8_v, 4, 1, false) -GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, 2, false) -GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, 4, false) -GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, 8, false) -GEN_LDST_WHOLE_TRANS(vl8re8_v, 8, 1, false) -GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, 2, false) -GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, 4, false) -GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, 8, false) +GEN_LDST_WHOLE_TRANS(vl1re8_v, 1) +GEN_LDST_WHOLE_TRANS(vl1re16_v, 1) +GEN_LDST_WHOLE_TRANS(vl1re32_v, 1) +GEN_LDST_WHOLE_TRANS(vl1re64_v, 1) +GEN_LDST_WHOLE_TRANS(vl2re8_v, 2) +GEN_LDST_WHOLE_TRANS(vl2re16_v, 2) +GEN_LDST_WHOLE_TRANS(vl2re32_v, 2) +GEN_LDST_WHOLE_TRANS(vl2re64_v, 2) +GEN_LDST_WHOLE_TRANS(vl4re8_v, 4) +GEN_LDST_WHOLE_TRANS(vl4re16_v, 4) +GEN_LDST_WHOLE_TRANS(vl4re32_v, 4) +GEN_LDST_WHOLE_TRANS(vl4re64_v, 4) +GEN_LDST_WHOLE_TRANS(vl8re8_v, 8) +GEN_LDST_WHOLE_TRANS(vl8re16_v, 8) +GEN_LDST_WHOLE_TRANS(vl8re32_v, 8) +GEN_LDST_WHOLE_TRANS(vl8re64_v, 8) /* * The vector whole register store instructions are encoded similar to * unmasked unit-stride store of elements with EEW=8. */ -GEN_LDST_WHOLE_TRANS(vs1r_v, 1, 1, true) -GEN_LDST_WHOLE_TRANS(vs2r_v, 2, 1, true) -GEN_LDST_WHOLE_TRANS(vs4r_v, 4, 1, true) -GEN_LDST_WHOLE_TRANS(vs8r_v, 8, 1, true) +GEN_LDST_WHOLE_TRANS(vs1r_v, 1) +GEN_LDST_WHOLE_TRANS(vs2r_v, 2) +GEN_LDST_WHOLE_TRANS(vs4r_v, 4) +GEN_LDST_WHOLE_TRANS(vs8r_v, 8) /* *** Vector Integer Arithmetic Instructions @@ -1160,12 +1155,12 @@ GEN_LDST_WHOLE_TRANS(vs8r_v, 8, 1, true) /* * MAXSZ returns the maximum vector size can be operated in bytes, * which is used in GVEC IR when vl_eq_vlmax flag is set to true - * to accerlate vector operation. + * to accelerate vector operation. 
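+ * For example, assuming vlenb = 16 (VLEN = 128 bits): lmul = 0 yields
+ * 128 >> 3 = 16 bytes (one full register), lmul = 1 yields 128 >> 2 = 32
+ * bytes (LMUL = 2), and lmul = -1 yields 128 >> 4 = 8 bytes (LMUL = 1/2).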
*/ static inline uint32_t MAXSZ(DisasContext *s) { - int scale = s->lmul - 3; - return s->cfg_ptr->vlen >> -scale; + int max_sz = s->cfg_ptr->vlenb * 8; + return max_sz >> (3 - s->lmul); } static bool opivv_check(DisasContext *s, arg_rmrr *a) @@ -1182,10 +1177,6 @@ static inline bool do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn, gen_helper_gvec_4_ptr *fn) { - TCGLabel *over = gen_new_label(); - - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1), @@ -1199,11 +1190,10 @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn, data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fn); + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); } - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } @@ -1235,9 +1225,6 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm, TCGv_i32 desc; uint32_t data = 0; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); @@ -1248,8 +1235,8 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm, data = FIELD_DP32(data, VDATA, VTA, s->vta); data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); data = FIELD_DP32(data, VDATA, VMA, s->vma); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2)); @@ -1257,8 +1244,7 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm, fn(dest, mask, src1, src2, tcg_env, desc); - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } @@ -1283,7 +1269,7 @@ do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn, gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), src1, MAXSZ(s), MAXSZ(s)); - mark_vs_dirty(s); + finalize_rvv_inst(s); return true; } return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s); @@ -1397,9 +1383,6 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm, TCGv_i32 desc; uint32_t data = 0; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); @@ -1410,8 +1393,8 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm, data = FIELD_DP32(data, VDATA, VTA, s->vta); data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); data = FIELD_DP32(data, VDATA, VMA, s->vma); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2)); @@ -1419,8 +1402,7 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm, fn(dest, mask, src1, src2, tcg_env, desc); - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } @@ -1434,7 +1416,7 @@ do_opivi_gvec(DisasContext *s, arg_rmrr *a, 
GVecGen2iFn *gvec_fn, if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s)); - mark_vs_dirty(s); + finalize_rvv_inst(s); return true; } return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode); @@ -1482,8 +1464,6 @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a, { if (checkfn(s, a)) { uint32_t data = 0; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); @@ -1492,11 +1472,10 @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a, tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } return false; @@ -1558,8 +1537,6 @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a, { if (opiwv_widen_check(s, a)) { uint32_t data = 0; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); @@ -1568,10 +1545,9 @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a, tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fn); - mark_vs_dirty(s); - gen_set_label(over); + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); + finalize_rvv_inst(s); return true; } return false; @@ -1630,8 +1606,6 @@ static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm, gen_helper_gvec_4_ptr *fn, DisasContext *s) { uint32_t data = 0; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); data = FIELD_DP32(data, VDATA, VM, vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); @@ -1639,10 +1613,9 @@ static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm, data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_4_ptr(vreg_ofs(s, vd), vreg_ofs(s, 0), vreg_ofs(s, vs1), - vreg_ofs(s, vs2), tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fn); - mark_vs_dirty(s); - gen_set_label(over); + vreg_ofs(s, vs2), tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); + finalize_rvv_inst(s); return true; } @@ -1775,7 +1748,7 @@ do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn, gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), src1, MAXSZ(s), MAXSZ(s)); - mark_vs_dirty(s); + finalize_rvv_inst(s); return true; } return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s); @@ -1821,8 +1794,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ gen_helper_##NAME##_h, \ gen_helper_##NAME##_w, \ }; \ - TCGLabel *over = gen_new_label(); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ @@ -1831,11 +1802,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ 
fns[s->sew]); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -2032,16 +2002,13 @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a) gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h, gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d, }; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), - tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fns[s->sew]); - gen_set_label(over); } - mark_vs_dirty(s); + finalize_rvv_inst(s); return true; } return false; @@ -2055,8 +2022,6 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) /* vmv.v.x has rs2 = 0 and vm = 1 */ vext_check_ss(s, a->rd, 0, 1)) { TCGv s1; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); s1 = get_gpr(s, a->rs1, EXT_SIGN); @@ -2082,14 +2047,13 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) }; tcg_gen_ext_tl_i64(s1_i64, s1); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1_i64, tcg_env, desc); } - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } return false; @@ -2105,7 +2069,6 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a) if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), simm); - mark_vs_dirty(s); } else { TCGv_i32 desc; TCGv_i64 s1; @@ -2116,19 +2079,15 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a) gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h, gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d, }; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); s1 = tcg_constant_i64(simm); dest = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1, tcg_env, desc); - - mark_vs_dirty(s); - gen_set_label(over); } + finalize_rvv_inst(s); return true; } return false; @@ -2262,9 +2221,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ gen_helper_##NAME##_w, \ gen_helper_##NAME##_d, \ }; \ - TCGLabel *over = gen_new_label(); \ gen_set_rm(s, RISCV_FRM_DYN); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ @@ -2275,11 +2232,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew - 1]); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -2297,14 +2253,11 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, TCGv_i32 desc; TCGv_i64 t1; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); - dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); - desc = 
tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2)); @@ -2316,8 +2269,7 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, fn(dest, mask, t1, src2, tcg_env, desc); - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } @@ -2380,9 +2332,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ static gen_helper_gvec_4_ptr * const fns[2] = { \ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ }; \ - TCGLabel *over = gen_new_label(); \ gen_set_rm(s, RISCV_FRM_DYN); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);\ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ @@ -2391,11 +2341,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew - 1]); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -2454,9 +2403,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ static gen_helper_gvec_4_ptr * const fns[2] = { \ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ }; \ - TCGLabel *over = gen_new_label(); \ gen_set_rm(s, RISCV_FRM_DYN); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ @@ -2465,11 +2412,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew - 1]); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -2571,9 +2517,7 @@ static bool do_opfv(DisasContext *s, arg_rmr *a, { if (checkfn(s, a)) { uint32_t data = 0; - TCGLabel *over = gen_new_label(); gen_set_rm_chkfrm(s, rm); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); @@ -2581,10 +2525,9 @@ static bool do_opfv(DisasContext *s, arg_rmr *a, data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs2), tcg_env, - s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fn); - mark_vs_dirty(s); - gen_set_label(over); + s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); + finalize_rvv_inst(s); return true; } return false; @@ -2671,7 +2614,6 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), t1); - mark_vs_dirty(s); } else { TCGv_ptr dest; TCGv_i32 desc; @@ -2683,23 +2625,19 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d, }; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); t1 = tcg_temp_new_i64(); /* NaN-box f[rs1] */ do_nanbox(s, t1, cpu_fpr[a->rs1]); dest = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = 
tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd)); fns[s->sew - 1](dest, t1, tcg_env, desc); - - mark_vs_dirty(s); - gen_set_label(over); } + finalize_rvv_inst(s); return true; } return false; @@ -2760,9 +2698,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ gen_helper_##HELPER##_h, \ gen_helper_##HELPER##_w, \ }; \ - TCGLabel *over = gen_new_label(); \ gen_set_rm_chkfrm(s, FRM); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ @@ -2770,11 +2706,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew - 1]); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -2811,9 +2746,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ gen_helper_##NAME##_h, \ gen_helper_##NAME##_w, \ }; \ - TCGLabel *over = gen_new_label(); \ gen_set_rm(s, RISCV_FRM_DYN); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ @@ -2821,11 +2754,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew]); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -2878,9 +2810,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ gen_helper_##HELPER##_h, \ gen_helper_##HELPER##_w, \ }; \ - TCGLabel *over = gen_new_label(); \ gen_set_rm_chkfrm(s, FRM); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ @@ -2888,11 +2818,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew - 1]); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -2927,9 +2856,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ gen_helper_##HELPER##_h, \ gen_helper_##HELPER##_w, \ }; \ - TCGLabel *over = gen_new_label(); \ gen_set_rm_chkfrm(s, FRM); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ @@ -2937,11 +2864,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew]); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -3018,8 +2944,6 @@ 
static bool trans_##NAME(DisasContext *s, arg_r *a) \ vext_check_isa_ill(s)) { \ uint32_t data = 0; \ gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \ - TCGLabel *over = gen_new_label(); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ data = \ @@ -3027,10 +2951,9 @@ static bool trans_##NAME(DisasContext *s, arg_r *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, fn); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, fn); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -3061,8 +2984,8 @@ static bool trans_vcpop_m(DisasContext *s, arg_rmr *a) mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); dst = dest_gpr(s, a->rd); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2)); tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); @@ -3090,8 +3013,8 @@ static bool trans_vfirst_m(DisasContext *s, arg_rmr *a) mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); dst = dest_gpr(s, a->rd); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2)); tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); @@ -3118,8 +3041,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ s->vstart_eq_zero) { \ uint32_t data = 0; \ gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \ - TCGLabel *over = gen_new_label(); \ - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ @@ -3128,11 +3049,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \ vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \ - tcg_env, s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, \ + tcg_env, s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, \ data, fn); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -3158,8 +3078,6 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a) require_align(a->rd, s->lmul) && s->vstart_eq_zero) { uint32_t data = 0; - TCGLabel *over = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); @@ -3171,10 +3089,9 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a) }; tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs2), tcg_env, - s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fns[s->sew]); - mark_vs_dirty(s); - gen_set_label(over); + s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fns[s->sew]); + finalize_rvv_inst(s); return true; } return false; @@ -3188,8 +3105,6 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a) require_align(a->rd, s->lmul) && require_vm(a->vm, a->rd)) { uint32_t data = 0; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); @@ -3200,11 +3115,10 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a) 
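
The other mechanical change in the hunks above is passing s->cfg_ptr->vlenb where s->cfg_ptr->vlen / 8 used to be. Assuming vlenb caches the vector register length in bytes (VLEN >> 3), the descriptor handed to the out-of-line helpers is numerically unchanged; an illustrative sketch:

    /* Illustration only: both spellings describe the same operation size. */
    static uint32_t rvv_desc_sketch(DisasContext *s, uint32_t data)
    {
        uint32_t oprsz = s->cfg_ptr->vlenb;      /* == the old vlen / 8 */
        return simd_desc(oprsz, oprsz, data);    /* oprsz == maxsz here */
    }
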
gen_helper_vid_v_w, gen_helper_vid_v_d, }; tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), - tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fns[s->sew]); - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } return false; @@ -3360,6 +3274,8 @@ static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a) vec_element_loadi(s, t1, a->rs2, 0, true); tcg_gen_trunc_i64_tl(dest, t1); gen_set_gpr(s, a->rd, dest); + tcg_gen_movi_tl(cpu_vstart, 0); + finalize_rvv_inst(s); return true; } return false; @@ -3386,8 +3302,9 @@ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a) s1 = get_gpr(s, a->rs1, EXT_NONE); tcg_gen_ext_tl_i64(t1, s1); vec_element_storei(s, a->rd, 0, t1); - mark_vs_dirty(s); gen_set_label(over); + tcg_gen_movi_tl(cpu_vstart, 0); + finalize_rvv_inst(s); return true; } return false; @@ -3414,6 +3331,8 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a) } mark_fs_dirty(s); + tcg_gen_movi_tl(cpu_vstart, 0); + finalize_rvv_inst(s); return true; } return false; @@ -3439,8 +3358,10 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a) do_nanbox(s, t1, cpu_fpr[a->rs1]); vec_element_storei(s, a->rd, 0, t1); - mark_vs_dirty(s); + gen_set_label(over); + tcg_gen_movi_tl(cpu_vstart, 0); + finalize_rvv_inst(s); return true; } return false; @@ -3535,8 +3456,7 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a) } if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { - int scale = s->lmul - (s->sew + 3); - int vlmax = s->cfg_ptr->vlen >> -scale; + int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul); TCGv_i64 dest = tcg_temp_new_i64(); if (a->rs1 == 0) { @@ -3547,7 +3467,7 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a) tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), dest); - mark_vs_dirty(s); + finalize_rvv_inst(s); } else { static gen_helper_opivx * const fns[4] = { gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h, @@ -3566,8 +3486,7 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a) } if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { - int scale = s->lmul - (s->sew + 3); - int vlmax = s->cfg_ptr->vlen >> -scale; + int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul); if (a->rs1 >= vlmax) { tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), 0); @@ -3576,7 +3495,7 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a) endian_ofs(s, a->rs2, a->rs1), MAXSZ(s), MAXSZ(s)); } - mark_vs_dirty(s); + finalize_rvv_inst(s); } else { static gen_helper_opivx * const fns[4] = { gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h, @@ -3613,47 +3532,40 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a) gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h, gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d, }; - TCGLabel *over = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); data = FIELD_DP32(data, VDATA, VTA, s->vta); tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fns[s->sew]); - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } return false; } /* - * Whole Vector Register Move Instructions ignore vtype and vl setting. - * Thus, we don't need to check vill bit. 
(Section 16.6) + * Whole Vector Register Move Instructions depend on vtype register(vsew). + * Thus, we need to check vill bit. (Section 16.6) */ #define GEN_VMV_WHOLE_TRANS(NAME, LEN) \ static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ { \ if (require_rvv(s) && \ + vext_check_isa_ill(s) && \ QEMU_IS_ALIGNED(a->rd, LEN) && \ QEMU_IS_ALIGNED(a->rs2, LEN)) { \ - uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \ + uint32_t maxsz = s->cfg_ptr->vlenb * LEN; \ if (s->vstart_eq_zero) { \ - /* EEW = 8 */ \ - tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \ + tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), \ vreg_ofs(s, a->rs2), maxsz, maxsz); \ - mark_vs_dirty(s); \ } else { \ - TCGLabel *over = gen_new_label(); \ - tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, maxsz, over); \ tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \ tcg_env, maxsz, maxsz, 0, gen_helper_vmvr_v); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ } \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -3681,8 +3593,6 @@ static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq) { uint32_t data = 0; gen_helper_gvec_3_ptr *fn; - TCGLabel *over = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); static gen_helper_gvec_3_ptr * const fns[6][4] = { { @@ -3723,11 +3633,10 @@ static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq) tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs2), tcg_env, - s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fn); + s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc index 3801c16829d..ae1f40174a1 100644 --- a/target/riscv/insn_trans/trans_rvvk.c.inc +++ b/target/riscv/insn_trans/trans_rvvk.c.inc @@ -164,8 +164,6 @@ GEN_OPIVX_GVEC_TRANS_CHECK(vandn_vx, andcs, zvkb_vx_check) gen_helper_##NAME##_w, \ gen_helper_##NAME##_d, \ }; \ - TCGLabel *over = gen_new_label(); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ @@ -174,10 +172,9 @@ GEN_OPIVX_GVEC_TRANS_CHECK(vandn_vx, andcs, zvkb_vx_check) data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, \ data, fns[s->sew]); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -249,14 +246,12 @@ GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check) TCGv_ptr rd_v, rs2_v; \ TCGv_i32 desc, egs; \ uint32_t data = 0; \ - TCGLabel *over = gen_new_label(); \ \ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \ /* save opcode for unwinding in case we throw an exception */ \ decode_save_opc(s); \ egs = tcg_constant_i32(EGS); \ gen_helper_egs_check(egs, tcg_env); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ } \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ @@ -267,12 +262,11 @@ GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check) rd_v = tcg_temp_new_ptr(); \ rs2_v = tcg_temp_new_ptr(); \ desc = tcg_constant_i32( \ - simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \ + simd_desc(s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, data)); \ tcg_gen_addi_ptr(rd_v, tcg_env, vreg_ofs(s, a->rd)); \ tcg_gen_addi_ptr(rs2_v, tcg_env, 
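
The vrgather.vx/vrgather.vi hunks above also stop open-coding VLMAX and call vext_get_vlmax() with vlenb. Its body is not shown in this diff; a sketch under the usual definition VLMAX = (VLEN / SEW) * LMUL, with vsew and lmul in the log2 encodings the translator keeps in s->sew and s->lmul:

    /* Sketch only: vsew = log2(SEW) - 3 (MO_8..MO_64), lmul = signed log2(LMUL). */
    static uint32_t vext_get_vlmax_sketch(uint32_t vlenb, uint32_t vsew, int8_t lmul)
    {
        uint32_t vlen = vlenb << 3;           /* back to bits */
        return vlen >> (vsew + 3 - lmul);     /* e.g. VLEN=128, SEW=32, LMUL=1 -> 4 */
    }

This is the same quantity the removed code computed as vlen >> ((sew + 3) - lmul).
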
vreg_ofs(s, a->rs2)); \ gen_helper_##NAME(rd_v, rs2_v, tcg_env, desc); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -325,14 +319,12 @@ GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS) TCGv_ptr rd_v, rs2_v; \ TCGv_i32 uimm_v, desc, egs; \ uint32_t data = 0; \ - TCGLabel *over = gen_new_label(); \ \ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \ /* save opcode for unwinding in case we throw an exception */ \ decode_save_opc(s); \ egs = tcg_constant_i32(EGS); \ gen_helper_egs_check(egs, tcg_env); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ } \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ @@ -345,12 +337,11 @@ GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS) rs2_v = tcg_temp_new_ptr(); \ uimm_v = tcg_constant_i32(a->rs1); \ desc = tcg_constant_i32( \ - simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \ + simd_desc(s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, data)); \ tcg_gen_addi_ptr(rd_v, tcg_env, vreg_ofs(s, a->rd)); \ tcg_gen_addi_ptr(rs2_v, tcg_env, vreg_ofs(s, a->rs2)); \ gen_helper_##NAME(rd_v, rs2_v, uimm_v, tcg_env, desc); \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -394,7 +385,6 @@ GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS) { \ if (CHECK(s, a)) { \ uint32_t data = 0; \ - TCGLabel *over = gen_new_label(); \ TCGv_i32 egs; \ \ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \ @@ -402,7 +392,6 @@ GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS) decode_save_opc(s); \ egs = tcg_constant_i32(EGS); \ gen_helper_egs_check(egs, tcg_env); \ - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ } \ \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ @@ -413,11 +402,10 @@ GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS) \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, \ data, gen_helper_##NAME); \ \ - mark_vs_dirty(s); \ - gen_set_label(over); \ + finalize_rvv_inst(s); \ return true; \ } \ return false; \ @@ -448,7 +436,6 @@ static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a) { if (vsha_check(s, a)) { uint32_t data = 0; - TCGLabel *over = gen_new_label(); TCGv_i32 egs; if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { @@ -456,7 +443,6 @@ static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a) decode_save_opc(s); egs = tcg_constant_i32(ZVKNH_EGS); gen_helper_egs_check(egs, tcg_env); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); } data = FIELD_DP32(data, VDATA, VM, a->vm); @@ -466,13 +452,12 @@ static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a) data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), - vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, + vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, s->sew == MO_32 ? 
gen_helper_vsha2cl32_vv : gen_helper_vsha2cl64_vv); - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } return false; @@ -482,7 +467,6 @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a) { if (vsha_check(s, a)) { uint32_t data = 0; - TCGLabel *over = gen_new_label(); TCGv_i32 egs; if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { @@ -490,7 +474,6 @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a) decode_save_opc(s); egs = tcg_constant_i32(ZVKNH_EGS); gen_helper_egs_check(egs, tcg_env); - tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); } data = FIELD_DP32(data, VDATA, VM, a->vm); @@ -500,13 +483,12 @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a) data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), - vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, + vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, s->sew == MO_32 ? gen_helper_vsha2ch32_vv : gen_helper_vsha2ch64_vv); - mark_vs_dirty(s); - gen_set_label(over); + finalize_rvv_inst(s); return true; } return false; diff --git a/target/riscv/insn_trans/trans_rvzacas.c.inc b/target/riscv/insn_trans/trans_rvzacas.c.inc new file mode 100644 index 00000000000..5d274d4c08b --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzacas.c.inc @@ -0,0 +1,150 @@ +/* + * RISC-V translation routines for the RV64 Zacas Standard Extension. + * + * Copyright (c) 2020-2023 PLCT Lab + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
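
The trans_rvvk.c.inc hunks above drop the same vstart branch from the vector-crypto translators while keeping the gen_helper_egs_check() call on the slow path. That helper is not part of this diff; it is assumed to enforce the element-group constraint (EGS is 4 for the AES and SHA-2 element groups), roughly:

    /* Sketch of what gen_helper_egs_check() is assumed to verify. */
    static void egs_check_sketch(uint32_t egs, CPURISCVState *env)
    {
        if (env->vl % egs != 0 || env->vstart % egs != 0) {
            riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
        }
    }
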
+ */ + +#define REQUIRE_ZACAS(ctx) do { \ + if (!ctx->cfg_ptr->ext_zacas) { \ + return false; \ + } \ +} while (0) + +static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop) +{ + TCGv dest = get_gpr(ctx, a->rd, EXT_NONE); + TCGv src1 = get_address(ctx, a->rs1, 0); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + decode_save_opc(ctx); + tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop); + + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_amocas_w(DisasContext *ctx, arg_amocas_w *a) +{ + REQUIRE_ZACAS(ctx); + return gen_cmpxchg(ctx, a, MO_ALIGN | MO_TESL); +} + +static TCGv_i64 get_gpr_pair(DisasContext *ctx, int reg_num) +{ + TCGv_i64 t; + + assert(get_ol(ctx) == MXL_RV32); + + if (reg_num == 0) { + return tcg_constant_i64(0); + } + + t = tcg_temp_new_i64(); + tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]); + return t; +} + +static void gen_set_gpr_pair(DisasContext *ctx, int reg_num, TCGv_i64 t) +{ + assert(get_ol(ctx) == MXL_RV32); + + if (reg_num != 0) { +#ifdef TARGET_RISCV32 + tcg_gen_extr_i64_i32(cpu_gpr[reg_num], cpu_gpr[reg_num + 1], t); +#else + tcg_gen_ext32s_i64(cpu_gpr[reg_num], t); + tcg_gen_sari_i64(cpu_gpr[reg_num + 1], t, 32); +#endif + + if (get_xl_max(ctx) == MXL_RV128) { + tcg_gen_sari_tl(cpu_gprh[reg_num], cpu_gpr[reg_num], 63); + tcg_gen_sari_tl(cpu_gprh[reg_num + 1], cpu_gpr[reg_num + 1], 63); + } + } +} + +static bool gen_cmpxchg64(DisasContext *ctx, arg_atomic *a, MemOp mop) +{ + /* + * Encodings with odd numbered registers specified in rs2 and rd are + * reserved. + */ + if ((a->rs2 | a->rd) & 1) { + return false; + } + + TCGv_i64 dest = get_gpr_pair(ctx, a->rd); + TCGv src1 = get_address(ctx, a->rs1, 0); + TCGv_i64 src2 = get_gpr_pair(ctx, a->rs2); + + decode_save_opc(ctx); + tcg_gen_atomic_cmpxchg_i64(dest, src1, dest, src2, ctx->mem_idx, mop); + + gen_set_gpr_pair(ctx, a->rd, dest); + return true; +} + +static bool trans_amocas_d(DisasContext *ctx, arg_amocas_d *a) +{ + REQUIRE_ZACAS(ctx); + switch (get_ol(ctx)) { + case MXL_RV32: + return gen_cmpxchg64(ctx, a, MO_ALIGN | MO_TEUQ); + case MXL_RV64: + case MXL_RV128: + return gen_cmpxchg(ctx, a, MO_ALIGN | MO_TEUQ); + default: + g_assert_not_reached(); + } +} + +static bool trans_amocas_q(DisasContext *ctx, arg_amocas_q *a) +{ + REQUIRE_ZACAS(ctx); + REQUIRE_64BIT(ctx); + + /* + * Encodings with odd numbered registers specified in rs2 and rd are + * reserved. + */ + if ((a->rs2 | a->rd) & 1) { + return false; + } + +#ifdef TARGET_RISCV64 + TCGv_i128 dest = tcg_temp_new_i128(); + TCGv src1 = get_address(ctx, a->rs1, 0); + TCGv_i128 src2 = tcg_temp_new_i128(); + TCGv_i64 src2l = get_gpr(ctx, a->rs2, EXT_NONE); + TCGv_i64 src2h = get_gpr(ctx, a->rs2 == 0 ? 0 : a->rs2 + 1, EXT_NONE); + TCGv_i64 destl = get_gpr(ctx, a->rd, EXT_NONE); + TCGv_i64 desth = get_gpr(ctx, a->rd == 0 ? 
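
In the new Zacas translator, gen_cmpxchg() passes rd both as the expected value and as the destination of tcg_gen_atomic_cmpxchg_tl(), which matches the amocas register convention: rd supplies the compare value and always receives the original memory contents. A functional model of amocas.w (illustration only, not QEMU code):

    static uint32_t amocas_w_model(uint32_t *addr, uint32_t *rd, uint32_t rs2)
    {
        uint32_t old = *addr;
        if (old == *rd) {
            *addr = rs2;      /* swap only when the compare succeeds */
        }
        *rd = old;            /* rd always gets the prior memory word */
        return old;
    }

gen_cmpxchg64() and trans_amocas_q() extend the same operation to 64- and 128-bit values by pairing even/odd registers, which is why odd rd/rs2 encodings are rejected as reserved.
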
0 : a->rd + 1, EXT_NONE); + + tcg_gen_concat_i64_i128(src2, src2l, src2h); + tcg_gen_concat_i64_i128(dest, destl, desth); + decode_save_opc(ctx); + tcg_gen_atomic_cmpxchg_i128(dest, src1, dest, src2, ctx->mem_idx, + (MO_ALIGN | MO_TEUO)); + + tcg_gen_extr_i128_i64(destl, desth, dest); + + if (a->rd != 0) { + gen_set_gpr(ctx, a->rd, destl); + gen_set_gpr(ctx, a->rd + 1, desth); + } +#endif + + return true; +} diff --git a/target/riscv/insn_trans/trans_rvzce.c.inc b/target/riscv/insn_trans/trans_rvzce.c.inc index 2d992e14c4d..cd234ad9607 100644 --- a/target/riscv/insn_trans/trans_rvzce.c.inc +++ b/target/riscv/insn_trans/trans_rvzce.c.inc @@ -293,12 +293,14 @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a) { REQUIRE_ZCMT(ctx); + TCGv addr = tcg_temp_new(); + /* * Update pc to current for the non-unwinding exception * that might come from cpu_ld*_code() in the helper. */ gen_update_pc(ctx, 0); - gen_helper_cm_jalt(cpu_pc, tcg_env, tcg_constant_i32(a->index)); + gen_helper_cm_jalt(addr, tcg_env, tcg_constant_i32(a->index)); /* c.jt vs c.jalt depends on the index. */ if (a->index >= 32) { @@ -307,6 +309,8 @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a) gen_set_gpr(ctx, xRA, succ_pc); } + tcg_gen_mov_tl(cpu_pc, addr); + tcg_gen_lookup_and_goto_ptr(); ctx->base.is_jmp = DISAS_NORETURN; return true; diff --git a/target/riscv/insn_trans/trans_xthead.c.inc b/target/riscv/insn_trans/trans_xthead.c.inc index 810d76665a6..22488412d4d 100644 --- a/target/riscv/insn_trans/trans_xthead.c.inc +++ b/target/riscv/insn_trans/trans_xthead.c.inc @@ -296,7 +296,7 @@ NOP_PRIVCHECK(th_dcache_csw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) NOP_PRIVCHECK(th_dcache_cisw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) NOP_PRIVCHECK(th_dcache_isw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) NOP_PRIVCHECK(th_dcache_cpal1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) -NOP_PRIVCHECK(th_dcache_cval1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) +NOP_PRIVCHECK(th_dcache_cval1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU) NOP_PRIVCHECK(th_icache_iall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) NOP_PRIVCHECK(th_icache_ialls, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) @@ -992,7 +992,6 @@ static bool trans_th_sfence_vmas(DisasContext *ctx, arg_th_sfence_vmas *a) #endif } -#ifndef CONFIG_USER_ONLY static void gen_th_sync_local(DisasContext *ctx) { /* @@ -1003,14 +1002,12 @@ static void gen_th_sync_local(DisasContext *ctx) tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; } -#endif static bool trans_th_sync(DisasContext *ctx, arg_th_sync *a) { (void) a; REQUIRE_XTHEADSYNC(ctx); -#ifndef CONFIG_USER_ONLY REQUIRE_PRIV_MSU(ctx); /* @@ -1019,9 +1016,6 @@ static bool trans_th_sync(DisasContext *ctx, arg_th_sync *a) gen_th_sync_local(ctx); return true; -#else - return false; -#endif } static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a) @@ -1029,7 +1023,6 @@ static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a) (void) a; REQUIRE_XTHEADSYNC(ctx); -#ifndef CONFIG_USER_ONLY REQUIRE_PRIV_MSU(ctx); /* @@ -1038,9 +1031,6 @@ static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a) gen_th_sync_local(ctx); return true; -#else - return false; -#endif } static bool trans_th_sync_is(DisasContext *ctx, arg_th_sync_is *a) diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index 117e33cf90f..6a6c6cae80f 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -18,6 +18,7 @@ #include "qemu/osdep.h" #include +#include #include @@ -43,10 +44,13 @@ #include "kvm_riscv.h" #include "sbi_ecall_interface.h" #include 
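
The cm.jalt fix above reads the jump-table target into a temporary and only moves it into cpu_pc after the link register has been written. This ordering presumably matters with PC-relative translation (CF_PCREL), where the successor PC used for the link value is generated relative to cpu_pc rather than as an immediate; a hedged sketch of that derivation, simplified from the translator and not the actual QEMU helper:

    static void pc_plus_diff_sketch(DisasContext *ctx, TCGv dest, target_long diff)
    {
        if (tb_cflags(ctx->base.tb) & CF_PCREL) {
            /* relies on cpu_pc still holding the address of the c.jalt */
            tcg_gen_addi_tl(dest, cpu_pc, diff);
        } else {
            tcg_gen_movi_tl(dest, ctx->base.pc_next + diff);
        }
    }

Writing the helper result straight into cpu_pc first, as the old code did, would make xRA point past the jump target rather than past the c.jalt itself.
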
"chardev/char-fe.h" -#include "migration/migration.h" +#include "migration/misc.h" #include "sysemu/runstate.h" #include "hw/riscv/numa.h" +#define PR_RISCV_V_SET_CONTROL 69 +#define PR_RISCV_V_VSTATE_CTRL_ON 2 + void riscv_kvm_aplic_request(void *opaque, int irq, int level) { kvm_set_irq(kvm_state, irq, !!level); @@ -54,7 +58,7 @@ void riscv_kvm_aplic_request(void *opaque, int irq, int level) static bool cap_has_mp_state; -static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type, +static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type, uint64_t idx) { uint64_t id = KVM_REG_RISCV | type | idx; @@ -72,18 +76,59 @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type, return id; } -#define RISCV_CORE_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \ - KVM_REG_RISCV_CORE_REG(name)) +static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx) +{ + return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx; +} + +static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx) +{ + return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx; +} + +static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b) +{ + uint64_t size_ctz = __builtin_ctz(size_b); + + return id | (size_ctz << KVM_REG_SIZE_SHIFT); +} + +static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu, + uint64_t idx) +{ + uint64_t id; + size_t size_b; + + g_assert(idx < 32); + + id = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(idx); + size_b = cpu->cfg.vlenb; + + return kvm_encode_reg_size_id(id, size_b); +} + +#define RISCV_CORE_REG(env, name) \ + kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \ + KVM_REG_RISCV_CORE_REG(name)) + +#define RISCV_CSR_REG(env, name) \ + kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \ + KVM_REG_RISCV_CSR_REG(name)) -#define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \ - KVM_REG_RISCV_CSR_REG(name)) +#define RISCV_CONFIG_REG(env, name) \ + kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \ + KVM_REG_RISCV_CONFIG_REG(name)) -#define RISCV_TIMER_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \ +#define RISCV_TIMER_REG(name) kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \ KVM_REG_RISCV_TIMER_REG(name)) -#define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx) +#define RISCV_FP_F_REG(idx) kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx) + +#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx) -#define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx) +#define RISCV_VECTOR_CSR_REG(env, name) \ + kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \ + KVM_REG_RISCV_VECTOR_CSR_REG(name)) #define KVM_RISCV_GET_CSR(cs, env, csr, reg) \ do { \ @@ -101,17 +146,17 @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type, } \ } while (0) -#define KVM_RISCV_GET_TIMER(cs, env, name, reg) \ +#define KVM_RISCV_GET_TIMER(cs, name, reg) \ do { \ - int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), ®); \ + int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), ®); \ if (ret) { \ abort(); \ } \ } while (0) -#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \ +#define KVM_RISCV_SET_TIMER(cs, name, reg) \ do { \ - int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), ®); \ + int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(name), ®); \ if (ret) { \ abort(); \ } \ @@ -121,7 +166,7 @@ typedef struct KVMCPUConfig { const char *name; const char *description; target_ulong offset; - int kvm_reg_id; + uint64_t kvm_reg_id; bool user_set; bool supported; } 
KVMCPUConfig; @@ -138,6 +183,7 @@ static KVMCPUConfig kvm_misa_ext_cfgs[] = { KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H), KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I), KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M), + KVM_MISA_CFG(RVV, KVM_RISCV_ISA_EXT_V), }; static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v, @@ -202,8 +248,8 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs) /* If we're here we're going to disable the MISA bit */ reg = 0; - id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT, - misa_cfg->kvm_reg_id); + id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + misa_cfg->kvm_reg_id); ret = kvm_set_one_reg(cs, id, ®); if (ret != 0) { /* @@ -229,13 +275,42 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = { KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM), KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ), KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR), + KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND), KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR), KVM_EXT_CFG("zifencei", ext_zifencei, KVM_RISCV_ISA_EXT_ZIFENCEI), + KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL), KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE), KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM), + KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA), + KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH), + KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN), KVM_EXT_CFG("zba", ext_zba, KVM_RISCV_ISA_EXT_ZBA), KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB), + KVM_EXT_CFG("zbc", ext_zbc, KVM_RISCV_ISA_EXT_ZBC), + KVM_EXT_CFG("zbkb", ext_zbkb, KVM_RISCV_ISA_EXT_ZBKB), + KVM_EXT_CFG("zbkc", ext_zbkc, KVM_RISCV_ISA_EXT_ZBKC), + KVM_EXT_CFG("zbkx", ext_zbkx, KVM_RISCV_ISA_EXT_ZBKX), KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS), + KVM_EXT_CFG("zknd", ext_zknd, KVM_RISCV_ISA_EXT_ZKND), + KVM_EXT_CFG("zkne", ext_zkne, KVM_RISCV_ISA_EXT_ZKNE), + KVM_EXT_CFG("zknh", ext_zknh, KVM_RISCV_ISA_EXT_ZKNH), + KVM_EXT_CFG("zkr", ext_zkr, KVM_RISCV_ISA_EXT_ZKR), + KVM_EXT_CFG("zksed", ext_zksed, KVM_RISCV_ISA_EXT_ZKSED), + KVM_EXT_CFG("zksh", ext_zksh, KVM_RISCV_ISA_EXT_ZKSH), + KVM_EXT_CFG("zkt", ext_zkt, KVM_RISCV_ISA_EXT_ZKT), + KVM_EXT_CFG("zvbb", ext_zvbb, KVM_RISCV_ISA_EXT_ZVBB), + KVM_EXT_CFG("zvbc", ext_zvbc, KVM_RISCV_ISA_EXT_ZVBC), + KVM_EXT_CFG("zvfh", ext_zvfh, KVM_RISCV_ISA_EXT_ZVFH), + KVM_EXT_CFG("zvfhmin", ext_zvfhmin, KVM_RISCV_ISA_EXT_ZVFHMIN), + KVM_EXT_CFG("zvkb", ext_zvkb, KVM_RISCV_ISA_EXT_ZVKB), + KVM_EXT_CFG("zvkg", ext_zvkg, KVM_RISCV_ISA_EXT_ZVKG), + KVM_EXT_CFG("zvkned", ext_zvkned, KVM_RISCV_ISA_EXT_ZVKNED), + KVM_EXT_CFG("zvknha", ext_zvknha, KVM_RISCV_ISA_EXT_ZVKNHA), + KVM_EXT_CFG("zvknhb", ext_zvknhb, KVM_RISCV_ISA_EXT_ZVKNHB), + KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED), + KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH), + KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT), + KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN), KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA), KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC), KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL), @@ -327,29 +402,12 @@ static KVMCPUConfig kvm_cboz_blocksize = { .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) }; -static void kvm_cpu_set_cbomz_blksize(Object *obj, Visitor *v, - const char *name, - void *opaque, Error **errp) -{ - KVMCPUConfig *cbomz_cfg = opaque; - RISCVCPU *cpu = RISCV_CPU(obj); - 
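
The table above gains a sizeable batch of new KVM_EXT_CFG() entries (Zicond, Zfa/Zfh, the scalar and vector crypto families, Smstateen, and so on). The macro itself is defined earlier in kvm-cpu.c and is assumed to look roughly like the sketch below: each entry ties a user-visible property name to its RISCVCPUConfig field offset and to the KVM ISA-EXT sub-id used when reading or writing the extension state.

    /* Assumed shape of the entries in kvm_multi_ext_cfgs[]. */
    #define KVM_EXT_CFG_SKETCH(_name, _prop, _reg_id)   \
        { .name = _name,                                \
          .offset = CPU_CFG_OFFSET(_prop),              \
          .kvm_reg_id = _reg_id }

The surrounding hunks then address each entry through kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, kvm_reg_id).
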
uint16_t value, *host_val; - - if (!visit_type_uint16(v, name, &value, errp)) { - return; - } - - host_val = kvmconfig_get_cfg_addr(cpu, cbomz_cfg); - - if (value != *host_val) { - error_report("Unable to set %s to a different value than " - "the host (%u)", - cbomz_cfg->name, *host_val); - exit(EXIT_FAILURE); - } - - cbomz_cfg->user_set = true; -} +static KVMCPUConfig kvm_v_vlenb = { + .name = "vlenb", + .offset = CPU_CFG_OFFSET(vlenb), + .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_VECTOR | + KVM_REG_RISCV_VECTOR_CSR_REG(vlenb) +}; static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs) { @@ -364,8 +422,8 @@ static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs) continue; } - id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT, - multi_ext_cfg->kvm_reg_id); + id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + multi_ext_cfg->kvm_reg_id); reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg); ret = kvm_set_one_reg(cs, id, ®); if (ret != 0) { @@ -398,7 +456,7 @@ static void cpu_set_cfg_unavailable(Object *obj, Visitor *v, } if (value) { - error_setg(errp, "extension %s is not available with KVM", + error_setg(errp, "'%s' is not available with KVM", propname); } } @@ -468,17 +526,14 @@ static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj) NULL, multi_cfg); } - object_property_add(cpu_obj, "cbom_blocksize", "uint16", - NULL, kvm_cpu_set_cbomz_blksize, - NULL, &kvm_cbom_blocksize); - - object_property_add(cpu_obj, "cboz_blocksize", "uint16", - NULL, kvm_cpu_set_cbomz_blksize, - NULL, &kvm_cboz_blocksize); - riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions); riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts); riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts); + + /* We don't have the needed KVM support for profiles */ + for (i = 0; riscv_profiles[i] != NULL; i++) { + riscv_cpu_add_kvm_unavail_prop(cpu_obj, riscv_profiles[i]->name); + } } static int kvm_riscv_get_regs_core(CPUState *cs) @@ -495,7 +550,7 @@ static int kvm_riscv_get_regs_core(CPUState *cs) env->pc = reg; for (i = 1; i < 32; i++) { - uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i); + uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); ret = kvm_get_one_reg(cs, id, ®); if (ret) { return ret; @@ -520,7 +575,7 @@ static int kvm_riscv_put_regs_core(CPUState *cs) } for (i = 1; i < 32; i++) { - uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i); + uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); reg = env->gpr[i]; ret = kvm_set_one_reg(cs, id, ®); if (ret) { @@ -574,7 +629,7 @@ static int kvm_riscv_get_regs_fp(CPUState *cs) if (riscv_has_ext(env, RVD)) { uint64_t reg; for (i = 0; i < 32; i++) { - ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), ®); + ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(i), ®); if (ret) { return ret; } @@ -586,7 +641,7 @@ static int kvm_riscv_get_regs_fp(CPUState *cs) if (riscv_has_ext(env, RVF)) { uint32_t reg; for (i = 0; i < 32; i++) { - ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), ®); + ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(i), ®); if (ret) { return ret; } @@ -608,7 +663,7 @@ static int kvm_riscv_put_regs_fp(CPUState *cs) uint64_t reg; for (i = 0; i < 32; i++) { reg = env->fpr[i]; - ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), ®); + ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(i), ®); if (ret) { return ret; } @@ -620,7 +675,7 @@ static int kvm_riscv_put_regs_fp(CPUState *cs) uint32_t reg; for (i = 0; i < 32; i++) { reg = env->fpr[i]; - ret 
= kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), ®); + ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(i), ®); if (ret) { return ret; } @@ -639,10 +694,10 @@ static void kvm_riscv_get_regs_timer(CPUState *cs) return; } - KVM_RISCV_GET_TIMER(cs, env, time, env->kvm_timer_time); - KVM_RISCV_GET_TIMER(cs, env, compare, env->kvm_timer_compare); - KVM_RISCV_GET_TIMER(cs, env, state, env->kvm_timer_state); - KVM_RISCV_GET_TIMER(cs, env, frequency, env->kvm_timer_frequency); + KVM_RISCV_GET_TIMER(cs, time, env->kvm_timer_time); + KVM_RISCV_GET_TIMER(cs, compare, env->kvm_timer_compare); + KVM_RISCV_GET_TIMER(cs, state, env->kvm_timer_state); + KVM_RISCV_GET_TIMER(cs, frequency, env->kvm_timer_frequency); env->kvm_timer_dirty = true; } @@ -656,8 +711,8 @@ static void kvm_riscv_put_regs_timer(CPUState *cs) return; } - KVM_RISCV_SET_TIMER(cs, env, time, env->kvm_timer_time); - KVM_RISCV_SET_TIMER(cs, env, compare, env->kvm_timer_compare); + KVM_RISCV_SET_TIMER(cs, time, env->kvm_timer_time); + KVM_RISCV_SET_TIMER(cs, compare, env->kvm_timer_compare); /* * To set register of RISCV_TIMER_REG(state) will occur a error from KVM @@ -666,7 +721,7 @@ static void kvm_riscv_put_regs_timer(CPUState *cs) * TODO If KVM changes, adapt here. */ if (env->kvm_timer_state) { - KVM_RISCV_SET_TIMER(cs, env, state, env->kvm_timer_state); + KVM_RISCV_SET_TIMER(cs, state, env->kvm_timer_state); } /* @@ -674,8 +729,8 @@ static void kvm_riscv_put_regs_timer(CPUState *cs) * frequency. Therefore, we should check whether they are the same here * during the migration. */ - if (migration_is_running(migrate_get_current()->state)) { - KVM_RISCV_GET_TIMER(cs, env, frequency, reg); + if (migration_is_running()) { + KVM_RISCV_GET_TIMER(cs, frequency, reg); if (reg != env->kvm_timer_frequency) { error_report("Dst Hosts timer frequency != Src Hosts"); } @@ -684,6 +739,124 @@ static void kvm_riscv_put_regs_timer(CPUState *cs) env->kvm_timer_dirty = false; } +uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs) +{ + uint64_t reg; + + KVM_RISCV_GET_TIMER(cs, frequency, reg); + + return reg; +} + +static int kvm_riscv_get_regs_vector(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + target_ulong reg; + uint64_t vreg_id; + int vreg_idx, ret = 0; + + if (!riscv_has_ext(env, RVV)) { + return 0; + } + + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®); + if (ret) { + return ret; + } + env->vstart = reg; + + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®); + if (ret) { + return ret; + } + env->vl = reg; + + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®); + if (ret) { + return ret; + } + env->vtype = reg; + + if (kvm_v_vlenb.supported) { + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), ®); + if (ret) { + return ret; + } + cpu->cfg.vlenb = reg; + + for (int i = 0; i < 32; i++) { + /* + * vreg[] is statically allocated using RV_VLEN_MAX. + * Use it instead of vlenb to calculate vreg_idx for + * simplicity. 
+ */ + vreg_idx = i * RV_VLEN_MAX / 64; + vreg_id = kvm_riscv_vector_reg_id(cpu, i); + + ret = kvm_get_one_reg(cs, vreg_id, &env->vreg[vreg_idx]); + if (ret) { + return ret; + } + } + } + + return 0; +} + +static int kvm_riscv_put_regs_vector(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + target_ulong reg; + uint64_t vreg_id; + int vreg_idx, ret = 0; + + if (!riscv_has_ext(env, RVV)) { + return 0; + } + + reg = env->vstart; + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®); + if (ret) { + return ret; + } + + reg = env->vl; + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®); + if (ret) { + return ret; + } + + reg = env->vtype; + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®); + if (ret) { + return ret; + } + + if (kvm_v_vlenb.supported) { + reg = cpu->cfg.vlenb; + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), ®); + + for (int i = 0; i < 32; i++) { + /* + * vreg[] is statically allocated using RV_VLEN_MAX. + * Use it instead of vlenb to calculate vreg_idx for + * simplicity. + */ + vreg_idx = i * RV_VLEN_MAX / 64; + vreg_id = kvm_riscv_vector_reg_id(cpu, i); + + ret = kvm_set_one_reg(cs, vreg_id, &env->vreg[vreg_idx]); + if (ret) { + return ret; + } + } + } + + return ret; +} + typedef struct KVMScratchCPU { int kvmfd; int vmfd; @@ -746,24 +919,21 @@ static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) struct kvm_one_reg reg; int ret; - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(mvendorid)); + reg.id = RISCV_CONFIG_REG(env, mvendorid); reg.addr = (uint64_t)&cpu->cfg.mvendorid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { error_report("Unable to retrieve mvendorid from host, error %d", ret); } - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(marchid)); + reg.id = RISCV_CONFIG_REG(env, marchid); reg.addr = (uint64_t)&cpu->cfg.marchid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { error_report("Unable to retrieve marchid from host, error %d", ret); } - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(mimpid)); + reg.id = RISCV_CONFIG_REG(env, mimpid); reg.addr = (uint64_t)&cpu->cfg.mimpid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { @@ -778,8 +948,7 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu, struct kvm_one_reg reg; int ret; - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(isa)); + reg.id = RISCV_CONFIG_REG(env, isa); reg.addr = (uint64_t)&env->misa_ext_mask; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); @@ -800,8 +969,8 @@ static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, struct kvm_one_reg reg; int ret; - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - cbomz_cfg->kvm_reg_id); + reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + cbomz_cfg->kvm_reg_id); reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg); ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { @@ -822,8 +991,8 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i]; struct kvm_one_reg reg; - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT, - multi_ext_cfg->kvm_reg_id); + reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + multi_ext_cfg->kvm_reg_id); reg.addr = (uint64_t)&val; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { @@ -868,6 +1037,33 @@ static int 
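
kvm_riscv_get_regs_vector() and kvm_riscv_put_regs_vector() above copy each of the 32 vector registers to or from env->vreg[] at a fixed stride. For reference, the indexing under the assumption spelled out in the code comments, with RV_VLEN_MAX assumed to be 1024 as in cpu.h:

    /* First uint64_t slot of vector register i inside env->vreg[]. */
    static int vreg_first_slot(int i)
    {
        return i * RV_VLEN_MAX / 64;    /* v0 -> 0, v1 -> 16, v2 -> 32, ... */
    }

KVM still transfers only vlenb bytes per register, since the size is encoded in the register ID; the rest of each RV_VLEN_MAX-bit slice is simply unused on hosts with a smaller VLEN.
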
uint64_cmp(const void *a, const void *b) return 0; } +static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, + struct kvm_reg_list *reglist) +{ + struct kvm_one_reg reg; + struct kvm_reg_list *reg_search; + uint64_t val; + int ret; + + reg_search = bsearch(&kvm_v_vlenb.kvm_reg_id, reglist->reg, reglist->n, + sizeof(uint64_t), uint64_cmp); + + if (reg_search) { + reg.id = kvm_v_vlenb.kvm_reg_id; + reg.addr = (uint64_t)&val; + + ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); + if (ret != 0) { + error_report("Unable to read vlenb register, error code: %s", + strerrorname_np(errno)); + exit(EXIT_FAILURE); + } + + kvm_v_vlenb.supported = true; + cpu->cfg.vlenb = val; + } +} + static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) { KVMCPUConfig *multi_ext_cfg; @@ -914,8 +1110,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) { multi_ext_cfg = &kvm_multi_ext_cfgs[i]; - reg_id = kvm_riscv_reg_id(&cpu->env, KVM_REG_RISCV_ISA_EXT, - multi_ext_cfg->kvm_reg_id); + reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT, + multi_ext_cfg->kvm_reg_id); reg_search = bsearch(®_id, reglist->reg, reglist->n, sizeof(uint64_t), uint64_cmp); if (!reg_search) { @@ -942,6 +1138,10 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) if (cpu->cfg.ext_zicboz) { kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize); } + + if (riscv_has_ext(&cpu->env, RVV)) { + kvm_riscv_read_vlenb(cpu, kvmcpu, reglist); + } } static void riscv_init_kvm_registers(Object *cpu_obj) @@ -983,6 +1183,11 @@ int kvm_arch_get_registers(CPUState *cs) return ret; } + ret = kvm_riscv_get_regs_vector(cs); + if (ret) { + return ret; + } + return ret; } @@ -1023,6 +1228,11 @@ int kvm_arch_put_registers(CPUState *cs, int level) return ret; } + ret = kvm_riscv_put_regs_vector(cs); + if (ret) { + return ret; + } + if (KVM_PUT_RESET_STATE == level) { RISCVCPU *cpu = RISCV_CPU(cs); if (cs->cpu_index == 0) { @@ -1082,8 +1292,7 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs) uint64_t id; int ret; - id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(mvendorid)); + id = RISCV_CONFIG_REG(env, mvendorid); /* * cfg.mvendorid is an uint32 but a target_ulong will * be written. 
Assign it to a target_ulong var to avoid @@ -1095,15 +1304,13 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs) return ret; } - id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(marchid)); + id = RISCV_CONFIG_REG(env, marchid); ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid); if (ret != 0) { return ret; } - id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(mimpid)); + id = RISCV_CONFIG_REG(env, mimpid); ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid); return ret; @@ -1376,21 +1583,24 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, exit(1); } - socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1; - ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, - KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS, - &socket_bits, true, NULL); - if (ret < 0) { - error_report("KVM AIA: failed to set group_bits"); - exit(1); - } - ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, - KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT, - &group_shift, true, NULL); - if (ret < 0) { - error_report("KVM AIA: failed to set group_shift"); - exit(1); + if (socket_count > 1) { + socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1; + ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, + KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS, + &socket_bits, true, NULL); + if (ret < 0) { + error_report("KVM AIA: failed to set group_bits"); + exit(1); + } + + ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, + KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT, + &group_shift, true, NULL); + if (ret < 0) { + error_report("KVM AIA: failed to set group_shift"); + exit(1); + } } guest_bits = guest_num == 0 ? 0 : @@ -1464,19 +1674,116 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, static void kvm_cpu_instance_init(CPUState *cs) { Object *obj = OBJECT(RISCV_CPU(cs)); - DeviceState *dev = DEVICE(obj); riscv_init_kvm_registers(obj); kvm_riscv_add_cpu_user_properties(obj); +} - for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) { - /* Check if we have a specific KVM handler for the option */ - if (object_property_find(obj, prop->name)) { - continue; +/* + * We'll get here via the following path: + * + * riscv_cpu_realize() + * -> cpu_exec_realizefn() + * -> kvm_cpu_realize() (via accel_cpu_common_realize()) + */ +static bool kvm_cpu_realize(CPUState *cs, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + int ret; + + if (riscv_has_ext(&cpu->env, RVV)) { + ret = prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON); + if (ret) { + error_setg(errp, "Error in prctl PR_RISCV_V_SET_CONTROL, code: %s", + strerrorname_np(errno)); + return false; + } + } + + return true; +} + +void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp) +{ + CPURISCVState *env = &cpu->env; + KVMScratchCPU kvmcpu; + struct kvm_one_reg reg; + uint64_t val; + int ret; + + /* short-circuit without spinning the scratch CPU */ + if (!cpu->cfg.ext_zicbom && !cpu->cfg.ext_zicboz && + !riscv_has_ext(env, RVV)) { + return; + } + + if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) { + error_setg(errp, "Unable to create scratch KVM cpu"); + return; + } + + if (cpu->cfg.ext_zicbom && + riscv_cpu_option_set(kvm_cbom_blocksize.name)) { + + reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + kvm_cbom_blocksize.kvm_reg_id); + reg.addr = (uint64_t)&val; + ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, ®); + if (ret != 0) { + error_setg(errp, "Unable to read cbom_blocksize, error %d", errno); + return; + } + + if 
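
kvm_cpu_realize() above opts the vCPU thread into the kernel's vector-state handling with the PR_RISCV_V_SET_CONTROL prctl; the raw constants are defined near the top of the file, presumably so the code still builds against kernel headers that predate the interface (it arrived together with the kernel's RISC-V vector support in Linux 6.5). A standalone illustration of the same call:

    #include <stdio.h>
    #include <sys/prctl.h>

    #define PR_RISCV_V_SET_CONTROL    69
    #define PR_RISCV_V_VSTATE_CTRL_ON 2

    /* Enable the vector state for the calling thread; 0 on success. */
    static int enable_riscv_vector_state(void)
    {
        if (prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON) != 0) {
            perror("PR_RISCV_V_SET_CONTROL");
            return -1;
        }
        return 0;
    }
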
(cpu->cfg.cbom_blocksize != val) { + error_setg(errp, "Unable to set cbom_blocksize to a different " + "value than the host (%lu)", val); + return; } - qdev_property_add_static(dev, prop); } + + if (cpu->cfg.ext_zicboz && + riscv_cpu_option_set(kvm_cboz_blocksize.name)) { + + reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + kvm_cboz_blocksize.kvm_reg_id); + reg.addr = (uint64_t)&val; + ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, ®); + if (ret != 0) { + error_setg(errp, "Unable to read cboz_blocksize, error %d", errno); + return; + } + + if (cpu->cfg.cboz_blocksize != val) { + error_setg(errp, "Unable to set cboz_blocksize to a different " + "value than the host (%lu)", val); + return; + } + } + + /* Users are setting vlen, not vlenb */ + if (riscv_has_ext(env, RVV) && riscv_cpu_option_set("vlen")) { + if (!kvm_v_vlenb.supported) { + error_setg(errp, "Unable to set 'vlenb': register not supported"); + return; + } + + reg.id = kvm_v_vlenb.kvm_reg_id; + reg.addr = (uint64_t)&val; + ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, ®); + if (ret != 0) { + error_setg(errp, "Unable to read vlenb register, error %d", errno); + return; + } + + if (cpu->cfg.vlenb != val) { + error_setg(errp, "Unable to set 'vlen' to a different " + "value than the host (%lu)", val * 8); + return; + } + } + + kvm_riscv_destroy_scratch_vcpu(&kvmcpu); } static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data) @@ -1484,6 +1791,7 @@ static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data) AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); acc->cpu_instance_init = kvm_cpu_instance_init; + acc->cpu_target_realize = kvm_cpu_realize; } static const TypeInfo kvm_cpu_accel_type_info = { @@ -1499,14 +1807,14 @@ static void kvm_cpu_accel_register_types(void) } type_init(kvm_cpu_accel_register_types); -static void riscv_host_cpu_init(Object *obj) +static void riscv_host_cpu_class_init(ObjectClass *c, void *data) { - CPURISCVState *env = &RISCV_CPU(obj)->env; + RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); #if defined(TARGET_RISCV32) - env->misa_mxl_max = env->misa_mxl = MXL_RV32; + mcc->misa_mxl_max = MXL_RV32; #elif defined(TARGET_RISCV64) - env->misa_mxl_max = env->misa_mxl = MXL_RV64; + mcc->misa_mxl_max = MXL_RV64; #endif } @@ -1514,7 +1822,7 @@ static const TypeInfo riscv_kvm_cpu_type_infos[] = { { .name = TYPE_RISCV_CPU_HOST, .parent = TYPE_RISCV_CPU, - .instance_init = riscv_host_cpu_init, + .class_init = riscv_host_cpu_class_init, } }; diff --git a/target/riscv/kvm/kvm_riscv.h b/target/riscv/kvm/kvm_riscv.h index 8329cfab82c..58518988681 100644 --- a/target/riscv/kvm/kvm_riscv.h +++ b/target/riscv/kvm/kvm_riscv.h @@ -27,5 +27,7 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, uint64_t guest_num); void riscv_kvm_aplic_request(void *opaque, int irq, int level); int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state); +void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp); +uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs); #endif diff --git a/target/riscv/machine.c b/target/riscv/machine.c index fdde243e040..76f2150f78b 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -49,7 +49,7 @@ static const VMStateDescription vmstate_pmp_entry = { .name = "cpu/pmp/entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(addr_reg, pmp_entry_t), VMSTATE_UINT8(cfg_reg, pmp_entry_t), VMSTATE_END_OF_LIST() @@ -62,7 +62,7 @@ static const VMStateDescription vmstate_pmp = { .minimum_version_id = 
1, .needed = pmp_needed, .post_load = pmp_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(env.pmp_state.pmp, RISCVCPU, MAX_RISCV_PMPS, 0, vmstate_pmp_entry, pmp_entry_t), VMSTATE_END_OF_LIST() @@ -79,14 +79,14 @@ static bool hyper_needed(void *opaque) static const VMStateDescription vmstate_hyper = { .name = "cpu/hyper", - .version_id = 3, - .minimum_version_id = 3, + .version_id = 4, + .minimum_version_id = 4, .needed = hyper_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(env.hstatus, RISCVCPU), VMSTATE_UINTTL(env.hedeleg, RISCVCPU), VMSTATE_UINT64(env.hideleg, RISCVCPU), - VMSTATE_UINTTL(env.hcounteren, RISCVCPU), + VMSTATE_UINT32(env.hcounteren, RISCVCPU), VMSTATE_UINTTL(env.htval, RISCVCPU), VMSTATE_UINTTL(env.htinst, RISCVCPU), VMSTATE_UINTTL(env.hgatp, RISCVCPU), @@ -138,7 +138,7 @@ static const VMStateDescription vmstate_vector = { .version_id = 2, .minimum_version_id = 2, .needed = vector_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64), VMSTATE_UINTTL(env.vxrm, RISCVCPU), VMSTATE_UINTTL(env.vxsat, RISCVCPU), @@ -163,7 +163,7 @@ static const VMStateDescription vmstate_pointermasking = { .version_id = 1, .minimum_version_id = 1, .needed = pointermasking_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(env.mmte, RISCVCPU), VMSTATE_UINTTL(env.mpmmask, RISCVCPU), VMSTATE_UINTTL(env.mpmbase, RISCVCPU), @@ -178,10 +178,9 @@ static const VMStateDescription vmstate_pointermasking = { static bool rv128_needed(void *opaque) { - RISCVCPU *cpu = opaque; - CPURISCVState *env = &cpu->env; + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(opaque); - return env->misa_mxl_max == MXL_RV128; + return mcc->misa_mxl_max == MXL_RV128; } static const VMStateDescription vmstate_rv128 = { @@ -189,7 +188,7 @@ static const VMStateDescription vmstate_rv128 = { .version_id = 1, .minimum_version_id = 1, .needed = rv128_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32), VMSTATE_UINT64(env.mscratchh, RISCVCPU), VMSTATE_UINT64(env.sscratchh, RISCVCPU), @@ -218,7 +217,7 @@ static const VMStateDescription vmstate_kvmtimer = { .minimum_version_id = 1, .needed = kvmtimer_needed, .post_load = cpu_kvmtimer_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU), VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU), VMSTATE_UINT64(env.kvm_timer_state, RISCVCPU), @@ -252,7 +251,7 @@ static const VMStateDescription vmstate_debug = { .minimum_version_id = 2, .needed = debug_needed, .post_load = debug_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(env.trigger_cur, RISCVCPU), VMSTATE_UINTTL_ARRAY(env.tdata1, RISCVCPU, RV_MAX_TRIGGERS), VMSTATE_UINTTL_ARRAY(env.tdata2, RISCVCPU, RV_MAX_TRIGGERS), @@ -283,7 +282,7 @@ static const VMStateDescription vmstate_smstateen = { .version_id = 1, .minimum_version_id = 1, .needed = smstateen_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.mstateen, RISCVCPU, 4), VMSTATE_UINT64_ARRAY(env.hstateen, RISCVCPU, 4), VMSTATE_UINT64_ARRAY(env.sstateen, RISCVCPU, 4), @@ -304,7 +303,7 @@ static const VMStateDescription vmstate_envcfg = { .version_id = 1, .minimum_version_id = 1, .needed = envcfg_needed, - .fields = (VMStateField[]) { + .fields = 
(const VMStateField[]) { VMSTATE_UINT64(env.menvcfg, RISCVCPU), VMSTATE_UINTTL(env.senvcfg, RISCVCPU), VMSTATE_UINT64(env.henvcfg, RISCVCPU), @@ -324,7 +323,7 @@ static const VMStateDescription vmstate_pmu_ctr_state = { .version_id = 1, .minimum_version_id = 1, .needed = pmu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState), VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState), VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState), @@ -346,7 +345,7 @@ static const VMStateDescription vmstate_jvt = { .version_id = 1, .minimum_version_id = 1, .needed = jvt_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(env.jvt, RISCVCPU), VMSTATE_END_OF_LIST() } @@ -354,10 +353,10 @@ static const VMStateDescription vmstate_jvt = { const VMStateDescription vmstate_riscv_cpu = { .name = "cpu", - .version_id = 9, - .minimum_version_id = 9, + .version_id = 10, + .minimum_version_id = 10, .post_load = riscv_cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32), VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32), VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64), @@ -372,7 +371,7 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINTTL(env.vext_ver, RISCVCPU), VMSTATE_UINT32(env.misa_mxl, RISCVCPU), VMSTATE_UINT32(env.misa_ext, RISCVCPU), - VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU), + VMSTATE_UNUSED(4), VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU), VMSTATE_UINTTL(env.priv, RISCVCPU), VMSTATE_BOOL(env.virt_enabled, RISCVCPU), @@ -398,9 +397,9 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINTTL(env.mtval, RISCVCPU), VMSTATE_UINTTL(env.miselect, RISCVCPU), VMSTATE_UINTTL(env.siselect, RISCVCPU), - VMSTATE_UINTTL(env.scounteren, RISCVCPU), - VMSTATE_UINTTL(env.mcounteren, RISCVCPU), - VMSTATE_UINTTL(env.mcountinhibit, RISCVCPU), + VMSTATE_UINT32(env.scounteren, RISCVCPU), + VMSTATE_UINT32(env.mcounteren, RISCVCPU), + VMSTATE_UINT32(env.mcountinhibit, RISCVCPU), VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0, vmstate_pmu_ctr_state, PMUCTRState), VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS), @@ -411,7 +410,7 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_pmp, &vmstate_hyper, &vmstate_vector, diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index 5355225d56c..f414aaebdba 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -157,7 +157,7 @@ void helper_cbo_zero(CPURISCVState *env, target_ulong address) { RISCVCPU *cpu = env_archcpu(env); uint16_t cbozlen = cpu->cfg.cboz_blocksize; - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = riscv_env_mmu_index(env, false); uintptr_t ra = GETPC(); void *mem; @@ -205,7 +205,7 @@ static void check_zicbom_access(CPURISCVState *env, uintptr_t ra) { RISCVCPU *cpu = env_archcpu(env); - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = riscv_env_mmu_index(env, false); uint16_t cbomlen = cpu->cfg.cbom_blocksize; void *phost; int ret; diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index 162e88a90a6..2a76b611a00 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -126,7 +126,7 @@ static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val) /* If !mseccfg.MML then ignore writes with encoding RW=01 */ if ((val & PMP_WRITE) && !(val & PMP_READ) && 
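Because misa_mxl_max now lives in the CPU class rather than in CPURISCVState, it no longer needs to be migrated; the hunk above replaces the field with VMSTATE_UNUSED(4), which keeps four filler bytes in the stream so the offsets of the remaining fields do not shift. A short sketch of retiring a field this way, with invented names and again only buildable inside the QEMU tree:

#include "qemu/osdep.h"
#include "migration/vmstate.h"

typedef struct DemoState {
    uint32_t keep_me;
    /* 'legacy_mode' used to live here; it is now derived at load time. */
} DemoState;

static const VMStateDescription vmstate_demo = {
    .name = "demo",
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(keep_me, DemoState),
        /*
         * Reserve the 4 bytes the retired field used to occupy so the
         * stream layout of every following field stays unchanged.
         */
        VMSTATE_UNUSED(4),
        VMSTATE_END_OF_LIST()
    }
};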
!MSECCFG_MML_ISSET(env)) { - val &= ~(PMP_WRITE | PMP_READ); + return false; } env->pmp_state.pmp[pmp_index].cfg_reg = val; pmp_update_rule_addr(env, pmp_index); @@ -150,8 +150,7 @@ void pmp_unlock_entries(CPURISCVState *env) } } -static void pmp_decode_napot(target_ulong a, target_ulong *sa, - target_ulong *ea) +static void pmp_decode_napot(hwaddr a, hwaddr *sa, hwaddr *ea) { /* * aaaa...aaa0 8-byte NAPOT range @@ -173,8 +172,8 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index) uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg; target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg; target_ulong prev_addr = 0u; - target_ulong sa = 0u; - target_ulong ea = 0u; + hwaddr sa = 0u; + hwaddr ea = 0u; if (pmp_index >= 1u) { prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg; @@ -227,8 +226,7 @@ void pmp_update_rule_nums(CPURISCVState *env) } } -static int pmp_is_in_range(CPURISCVState *env, int pmp_index, - target_ulong addr) +static int pmp_is_in_range(CPURISCVState *env, int pmp_index, hwaddr addr) { int result = 0; @@ -305,14 +303,14 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs, * Return true if a pmp rule match or default match * Return false if no match */ -bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, +bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs, target_ulong mode) { int i = 0; int pmp_size = 0; - target_ulong s = 0; - target_ulong e = 0; + hwaddr s = 0; + hwaddr e = 0; /* Short cut if no rules */ if (0 == pmp_get_num_rules(env)) { @@ -624,12 +622,12 @@ target_ulong mseccfg_csr_read(CPURISCVState *env) * To avoid this we return a size of 1 (which means no caching) if the PMP * region only covers partial of the TLB page. */ -target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr) +target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr) { - target_ulong pmp_sa; - target_ulong pmp_ea; - target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1); - target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1; + hwaddr pmp_sa; + hwaddr pmp_ea; + hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1); + hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1; int i; /* diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h index 9af8614cd4f..f5c10ce85c9 100644 --- a/target/riscv/pmp.h +++ b/target/riscv/pmp.h @@ -53,8 +53,8 @@ typedef struct { } pmp_entry_t; typedef struct { - target_ulong sa; - target_ulong ea; + hwaddr sa; + hwaddr ea; } pmp_addr_t; typedef struct { @@ -73,11 +73,11 @@ target_ulong mseccfg_csr_read(CPURISCVState *env); void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, target_ulong val); target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index); -bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, +bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs, target_ulong mode); -target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr); +target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr); void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index); void pmp_update_rule_nums(CPURISCVState *env); uint32_t pmp_get_num_rules(CPURISCVState *env); diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h index 505fc850d38..7c0ad661e05 100644 --- a/target/riscv/pmu.h +++ b/target/riscv/pmu.h @@ -16,6 +16,9 @@ * this program. If not, see . 
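pmp_decode_napot() now produces hwaddr ranges so a PMP region can describe physical addresses wider than target_ulong; the comment in the hunk documents the NAPOT encoding, where k trailing one bits in pmpaddr select a naturally aligned 2^(k+3)-byte region. The standalone model below reconstructs that decoding for illustration; the bit trick itself is not shown in the hunk.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/*
 * A NAPOT pmpaddr register holds address >> 2 with the low bits encoding
 * the size: k trailing 1s select a 2^(k+3)-byte naturally aligned region.
 * Appending "11" and using a & (a + 1) / a | (a + 1) clears or sets the
 * whole trailing-ones block in one step, giving the start and end.
 */
static void napot_decode(uint64_t pmpaddr, uint64_t *sa, uint64_t *ea)
{
    uint64_t a = (pmpaddr << 2) | 0x3;

    *sa = a & (a + 1);   /* base: trailing ones (and the bit above) cleared */
    *ea = a | (a + 1);   /* end:  the same bits forced to one */
}

int main(void)
{
    /* pmpaddr = 0x20000001 -> 16-byte region starting at 0x80000000. */
    uint64_t sa, ea;

    napot_decode(0x20000001ULL, &sa, &ea);
    printf("start=0x%" PRIx64 " end=0x%" PRIx64 "\n", sa, ea);
    return 0;
}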
*/ +#ifndef RISCV_PMU_H +#define RISCV_PMU_H + #include "cpu.h" #include "qapi/error.h" @@ -31,3 +34,5 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx); void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name); int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx); + +#endif /* RISCV_PMU_H */ diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c index 2f2dbae7c85..d363dc318d9 100644 --- a/target/riscv/riscv-qmp-cmds.c +++ b/target/riscv/riscv-qmp-cmds.c @@ -26,8 +26,8 @@ #include "qapi/error.h" #include "qapi/qapi-commands-machine-target.h" +#include "qapi/qmp/qbool.h" #include "qapi/qmp/qdict.h" -#include "qapi/qmp/qerror.h" #include "qapi/qobject-input-visitor.h" #include "qapi/visitor.h" #include "qom/qom-qobject.h" @@ -44,8 +44,7 @@ static void riscv_cpu_add_definition(gpointer data, gpointer user_data) const char *typename = object_class_get_name(oc); ObjectClass *dyn_class; - info->name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_RISCV_CPU)); + info->name = cpu_model_from_type(typename); info->q_typename = g_strdup(typename); dyn_class = object_class_dynamic_cast(oc, TYPE_RISCV_DYNAMIC_CPU); @@ -99,19 +98,50 @@ static void riscv_obj_add_multiext_props(Object *obj, QDict *qdict_out, } } +static void riscv_obj_add_named_feats_qdict(Object *obj, QDict *qdict_out) +{ + const RISCVCPUMultiExtConfig *named_cfg; + RISCVCPU *cpu = RISCV_CPU(obj); + QObject *value; + bool flag_val; + + for (int i = 0; riscv_cpu_named_features[i].name != NULL; i++) { + named_cfg = &riscv_cpu_named_features[i]; + flag_val = isa_ext_is_enabled(cpu, named_cfg->offset); + value = QOBJECT(qbool_from_bool(flag_val)); + + qdict_put_obj(qdict_out, named_cfg->name, value); + } +} + +static void riscv_obj_add_profiles_qdict(Object *obj, QDict *qdict_out) +{ + RISCVCPUProfile *profile; + QObject *value; + + for (int i = 0; riscv_profiles[i] != NULL; i++) { + profile = riscv_profiles[i]; + value = QOBJECT(qbool_from_bool(profile->enabled)); + + qdict_put_obj(qdict_out, profile->name, value); + } +} + static void riscv_cpuobj_validate_qdict_in(Object *obj, QObject *props, - const QDict *qdict_in, + const char *props_arg_name, Error **errp) { + const QDict *qdict_in; const QDictEntry *qe; Visitor *visitor; Error *local_err = NULL; visitor = qobject_input_visitor_new(props); - if (!visit_start_struct(visitor, NULL, NULL, 0, &local_err)) { + if (!visit_start_struct(visitor, props_arg_name, NULL, 0, &local_err)) { goto err; } + qdict_in = qobject_to(QDict, props); for (qe = qdict_first(qdict_in); qe; qe = qdict_next(qdict_in, qe)) { object_property_find_err(obj, qe->key, &local_err); if (local_err) { @@ -129,11 +159,6 @@ static void riscv_cpuobj_validate_qdict_in(Object *obj, QObject *props, goto err; } - riscv_cpu_finalize_features(RISCV_CPU(obj), &local_err); - if (local_err) { - goto err; - } - visit_end_struct(visitor, NULL); err: @@ -146,7 +171,6 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, Error **errp) { CpuModelExpansionInfo *expansion_info; - const QDict *qdict_in = NULL; QDict *qdict_out; ObjectClass *oc; Object *obj; @@ -164,14 +188,6 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, return NULL; } - if (model->props) { - qdict_in = qobject_to(QDict, model->props); - if (!qdict_in) { - error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict"); - return NULL; - } - } - obj = object_new(object_class_get_name(oc)); 
riscv_check_if_cpu_available(RISCV_CPU(obj), &local_err); @@ -181,8 +197,8 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, return NULL; } - if (qdict_in) { - riscv_cpuobj_validate_qdict_in(obj, model->props, qdict_in, + if (model->props) { + riscv_cpuobj_validate_qdict_in(obj, model->props, "model.props", &local_err); if (local_err) { error_propagate(errp, local_err); @@ -191,6 +207,13 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, } } + riscv_cpu_finalize_features(RISCV_CPU(obj), &local_err); + if (local_err) { + error_propagate(errp, local_err); + object_unref(obj); + return NULL; + } + expansion_info = g_new0(CpuModelExpansionInfo, 1); expansion_info->model = g_malloc0(sizeof(*expansion_info->model)); expansion_info->model->name = g_strdup(model->name); @@ -200,6 +223,8 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_extensions); riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_experimental_exts); riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_vendor_exts); + riscv_obj_add_named_feats_qdict(obj, qdict_out); + riscv_obj_add_profiles_qdict(obj, qdict_out); /* Add our CPU boolean options too */ riscv_obj_add_qdict_prop(obj, qdict_out, "mmu"); diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c index 8a35683a345..b5b95e052d2 100644 --- a/target/riscv/tcg/tcg-cpu.c +++ b/target/riscv/tcg/tcg-cpu.c @@ -34,6 +34,7 @@ /* Hash that stores user set extensions */ static GHashTable *multi_ext_user_opts; +static GHashTable *misa_ext_user_opts; static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset) { @@ -41,6 +42,52 @@ static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset) GUINT_TO_POINTER(ext_offset)); } +static bool cpu_misa_ext_is_user_set(uint32_t misa_bit) +{ + return g_hash_table_contains(misa_ext_user_opts, + GUINT_TO_POINTER(misa_bit)); +} + +static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value) +{ + g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset), + (gpointer)value); +} + +static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value) +{ + g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit), + (gpointer)value); +} + +static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit, + bool enabled) +{ + CPURISCVState *env = &cpu->env; + + if (enabled) { + env->misa_ext |= bit; + env->misa_ext_mask |= bit; + } else { + env->misa_ext &= ~bit; + env->misa_ext_mask &= ~bit; + } +} + +static const char *cpu_priv_ver_to_str(int priv_ver) +{ + switch (priv_ver) { + case PRIV_VERSION_1_10_0: + return "v1.10.0"; + case PRIV_VERSION_1_11_0: + return "v1.11.0"; + case PRIV_VERSION_1_12_0: + return "v1.12.0"; + } + + g_assert_not_reached(); +} + static void riscv_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -82,7 +129,7 @@ static void riscv_restore_state_to_opc(CPUState *cs, env->bins = data[1]; } -static const struct TCGCPUOps riscv_tcg_ops = { +static const TCGCPUOps riscv_tcg_ops = { .initialize = riscv_translate_init, .synchronize_from_tb = riscv_cpu_synchronize_from_tb, .restore_state_to_opc = riscv_restore_state_to_opc, @@ -114,6 +161,72 @@ static int cpu_cfg_ext_get_min_version(uint32_t ext_offset) g_assert_not_reached(); } +static const char *cpu_cfg_ext_get_name(uint32_t ext_offset) +{ + const RISCVCPUMultiExtConfig *feat; + const RISCVIsaExtData *edata; + + for (edata = isa_edata_arr; edata->name != NULL; edata++) { + if 
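The new misa_ext_user_opts table mirrors the existing multi_ext_user_opts one: the key is the MISA bit (or config offset) packed with GUINT_TO_POINTER, and the mere presence of the key records that the user set the option, independently of its value. A small standalone GLib sketch of that pattern; the program structure is invented.

/* Build with: gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>
#include <stdio.h>

static GHashTable *user_opts;

static void opt_set(guint key, gboolean value)
{
    /* Key and value are packed into pointers, as in the patch. */
    g_hash_table_insert(user_opts, GUINT_TO_POINTER(key),
                        GINT_TO_POINTER(value));
}

static gboolean opt_is_user_set(guint key)
{
    /* A FALSE value maps to NULL, but the key still proves "user set it". */
    return g_hash_table_contains(user_opts, GUINT_TO_POINTER(key));
}

static gboolean opt_value(guint key)
{
    return GPOINTER_TO_INT(g_hash_table_lookup(user_opts, GUINT_TO_POINTER(key)));
}

int main(void)
{
    user_opts = g_hash_table_new(NULL, g_direct_equal);

    opt_set(0x100, TRUE);   /* user enabled option 0x100 */
    opt_set(0x200, FALSE);  /* user explicitly disabled option 0x200 */

    printf("0x100 set=%d value=%d\n", opt_is_user_set(0x100), opt_value(0x100));
    printf("0x200 set=%d value=%d\n", opt_is_user_set(0x200), opt_value(0x200));
    printf("0x300 set=%d (left at default)\n", opt_is_user_set(0x300));

    g_hash_table_destroy(user_opts);
    return 0;
}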
(edata->ext_enable_offset == ext_offset) { + return edata->name; + } + } + + for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) { + if (feat->offset == ext_offset) { + return feat->name; + } + } + + g_assert_not_reached(); +} + +static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset) +{ + const RISCVCPUMultiExtConfig *feat; + + for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) { + if (feat->offset == ext_offset) { + return true; + } + } + + return false; +} + +static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset) +{ + /* + * All other named features are already enabled + * in riscv_tcg_cpu_instance_init(). + */ + if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) { + cpu->cfg.cbom_blocksize = 64; + cpu->cfg.cbop_blocksize = 64; + cpu->cfg.cboz_blocksize = 64; + } +} + +static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env, + uint32_t ext_offset) +{ + int ext_priv_ver; + + if (env->priv_ver == PRIV_VERSION_LATEST) { + return; + } + + ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset); + + if (env->priv_ver < ext_priv_ver) { + /* + * Note: the 'priv_spec' command line option, if present, + * will take precedence over this priv_ver bump. + */ + env->priv_ver = ext_priv_ver; + } +} + static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset, bool value) { @@ -148,97 +261,24 @@ static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) } } -static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp) -{ - RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); - CPUClass *cc = CPU_CLASS(mcc); - CPURISCVState *env = &cpu->env; - - /* Validate that MISA_MXL is set properly. */ - switch (env->misa_mxl_max) { -#ifdef TARGET_RISCV64 - case MXL_RV64: - case MXL_RV128: - cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; - break; -#endif - case MXL_RV32: - cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; - break; - default: - g_assert_not_reached(); - } - - if (env->misa_mxl_max != env->misa_mxl) { - error_setg(errp, "misa_mxl_max must be equal to misa_mxl"); - return; - } -} - -static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp) -{ - CPURISCVState *env = &cpu->env; - int priv_version = -1; - - if (cpu->cfg.priv_spec) { - if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) { - priv_version = PRIV_VERSION_1_12_0; - } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { - priv_version = PRIV_VERSION_1_11_0; - } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { - priv_version = PRIV_VERSION_1_10_0; - } else { - error_setg(errp, - "Unsupported privilege spec version '%s'", - cpu->cfg.priv_spec); - return; - } - - env->priv_ver = priv_version; - } -} - static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg, Error **errp) { - if (!is_power_of_2(cfg->vlen)) { - error_setg(errp, "Vector extension VLEN must be power of 2"); - return; - } + uint32_t vlen = cfg->vlenb << 3; - if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) { + if (vlen > RV_VLEN_MAX || vlen < 128) { error_setg(errp, "Vector extension implementation only supports VLEN " "in the range [128, %d]", RV_VLEN_MAX); return; } - if (!is_power_of_2(cfg->elen)) { - error_setg(errp, "Vector extension ELEN must be power of 2"); - return; - } - if (cfg->elen > 64 || cfg->elen < 8) { error_setg(errp, "Vector extension implementation only supports ELEN " "in the range [8, 64]"); return; } - - if (cfg->vext_spec) { - if (!g_strcmp0(cfg->vext_spec, "v1.0")) { - env->vext_ver = VEXT_VERSION_1_00_0; - } else { - error_setg(errp, "Unsupported 
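cpu_bump_multi_ext_priv_ver() above raises env->priv_ver to the minimum privileged-spec version an enabled extension needs, but never lowers it, and an explicit priv_spec option still takes precedence afterwards. A reduced standalone sketch of that policy; the enum values are placeholders.

#include <stdio.h>

enum {
    PRIV_1_10 = 0,
    PRIV_1_11,
    PRIV_1_12,
    PRIV_LATEST = PRIV_1_12,
};

/*
 * Raise the effective priv spec to what the extension needs, never lower
 * it.  A priv_spec=... command line option would still override this
 * later, as the comment in the patch notes.
 */
static int bump_priv_ver(int cur_priv_ver, int ext_min_priv_ver)
{
    if (cur_priv_ver == PRIV_LATEST || cur_priv_ver >= ext_min_priv_ver) {
        return cur_priv_ver;
    }
    return ext_min_priv_ver;
}

int main(void)
{
    /* CPU starts at 1.10; enabling a 1.12-only extension bumps it. */
    int ver = PRIV_1_10;

    ver = bump_priv_ver(ver, PRIV_1_12);
    printf("priv version index after bump: %d\n", ver);

    /* Enabling an older-spec extension never downgrades it. */
    ver = bump_priv_ver(ver, PRIV_1_11);
    printf("priv version index stays: %d\n", ver);
    return 0;
}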
vector spec version '%s'", - cfg->vext_spec); - return; - } - } else if (env->vext_ver == 0) { - qemu_log("vector version is not specified, " - "use the default value v1.0\n"); - - env->vext_ver = VEXT_VERSION_1_00_0; - } } static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) @@ -273,40 +313,108 @@ static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) } } +static void riscv_cpu_update_named_features(RISCVCPU *cpu) +{ + if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) { + cpu->cfg.has_priv_1_11 = true; + } + + if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) { + cpu->cfg.has_priv_1_12 = true; + } + + /* zic64b is 1.12 or later */ + cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 && + cpu->cfg.cbop_blocksize == 64 && + cpu->cfg.cboz_blocksize == 64 && + cpu->cfg.has_priv_1_12; +} + +static void riscv_cpu_validate_g(RISCVCPU *cpu) +{ + const char *warn_msg = "RVG mandates disabled extension %s"; + uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD}; + bool send_warn = cpu_misa_ext_is_user_set(RVG); + + for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) { + uint32_t bit = g_misa_bits[i]; + + if (riscv_has_ext(&cpu->env, bit)) { + continue; + } + + if (!cpu_misa_ext_is_user_set(bit)) { + riscv_cpu_write_misa_bit(cpu, bit, true); + continue; + } + + if (send_warn) { + warn_report(warn_msg, riscv_get_misa_ext_name(bit)); + } + } + + if (!cpu->cfg.ext_zicsr) { + if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) { + cpu->cfg.ext_zicsr = true; + } else if (send_warn) { + warn_report(warn_msg, "zicsr"); + } + } + + if (!cpu->cfg.ext_zifencei) { + if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) { + cpu->cfg.ext_zifencei = true; + } else if (send_warn) { + warn_report(warn_msg, "zifencei"); + } + } +} + +static void riscv_cpu_validate_b(RISCVCPU *cpu) +{ + const char *warn_msg = "RVB mandates disabled extension %s"; + + if (!cpu->cfg.ext_zba) { + if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) { + cpu->cfg.ext_zba = true; + } else { + warn_report(warn_msg, "zba"); + } + } + + if (!cpu->cfg.ext_zbb) { + if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) { + cpu->cfg.ext_zbb = true; + } else { + warn_report(warn_msg, "zbb"); + } + } + + if (!cpu->cfg.ext_zbs) { + if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) { + cpu->cfg.ext_zbs = true; + } else { + warn_report(warn_msg, "zbs"); + } + } +} + /* * Check consistency between chosen extensions while setting * cpu->cfg accordingly. 
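riscv_cpu_validate_g() and riscv_cpu_validate_b() apply the same rule: a bundle letter quietly enables every member extension the user left untouched and only warns about members the user explicitly disabled, instead of failing the whole CPU. A compact standalone model of that decision; the structure and names are invented.

#include <stdbool.h>
#include <stdio.h>

struct member {
    const char *name;
    bool enabled;    /* current value of the extension */
    bool user_set;   /* did the user set it explicitly? */
};

/*
 * Bundle expansion as in the patch: members that are off and untouched
 * are switched on; members the user disabled on purpose stay off and
 * only produce a warning.
 */
static void expand_bundle(const char *bundle, struct member *m, int n)
{
    for (int i = 0; i < n; i++) {
        if (m[i].enabled) {
            continue;
        }
        if (!m[i].user_set) {
            m[i].enabled = true;
        } else {
            fprintf(stderr, "warning: %s mandates disabled extension %s\n",
                    bundle, m[i].name);
        }
    }
}

int main(void)
{
    struct member b_members[] = {
        { "zba", false, false },  /* untouched -> auto-enabled */
        { "zbb", true,  false },  /* already on */
        { "zbs", false, true  },  /* user said zbs=false -> warn, keep off */
    };

    expand_bundle("RVB", b_members, 3);
    for (int i = 0; i < 3; i++) {
        printf("%s=%d\n", b_members[i].name, b_members[i].enabled);
    }
    return 0;
}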
*/ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); CPURISCVState *env = &cpu->env; Error *local_err = NULL; - /* Do some ISA extension error checking */ - if (riscv_has_ext(env, RVG) && - !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) && - riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) && - riscv_has_ext(env, RVD) && - cpu->cfg.ext_zicsr && cpu->cfg.ext_zifencei)) { - - if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr)) && - !cpu->cfg.ext_zicsr) { - error_setg(errp, "RVG requires Zicsr but user set Zicsr to false"); - return; - } - - if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei)) && - !cpu->cfg.ext_zifencei) { - error_setg(errp, "RVG requires Zifencei but user set " - "Zifencei to false"); - return; - } - - cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zicsr), true); - cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zifencei), true); + if (riscv_has_ext(env, RVG)) { + riscv_cpu_validate_g(cpu); + } - env->misa_ext |= RVI | RVM | RVA | RVF | RVD; - env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD; + if (riscv_has_ext(env, RVB)) { + riscv_cpu_validate_b(cpu); } if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { @@ -343,6 +451,11 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) return; } + if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) { + error_setg(errp, "Zacas extension requires A extension"); + return; + } + if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { error_setg(errp, "Zawrs extension requires A extension"); return; @@ -417,11 +530,6 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) return; } - if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { - error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); - return; - } - if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); return; @@ -459,7 +567,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true); cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true); cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true); - if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { + if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); } } @@ -467,7 +575,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); - if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { + if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); } if (riscv_has_ext(env, RVD)) { @@ -475,7 +583,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) } } - if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { + if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { error_setg(errp, "Zcf extension is only relevant to RV32"); return; } @@ -620,16 +728,110 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) riscv_cpu_disable_priv_spec_isa_exts(cpu); } -void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp) +#ifndef CONFIG_USER_ONLY +static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu, + RISCVCPUProfile *profile, + bool send_warn) +{ + int 
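riscv_cpu_validate_set_extensions() is a long run of explicit "X requires Y" checks, and the hunk above adds one more for Zacas needing RVA. The standalone sketch below expresses the same rule as a small table purely for illustration; the real code keeps individual if statements, and the table entries are taken from error messages in this file.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* One "X requires Y" rule. */
struct dep {
    const char *ext;
    const char *needs;
};

static const struct dep deps[] = {
    { "zacas", "A" },
    { "zawrs", "A" },
    { "zvfbfmin", "zve32f" },
};

static bool is_enabled(const char *name, const char *const *on, int n)
{
    for (int i = 0; i < n; i++) {
        if (!strcmp(on[i], name)) {
            return true;
        }
    }
    return false;
}

int main(void)
{
    /* zacas and zvfbfmin are on, but their prerequisites are missing. */
    const char *on[] = { "zacas", "zvfbfmin" };
    int n = 2;

    for (size_t i = 0; i < sizeof(deps) / sizeof(deps[0]); i++) {
        if (is_enabled(deps[i].ext, on, n) && !is_enabled(deps[i].needs, on, n)) {
            printf("%s extension requires %s extension\n",
                   deps[i].ext, deps[i].needs);
        }
    }
    return 0;
}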
satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported); + + if (profile->satp_mode > satp_max) { + if (send_warn) { + bool is_32bit = riscv_cpu_is_32bit(cpu); + const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit); + const char *cur_satp = satp_mode_str(satp_max, is_32bit); + + warn_report("Profile %s requires satp mode %s, " + "but satp mode %s was set", profile->name, + req_satp, cur_satp); + } + + return false; + } + + return true; +} +#endif + +static void riscv_cpu_validate_profile(RISCVCPU *cpu, + RISCVCPUProfile *profile) { CPURISCVState *env = &cpu->env; - Error *local_err = NULL; + const char *warn_msg = "Profile %s mandates disabled extension %s"; + bool send_warn = profile->user_set && profile->enabled; + bool parent_enabled, profile_impl = true; + int i; - riscv_cpu_validate_priv_spec(cpu, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return; +#ifndef CONFIG_USER_ONLY + if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) { + profile_impl = riscv_cpu_validate_profile_satp(cpu, profile, + send_warn); } +#endif + + if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED && + profile->priv_spec != env->priv_ver) { + profile_impl = false; + + if (send_warn) { + warn_report("Profile %s requires priv spec %s, " + "but priv ver %s was set", profile->name, + cpu_priv_ver_to_str(profile->priv_spec), + cpu_priv_ver_to_str(env->priv_ver)); + } + } + + for (i = 0; misa_bits[i] != 0; i++) { + uint32_t bit = misa_bits[i]; + + if (!(profile->misa_ext & bit)) { + continue; + } + + if (!riscv_has_ext(&cpu->env, bit)) { + profile_impl = false; + + if (send_warn) { + warn_report(warn_msg, profile->name, + riscv_get_misa_ext_name(bit)); + } + } + } + + for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) { + int ext_offset = profile->ext_offsets[i]; + + if (!isa_ext_is_enabled(cpu, ext_offset)) { + profile_impl = false; + + if (send_warn) { + warn_report(warn_msg, profile->name, + cpu_cfg_ext_get_name(ext_offset)); + } + } + } + + profile->enabled = profile_impl; + + if (profile->parent != NULL) { + parent_enabled = object_property_get_bool(OBJECT(cpu), + profile->parent->name, + NULL); + profile->enabled = profile->enabled && parent_enabled; + } +} + +static void riscv_cpu_validate_profiles(RISCVCPU *cpu) +{ + for (int i = 0; riscv_profiles[i] != NULL; i++) { + riscv_cpu_validate_profile(cpu, riscv_profiles[i]); + } +} + +void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp) +{ + CPURISCVState *env = &cpu->env; + Error *local_err = NULL; riscv_cpu_validate_misa_priv(env, &local_err); if (local_err != NULL) { @@ -637,6 +839,9 @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp) return; } + riscv_cpu_update_named_features(cpu); + riscv_cpu_validate_profiles(cpu); + if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) { /* * Enhanced PMP should only be available @@ -670,10 +875,9 @@ static bool riscv_cpu_is_generic(Object *cpu_obj) * -> cpu_exec_realizefn() * -> tcg_cpu_realize() (via accel_cpu_common_realize()) */ -static bool tcg_cpu_realize(CPUState *cs, Error **errp) +static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp) { RISCVCPU *cpu = RISCV_CPU(cs); - Error *local_err = NULL; if (!riscv_cpu_tcg_compatible(cpu)) { g_autofree char *name = riscv_cpu_get_name(cpu); @@ -682,14 +886,9 @@ static bool tcg_cpu_realize(CPUState *cs, Error **errp) return false; } - riscv_cpu_validate_misa_mxl(cpu, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return false; - } - #ifndef 
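riscv_cpu_validate_profile() only reports a profile as enabled when its satp mode, priv spec, MISA bits and extension list all check out, and it then ANDs in the parent profile's state so a child profile cannot claim support its base profile lacks. A reduced standalone sketch of the parent chaining; the structures are invented and the real code reads the parent state through a QOM property.

#include <stdbool.h>
#include <stdio.h>

struct profile {
    const char *name;
    bool requirements_met;    /* outcome of the per-profile checks */
    bool enabled;
    const struct profile *parent;
};

/* A profile is enabled only if it and every ancestor are satisfied. */
static void validate_profile(struct profile *p)
{
    p->enabled = p->requirements_met;
    if (p->parent != NULL) {
        p->enabled = p->enabled && p->parent->enabled;
    }
}

int main(void)
{
    struct profile base  = { "base-profile",  true, false, NULL };
    struct profile child = { "child-profile", true, false, &base };

    /* Parents are validated before their children here. */
    validate_profile(&base);
    validate_profile(&child);
    printf("%s enabled=%d\n", child.name, child.enabled);

    /* If the base profile fails, the child is pulled down with it. */
    base.requirements_met = false;
    validate_profile(&base);
    validate_profile(&child);
    printf("%s enabled=%d\n", child.name, child.enabled);
    return 0;
}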
CONFIG_USER_ONLY CPURISCVState *env = &cpu->env; + Error *local_err = NULL; CPU(cs)->tcg_cflags |= CF_PCREL; @@ -731,13 +930,15 @@ static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, target_ulong misa_bit = misa_ext_cfg->misa_bit; RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; - bool generic_cpu = riscv_cpu_is_generic(obj); + bool vendor_cpu = riscv_cpu_is_vendor(obj); bool prev_val, value; if (!visit_type_bool(v, name, &value, errp)) { return; } + cpu_misa_ext_add_user_opt(misa_bit, value); + prev_val = env->misa_ext & misa_bit; if (value == prev_val) { @@ -745,19 +946,23 @@ static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, } if (value) { - if (!generic_cpu) { + if (vendor_cpu) { g_autofree char *cpuname = riscv_cpu_get_name(cpu); error_setg(errp, "'%s' CPU does not allow enabling extensions", cpuname); return; } - env->misa_ext |= misa_bit; - env->misa_ext_mask |= misa_bit; - } else { - env->misa_ext &= ~misa_bit; - env->misa_ext_mask &= ~misa_bit; + if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) { + /* + * Note: the 'priv_spec' command line option, if present, + * will take precedence over this priv_ver bump. + */ + env->priv_ver = PRIV_VERSION_1_12_0; + } } + + riscv_cpu_write_misa_bit(cpu, misa_bit, value); } static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, @@ -791,6 +996,7 @@ static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = { MISA_CFG(RVJ, false), MISA_CFG(RVV, false), MISA_CFG(RVG, false), + MISA_CFG(RVB, false), }; /* @@ -821,7 +1027,117 @@ static void riscv_cpu_add_misa_properties(Object *cpu_obj) NULL, (void *)misa_cfg); object_property_set_description(cpu_obj, name, desc); if (use_def_vals) { - object_property_set_bool(cpu_obj, name, misa_cfg->enabled, NULL); + riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit, + misa_cfg->enabled); + } + } +} + +static void cpu_set_profile(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPUProfile *profile = opaque; + RISCVCPU *cpu = RISCV_CPU(obj); + bool value; + int i, ext_offset; + + if (riscv_cpu_is_vendor(obj)) { + error_setg(errp, "Profile %s is not available for vendor CPUs", + profile->name); + return; + } + + if (cpu->env.misa_mxl != MXL_RV64) { + error_setg(errp, "Profile %s only available for 64 bit CPUs", + profile->name); + return; + } + + if (!visit_type_bool(v, name, &value, errp)) { + return; + } + + profile->user_set = true; + profile->enabled = value; + + if (profile->parent != NULL) { + object_property_set_bool(obj, profile->parent->name, + profile->enabled, NULL); + } + + if (profile->enabled) { + cpu->env.priv_ver = profile->priv_spec; + } + +#ifndef CONFIG_USER_ONLY + if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) { + object_property_set_bool(obj, "mmu", true, NULL); + const char *satp_prop = satp_mode_str(profile->satp_mode, + riscv_cpu_is_32bit(cpu)); + object_property_set_bool(obj, satp_prop, profile->enabled, NULL); + } +#endif + + for (i = 0; misa_bits[i] != 0; i++) { + uint32_t bit = misa_bits[i]; + + if (!(profile->misa_ext & bit)) { + continue; + } + + if (bit == RVI && !profile->enabled) { + /* + * Disabling profiles will not disable the base + * ISA RV64I. 
+ */ + continue; + } + + cpu_misa_ext_add_user_opt(bit, profile->enabled); + riscv_cpu_write_misa_bit(cpu, bit, profile->enabled); + } + + for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) { + ext_offset = profile->ext_offsets[i]; + + if (profile->enabled) { + if (cpu_cfg_offset_is_named_feat(ext_offset)) { + riscv_cpu_enable_named_feat(cpu, ext_offset); + } + + cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset); + } + + cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled); + isa_ext_update_enabled(cpu, ext_offset, profile->enabled); + } +} + +static void cpu_get_profile(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPUProfile *profile = opaque; + bool value = profile->enabled; + + visit_type_bool(v, name, &value, errp); +} + +static void riscv_cpu_add_profiles(Object *cpu_obj) +{ + for (int i = 0; riscv_profiles[i] != NULL; i++) { + const RISCVCPUProfile *profile = riscv_profiles[i]; + + object_property_add(cpu_obj, profile->name, "bool", + cpu_get_profile, cpu_set_profile, + NULL, (void *)profile); + + /* + * CPUs might enable a profile right from the start. + * Enable its mandatory extensions right away in this + * case. + */ + if (profile->enabled) { + object_property_set_bool(cpu_obj, profile->name, true, NULL); } } } @@ -850,7 +1166,7 @@ static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, { const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque; RISCVCPU *cpu = RISCV_CPU(obj); - bool generic_cpu = riscv_cpu_is_generic(obj); + bool vendor_cpu = riscv_cpu_is_vendor(obj); bool prev_val, value; if (!visit_type_bool(v, name, &value, errp)) { @@ -864,9 +1180,7 @@ static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, multi_ext_cfg->name, lower); } - g_hash_table_insert(multi_ext_user_opts, - GUINT_TO_POINTER(multi_ext_cfg->offset), - (gpointer)value); + cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value); prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset); @@ -874,13 +1188,17 @@ static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, return; } - if (value && !generic_cpu) { + if (value && vendor_cpu) { g_autofree char *cpuname = riscv_cpu_get_name(cpu); error_setg(errp, "'%s' CPU does not allow enabling extensions", cpuname); return; } + if (value) { + cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset); + } + isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value); } @@ -949,9 +1267,7 @@ static void riscv_cpu_add_user_properties(Object *obj) riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts); - for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) { - qdev_property_add_static(DEVICE(obj), prop); - } + riscv_cpu_add_profiles(obj); } /* @@ -965,12 +1281,18 @@ static void riscv_init_max_cpu_extensions(Object *obj) const RISCVCPUMultiExtConfig *prop; /* Enable RVG, RVJ and RVV that are disabled by default */ - riscv_cpu_set_misa(env, env->misa_mxl, env->misa_ext | RVG | RVJ | RVV); + riscv_cpu_set_misa_ext(env, env->misa_ext | RVG | RVJ | RVV); for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { isa_ext_update_enabled(cpu, prop->offset, true); } + /* + * Some extensions can't be added without backward compatibilty concerns. + * Disable those, the user can still opt in to them on the command line. 
+ */ + cpu->cfg.ext_svade = false; + /* set vector version */ env->vext_ver = VEXT_VERSION_1_00_0; @@ -994,11 +1316,12 @@ static bool riscv_cpu_has_max_extensions(Object *cpu_obj) return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL; } -static void tcg_cpu_instance_init(CPUState *cs) +static void riscv_tcg_cpu_instance_init(CPUState *cs) { RISCVCPU *cpu = RISCV_CPU(cs); Object *obj = OBJECT(cpu); + misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); riscv_cpu_add_user_properties(obj); @@ -1007,7 +1330,7 @@ static void tcg_cpu_instance_init(CPUState *cs) } } -static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) +static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) { /* * All cpus use the same set of operations. @@ -1015,30 +1338,30 @@ static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) cc->tcg_ops = &riscv_tcg_ops; } -static void tcg_cpu_class_init(CPUClass *cc) +static void riscv_tcg_cpu_class_init(CPUClass *cc) { - cc->init_accel_cpu = tcg_cpu_init_ops; + cc->init_accel_cpu = riscv_tcg_cpu_init_ops; } -static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data) +static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data) { AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); - acc->cpu_class_init = tcg_cpu_class_init; - acc->cpu_instance_init = tcg_cpu_instance_init; - acc->cpu_target_realize = tcg_cpu_realize; + acc->cpu_class_init = riscv_tcg_cpu_class_init; + acc->cpu_instance_init = riscv_tcg_cpu_instance_init; + acc->cpu_target_realize = riscv_tcg_cpu_realize; } -static const TypeInfo tcg_cpu_accel_type_info = { +static const TypeInfo riscv_tcg_cpu_accel_type_info = { .name = ACCEL_CPU_NAME("tcg"), .parent = TYPE_ACCEL_CPU, - .class_init = tcg_cpu_accel_class_init, + .class_init = riscv_tcg_cpu_accel_class_init, .abstract = true, }; -static void tcg_cpu_accel_register_types(void) +static void riscv_tcg_cpu_accel_register_types(void) { - type_register_static(&tcg_cpu_accel_type_info); + type_register_static(&riscv_tcg_cpu_accel_type_info); } -type_init(tcg_cpu_accel_register_types); +type_init(riscv_tcg_cpu_accel_register_types); diff --git a/target/riscv/translate.c b/target/riscv/translate.c index f0be79bb160..9ff09ebdb6f 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -109,12 +109,13 @@ typedef struct DisasContext { /* PointerMasking extension */ bool pm_mask_enabled; bool pm_base_enabled; + /* Ztso */ + bool ztso; /* Use icount trigger for native debug */ bool itrigger; /* FRM is known to contain a valid value. 
*/ bool frm_valid; - /* TCG of the current insn_start */ - TCGOp *insn_start; + bool insn_start_updated; } DisasContext; static inline bool has_ext(DisasContext *ctx, uint32_t ext) @@ -205,9 +206,9 @@ static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in) static void decode_save_opc(DisasContext *ctx) { - assert(ctx->insn_start != NULL); - tcg_set_insn_start_param(ctx->insn_start, 1, ctx->opcode); - ctx->insn_start = NULL; + assert(!ctx->insn_start_updated); + ctx->insn_start_updated = true; + tcg_set_insn_start_param(ctx->base.insn_start, 1, ctx->opcode); } static void gen_pc_plus_diff(TCGv target, DisasContext *ctx, @@ -674,6 +675,12 @@ static void mark_vs_dirty(DisasContext *ctx) static inline void mark_vs_dirty(DisasContext *ctx) { } #endif +static void finalize_rvv_inst(DisasContext *ctx) +{ + mark_vs_dirty(ctx); + ctx->vstart_eq_zero = true; +} + static void gen_set_rm(DisasContext *ctx, int rm) { if (ctx->frm == rm) { @@ -1089,6 +1096,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) #include "insn_trans/trans_rvv.c.inc" #include "insn_trans/trans_rvb.c.inc" #include "insn_trans/trans_rvzicond.c.inc" +#include "insn_trans/trans_rvzacas.c.inc" #include "insn_trans/trans_rvzawrs.c.inc" #include "insn_trans/trans_rvzicbo.c.inc" #include "insn_trans/trans_rvzfa.c.inc" @@ -1167,6 +1175,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPURISCVState *env = cpu_env(cs); + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs); RISCVCPU *cpu = RISCV_CPU(cs); uint32_t tb_flags = ctx->base.tb->flags; @@ -1188,12 +1197,13 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s; ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO); ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX); - ctx->misa_mxl_max = env->misa_mxl_max; + ctx->misa_mxl_max = mcc->misa_mxl_max; ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL); ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL); ctx->cs = cs; ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED); ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED); + ctx->ztso = cpu->cfg.ext_ztso; ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER); ctx->zero = tcg_constant_tl(0); ctx->virt_inst_excp = false; @@ -1213,7 +1223,7 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) } tcg_gen_insn_start(pc_next, 0); - ctx->insn_start = tcg_last_op(); + ctx->insn_start_updated = false; } static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) @@ -1286,7 +1296,7 @@ static const TranslatorOps riscv_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c index e2d719b13b6..f7423df2264 100644 --- a/target/riscv/vcrypto_helper.c +++ b/target/riscv/vcrypto_helper.c @@ -222,6 +222,8 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key) uint32_t total_elems = vext_get_total_elems(env, desc, 4); \ uint32_t vta = vext_vta(desc); \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \ AESState round_key; \ round_key.d[0] = *((uint64_t *)vs2 + H8(i * 2 + 0)); \ @@ -246,6 +248,8 @@ static inline void xor_round_key(AESState *round_state, 
AESState *round_key) uint32_t total_elems = vext_get_total_elems(env, desc, 4); \ uint32_t vta = vext_vta(desc); \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \ AESState round_key; \ round_key.d[0] = *((uint64_t *)vs2 + H8(0)); \ @@ -305,6 +309,8 @@ void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm, uint32_t total_elems = vext_get_total_elems(env, desc, 4); uint32_t vta = vext_vta(desc); + VSTART_CHECK_EARLY_EXIT(env); + uimm &= 0b1111; if (uimm > 10 || uimm == 0) { uimm ^= 0b1000; @@ -351,6 +357,8 @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm, uint32_t total_elems = vext_get_total_elems(env, desc, 4); uint32_t vta = vext_vta(desc); + VSTART_CHECK_EARLY_EXIT(env); + uimm &= 0b1111; if (uimm > 14 || uimm < 2) { uimm ^= 0b1000; @@ -457,6 +465,8 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env, uint32_t total_elems; uint32_t vta = vext_vta(desc); + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { if (sew == MO_32) { vsha2ms_e32(((uint32_t *)vd) + i * 4, ((uint32_t *)vs1) + i * 4, @@ -572,6 +582,8 @@ void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env, uint32_t total_elems; uint32_t vta = vext_vta(desc); + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i, ((uint32_t *)vs1) + 4 * i + 2); @@ -590,6 +602,8 @@ void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env, uint32_t total_elems; uint32_t vta = vext_vta(desc); + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i, ((uint64_t *)vs1) + 4 * i + 2); @@ -608,6 +622,8 @@ void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env, uint32_t total_elems; uint32_t vta = vext_vta(desc); + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i, (((uint32_t *)vs1) + 4 * i)); @@ -626,6 +642,8 @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env, uint32_t total_elems; uint32_t vta = vext_vta(desc); + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i, (((uint64_t *)vs1) + 4 * i)); @@ -658,6 +676,8 @@ void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr, uint32_t *vs1 = vs1_vptr; uint32_t *vs2 = vs2_vptr; + VSTART_CHECK_EARLY_EXIT(env); + for (int i = env->vstart / 8; i < env->vl / 8; i++) { uint32_t w[24]; for (int j = 0; j < 8; j++) { @@ -757,6 +777,8 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm, uint32_t *vs2 = vs2_vptr; uint32_t v1[8], v2[8], v3[8]; + VSTART_CHECK_EARLY_EXIT(env); + for (int i = env->vstart / 8; i < env->vl / 8; i++) { for (int k = 0; k < 8; k++) { v2[k] = bswap32(vd[H4(i * 8 + k)]); @@ -780,6 +802,8 @@ void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr, uint32_t vta = vext_vta(desc); uint32_t total_elems = vext_get_total_elems(env, desc, 4); + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]}; uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])}; @@ -817,6 +841,8 @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, 
CPURISCVState *env, uint32_t vta = vext_vta(desc); uint32_t total_elems = vext_get_total_elems(env, desc, 4); + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])}; uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])}; @@ -853,6 +879,8 @@ void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env, uint32_t esz = sizeof(uint32_t); uint32_t total_elems = vext_get_total_elems(env, desc, esz); + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = group_start; i < group_end; ++i) { uint32_t vstart = i * egs; uint32_t vend = (i + 1) * egs; @@ -909,6 +937,8 @@ void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc) uint32_t esz = sizeof(uint32_t); uint32_t total_elems = vext_get_total_elems(env, desc, esz); + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = group_start; i < group_end; ++i) { uint32_t vstart = i * egs; uint32_t vend = (i + 1) * egs; @@ -943,6 +973,8 @@ void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc) uint32_t esz = sizeof(uint32_t); uint32_t total_elems = vext_get_total_elems(env, desc, esz); + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = group_start; i < group_end; ++i) { uint32_t vstart = i * egs; uint32_t vend = (i + 1) * egs; diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index c1c3a4d1eab..fa139040f82 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -35,19 +35,27 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, { int vlmax, vl; RISCVCPU *cpu = env_archcpu(env); - uint64_t lmul = FIELD_EX64(s2, VTYPE, VLMUL); - uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW); + uint64_t vlmul = FIELD_EX64(s2, VTYPE, VLMUL); + uint8_t vsew = FIELD_EX64(s2, VTYPE, VSEW); + uint16_t sew = 8 << vsew; uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV); int xlen = riscv_cpu_xlen(env); bool vill = (s2 >> (xlen - 1)) & 0x1; target_ulong reserved = s2 & MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT, xlen - 1 - R_VTYPE_RESERVED_SHIFT); + uint16_t vlen = cpu->cfg.vlenb << 3; + int8_t lmul; - if (lmul & 4) { - /* Fractional LMUL - check LMUL * VLEN >= SEW */ - if (lmul == 4 || - cpu->cfg.vlen >> (8 - lmul) < sew) { + if (vlmul & 4) { + /* + * Fractional LMUL, check: + * + * VLEN * LMUL >= SEW + * VLEN >> (8 - lmul) >= sew + * (vlenb << 3) >> (8 - lmul) >= sew + */ + if (vlmul == 4 || (vlen >> (8 - vlmul)) < sew) { vill = true; } } @@ -61,7 +69,9 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, return 0; } - vlmax = vext_get_vlmax(cpu, s2); + /* lmul encoded as in DisasContext::lmul */ + lmul = sextract32(FIELD_EX64(s2, VTYPE, VLMUL), 0, 3); + vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul); if (s1 <= vlmax) { vl = s1; } else { @@ -113,14 +123,15 @@ static void probe_pages(CPURISCVState *env, target_ulong addr, { target_ulong pagelen = -(addr | TARGET_PAGE_MASK); target_ulong curlen = MIN(pagelen, len); + int mmu_index = riscv_env_mmu_index(env, false); probe_access(env, adjust_addr(env, addr), curlen, access_type, - cpu_mmu_index(env, false), ra); + mmu_index, ra); if (len > curlen) { addr += curlen; curlen = len - curlen; probe_access(env, adjust_addr(env, addr), curlen, access_type, - cpu_mmu_index(env, false), ra); + mmu_index, ra); } } @@ -196,7 +207,9 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base, uint32_t esz = 1 << log2_esz; uint32_t vma = vext_vma(desc); - for (i = env->vstart; i < env->vl; i++, env->vstart++) { + 
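The rewritten vsetvl path decodes VSEW and VLMUL separately: fractional LMUL is legal only while VLEN * LMUL still holds one SEW-wide element, which the hunk writes as (vlenb << 3) >> (8 - vlmul) >= sew, and vlmax is VLEN / SEW * LMUL once the 3-bit VLMUL field is sign-extended. A standalone sketch of both computations; the helper names are invented.

#include <stdint.h>
#include <stdio.h>

/* Sign-extend the 3-bit VLMUL field: 0..3 -> LMUL 1,2,4,8; 5..7 -> 1/8,1/4,1/2. */
static int8_t decode_lmul(uint8_t vlmul_field)
{
    return (int8_t)((vlmul_field & 4) ? (int)vlmul_field - 8 : (int)vlmul_field);
}

/* vlmax = VLEN / SEW * LMUL, with SEW = 8 << vsew and LMUL = 2^lmul. */
static uint32_t compute_vlmax(uint32_t vlenb, uint8_t vsew, int8_t lmul)
{
    uint32_t vlen = vlenb << 3;

    return vlen >> (vsew + 3 - lmul);
}

/* Fractional LMUL is only legal if VLEN * LMUL still holds one SEW element. */
static int frac_lmul_ok(uint32_t vlenb, uint8_t vlmul_field, uint16_t sew)
{
    uint32_t vlen = vlenb << 3;

    if (vlmul_field == 4) {          /* reserved encoding */
        return 0;
    }
    return (vlen >> (8 - vlmul_field)) >= sew;
}

int main(void)
{
    uint32_t vlenb = 16;             /* VLEN = 128 bits */

    /* SEW=32 (vsew=2), LMUL=2 (field 1): vlmax = 128/32*2 = 8 */
    printf("vlmax = %u\n", compute_vlmax(vlenb, 2, decode_lmul(1)));

    /* SEW=64 with LMUL=1/4 (field 6) needs VLEN >= 256, so it is illegal here. */
    printf("frac ok = %d\n", frac_lmul_ok(vlenb, 6, 64));
    return 0;
}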
VSTART_CHECK_EARLY_EXIT(env); + + for (i = env->vstart; i < env->vl; env->vstart = ++i) { k = 0; while (k < nf) { if (!vm && !vext_elem_mask(v0, i)) { @@ -261,8 +274,10 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, uint32_t max_elems = vext_max_elems(desc, log2_esz); uint32_t esz = 1 << log2_esz; + VSTART_CHECK_EARLY_EXIT(env); + /* load bytes from guest memory */ - for (i = env->vstart; i < evl; i++, env->vstart++) { + for (i = env->vstart; i < evl; env->vstart = ++i) { k = 0; while (k < nf) { target_ulong addr = base + ((i * nf + k) << log2_esz); @@ -375,8 +390,10 @@ vext_ldst_index(void *vd, void *v0, target_ulong base, uint32_t esz = 1 << log2_esz; uint32_t vma = vext_vma(desc); + VSTART_CHECK_EARLY_EXIT(env); + /* load bytes from guest memory */ - for (i = env->vstart; i < env->vl; i++, env->vstart++) { + for (i = env->vstart; i < env->vl; env->vstart = ++i) { k = 0; while (k < nf) { if (!vm && !vext_elem_mask(v0, i)) { @@ -464,6 +481,9 @@ vext_ldff(void *vd, void *v0, target_ulong base, uint32_t esz = 1 << log2_esz; uint32_t vma = vext_vma(desc); target_ulong addr, offset, remain; + int mmu_index = riscv_env_mmu_index(env, false); + + VSTART_CHECK_EARLY_EXIT(env); /* probe every access */ for (i = env->vstart; i < env->vl; i++) { @@ -478,8 +498,7 @@ vext_ldff(void *vd, void *v0, target_ulong base, remain = nf << log2_esz; while (remain > 0) { offset = -(addr | TARGET_PAGE_MASK); - host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, - cpu_mmu_index(env, false)); + host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_index); if (host) { #ifdef CONFIG_USER_ONLY if (!page_check_range(addr, offset, PAGE_READ)) { @@ -558,9 +577,14 @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, { uint32_t i, k, off, pos; uint32_t nf = vext_nf(desc); - uint32_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; + uint32_t vlenb = riscv_cpu_cfg(env)->vlenb; uint32_t max_elems = vlenb >> log2_esz; + if (env->vstart >= ((vlenb * nf) >> log2_esz)) { + env->vstart = 0; + return; + } + k = env->vstart / max_elems; off = env->vstart % max_elems; @@ -866,6 +890,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ @@ -898,6 +924,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ ETYPE carry = vext_elem_mask(v0, i); \ @@ -929,10 +957,12 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { \ uint32_t vl = env->vl; \ uint32_t vm = vext_vm(desc); \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ @@ -967,10 +997,12 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ { \ uint32_t vl = env->vl; \ uint32_t vm = vext_vm(desc); \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE 
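The load/store loops above now advance vstart with env->vstart = ++i, that is, only once element i has fully completed, so a trap taken mid-instruction leaves vstart naming the first element still to be done and the restarted instruction resumes exactly there. A standalone model of that restart protocol; the fault injection via longjmp is artificial.

#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>

static jmp_buf trap;
static uint32_t vstart;                  /* architectural restart index */

static void load_element(uint32_t i, int fault_at)
{
    if ((int)i == fault_at) {
        longjmp(trap, 1);                /* pretend the access faulted */
    }
    printf("loaded element %u\n", i);
}

/* Body loop in the style of vext_ldst_us(): vstart advances only after
 * element i completed, so a fault re-runs exactly the missing elements. */
static void vector_load(uint32_t vl, int fault_at)
{
    if (vstart >= vl) {                  /* nothing left to do */
        vstart = 0;
        return;
    }
    for (uint32_t i = vstart; i < vl; vstart = ++i) {
        load_element(i, fault_at);
    }
    vstart = 0;                          /* completed: reset for next insn */
}

int main(void)
{
    if (setjmp(trap) == 0) {
        vector_load(4, 2);               /* faults at element 2 */
    } else {
        printf("trap at vstart=%u, restarting\n", vstart);
        vector_load(4, -1);              /* restart finishes elements 2 and 3 */
    }
    return 0;
}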
s2 = *((ETYPE *)vs2 + H(i)); \ ETYPE carry = !vm && vext_elem_mask(v0, i); \ @@ -1067,6 +1099,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ @@ -1114,6 +1148,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ @@ -1171,11 +1207,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ @@ -1236,11 +1274,13 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -1788,6 +1828,8 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ *((ETYPE *)vd + H(i)) = s1; \ @@ -1812,6 +1854,8 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ *((ETYPE *)vd + H(i)) = (ETYPE)s1; \ } \ @@ -1835,6 +1879,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \ *((ETYPE *)vd + H(i)) = *(vt + H(i)); \ @@ -1859,6 +1905,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ ETYPE d = (!vext_elem_mask(v0, i) ? 
s2 : \ @@ -1904,6 +1952,8 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2, uint32_t vl, uint32_t vm, int vxrm, opivv2_rm_fn *fn, uint32_t vma, uint32_t esz) { + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = env->vstart; i < vl; i++) { if (!vm && !vext_elem_mask(v0, i)) { /* set masked-off elements to 1s */ @@ -2029,6 +2079,8 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2, uint32_t vl, uint32_t vm, int vxrm, opivx2_rm_fn *fn, uint32_t vma, uint32_t esz) { + VSTART_CHECK_EARLY_EXIT(env); + for (uint32_t i = env->vstart; i < vl; i++) { if (!vm && !vext_elem_mask(v0, i)) { /* set masked-off elements to 1s */ @@ -2826,6 +2878,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ @@ -2869,6 +2923,8 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ @@ -3455,6 +3511,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ if (vl == 0) { \ return; \ } \ @@ -3971,11 +4029,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ @@ -4011,11 +4071,13 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -4209,6 +4271,8 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ *((ETYPE *)vd + H(i)) = \ @@ -4528,11 +4592,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ uint32_t desc) \ { \ uint32_t vl = env->vl; \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3;\ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ int a, b; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ a = vext_elem_mask(vs1, i); \ b = vext_elem_mask(vs2, i); \ @@ -4615,7 +4681,7 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env, { uint32_t vm = vext_vm(desc); uint32_t vl = env->vl; - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; uint32_t vta_all_1s = vext_vta_all_1s(desc); uint32_t vma = vext_vma(desc); int i; @@ -4726,6 +4792,8 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \ 
uint32_t vma = vext_vma(desc); \ int i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ @@ -4761,6 +4829,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ uint32_t vma = vext_vma(desc); \ target_ulong offset = s1, i_min, i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ i_min = MAX(env->vstart, offset); \ for (i = i_min; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -4770,6 +4840,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ } \ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \ } \ + env->vstart = 0; \ /* set tail elements to 1s */ \ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } @@ -4793,6 +4864,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ uint32_t vma = vext_vma(desc); \ target_ulong i_max, i_min, i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \ i_max = MAX(i_min, env->vstart); \ for (i = env->vstart; i < i_max; ++i) { \ @@ -4835,6 +4908,8 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ @@ -4884,6 +4959,8 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ @@ -4959,6 +5036,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ uint64_t index; \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ @@ -5002,6 +5081,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ uint64_t index = s1; \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ @@ -5063,9 +5144,22 @@ void HELPER(vmvr_v)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc) uint32_t startb = env->vstart * sewb; uint32_t i = startb; + if (startb >= maxsz) { + env->vstart = 0; + return; + } + + if (HOST_BIG_ENDIAN && i % 8 != 0) { + uint32_t j = ROUND_UP(i, 8); + memcpy((uint8_t *)vd + H1(j - 1), + (uint8_t *)vs2 + H1(j - 1), + j - i); + i = j; + } + memcpy((uint8_t *)vd + H1(i), (uint8_t *)vs2 + H1(i), - maxsz - startb); + maxsz - i); env->vstart = 0; } @@ -5083,6 +5177,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c index 9cf5c17cdea..996c21eb31c 100644 --- a/target/riscv/vector_internals.c +++ b/target/riscv/vector_internals.c @@ -16,6 +16,7 @@ * this program. If not, see . 
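The vslideup/vslidedown hunks above gain the same vstart guard and, for vslideup, now clear env->vstart after the element loop so a completed helper never leaves a stale start index behind. For reference, a standalone scalar sketch of the per-element semantics those helpers implement; masking, tail policy and vstart resumption are omitted, and the demo_* names are illustrative rather than the QEMU helpers:

#include <stdint.h>

/* vslideup.vx with vstart == 0: elements below the offset keep their old
 * destination value, the rest are shifted up from vs2. */
static void demo_vslideup(uint32_t *vd, const uint32_t *vs2,
                          uint32_t vl, uint32_t offset)
{
    for (uint32_t i = offset; i < vl; i++) {
        vd[i] = vs2[i - offset];
    }
}

/* vslidedown.vx: elements are shifted down from vs2; reads beyond VLMAX
 * produce zero. */
static void demo_vslidedown(uint32_t *vd, const uint32_t *vs2,
                            uint32_t vl, uint32_t vlmax, uint32_t offset)
{
    for (uint32_t i = 0; i < vl; i++) {
        vd[i] = ((uint64_t)i + offset < vlmax) ? vs2[i + offset] : 0;
    }
}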
*/ +#include "qemu/osdep.h" #include "vector_internals.h" /* set agnostic elements to 1s */ @@ -43,6 +44,8 @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2, uint32_t vma = vext_vma(desc); uint32_t i; + VSTART_CHECK_EARLY_EXIT(env); + for (i = env->vstart; i < vl; i++) { if (!vm && !vext_elem_mask(v0, i)) { /* set masked-off elements to 1s */ @@ -67,6 +70,8 @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2, uint32_t vma = vext_vma(desc); uint32_t i; + VSTART_CHECK_EARLY_EXIT(env); + for (i = env->vstart; i < vl; i++) { if (!vm && !vext_elem_mask(v0, i)) { /* set masked-off elements to 1s */ diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h index 8133111e5f6..9e1e15b5750 100644 --- a/target/riscv/vector_internals.h +++ b/target/riscv/vector_internals.h @@ -19,12 +19,18 @@ #ifndef TARGET_RISCV_VECTOR_INTERNALS_H #define TARGET_RISCV_VECTOR_INTERNALS_H -#include "qemu/osdep.h" #include "qemu/bitops.h" #include "cpu.h" #include "tcg/tcg-gvec-desc.h" #include "internals.h" +#define VSTART_CHECK_EARLY_EXIT(env) do { \ + if (env->vstart >= env->vl) { \ + env->vstart = 0; \ + return; \ + } \ +} while (0) + static inline uint32_t vext_nf(uint32_t desc) { return FIELD_EX32(simd_data(desc), VDATA, NF); @@ -152,6 +158,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ + VSTART_CHECK_EARLY_EXIT(env); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ diff --git a/target/rx/cpu.c b/target/rx/cpu.c index 9cc9d9d15ec..da673a595d4 100644 --- a/target/rx/cpu.c +++ b/target/rx/cpu.c @@ -64,11 +64,16 @@ static bool rx_cpu_has_work(CPUState *cs) (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR); } +static int riscv_cpu_mmu_index(CPUState *cs, bool ifunc) +{ + return 0; +} + static void rx_cpu_reset_hold(Object *obj) { - RXCPU *cpu = RX_CPU(obj); - RXCPUClass *rcc = RX_CPU_GET_CLASS(cpu); - CPURXState *env = &cpu->env; + CPUState *cs = CPU(obj); + RXCPUClass *rcc = RX_CPU_GET_CLASS(obj); + CPURXState *env = cpu_env(cs); uint32_t *resetvec; if (rcc->parent_phases.hold) { @@ -89,22 +94,6 @@ static void rx_cpu_reset_hold(Object *obj) set_flush_inputs_to_zero(1, &env->fp_status); } -static void rx_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - - qemu_printf(" %s\n", object_class_get_name(oc)); -} - -void rx_cpu_list(void) -{ - GSList *list; - list = object_class_get_list_sorted(TYPE_RX_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, rx_cpu_list_entry, NULL); - g_slist_free(list); -} - static ObjectClass *rx_cpu_class_by_name(const char *cpu_model) { ObjectClass *oc; @@ -194,7 +183,7 @@ static const struct SysemuCPUOps rx_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps rx_tcg_ops = { +static const TCGCPUOps rx_tcg_ops = { .initialize = rx_translate_init, .synchronize_from_tb = rx_cpu_synchronize_from_tb, .restore_state_to_opc = rx_restore_state_to_opc, @@ -220,6 +209,7 @@ static void rx_cpu_class_init(ObjectClass *klass, void *data) cc->class_by_name = rx_cpu_class_by_name; cc->has_work = rx_cpu_has_work; + cc->mmu_index = riscv_cpu_mmu_index; cc->dump_state = rx_cpu_dump_state; cc->set_pc = rx_cpu_set_pc; cc->get_pc = rx_cpu_get_pc; @@ -231,7 +221,6 @@ static void rx_cpu_class_init(ObjectClass *klass, void *data) cc->gdb_write_register = rx_cpu_gdb_write_register; cc->disas_set_info = rx_cpu_disas_set_info; - cc->gdb_num_core_regs = 26; cc->gdb_core_xml_file = 
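Two changes repeat throughout the RISC-V vector hunks above: every element-wise helper now opens with VSTART_CHECK_EARLY_EXIT(env), defined in vector_internals.h (the header also stops including qemu/osdep.h, which QEMU only permits as the first include of a .c file), and the mask-register element count switches from the removed cfg->vlen field to vlenb << 3, i.e. the vector length in bytes converted back to bits. A standalone sketch of what the guard buys, using illustrative demo_* names rather than the real helper macros:

#include <stdint.h>

typedef struct {
    uint32_t vstart;   /* first element still to be processed */
    uint32_t vl;       /* number of active elements */
} demo_env;

/* Same shape as VSTART_CHECK_EARLY_EXIT(env): if a helper is (re)entered
 * with nothing left to do, reset vstart and return before touching vd. */
#define DEMO_VSTART_CHECK_EARLY_EXIT(env) do {  \
    if ((env)->vstart >= (env)->vl) {           \
        (env)->vstart = 0;                      \
        return;                                 \
    }                                           \
} while (0)

static void demo_helper(demo_env *env, uint8_t *vd, const uint8_t *vs2)
{
    DEMO_VSTART_CHECK_EARLY_EXIT(env);

    for (uint32_t i = env->vstart; i < env->vl; i++) {
        vd[i] = vs2[i];        /* stand-in for the real element operation */
    }
    env->vstart = 0;           /* a completed helper always clears vstart */
    /* tail handling (vext_set_elems_1s in the real code) elided here */
}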
"rx-core.xml"; cc->tcg_ops = &rx_tcg_ops; } diff --git a/target/rx/cpu.h b/target/rx/cpu.h index e931e77e854..c53593d7aa0 100644 --- a/target/rx/cpu.h +++ b/target/rx/cpu.h @@ -139,11 +139,8 @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void rx_translate_init(void); -void rx_cpu_list(void); void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte); -#define cpu_list rx_cpu_list - #include "exec/cpu-all.h" #define CPU_INTERRUPT_SOFT CPU_INTERRUPT_TGT_INT_0 @@ -161,11 +158,6 @@ static inline void cpu_get_tb_cpu_state(CPURXState *env, vaddr *pc, *flags = FIELD_DP32(*flags, PSW, U, env->psw_u); } -static inline int cpu_mmu_index(CPURXState *env, bool ifetch) -{ - return 0; -} - static inline uint32_t rx_cpu_pack_psw(CPURXState *env) { uint32_t psw = 0; diff --git a/target/rx/gdbstub.c b/target/rx/gdbstub.c index d7e0e6689b6..f222bf003be 100644 --- a/target/rx/gdbstub.c +++ b/target/rx/gdbstub.c @@ -21,8 +21,7 @@ int rx_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - RXCPU *cpu = RX_CPU(cs); - CPURXState *env = &cpu->env; + CPURXState *env = cpu_env(cs); switch (n) { case 0 ... 15: @@ -53,8 +52,7 @@ int rx_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int rx_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - RXCPU *cpu = RX_CPU(cs); - CPURXState *env = &cpu->env; + CPURXState *env = cpu_env(cs); uint32_t psw; switch (n) { case 0 ... 15: diff --git a/target/rx/helper.c b/target/rx/helper.c index dad5fb49768..80912e8dcb4 100644 --- a/target/rx/helper.c +++ b/target/rx/helper.c @@ -45,8 +45,7 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte) #define INT_FLAGS (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR) void rx_cpu_do_interrupt(CPUState *cs) { - RXCPU *cpu = RX_CPU(cs); - CPURXState *env = &cpu->env; + CPURXState *env = cpu_env(cs); int do_irq = cs->interrupt_request & INT_FLAGS; uint32_t save_psw; @@ -122,8 +121,7 @@ void rx_cpu_do_interrupt(CPUState *cs) bool rx_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - RXCPU *cpu = RX_CPU(cs); - CPURXState *env = &cpu->env; + CPURXState *env = cpu_env(cs); int accept = 0; /* hardware interrupt (Normal) */ if ((interrupt_request & CPU_INTERRUPT_HARD) && diff --git a/target/rx/translate.c b/target/rx/translate.c index c6ce717a95c..f6e9e0ec90a 100644 --- a/target/rx/translate.c +++ b/target/rx/translate.c @@ -131,8 +131,7 @@ static int bdsp_s(DisasContext *ctx, int d) void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - RXCPU *cpu = RX_CPU(cs); - CPURXState *env = &cpu->env; + CPURXState *env = cpu_env(cs); int i; uint32_t psw; @@ -2195,9 +2194,8 @@ static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a) static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { - CPURXState *env = cpu_env(cs); DisasContext *ctx = container_of(dcbase, DisasContext, base); - ctx->env = env; + ctx->env = cpu_env(cs); ctx->tb_flags = ctx->base.tb->flags; } @@ -2266,7 +2264,7 @@ static const TranslatorOps rx_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc; diff --git a/target/s390x/cpu-dump.c b/target/s390x/cpu-dump.c index ffa9e94d848..69cc9f77464 100644 --- a/target/s390x/cpu-dump.c +++ b/target/s390x/cpu-dump.c @@ -27,8 +27,7 @@ void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; 
+ CPUS390XState *env = cpu_env(cs); int i; qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64, diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c index 6acfa1c91b2..f7194534aeb 100644 --- a/target/s390x/cpu.c +++ b/target/s390x/cpu.c @@ -142,6 +142,11 @@ static bool s390_cpu_has_work(CPUState *cs) return s390_cpu_has_int(cpu); } +static int s390x_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return s390x_env_mmu_index(cpu_env(cs), ifetch); +} + static void s390_query_cpu_fast(CPUState *cpu, CpuInfoFast *value) { S390CPU *s390_cpu = S390_CPU(cpu); @@ -319,7 +324,7 @@ static void s390_cpu_reset_full(DeviceState *dev) #ifdef CONFIG_TCG #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps s390_tcg_ops = { +static const TCGCPUOps s390_tcg_ops = { .initialize = s390x_translate_init, .restore_state_to_opc = s390x_restore_state_to_opc, @@ -352,6 +357,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data) scc->reset = s390_cpu_reset; cc->class_by_name = s390_cpu_class_by_name, cc->has_work = s390_cpu_has_work; + cc->mmu_index = s390x_cpu_mmu_index; cc->dump_state = s390_cpu_dump_state; cc->query_cpu_fast = s390_query_cpu_fast; cc->set_pc = s390_cpu_set_pc; @@ -362,7 +368,6 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data) s390_cpu_class_init_sysemu(cc); #endif cc->disas_set_info = s390_cpu_disas_set_info; - cc->gdb_num_core_regs = S390_NUM_CORE_REGS; cc->gdb_core_xml_file = "s390x-core64.xml"; cc->gdb_arch_name = s390_gdb_arch_name; diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h index fa3aac4f973..43a46a5a068 100644 --- a/target/s390x/cpu.h +++ b/target/s390x/cpu.h @@ -381,7 +381,7 @@ extern const VMStateDescription vmstate_s390_cpu; #define MMU_HOME_IDX 2 #define MMU_REAL_IDX 3 -static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch) +static inline int s390x_env_mmu_index(CPUS390XState *env, bool ifetch) { #ifdef CONFIG_USER_ONLY return MMU_USER_IDX; @@ -491,8 +491,6 @@ static inline void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc, #define S390_R13_REGNUM 15 #define S390_R14_REGNUM 16 #define S390_R15_REGNUM 17 -/* Total Core Registers. */ -#define S390_NUM_CORE_REGS 18 static inline void setcc(S390CPU *cpu, uint64_t cc) { diff --git a/target/s390x/cpu_features_def.h.inc b/target/s390x/cpu_features_def.h.inc index e68da9b8ffd..c53ac133528 100644 --- a/target/s390x/cpu_features_def.h.inc +++ b/target/s390x/cpu_features_def.h.inc @@ -142,7 +142,7 @@ DEF_FEAT(SIE_CEI, "cei", SCLP_CPU, 43, "SIE: Conditional-external-interception f /* * Features exposed via no feature bit (but e.g., instruction sensing) - * -> the feature bit number is irrelavant + * -> the feature bit number is irrelevant */ DEF_FEAT(DAT_ENH_2, "dateh2", MISC, 0, "DAT-enhancement facility 2") DEF_FEAT(CMM, "cmm", MISC, 0, "Collaborative-memory-management facility") diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c index a63d990e4e8..8ed3bb6a27b 100644 --- a/target/s390x/cpu_models.c +++ b/target/s390x/cpu_models.c @@ -500,21 +500,28 @@ static void error_prepend_missing_feat(const char *name, void *opaque) error_prepend((Error **) opaque, "%s ", name); } +static void check_compat_model_failed(Error **errp, + const S390CPUModel *max_model, + const char *msg) +{ + error_setg(errp, "%s. 
Maximum supported model in the current configuration: \'%s\'", + msg, max_model->def->name); + error_append_hint(errp, "Consider a different accelerator, try \"-accel help\"\n"); + return; +} + static void check_compatibility(const S390CPUModel *max_model, const S390CPUModel *model, Error **errp) { + ERRP_GUARD(); S390FeatBitmap missing; if (model->def->gen > max_model->def->gen) { - error_setg(errp, "Selected CPU generation is too new. Maximum " - "supported model in the configuration: \'%s\'", - max_model->def->name); + check_compat_model_failed(errp, max_model, "Selected CPU generation is too new"); return; } else if (model->def->gen == max_model->def->gen && model->def->ec_ga > max_model->def->ec_ga) { - error_setg(errp, "Selected CPU GA level is too new. Maximum " - "supported model in the configuration: \'%s\'", - max_model->def->name); + check_compat_model_failed(errp, max_model, "Selected CPU GA level is too new"); return; } @@ -536,7 +543,9 @@ static void check_compatibility(const S390CPUModel *max_model, error_setg(errp, " "); s390_feat_bitmap_to_ascii(missing, errp, error_prepend_missing_feat); error_prepend(errp, "Some features requested in the CPU model are not " - "available in the configuration: "); + "available in the current configuration: "); + error_append_hint(errp, + "Consider a different accelerator, QEMU, or kernel version\n"); } S390CPUModel *get_max_cpu_model(Error **errp) @@ -566,6 +575,7 @@ S390CPUModel *get_max_cpu_model(Error **errp) void s390_realize_cpu_model(CPUState *cs, Error **errp) { + ERRP_GUARD(); Error *err = NULL; S390CPUClass *xcc = S390_CPU_GET_CLASS(cs); S390CPU *cpu = S390_CPU(cs); diff --git a/target/s390x/cpu_models_sysemu.c b/target/s390x/cpu_models_sysemu.c index 63981bf36b6..2d99218069c 100644 --- a/target/s390x/cpu_models_sysemu.c +++ b/target/s390x/cpu_models_sysemu.c @@ -17,7 +17,6 @@ #include "sysemu/kvm.h" #include "qapi/error.h" #include "qapi/visitor.h" -#include "qapi/qmp/qerror.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qmp/qdict.h" #include "qapi/qapi-commands-machine-target.h" @@ -98,24 +97,16 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) } static void cpu_model_from_info(S390CPUModel *model, const CpuModelInfo *info, - Error **errp) + const char *info_arg_name, Error **errp) { Error *err = NULL; - const QDict *qdict = NULL; + const QDict *qdict; const QDictEntry *e; Visitor *visitor; ObjectClass *oc; S390CPU *cpu; Object *obj; - if (info->props) { - qdict = qobject_to(QDict, info->props); - if (!qdict) { - error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict"); - return; - } - } - oc = cpu_class_by_name(TYPE_S390_CPU, info->name); if (!oc) { error_setg(errp, "The CPU definition \'%s\' is unknown.", info->name); @@ -135,13 +126,17 @@ static void cpu_model_from_info(S390CPUModel *model, const CpuModelInfo *info, return; } - if (qdict) { + if (info->props) { + g_autofree const char *props_name = g_strdup_printf("%s.props", + info_arg_name); + visitor = qobject_input_visitor_new(info->props); - if (!visit_start_struct(visitor, NULL, NULL, 0, errp)) { + if (!visit_start_struct(visitor, props_name, NULL, 0, errp)) { visit_free(visitor); object_unref(obj); return; } + qdict = qobject_to(QDict, info->props); for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) { if (!object_property_set(obj, e->key, visitor, &err)) { break; @@ -223,7 +218,7 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, bool delta_changes = false; /* convert it to our internal 
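The cpu_models.c hunks above funnel the two "model too new" failures through one helper and attach error_append_hint() suggestions; ERRP_GUARD() is what makes appending a hint to the caller's errp safe even when the caller passed &error_fatal. A sketch of the pattern as used above, built on the QEMU error API so it only compiles inside the tree; the function name is illustrative:

static void demo_reject_model(Error **errp, const char *max_name)
{
    ERRP_GUARD();

    error_setg(errp, "Selected CPU generation is too new. Maximum supported "
               "model in the current configuration: '%s'", max_name);
    error_append_hint(errp, "Consider a different accelerator, "
                      "try \"-accel help\"\n");
}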
representation */ - cpu_model_from_info(&s390_model, model, &err); + cpu_model_from_info(&s390_model, model, "model", &err); if (err) { error_propagate(errp, err); return NULL; @@ -261,12 +256,12 @@ CpuModelCompareInfo *qmp_query_cpu_model_comparison(CpuModelInfo *infoa, S390CPUModel modela, modelb; /* convert both models to our internal representation */ - cpu_model_from_info(&modela, infoa, &err); + cpu_model_from_info(&modela, infoa, "modela", &err); if (err) { error_propagate(errp, err); return NULL; } - cpu_model_from_info(&modelb, infob, &err); + cpu_model_from_info(&modelb, infob, "modelb", &err); if (err) { error_propagate(errp, err); return NULL; @@ -338,13 +333,13 @@ CpuModelBaselineInfo *qmp_query_cpu_model_baseline(CpuModelInfo *infoa, uint8_t max_gen; /* convert both models to our internal representation */ - cpu_model_from_info(&modela, infoa, &err); + cpu_model_from_info(&modela, infoa, "modela", &err); if (err) { error_propagate(errp, err); return NULL; } - cpu_model_from_info(&modelb, infob, &err); + cpu_model_from_info(&modelb, infob, "modelb", &err); if (err) { error_propagate(errp, err); return NULL; diff --git a/target/s390x/gdbstub.c b/target/s390x/gdbstub.c index 6fbfd41bc86..a9f4eb92adf 100644 --- a/target/s390x/gdbstub.c +++ b/target/s390x/gdbstub.c @@ -30,8 +30,7 @@ int s390_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); switch (n) { case S390_PSWM_REGNUM: @@ -46,8 +45,7 @@ int s390_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); target_ulong tmpl = ldtul_p(mem_buf); switch (n) { @@ -69,11 +67,12 @@ int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) /* the values represent the positions in s390-acr.xml */ #define S390_A0_REGNUM 0 #define S390_A15_REGNUM 15 -/* total number of registers in s390-acr.xml */ -#define S390_NUM_AC_REGS 16 -static int cpu_read_ac_reg(CPUS390XState *env, GByteArray *buf, int n) +static int cpu_read_ac_reg(CPUState *cs, GByteArray *buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + switch (n) { case S390_A0_REGNUM ... S390_A15_REGNUM: return gdb_get_reg32(buf, env->aregs[n]); @@ -82,8 +81,11 @@ static int cpu_read_ac_reg(CPUS390XState *env, GByteArray *buf, int n) } } -static int cpu_write_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n) +static int cpu_write_ac_reg(CPUState *cs, uint8_t *mem_buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + switch (n) { case S390_A0_REGNUM ... 
S390_A15_REGNUM: env->aregs[n] = ldl_p(mem_buf); @@ -98,11 +100,12 @@ static int cpu_write_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n) #define S390_FPC_REGNUM 0 #define S390_F0_REGNUM 1 #define S390_F15_REGNUM 16 -/* total number of registers in s390-fpr.xml */ -#define S390_NUM_FP_REGS 17 -static int cpu_read_fp_reg(CPUS390XState *env, GByteArray *buf, int n) +static int cpu_read_fp_reg(CPUState *cs, GByteArray *buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + switch (n) { case S390_FPC_REGNUM: return gdb_get_reg32(buf, env->fpc); @@ -113,8 +116,11 @@ static int cpu_read_fp_reg(CPUS390XState *env, GByteArray *buf, int n) } } -static int cpu_write_fp_reg(CPUS390XState *env, uint8_t *mem_buf, int n) +static int cpu_write_fp_reg(CPUState *cs, uint8_t *mem_buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + switch (n) { case S390_FPC_REGNUM: env->fpc = ldl_p(mem_buf); @@ -132,11 +138,11 @@ static int cpu_write_fp_reg(CPUS390XState *env, uint8_t *mem_buf, int n) #define S390_V15L_REGNUM 15 #define S390_V16_REGNUM 16 #define S390_V31_REGNUM 31 -/* total number of registers in s390-vx.xml */ -#define S390_NUM_VREGS 32 -static int cpu_read_vreg(CPUS390XState *env, GByteArray *buf, int n) +static int cpu_read_vreg(CPUState *cs, GByteArray *buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; int ret; switch (n) { @@ -154,8 +160,11 @@ static int cpu_read_vreg(CPUS390XState *env, GByteArray *buf, int n) return ret; } -static int cpu_write_vreg(CPUS390XState *env, uint8_t *mem_buf, int n) +static int cpu_write_vreg(CPUState *cs, uint8_t *mem_buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + switch (n) { case S390_V0L_REGNUM ... S390_V15L_REGNUM: env->vregs[n][1] = ldtul_p(mem_buf + 8); @@ -172,12 +181,13 @@ static int cpu_write_vreg(CPUS390XState *env, uint8_t *mem_buf, int n) /* the values represent the positions in s390-cr.xml */ #define S390_C0_REGNUM 0 #define S390_C15_REGNUM 15 -/* total number of registers in s390-cr.xml */ -#define S390_NUM_C_REGS 16 #ifndef CONFIG_USER_ONLY -static int cpu_read_c_reg(CPUS390XState *env, GByteArray *buf, int n) +static int cpu_read_c_reg(CPUState *cs, GByteArray *buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + switch (n) { case S390_C0_REGNUM ... S390_C15_REGNUM: return gdb_get_regl(buf, env->cregs[n]); @@ -186,8 +196,11 @@ static int cpu_read_c_reg(CPUS390XState *env, GByteArray *buf, int n) } } -static int cpu_write_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n) +static int cpu_write_c_reg(CPUState *cs, uint8_t *mem_buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + switch (n) { case S390_C0_REGNUM ... 
S390_C15_REGNUM: env->cregs[n] = ldtul_p(mem_buf); @@ -206,11 +219,12 @@ static int cpu_write_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n) #define S390_VIRT_CPUTM_REGNUM 1 #define S390_VIRT_BEA_REGNUM 2 #define S390_VIRT_PREFIX_REGNUM 3 -/* total number of registers in s390-virt.xml */ -#define S390_NUM_VIRT_REGS 4 -static int cpu_read_virt_reg(CPUS390XState *env, GByteArray *mem_buf, int n) +static int cpu_read_virt_reg(CPUState *cs, GByteArray *mem_buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + switch (n) { case S390_VIRT_CKC_REGNUM: return gdb_get_regl(mem_buf, env->ckc); @@ -225,24 +239,27 @@ static int cpu_read_virt_reg(CPUS390XState *env, GByteArray *mem_buf, int n) } } -static int cpu_write_virt_reg(CPUS390XState *env, uint8_t *mem_buf, int n) +static int cpu_write_virt_reg(CPUState *cs, uint8_t *mem_buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + switch (n) { case S390_VIRT_CKC_REGNUM: env->ckc = ldtul_p(mem_buf); - cpu_synchronize_post_init(env_cpu(env)); + cpu_synchronize_post_init(cs); return 8; case S390_VIRT_CPUTM_REGNUM: env->cputm = ldtul_p(mem_buf); - cpu_synchronize_post_init(env_cpu(env)); + cpu_synchronize_post_init(cs); return 8; case S390_VIRT_BEA_REGNUM: env->gbea = ldtul_p(mem_buf); - cpu_synchronize_post_init(env_cpu(env)); + cpu_synchronize_post_init(cs); return 8; case S390_VIRT_PREFIX_REGNUM: env->psa = ldtul_p(mem_buf); - cpu_synchronize_post_init(env_cpu(env)); + cpu_synchronize_post_init(cs); return 8; default: return 0; @@ -254,11 +271,12 @@ static int cpu_write_virt_reg(CPUS390XState *env, uint8_t *mem_buf, int n) #define S390_VIRT_KVM_PFT_REGNUM 1 #define S390_VIRT_KVM_PFS_REGNUM 2 #define S390_VIRT_KVM_PFC_REGNUM 3 -/* total number of registers in s390-virt-kvm.xml */ -#define S390_NUM_VIRT_KVM_REGS 4 -static int cpu_read_virt_kvm_reg(CPUS390XState *env, GByteArray *mem_buf, int n) +static int cpu_read_virt_kvm_reg(CPUState *cs, GByteArray *mem_buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + switch (n) { case S390_VIRT_KVM_PP_REGNUM: return gdb_get_regl(mem_buf, env->pp); @@ -273,8 +291,11 @@ static int cpu_read_virt_kvm_reg(CPUS390XState *env, GByteArray *mem_buf, int n) } } -static int cpu_write_virt_kvm_reg(CPUS390XState *env, uint8_t *mem_buf, int n) +static int cpu_write_virt_kvm_reg(CPUState *cs, uint8_t *mem_buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + switch (n) { case S390_VIRT_KVM_PP_REGNUM: env->pp = ldtul_p(mem_buf); @@ -303,16 +324,20 @@ static int cpu_write_virt_kvm_reg(CPUS390XState *env, uint8_t *mem_buf, int n) #define S390_GS_GSD_REGNUM 1 #define S390_GS_GSSM_REGNUM 2 #define S390_GS_GSEPLA_REGNUM 3 -/* total number of registers in s390-gs.xml */ -#define S390_NUM_GS_REGS 4 -static int cpu_read_gs_reg(CPUS390XState *env, GByteArray *buf, int n) +static int cpu_read_gs_reg(CPUState *cs, GByteArray *buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + return gdb_get_regl(buf, env->gscb[n]); } -static int cpu_write_gs_reg(CPUS390XState *env, uint8_t *mem_buf, int n) +static int cpu_write_gs_reg(CPUState *cs, uint8_t *mem_buf, int n) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + env->gscb[n] = ldtul_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 8; @@ -322,33 +347,33 @@ void s390_cpu_gdb_init(CPUState *cs) { gdb_register_coprocessor(cs, cpu_read_ac_reg, cpu_write_ac_reg, - S390_NUM_AC_REGS, "s390-acr.xml", 0); + 
gdb_find_static_feature("s390-acr.xml"), 0); gdb_register_coprocessor(cs, cpu_read_fp_reg, cpu_write_fp_reg, - S390_NUM_FP_REGS, "s390-fpr.xml", 0); + gdb_find_static_feature("s390-fpr.xml"), 0); gdb_register_coprocessor(cs, cpu_read_vreg, cpu_write_vreg, - S390_NUM_VREGS, "s390-vx.xml", 0); + gdb_find_static_feature("s390-vx.xml"), 0); gdb_register_coprocessor(cs, cpu_read_gs_reg, cpu_write_gs_reg, - S390_NUM_GS_REGS, "s390-gs.xml", 0); + gdb_find_static_feature("s390-gs.xml"), 0); #ifndef CONFIG_USER_ONLY gdb_register_coprocessor(cs, cpu_read_c_reg, cpu_write_c_reg, - S390_NUM_C_REGS, "s390-cr.xml", 0); + gdb_find_static_feature("s390-cr.xml"), 0); gdb_register_coprocessor(cs, cpu_read_virt_reg, cpu_write_virt_reg, - S390_NUM_VIRT_REGS, "s390-virt.xml", 0); + gdb_find_static_feature("s390-virt.xml"), 0); if (kvm_enabled()) { gdb_register_coprocessor(cs, cpu_read_virt_kvm_reg, cpu_write_virt_kvm_reg, - S390_NUM_VIRT_KVM_REGS, "s390-virt-kvm.xml", + gdb_find_static_feature("s390-virt-kvm.xml"), 0); } #endif diff --git a/target/s390x/helper.c b/target/s390x/helper.c index d76c06381bb..00d5d403f31 100644 --- a/target/s390x/helper.c +++ b/target/s390x/helper.c @@ -139,8 +139,7 @@ void do_restart_interrupt(CPUS390XState *env) void s390_cpu_recompute_watchpoints(CPUState *cs) { const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS; - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); /* We are called when the watchpoints have changed. First remove them all. */ diff --git a/target/s390x/helper.h b/target/s390x/helper.h index 05102578fc9..cc1c20e9e3f 100644 --- a/target/s390x/helper.h +++ b/target/s390x/helper.h @@ -88,7 +88,10 @@ DEF_HELPER_FLAGS_3(tcxb, TCG_CALL_NO_RWG_SE, i32, env, i128, i64) DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_2(sqxb, TCG_CALL_NO_WG, i128, env, i128) +DEF_HELPER_3(cvb, void, env, i32, i64) +DEF_HELPER_FLAGS_2(cvbg, TCG_CALL_NO_WG, i64, env, i128) DEF_HELPER_FLAGS_1(cvd, TCG_CALL_NO_RWG_SE, i64, s32) +DEF_HELPER_FLAGS_1(cvdg, TCG_CALL_NO_RWG_SE, i128, s64) DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_WG, void, env, i32, i64, i64) DEF_HELPER_FLAGS_4(pka, TCG_CALL_NO_WG, void, env, i64, i64, i32) DEF_HELPER_FLAGS_4(pku, TCG_CALL_NO_WG, void, env, i64, i64, i32) diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c index 33ab3551f47..4ce809c5d46 100644 --- a/target/s390x/kvm/kvm.c +++ b/target/s390x/kvm/kvm.c @@ -474,8 +474,7 @@ static int can_sync_regs(CPUState *cs, int regs) int kvm_arch_put_registers(CPUState *cs, int level) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); struct kvm_fpu fpu = {}; int r; int i; @@ -601,8 +600,7 @@ int kvm_arch_put_registers(CPUState *cs, int level) int kvm_arch_get_registers(CPUState *cs) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); struct kvm_fpu fpu; int i, r; @@ -1923,7 +1921,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) S390CPU *cpu = S390_CPU(cs); int ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); kvm_cpu_synchronize_state(cs); @@ -1947,7 +1945,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason); break; } - qemu_mutex_unlock_iothread(); + bql_unlock(); if (ret == 0) { ret = EXCP_INTERRUPT; diff --git a/target/s390x/kvm/pv.c b/target/s390x/kvm/pv.c index 6a69be7e5c5..7ca7faec73e 
100644 --- a/target/s390x/kvm/pv.c +++ b/target/s390x/kvm/pv.c @@ -29,7 +29,8 @@ static bool info_valid; static struct kvm_s390_pv_info_vm info_vm; static struct kvm_s390_pv_info_dump info_dump; -static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data) +static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data, + int *pvrc) { struct kvm_pv_cmd pv_cmd = { .cmd = cmd, @@ -46,6 +47,9 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data) "IOCTL rc: %d", cmd, cmdname, pv_cmd.rc, pv_cmd.rrc, rc); } + if (pvrc) { + *pvrc = pv_cmd.rc; + } return rc; } @@ -53,12 +57,13 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data) * This macro lets us pass the command as a string to the function so * we can print it on an error. */ -#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data) +#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data, NULL) +#define s390_pv_cmd_pvrc(cmd, data, pvrc) __s390_pv_cmd(cmd, #cmd, data, pvrc) #define s390_pv_cmd_exit(cmd, data) \ { \ int rc; \ \ - rc = __s390_pv_cmd(cmd, #cmd, data);\ + rc = __s390_pv_cmd(cmd, #cmd, data, NULL); \ if (rc) { \ exit(1); \ } \ @@ -142,14 +147,24 @@ bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) return true; } -int s390_pv_set_sec_parms(uint64_t origin, uint64_t length) +int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp) { + int ret, pvrc; struct kvm_s390_pv_sec_parm args = { .origin = origin, .length = length, }; - return s390_pv_cmd(KVM_PV_SET_SEC_PARMS, &args); + ret = s390_pv_cmd_pvrc(KVM_PV_SET_SEC_PARMS, &args, &pvrc); + if (ret) { + error_setg(errp, "Failed to set secure execution parameters"); + if (pvrc == 0x108) { + error_append_hint(errp, "Please check whether the image is " + "correctly encrypted for this host\n"); + } + } + + return ret; } /* diff --git a/target/s390x/kvm/pv.h b/target/s390x/kvm/pv.h index 7b935e2246c..5877d28ff10 100644 --- a/target/s390x/kvm/pv.h +++ b/target/s390x/kvm/pv.h @@ -42,7 +42,7 @@ int s390_pv_query_info(void); int s390_pv_vm_enable(void); void s390_pv_vm_disable(void); bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms); -int s390_pv_set_sec_parms(uint64_t origin, uint64_t length); +int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp); int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak); void s390_pv_prep_reset(void); int s390_pv_verify(void); @@ -62,7 +62,8 @@ static inline int s390_pv_query_info(void) { return 0; } static inline int s390_pv_vm_enable(void) { return 0; } static inline void s390_pv_vm_disable(void) {} static inline bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) { return false; } -static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length) { return 0; } +static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, + Error **errp) { return 0; } static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) { return 0; } static inline void s390_pv_prep_reset(void) {} static inline int s390_pv_verify(void) { return 0; } diff --git a/target/s390x/machine.c b/target/s390x/machine.c index 37a076858c7..a125ebcc2fa 100644 --- a/target/s390x/machine.c +++ b/target/s390x/machine.c @@ -66,7 +66,7 @@ static const VMStateDescription vmstate_fpu = { .version_id = 1, .minimum_version_id = 1, .needed = fpu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.vregs[0][0], S390CPU), VMSTATE_UINT64(env.vregs[1][0], S390CPU), 
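The pv.c hunks above thread the Ultravisor's own response code out of __s390_pv_cmd() so that s390_pv_set_sec_parms() can turn rc 0x108 into a targeted hint about the guest image not being encrypted for this host. A standalone sketch of that out-parameter pattern; the command runner and its return values are stand-ins, not the KVM ioctl:

#include <stdio.h>

/* Returns an ioctl-style status and, optionally, the firmware's own rc. */
static int demo_run_pv_cmd(int cmd, int *pvrc)
{
    int firmware_rc = 0x108;        /* pretend the UV rejected the image */

    if (pvrc) {
        *pvrc = firmware_rc;
    }
    return firmware_rc ? -1 : 0;
}

int main(void)
{
    int pvrc = 0;

    if (demo_run_pv_cmd(0, &pvrc)) {
        fprintf(stderr, "Failed to set secure execution parameters\n");
        if (pvrc == 0x108) {
            fprintf(stderr, "hint: check whether the image is encrypted "
                            "for this host\n");
        }
        return 1;
    }
    return 0;
}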
VMSTATE_UINT64(env.vregs[2][0], S390CPU), @@ -98,7 +98,7 @@ static const VMStateDescription vmstate_vregs = { .version_id = 1, .minimum_version_id = 1, .needed = vregs_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* vregs[0][0] -> vregs[15][0] and fregs are overlays */ VMSTATE_UINT64(env.vregs[16][0], S390CPU), VMSTATE_UINT64(env.vregs[17][0], S390CPU), @@ -157,12 +157,12 @@ static bool riccb_needed(void *opaque) return s390_has_feat(S390_FEAT_RUNTIME_INSTRUMENTATION); } -const VMStateDescription vmstate_riccb = { +static const VMStateDescription vmstate_riccb = { .name = "cpu/riccb", .version_id = 1, .minimum_version_id = 1, .needed = riccb_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(env.riccb, S390CPU, 64), VMSTATE_END_OF_LIST() } @@ -174,12 +174,12 @@ static bool exval_needed(void *opaque) return cpu->env.ex_value != 0; } -const VMStateDescription vmstate_exval = { +static const VMStateDescription vmstate_exval = { .name = "cpu/exval", .version_id = 1, .minimum_version_id = 1, .needed = exval_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.ex_value, S390CPU), VMSTATE_END_OF_LIST() } @@ -190,12 +190,12 @@ static bool gscb_needed(void *opaque) return s390_has_feat(S390_FEAT_GUARDED_STORAGE); } -const VMStateDescription vmstate_gscb = { +static const VMStateDescription vmstate_gscb = { .name = "cpu/gscb", .version_id = 1, .minimum_version_id = 1, .needed = gscb_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.gscb, S390CPU, 4), VMSTATE_END_OF_LIST() } @@ -206,12 +206,12 @@ static bool bpbc_needed(void *opaque) return s390_has_feat(S390_FEAT_BPB); } -const VMStateDescription vmstate_bpbc = { +static const VMStateDescription vmstate_bpbc = { .name = "cpu/bpbc", .version_id = 1, .minimum_version_id = 1, .needed = bpbc_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(env.bpbc, S390CPU), VMSTATE_END_OF_LIST() } @@ -222,12 +222,12 @@ static bool etoken_needed(void *opaque) return s390_has_feat(S390_FEAT_ETOKEN); } -const VMStateDescription vmstate_etoken = { +static const VMStateDescription vmstate_etoken = { .name = "cpu/etoken", .version_id = 1, .minimum_version_id = 1, .needed = etoken_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.etoken, S390CPU), VMSTATE_UINT64(env.etoken_extension, S390CPU), VMSTATE_END_OF_LIST() @@ -239,12 +239,12 @@ static bool diag318_needed(void *opaque) return s390_has_feat(S390_FEAT_DIAG_318); } -const VMStateDescription vmstate_diag318 = { +static const VMStateDescription vmstate_diag318 = { .name = "cpu/diag318", .version_id = 1, .minimum_version_id = 1, .needed = diag318_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.diag318_info, S390CPU), VMSTATE_END_OF_LIST() } @@ -256,7 +256,7 @@ const VMStateDescription vmstate_s390_cpu = { .pre_save = cpu_pre_save, .version_id = 4, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.regs, S390CPU, 16), VMSTATE_UINT64(env.psw.mask, S390CPU), VMSTATE_UINT64(env.psw.addr, S390CPU), @@ -278,7 +278,7 @@ const VMStateDescription vmstate_s390_cpu = { irqstate_saved_size), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fpu, &vmstate_vregs, &vmstate_riccb, diff 
--git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c index b875bf14e56..f1c33f7967d 100644 --- a/target/s390x/tcg/excp_helper.c +++ b/target/s390x/tcg/excp_helper.c @@ -90,10 +90,7 @@ void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc) static G_NORETURN void do_unaligned_access(CPUState *cs, uintptr_t retaddr) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; - - tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr); + tcg_s390_program_interrupt(cpu_env(cs), PGM_SPECIFICATION, retaddr); } #if defined(CONFIG_USER_ONLY) @@ -146,8 +143,7 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); target_ulong vaddr, raddr; uint64_t asc, tec; int prot, excp; @@ -600,8 +596,7 @@ bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request) void s390x_cpu_debug_excp_handler(CPUState *cs) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); CPUWatchpoint *wp_hit = cs->watchpoint_hit; if (wp_hit && wp_hit->flags & BP_CPU) { diff --git a/target/s390x/tcg/insn-data.h.inc b/target/s390x/tcg/insn-data.h.inc index 2f07f39d9cb..e7d61cdec28 100644 --- a/target/s390x/tcg/insn-data.h.inc +++ b/target/s390x/tcg/insn-data.h.inc @@ -293,9 +293,14 @@ D(0xec73, CLFIT, RIE_a, GIE, r1_32u, i2_16u, 0, 0, ct, 0, 1) D(0xec71, CLGIT, RIE_a, GIE, r1_o, i2_16u, 0, 0, ct, 0, 1) +/* CONVERT TO BINARY */ + C(0x4f00, CVB, RX_a, Z, la2, 0, 0, 0, cvb, 0) + C(0xe306, CVBY, RXY_a, LD, la2, 0, 0, 0, cvb, 0) + C(0xe30e, CVBG, RXY_a, Z, la2, 0, r1, 0, cvbg, 0) /* CONVERT TO DECIMAL */ C(0x4e00, CVD, RX_a, Z, r1_o, a2, 0, 0, cvd, 0) C(0xe326, CVDY, RXY_a, LD, r1_o, a2, 0, 0, cvd, 0) + C(0xe32e, CVDG, RXY_a, Z, r1_o, a2, 0, 0, cvdg, 0) /* CONVERT TO FIXED */ F(0xb398, CFEBR, RRF_e, Z, 0, e2, new, r1_32, cfeb, 0, IF_BFP) F(0xb399, CFDBR, RRF_e, Z, 0, f2, new, r1_32, cfdb, 0, IF_BFP) diff --git a/target/s390x/tcg/int_helper.c b/target/s390x/tcg/int_helper.c index eb8e6dd1b57..2af970f2c8b 100644 --- a/target/s390x/tcg/int_helper.c +++ b/target/s390x/tcg/int_helper.c @@ -25,6 +25,7 @@ #include "exec/exec-all.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" /* #define DEBUG_HELPER */ #ifdef DEBUG_HELPER @@ -98,6 +99,81 @@ Int128 HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t b) tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); } +void HELPER(cvb)(CPUS390XState *env, uint32_t r1, uint64_t dec) +{ + int64_t pow10 = 1, bin = 0; + int digit, sign; + + sign = dec & 0xf; + if (sign < 0xa) { + tcg_s390_data_exception(env, 0, GETPC()); + } + dec >>= 4; + + while (dec) { + digit = dec & 0xf; + if (digit > 0x9) { + tcg_s390_data_exception(env, 0, GETPC()); + } + dec >>= 4; + bin += digit * pow10; + pow10 *= 10; + } + + if (sign == 0xb || sign == 0xd) { + bin = -bin; + } + + /* R1 is updated even on fixed-point-divide exception. 
*/ + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (uint32_t)bin; + if (bin != (int32_t)bin) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } +} + +uint64_t HELPER(cvbg)(CPUS390XState *env, Int128 dec) +{ + uint64_t dec64[] = {int128_getlo(dec), int128_gethi(dec)}; + int64_t bin = 0, pow10, tmp; + int digit, i, sign; + + sign = dec64[0] & 0xf; + if (sign < 0xa) { + tcg_s390_data_exception(env, 0, GETPC()); + } + dec64[0] >>= 4; + pow10 = (sign == 0xb || sign == 0xd) ? -1 : 1; + + for (i = 1; i < 20; i++) { + digit = dec64[i >> 4] & 0xf; + if (digit > 0x9) { + tcg_s390_data_exception(env, 0, GETPC()); + } + dec64[i >> 4] >>= 4; + /* + * Prepend the next digit and check for overflow. The multiplication + * cannot overflow, since, conveniently, the int64_t limits are + * approximately +-9.2E+18. If bin is zero, the addition cannot + * overflow. Otherwise bin is known to have the same sign as the rhs + * addend, in which case overflow happens if and only if the result + * has a different sign. + */ + tmp = bin + pow10 * digit; + if (bin && ((tmp ^ bin) < 0)) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } + bin = tmp; + pow10 *= 10; + } + + g_assert(!dec64[0]); + if (dec64[1]) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } + + return bin; +} + uint64_t HELPER(cvd)(int32_t reg) { /* positive 0 */ @@ -118,6 +194,27 @@ uint64_t HELPER(cvd)(int32_t reg) return dec; } +Int128 HELPER(cvdg)(int64_t reg) +{ + /* positive 0 */ + Int128 dec = int128_make64(0x0c); + Int128 bin = int128_makes64(reg); + Int128 base = int128_make64(10); + int shift; + + if (!int128_nonneg(bin)) { + bin = int128_neg(bin); + dec = int128_make64(0x0d); + } + + for (shift = 4; (shift < 128) && int128_nz(bin); shift += 4) { + dec = int128_or(dec, int128_lshift(int128_remu(bin, base), shift)); + bin = int128_divu(bin, base); + } + + return dec; +} + uint64_t HELPER(popcnt)(uint64_t val) { /* Note that we don't fold past bytes. 
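The new int_helper.c code above implements CONVERT TO BINARY (CVB/CVBY/CVBG) and the 128-bit CONVERT TO DECIMAL (CVDG): the packed-decimal operand carries BCD digits in its upper nibbles and the sign in the lowest nibble (0xb or 0xd meaning negative), CVBG additionally checks each partial sum for overflow, and CVDG builds its result with Int128 arithmetic. A standalone model of the 64-bit round trip, without the data-exception and fixed-point-divide checks the real helpers raise; the demo_* names are illustrative:

#include <stdint.h>
#include <stdio.h>

static int64_t demo_cvb(uint64_t dec)
{
    int sign = dec & 0xf;              /* lowest nibble is the sign code */
    int64_t bin = 0, pow10 = 1;

    for (dec >>= 4; dec; dec >>= 4) {  /* digits, least significant first */
        bin += (int64_t)(dec & 0xf) * pow10;
        pow10 *= 10;
    }
    return (sign == 0xb || sign == 0xd) ? -bin : bin;
}

static uint64_t demo_cvd(int32_t reg)
{
    uint64_t dec = 0x0c;               /* "positive" sign nibble */
    int64_t bin = reg;
    int shift;

    if (bin < 0) {
        bin = -bin;
        dec = 0x0d;                    /* "negative" sign nibble */
    }
    for (shift = 4; shift < 64 && bin; shift += 4) {
        dec |= (uint64_t)(bin % 10) << shift;
        bin /= 10;
    }
    return dec;
}

int main(void)
{
    printf("%llx\n", (unsigned long long)demo_cvd(-1234)); /* prints 1234d */
    printf("%lld\n", (long long)demo_cvb(0x1234d));        /* prints -1234 */
    return 0;
}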
*/ diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c index 84103251b97..557831def40 100644 --- a/target/s390x/tcg/mem_helper.c +++ b/target/s390x/tcg/mem_helper.c @@ -358,7 +358,7 @@ static int mmu_idx_from_as(uint8_t as) static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src, uintptr_t ra) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); S390Access srca1, srca2, desta; uint32_t i; uint8_t c = 0; @@ -392,7 +392,7 @@ uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest, static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src, uintptr_t ra) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); S390Access srca1, srca2, desta; uint32_t i; uint8_t c = 0; @@ -433,7 +433,7 @@ uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest, static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src, uintptr_t ra) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); S390Access srca1, srca2, desta; uint32_t i; uint8_t c = 0; @@ -467,7 +467,7 @@ uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest, static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src, uintptr_t ra) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); S390Access srca, desta; uint32_t i; @@ -508,7 +508,7 @@ void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) /* move right to left */ void HELPER(mvcrl)(CPUS390XState *env, uint64_t l, uint64_t dest, uint64_t src) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); const uint64_t ra = GETPC(); S390Access srca, desta; int32_t i; @@ -529,7 +529,7 @@ void HELPER(mvcrl)(CPUS390XState *env, uint64_t l, uint64_t dest, uint64_t src) /* move inverse */ void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); S390Access srca, desta; uintptr_t ra = GETPC(); int i; @@ -550,7 +550,7 @@ void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) /* move numerics */ void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); S390Access srca1, srca2, desta; uintptr_t ra = GETPC(); int i; @@ -572,7 +572,7 @@ void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) /* move with offset */ void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); /* MVO always processes one more byte than specified - maximum is 16 */ const int len_dest = (l >> 4) + 1; const int len_src = (l & 0xf) + 1; @@ -606,7 +606,7 @@ void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) /* move zones */ void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); S390Access srca1, srca2, desta; uintptr_t ra = GETPC(); int i; @@ -669,7 +669,7 @@ uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t 
mask, if (!mask) { /* Recognize access exceptions for the first byte */ - probe_read(env, addr, 1, cpu_mmu_index(env, false), ra); + probe_read(env, addr, 1, s390x_env_mmu_index(env, false), ra); } while (mask) { @@ -893,7 +893,7 @@ uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint32_t r1, uint32_t r2) { const uint64_t src = get_address(env, r2) & TARGET_PAGE_MASK; const uint64_t dst = get_address(env, r1) & TARGET_PAGE_MASK; - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); const bool f = extract64(r0, 11, 1); const bool s = extract64(r0, 10, 1); const bool cco = extract64(r0, 8, 1); @@ -946,7 +946,7 @@ uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint32_t r1, uint32_t r2) /* string copy */ uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); const uint64_t d = get_address(env, r1); const uint64_t s = get_address(env, r2); const uint8_t c = env->regs[0]; @@ -1027,7 +1027,7 @@ static inline uint32_t do_mvcl(CPUS390XState *env, uint64_t *src, uint64_t *srclen, uint16_t pad, int wordsize, uintptr_t ra) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK)); S390Access srca, desta; int i, cc; @@ -1084,7 +1084,7 @@ static inline uint32_t do_mvcl(CPUS390XState *env, /* move long */ uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2) { - const int mmu_idx = cpu_mmu_index(env, false); + const int mmu_idx = s390x_env_mmu_index(env, false); uintptr_t ra = GETPC(); uint64_t destlen = env->regs[r1 + 1] & 0xffffff; uint64_t dest = get_address(env, r1); @@ -1742,7 +1742,7 @@ uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2, static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2, bool parallel) { - uint32_t mem_idx = cpu_mmu_index(env, false); + uint32_t mem_idx = s390x_env_mmu_index(env, false); MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, mem_idx); MemOpIdx oi8 = make_memop_idx(MO_TE | MO_64, mem_idx); MemOpIdx oi4 = make_memop_idx(MO_TE | MO_32, mem_idx); @@ -2867,12 +2867,14 @@ uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, uintptr_t ra) { + const int mmu_idx = s390x_env_mmu_index(env, false); + /* test the actual access, not just any access to the page due to LAP */ while (len) { const uint64_t pagelen = -(addr | TARGET_PAGE_MASK); const uint64_t curlen = MIN(pagelen, len); - probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra); + probe_write(env, addr, curlen, mmu_idx, ra); addr = wrap_address(env, addr + curlen); len -= curlen; } diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c index 6aa7907438f..8764846ce8f 100644 --- a/target/s390x/tcg/misc_helper.c +++ b/target/s390x/tcg/misc_helper.c @@ -101,9 +101,9 @@ uint64_t HELPER(stck)(CPUS390XState *env) /* SCLP service call */ uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2) { - qemu_mutex_lock_iothread(); + bql_lock(); int r = sclp_service_call(env_archcpu(env), r1, r2); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (r < 0) { tcg_s390_program_interrupt(env, -r, GETPC()); } @@ -117,9 +117,9 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num) switch (num) { case 0x500: /* KVM hypercall */ - 
qemu_mutex_lock_iothread(); + bql_lock(); r = s390_virtio_hypercall(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case 0x44: /* yield */ @@ -127,9 +127,9 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num) break; case 0x308: /* ipl */ - qemu_mutex_lock_iothread(); + bql_lock(); handle_diag_308(env, r1, r3, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); r = 0; break; case 0x288: @@ -185,7 +185,7 @@ static void update_ckc_timer(CPUS390XState *env) /* stop the timer and remove pending CKC IRQs */ timer_del(env->tod_timer); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR; /* the tod has to exceed the ckc, this can never happen if ckc is all 1's */ @@ -207,16 +207,14 @@ void HELPER(sckc)(CPUS390XState *env, uint64_t ckc) { env->ckc = ckc; - qemu_mutex_lock_iothread(); + bql_lock(); update_ckc_timer(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque) { - S390CPU *cpu = S390_CPU(cs); - - update_ckc_timer(&cpu->env); + update_ckc_timer(cpu_env(cs)); } /* Set Clock */ @@ -229,9 +227,9 @@ uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low) .low = tod_low, }; - qemu_mutex_lock_iothread(); + bql_lock(); tdc->set(td, &tod, &error_abort); - qemu_mutex_unlock_iothread(); + bql_unlock(); return 0; } @@ -421,9 +419,9 @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1, int cc; /* TODO: needed to inject interrupts - push further down */ - qemu_mutex_lock_iothread(); + bql_lock(); cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3); - qemu_mutex_unlock_iothread(); + bql_unlock(); return cc; } @@ -433,92 +431,92 @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1, void HELPER(xsch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_xsch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(csch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_csch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(hsch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_hsch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_msch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(rchp)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_rchp(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(rsch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_rsch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(sal)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_sal(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); 
ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(stcrw)(CPUS390XState *env, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_stcrw(cpu, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) @@ -533,10 +531,10 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } - qemu_mutex_lock_iothread(); + bql_lock(); io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]); if (!io) { - qemu_mutex_unlock_iothread(); + bql_unlock(); return 0; } @@ -554,7 +552,7 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) { /* writing failed, reinject and properly clean up */ s390_io_interrupt(io->id, io->nr, io->parm, io->word); - qemu_mutex_unlock_iothread(); + bql_unlock(); g_free(io); s390_cpu_virt_mem_handle_exc(cpu, ra); return 0; @@ -570,24 +568,24 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) } g_free(io); - qemu_mutex_unlock_iothread(); + bql_unlock(); return 1; } void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(chsc)(CPUS390XState *env, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_chsc(cpu, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif @@ -726,27 +724,27 @@ void HELPER(clp)(CPUS390XState *env, uint32_t r2) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); clp_service_call(cpu, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); pcilg_service_call(cpu, r1, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); pcistg_service_call(cpu, r1, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, @@ -754,9 +752,9 @@ void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); stpcifc_service_call(cpu, r1, fiba, ar, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3) @@ -764,9 +762,9 @@ void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3) S390CPU *cpu = env_archcpu(env); int r; - qemu_mutex_lock_iothread(); + bql_lock(); r = css_do_sic(cpu, (r3 >> 27) & 0x7, r1 & 0xffff); - qemu_mutex_unlock_iothread(); + bql_unlock(); 
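The long run of mechanical edits above (and continuing below) comes from renaming the Big QEMU Lock API: qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() become bql_lock()/bql_unlock(), and qemu_mutex_iothread_locked() becomes bql_locked(). The usage pattern itself is unchanged; a QEMU-tree sketch mirroring the I/O-instruction helpers above, with an illustrative function name:

static void demo_io_insn_under_bql(S390CPU *cpu, uint64_t r1, uintptr_t ra)
{
    bql_lock();                        /* was qemu_mutex_lock_iothread() */
    g_assert(bql_locked());            /* was qemu_mutex_iothread_locked() */
    ioinst_handle_xsch(cpu, r1, ra);   /* any work that needs the BQL */
    bql_unlock();                      /* was qemu_mutex_unlock_iothread() */
}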
/* css_do_sic() may actually return a PGM_xxx value to inject */ if (r) { tcg_s390_program_interrupt(env, -r, GETPC()); @@ -777,9 +775,9 @@ void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); rpcit_service_call(cpu, r1, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3, @@ -787,9 +785,9 @@ void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3, { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, @@ -797,8 +795,8 @@ void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); mpcifc_service_call(cpu, r1, fiba, ar, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index 8df00b7df9f..90a74ee795d 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -141,7 +141,6 @@ struct DisasFields { struct DisasContext { DisasContextBase base; const DisasInsn *insn; - TCGOp *insn_start; DisasFields fields; uint64_t ex_value; /* @@ -754,10 +753,10 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) case CC_OP_TM_64: switch (mask) { case 8: - cond = TCG_COND_EQ; + cond = TCG_COND_TSTEQ; break; case 4 | 2 | 1: - cond = TCG_COND_NE; + cond = TCG_COND_TSTNE; break; default: goto do_dynamic; @@ -768,11 +767,11 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) case CC_OP_ICM: switch (mask) { case 8: - cond = TCG_COND_EQ; + cond = TCG_COND_TSTEQ; break; case 4 | 2 | 1: case 4 | 2: - cond = TCG_COND_NE; + cond = TCG_COND_TSTNE; break; default: goto do_dynamic; @@ -854,18 +853,14 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) c->u.s64.a = cc_dst; c->u.s64.b = tcg_constant_i64(0); break; + case CC_OP_LTGT_64: case CC_OP_LTUGTU_64: - c->u.s64.a = cc_src; - c->u.s64.b = cc_dst; - break; - case CC_OP_TM_32: case CC_OP_TM_64: case CC_OP_ICM: - c->u.s64.a = tcg_temp_new_i64(); - c->u.s64.b = tcg_constant_i64(0); - tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst); + c->u.s64.a = cc_src; + c->u.s64.b = cc_dst; break; case CC_OP_ADDU: @@ -889,67 +884,45 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) case CC_OP_STATIC: c->is_64 = false; c->u.s32.a = cc_op; - switch (mask) { - case 0x8 | 0x4 | 0x2: /* cc != 3 */ - cond = TCG_COND_NE; + + /* Fold half of the cases using bit 3 to invert. */ + switch (mask & 8 ? 
mask ^ 0xf : mask) { + case 0x1: /* cc == 3 */ + cond = TCG_COND_EQ; c->u.s32.b = tcg_constant_i32(3); break; - case 0x8 | 0x4 | 0x1: /* cc != 2 */ - cond = TCG_COND_NE; - c->u.s32.b = tcg_constant_i32(2); - break; - case 0x8 | 0x2 | 0x1: /* cc != 1 */ - cond = TCG_COND_NE; - c->u.s32.b = tcg_constant_i32(1); - break; - case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */ + case 0x2: /* cc == 2 */ cond = TCG_COND_EQ; - c->u.s32.a = tcg_temp_new_i32(); - c->u.s32.b = tcg_constant_i32(0); - tcg_gen_andi_i32(c->u.s32.a, cc_op, 1); - break; - case 0x8 | 0x4: /* cc < 2 */ - cond = TCG_COND_LTU; c->u.s32.b = tcg_constant_i32(2); break; - case 0x8: /* cc == 0 */ - cond = TCG_COND_EQ; - c->u.s32.b = tcg_constant_i32(0); - break; - case 0x4 | 0x2 | 0x1: /* cc != 0 */ - cond = TCG_COND_NE; - c->u.s32.b = tcg_constant_i32(0); - break; - case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */ - cond = TCG_COND_NE; - c->u.s32.a = tcg_temp_new_i32(); - c->u.s32.b = tcg_constant_i32(0); - tcg_gen_andi_i32(c->u.s32.a, cc_op, 1); - break; case 0x4: /* cc == 1 */ cond = TCG_COND_EQ; c->u.s32.b = tcg_constant_i32(1); break; - case 0x2 | 0x1: /* cc > 1 */ + case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */ cond = TCG_COND_GTU; c->u.s32.b = tcg_constant_i32(1); break; - case 0x2: /* cc == 2 */ - cond = TCG_COND_EQ; - c->u.s32.b = tcg_constant_i32(2); + case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */ + cond = TCG_COND_TSTNE; + c->u.s32.b = tcg_constant_i32(1); break; - case 0x1: /* cc == 3 */ - cond = TCG_COND_EQ; - c->u.s32.b = tcg_constant_i32(3); + case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */ + cond = TCG_COND_LEU; + c->u.s32.a = tcg_temp_new_i32(); + c->u.s32.b = tcg_constant_i32(1); + tcg_gen_addi_i32(c->u.s32.a, cc_op, -1); break; - default: - /* CC is masked by something else: (8 >> cc) & mask. */ + case 0x4 | 0x2 | 0x1: /* cc != 0 */ cond = TCG_COND_NE; - c->u.s32.a = tcg_temp_new_i32(); c->u.s32.b = tcg_constant_i32(0); - tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op); - tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask); break; + default: + /* case 0: never, handled above. 
*/ + g_assert_not_reached(); + } + if (mask & 8) { + cond = tcg_invert_cond(cond); } break; @@ -2223,6 +2196,22 @@ static DisasJumpType op_csp(DisasContext *s, DisasOps *o) } #endif +static DisasJumpType op_cvb(DisasContext *s, DisasOps *o) +{ + TCGv_i64 t = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TEUQ); + gen_helper_cvb(tcg_env, tcg_constant_i32(get_field(s, r1)), t); + return DISAS_NEXT; +} + +static DisasJumpType op_cvbg(DisasContext *s, DisasOps *o) +{ + TCGv_i128 t = tcg_temp_new_i128(); + tcg_gen_qemu_ld_i128(t, o->addr1, get_mem_index(s), MO_TE | MO_128); + gen_helper_cvbg(o->out, tcg_env, t); + return DISAS_NEXT; +} + static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) { TCGv_i64 t1 = tcg_temp_new_i64(); @@ -2233,6 +2222,14 @@ static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) return DISAS_NEXT; } +static DisasJumpType op_cvdg(DisasContext *s, DisasOps *o) +{ + TCGv_i128 t = tcg_temp_new_i128(); + gen_helper_cvdg(t, o->in1); + tcg_gen_qemu_st_i128(t, o->in2, get_mem_index(s), MO_TE | MO_128); + return DISAS_NEXT; +} + static DisasJumpType op_ct(DisasContext *s, DisasOps *o) { int m3 = get_field(s, m3); @@ -4783,9 +4780,10 @@ static DisasJumpType op_trXX(DisasContext *s, DisasOps *o) static DisasJumpType op_ts(DisasContext *s, DisasOps *o) { - TCGv_i32 t1 = tcg_constant_i32(0xff); + TCGv_i32 ff = tcg_constant_i32(0xff); + TCGv_i32 t1 = tcg_temp_new_i32(); - tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB); + tcg_gen_atomic_xchg_i32(t1, o->in2, ff, get_mem_index(s), MO_UB); tcg_gen_extract_i32(cc_op, t1, 7, 1); set_cc_static(s); return DISAS_NEXT; @@ -6315,7 +6313,7 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) insn = extract_insn(env, s); /* Update insn_start now that we know the ILEN. */ - tcg_set_insn_start_param(s->insn_start, 2, s->ilen); + tcg_set_insn_start_param(s->base.insn_start, 2, s->ilen); /* Not found means unimplemented/illegal opcode. */ if (insn == NULL) { @@ -6469,7 +6467,6 @@ static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) /* Delay the set of ilen until we've read the insn. 
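(Illustrative aside, not part of the patch.) The reworked CC_OP_STATIC switch earlier in this translate.c hunk folds every mask value with bit 3 set onto its 4-bit complement and then inverts the generated condition, so only seven mask patterns need explicit cases. Below is a standalone C sketch of that equivalence; cc_matches_direct() and cc_matches_folded() are names invented here, and masks 0x0/0xf, which the translator handles before ever reaching the switch, are folded into the default case.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Reference test: the branch is taken when bit (8 >> cc) of the mask is set. */
static bool cc_matches_direct(unsigned mask, unsigned cc)
{
    return ((8 >> cc) & mask) != 0;
}

/* Folded form: normalise masks with bit 3 set, then invert the result. */
static bool cc_matches_folded(unsigned mask, unsigned cc)
{
    bool inv = (mask & 8) != 0;
    unsigned m = inv ? mask ^ 0xf : mask;
    bool r;

    switch (m) {
    case 0x1:             r = (cc == 3); break;
    case 0x2:             r = (cc == 2); break;
    case 0x4:             r = (cc == 1); break;
    case 0x2 | 0x1:       r = (cc > 1); break;        /* cc == 2 || cc == 3 */
    case 0x4 | 0x1:       r = (cc & 1) != 0; break;   /* cc == 1 || cc == 3 */
    case 0x4 | 0x2:       r = (cc - 1) <= 1; break;   /* cc == 1 || cc == 2 */
    case 0x4 | 0x2 | 0x1: r = (cc != 0); break;
    default:              r = false; break;           /* mask 0x0 or 0xf */
    }
    return inv ? !r : r;
}

int main(void)
{
    for (unsigned mask = 0; mask < 16; mask++) {
        for (unsigned cc = 0; cc < 4; cc++) {
            assert(cc_matches_folded(mask, cc) == cc_matches_direct(mask, cc));
        }
    }
    printf("all 64 mask/cc combinations agree\n");
    return 0;
}
```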
*/ tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0); - dc->insn_start = tcg_last_op(); } static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s, @@ -6547,7 +6544,7 @@ static const TranslatorOps s390x_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc; @@ -6558,8 +6555,7 @@ void s390x_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; + CPUS390XState *env = cpu_env(cs); int cc_op = data[1]; env->psw.addr = data[0]; diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index a8ec98b1348..4f5a4a3d985 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -71,8 +71,7 @@ static void superh_restore_state_to_opc(CPUState *cs, static bool superh_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb) { - SuperHCPU *cpu = SUPERH_CPU(cs); - CPUSH4State *env = &cpu->env; + CPUSH4State *env = cpu_env(cs); if ((env->flags & (TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND)) && !(cs->tcg_cflags & CF_PCREL) && env->pc != tb->pc) { @@ -89,12 +88,26 @@ static bool superh_cpu_has_work(CPUState *cs) return cs->interrupt_request & CPU_INTERRUPT_HARD; } +static int sh4_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + CPUSH4State *env = cpu_env(cs); + + /* + * The instruction in a RTE delay slot is fetched in privileged mode, + * but executed in user mode. + */ + if (ifetch && (env->flags & TB_FLAG_DELAY_SLOT_RTE)) { + return 0; + } else { + return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0; + } +} + static void superh_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(obj); - SuperHCPU *cpu = SUPERH_CPU(s); - SuperHCPUClass *scc = SUPERH_CPU_GET_CLASS(cpu); - CPUSH4State *env = &cpu->env; + CPUState *cs = CPU(obj); + SuperHCPUClass *scc = SUPERH_CPU_GET_CLASS(obj); + CPUSH4State *env = cpu_env(cs); if (scc->parent_phases.hold) { scc->parent_phases.hold(obj); @@ -122,23 +135,6 @@ static void superh_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) info->print_insn = print_insn_sh; } -static void superh_cpu_list_entry(gpointer data, gpointer user_data) -{ - const char *typename = object_class_get_name(OBJECT_CLASS(data)); - int len = strlen(typename) - strlen(SUPERH_CPU_TYPE_SUFFIX); - - qemu_printf("%.*s\n", len, typename); -} - -void sh4_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list_sorted(TYPE_SUPERH_CPU, false); - g_slist_foreach(list, superh_cpu_list_entry, NULL); - g_slist_free(list); -} - static ObjectClass *superh_cpu_class_by_name(const char *cpu_model) { ObjectClass *oc; @@ -161,8 +157,7 @@ static ObjectClass *superh_cpu_class_by_name(const char *cpu_model) static void sh7750r_cpu_initfn(Object *obj) { - SuperHCPU *cpu = SUPERH_CPU(obj); - CPUSH4State *env = &cpu->env; + CPUSH4State *env = cpu_env(CPU(obj)); env->id = SH_CPU_SH7750R; env->features = SH_FEATURE_BCR3_AND_BCR4; @@ -179,8 +174,7 @@ static void sh7750r_class_init(ObjectClass *oc, void *data) static void sh7751r_cpu_initfn(Object *obj) { - SuperHCPU *cpu = SUPERH_CPU(obj); - CPUSH4State *env = &cpu->env; + CPUSH4State *env = cpu_env(CPU(obj)); env->id = SH_CPU_SH7751R; env->features = SH_FEATURE_BCR3_AND_BCR4; @@ -197,8 +191,7 @@ static void sh7751r_class_init(ObjectClass *oc, void *data) static void sh7785_cpu_initfn(Object *obj) { - SuperHCPU *cpu = SUPERH_CPU(obj); - CPUSH4State *env = &cpu->env; + CPUSH4State *env = cpu_env(CPU(obj)); env->id = SH_CPU_SH7785; env->features = 
SH_FEATURE_SH4A; @@ -233,8 +226,7 @@ static void superh_cpu_realizefn(DeviceState *dev, Error **errp) static void superh_cpu_initfn(Object *obj) { - SuperHCPU *cpu = SUPERH_CPU(obj); - CPUSH4State *env = &cpu->env; + CPUSH4State *env = cpu_env(CPU(obj)); env->movcal_backup_tail = &(env->movcal_backup); } @@ -254,7 +246,7 @@ static const struct SysemuCPUOps sh4_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps superh_tcg_ops = { +static const TCGCPUOps superh_tcg_ops = { .initialize = sh4_translate_init, .synchronize_from_tb = superh_cpu_synchronize_from_tb, .restore_state_to_opc = superh_restore_state_to_opc, @@ -283,6 +275,7 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = superh_cpu_class_by_name; cc->has_work = superh_cpu_has_work; + cc->mmu_index = sh4_cpu_mmu_index; cc->dump_state = superh_cpu_dump_state; cc->set_pc = superh_cpu_set_pc; cc->get_pc = superh_cpu_get_pc; diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index 031dc0b457b..d928bcf0067 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -155,12 +155,22 @@ typedef struct CPUArchState { uint32_t pc; /* program counter */ uint32_t delayed_pc; /* target of delayed branch */ uint32_t delayed_cond; /* condition of delayed branch */ - uint32_t mach; /* multiply and accumulate high */ - uint32_t macl; /* multiply and accumulate low */ uint32_t pr; /* procedure register */ uint32_t fpscr; /* floating point status/control register */ uint32_t fpul; /* floating point communication register */ + /* multiply and accumulate: high, low and combined. */ + union { + uint64_t mac; + struct { +#if HOST_BIG_ENDIAN + uint32_t mach, macl; +#else + uint32_t macl, mach; +#endif + }; + }; + /* float point status register */ float_status fp_status; @@ -238,7 +248,6 @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, uintptr_t retaddr); void sh4_translate_init(void); -void sh4_cpu_list(void); #if !defined(CONFIG_USER_ONLY) hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); @@ -272,20 +281,8 @@ void cpu_load_tlb(CPUSH4State * env); #define CPU_RESOLVING_TYPE TYPE_SUPERH_CPU -#define cpu_list sh4_cpu_list - /* MMU modes definitions */ #define MMU_USER_IDX 1 -static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch) -{ - /* The instruction in a RTE delay slot is fetched in privileged - mode, but executed in user mode. */ - if (ifetch && (env->flags & TB_FLAG_DELAY_SLOT_RTE)) { - return 0; - } else { - return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0; - } -} #include "exec/cpu-all.h" diff --git a/target/sh4/gdbstub.c b/target/sh4/gdbstub.c index d8e199fc060..75926d4e049 100644 --- a/target/sh4/gdbstub.c +++ b/target/sh4/gdbstub.c @@ -26,8 +26,7 @@ int superh_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - SuperHCPU *cpu = SUPERH_CPU(cs); - CPUSH4State *env = &cpu->env; + CPUSH4State *env = cpu_env(cs); switch (n) { case 0 ... 7: @@ -76,8 +75,7 @@ int superh_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int superh_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - SuperHCPU *cpu = SUPERH_CPU(cs); - CPUSH4State *env = &cpu->env; + CPUSH4State *env = cpu_env(cs); switch (n) { case 0 ... 
7: diff --git a/target/sh4/helper.c b/target/sh4/helper.c index 5a6f653c128..7c6f9d374ab 100644 --- a/target/sh4/helper.c +++ b/target/sh4/helper.c @@ -55,8 +55,7 @@ int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr) void superh_cpu_do_interrupt(CPUState *cs) { - SuperHCPU *cpu = SUPERH_CPU(cs); - CPUSH4State *env = &cpu->env; + CPUSH4State *env = cpu_env(cs); int do_irq = cs->interrupt_request & CPU_INTERRUPT_HARD; int do_exp, irq_vector = cs->exception_index; @@ -432,11 +431,10 @@ static int get_physical_address(CPUSH4State * env, target_ulong * physical, hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { - SuperHCPU *cpu = SUPERH_CPU(cs); target_ulong physical; int prot; - if (get_physical_address(&cpu->env, &physical, &prot, addr, MMU_DATA_LOAD) + if (get_physical_address(cpu_env(cs), &physical, &prot, addr, MMU_DATA_LOAD) == MMU_OK) { return physical; } @@ -782,11 +780,8 @@ int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr) bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { if (interrupt_request & CPU_INTERRUPT_HARD) { - SuperHCPU *cpu = SUPERH_CPU(cs); - CPUSH4State *env = &cpu->env; - /* Delay slots are indivisible, ignore interrupts */ - if (env->flags & TB_FLAG_DELAY_SLOT_MASK) { + if (cpu_env(cs)->flags & TB_FLAG_DELAY_SLOT_MASK) { return false; } else { superh_cpu_do_interrupt(cs); @@ -800,8 +795,7 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - SuperHCPU *cpu = SUPERH_CPU(cs); - CPUSH4State *env = &cpu->env; + CPUSH4State *env = cpu_env(cs); int ret; target_ulong physical; diff --git a/target/sh4/helper.h b/target/sh4/helper.h index 8d792f6b553..29011d3dbbb 100644 --- a/target/sh4/helper.h +++ b/target/sh4/helper.h @@ -11,8 +11,8 @@ DEF_HELPER_3(movcal, void, env, i32, i32) DEF_HELPER_1(discard_movcal_backup, void, env) DEF_HELPER_2(ocbi, void, env, i32) -DEF_HELPER_3(macl, void, env, i32, i32) -DEF_HELPER_3(macw, void, env, i32, i32) +DEF_HELPER_3(macl, void, env, s32, s32) +DEF_HELPER_3(macw, void, env, s32, s32) DEF_HELPER_2(ld_fpscr, void, env, i32) diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c index 54d390fe1f7..99394b714c9 100644 --- a/target/sh4/op_helper.c +++ b/target/sh4/op_helper.c @@ -29,9 +29,7 @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { - CPUSH4State *env = cpu_env(cs); - - env->tea = addr; + cpu_env(cs)->tea = addr; switch (access_type) { case MMU_INST_FETCH: case MMU_DATA_LOAD: @@ -160,38 +158,47 @@ void helper_ocbi(CPUSH4State *env, uint32_t address) } } -void helper_macl(CPUSH4State *env, uint32_t arg0, uint32_t arg1) +void helper_macl(CPUSH4State *env, int32_t arg0, int32_t arg1) { + const int64_t min = -(1ll << 47); + const int64_t max = (1ll << 47) - 1; + int64_t mul = (int64_t)arg0 * arg1; + int64_t mac = env->mac; int64_t res; - res = ((uint64_t) env->mach << 32) | env->macl; - res += (int64_t) (int32_t) arg0 *(int64_t) (int32_t) arg1; - env->mach = (res >> 32) & 0xffffffff; - env->macl = res & 0xffffffff; - if (env->sr & (1u << SR_S)) { - if (res < 0) - env->mach |= 0xffff0000; - else - env->mach &= 0x00007fff; + if (!(env->sr & (1u << SR_S))) { + res = mac + mul; + } else if (sadd64_overflow(mac, mul, &res)) { + res = mac < 0 ? 
min : max; + } else { + res = MIN(MAX(res, min), max); } + + env->mac = res; } -void helper_macw(CPUSH4State *env, uint32_t arg0, uint32_t arg1) +void helper_macw(CPUSH4State *env, int32_t arg0, int32_t arg1) { - int64_t res; + /* Inputs are already sign-extended from 16 bits. */ + int32_t mul = arg0 * arg1; - res = ((uint64_t) env->mach << 32) | env->macl; - res += (int64_t) (int16_t) arg0 *(int64_t) (int16_t) arg1; - env->mach = (res >> 32) & 0xffffffff; - env->macl = res & 0xffffffff; if (env->sr & (1u << SR_S)) { - if (res < -0x80000000) { - env->mach = 1; - env->macl = 0x80000000; - } else if (res > 0x000000007fffffff) { + /* + * In saturation arithmetic mode, the accumulator is 32-bit + * with carry. MACH is not considered during the addition + * operation nor the 32-bit saturation logic. + */ + int32_t res, macl = env->macl; + + if (sadd32_overflow(macl, mul, &res)) { + res = macl < 0 ? INT32_MIN : INT32_MAX; + /* If overflow occurs, the MACH register is set to 1. */ env->mach = 1; - env->macl = 0x7fffffff; } + env->macl = res; + } else { + /* In non-saturation arithmetic mode, the accumulator is 64-bit */ + env->mac += mul; } } diff --git a/target/sh4/translate.c b/target/sh4/translate.c index 81f825f1257..ebb6c901bf6 100644 --- a/target/sh4/translate.c +++ b/target/sh4/translate.c @@ -159,8 +159,7 @@ void sh4_translate_init(void) void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - SuperHCPU *cpu = SUPERH_CPU(cs); - CPUSH4State *env = &cpu->env; + CPUSH4State *env = cpu_env(cs); int i; qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n", @@ -524,6 +523,7 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_movi_i32(REG(B11_8), B7_0s); return; case 0x9000: /* mov.w @(disp,PC),Rn */ + CHECK_NOT_DELAY_SLOT { TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, @@ -531,6 +531,7 @@ static void _decode_opc(DisasContext * ctx) } return; case 0xd000: /* mov.l @(disp,PC),Rn */ + CHECK_NOT_DELAY_SLOT { TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, @@ -817,10 +818,10 @@ static void _decode_opc(DisasContext * ctx) TCGv arg0, arg1; arg0 = tcg_temp_new(); tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, - MO_TESL | MO_ALIGN); + MO_TESW | MO_ALIGN); arg1 = tcg_temp_new(); tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, - MO_TESL | MO_ALIGN); + MO_TESW | MO_ALIGN); gen_helper_macw(tcg_env, arg0, arg1); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2); @@ -1237,6 +1238,7 @@ static void _decode_opc(DisasContext * ctx) } return; case 0xc700: /* mova @(disp,PC),R0 */ + CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) + 4 + B7_0 * 4) & ~3); return; @@ -2186,7 +2188,6 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); - CPUSH4State *env = cpu_env(cs); uint32_t tbflags; int bound; @@ -2196,7 +2197,7 @@ static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) /* We don't know if the delayed pc came from a dynamic or static branch, so assume it is a dynamic branch. 
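(Illustrative aside, not part of the patch.) The helper_macl() rewrite above clamps the 64-bit accumulator to the signed 48-bit range [-2^47, 2^47 - 1] when SR.S is set, after first catching 64-bit wrap-around with sadd64_overflow(). Below is a standalone sketch of that saturation step; macl_accumulate() is a name invented here, and GCC/Clang's __builtin_add_overflow() stands in for QEMU's sadd64_overflow().

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * MAC.L accumulation step: with saturation enabled (SR.S set) the result is
 * clamped to the signed 48-bit range; without it, the 64-bit accumulator is
 * used as-is.
 */
static int64_t macl_accumulate(int64_t mac, int32_t arg0, int32_t arg1,
                               bool saturate)
{
    const int64_t min = -(INT64_C(1) << 47);
    const int64_t max = (INT64_C(1) << 47) - 1;
    int64_t mul = (int64_t)arg0 * arg1;
    int64_t res;

    if (!saturate) {
        return mac + mul;                   /* plain 64-bit accumulate */
    }
    if (__builtin_add_overflow(mac, mul, &res)) {
        /* 64-bit wrap-around: the old accumulator's sign picks the bound. */
        return mac < 0 ? min : max;
    }
    return res < min ? min : res > max ? max : res;   /* clamp to 48 bits */
}

int main(void)
{
    const int64_t max48 = (INT64_C(1) << 47) - 1;

    /* Non-saturating mode: straightforward multiply-accumulate. */
    assert(macl_accumulate(100, 7, -3, false) == 79);

    /* Saturating mode: a result beyond 2^47 - 1 is clamped, not wrapped. */
    assert(macl_accumulate(max48, INT32_MAX, INT32_MAX, true) == max48);

    printf("mac.l saturation demo ok\n");
    return 0;
}
```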
*/ ctx->delayed_pc = -1; /* use delayed pc from env pointer */ - ctx->features = env->features; + ctx->features = cpu_env(cs)->features; ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA); ctx->gbank = ((tbflags & (1 << SR_MD)) && (tbflags & (1 << SR_RB))) * 0x10; @@ -2317,7 +2318,7 @@ static const TranslatorOps sh4_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/sparc/asi.h b/target/sparc/asi.h index 3270ed0c7fc..a66829674bc 100644 --- a/target/sparc/asi.h +++ b/target/sparc/asi.h @@ -145,14 +145,14 @@ * and later ASIs. */ #define ASI_REAL 0x14 /* Real address, cacheable */ -#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */ -#define ASI_REAL_IO 0x15 /* Real address, non-cachable */ +#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cacheable */ +#define ASI_REAL_IO 0x15 /* Real address, non-cacheable */ #define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */ #define ASI_BLK_AIUP_4V 0x16 /* (4V) Prim, user, block ld/st */ #define ASI_BLK_AIUS_4V 0x17 /* (4V) Sec, user, block ld/st */ #define ASI_REAL_L 0x1c /* Real address, cacheable, LE */ -#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/ -#define ASI_REAL_IO_L 0x1d /* Real address, non-cachable, LE */ +#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cacheable, little endian*/ +#define ASI_REAL_IO_L 0x1d /* Real address, non-cacheable, LE */ #define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */ #define ASI_BLK_AIUP_L_4V 0x1e /* (4V) Prim, user, block, l-endian*/ #define ASI_BLK_AIUS_L_4V 0x1f /* (4V) Sec, user, block, l-endian */ diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c index befa7fc4eb8..e820f50acf6 100644 --- a/target/sparc/cpu.c +++ b/target/sparc/cpu.c @@ -31,10 +31,9 @@ static void sparc_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(obj); - SPARCCPU *cpu = SPARC_CPU(s); - SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(cpu); - CPUSPARCState *env = &cpu->env; + CPUState *cs = CPU(obj); + SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(obj); + CPUSPARCState *env = cpu_env(cs); if (scc->parent_phases.hold) { scc->parent_phases.hold(obj); @@ -83,8 +82,7 @@ static void sparc_cpu_reset_hold(Object *obj) static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { if (interrupt_request & CPU_INTERRUPT_HARD) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); if (cpu_interrupts_enabled(env) && env->interrupt_index > 0) { int pil = env->interrupt_index & 0xf; @@ -368,7 +366,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "Fujitsu MB86904", .iu_version = 0x04 << 24, /* Impl 0, ver 4 */ - .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ + .fpu_version = 4 << FSR_VER_SHIFT, /* FPU version 4 (Meiko) */ .mmu_version = 0x04 << 24, /* Impl 0, ver 4 */ .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0x00ffffc0, @@ -381,7 +379,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "Fujitsu MB86907", .iu_version = 0x05 << 24, /* Impl 0, ver 5 */ - .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ + .fpu_version = 4 << FSR_VER_SHIFT, /* FPU version 4 (Meiko) */ .mmu_version = 0x05 << 24, /* Impl 0, ver 5 */ .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0xffffffc0, @@ -394,7 +392,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "TI MicroSparc I", .iu_version = 0x41000000, - .fpu_version = 4 << 17, + .fpu_version = 4 << FSR_VER_SHIFT, .mmu_version = 0x41000000, .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0x007ffff0, 
@@ -407,7 +405,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "TI MicroSparc II", .iu_version = 0x42000000, - .fpu_version = 4 << 17, + .fpu_version = 4 << FSR_VER_SHIFT, .mmu_version = 0x02000000, .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0x00ffffc0, @@ -420,7 +418,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "TI MicroSparc IIep", .iu_version = 0x42000000, - .fpu_version = 4 << 17, + .fpu_version = 4 << FSR_VER_SHIFT, .mmu_version = 0x04000000, .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0x00ffffc0, @@ -433,7 +431,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "TI SuperSparc 40", /* STP1020NPGA */ .iu_version = 0x41000000, /* SuperSPARC 2.x */ - .fpu_version = 0 << 17, + .fpu_version = 0 << FSR_VER_SHIFT, .mmu_version = 0x00000800, /* SuperSPARC 2.x, no MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, @@ -446,7 +444,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "TI SuperSparc 50", /* STP1020PGA */ .iu_version = 0x40000000, /* SuperSPARC 3.x */ - .fpu_version = 0 << 17, + .fpu_version = 0 << FSR_VER_SHIFT, .mmu_version = 0x01000800, /* SuperSPARC 3.x, no MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, @@ -459,7 +457,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "TI SuperSparc 51", .iu_version = 0x40000000, /* SuperSPARC 3.x */ - .fpu_version = 0 << 17, + .fpu_version = 0 << FSR_VER_SHIFT, .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, @@ -473,7 +471,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "TI SuperSparc 60", /* STP1020APGA */ .iu_version = 0x40000000, /* SuperSPARC 3.x */ - .fpu_version = 0 << 17, + .fpu_version = 0 << FSR_VER_SHIFT, .mmu_version = 0x01000800, /* SuperSPARC 3.x, no MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, @@ -486,7 +484,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "TI SuperSparc 61", .iu_version = 0x44000000, /* SuperSPARC 3.x */ - .fpu_version = 0 << 17, + .fpu_version = 0 << FSR_VER_SHIFT, .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, @@ -500,7 +498,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "TI SuperSparc II", .iu_version = 0x40000000, /* SuperSPARC II 1.x */ - .fpu_version = 0 << 17, + .fpu_version = 0 << FSR_VER_SHIFT, .mmu_version = 0x08000000, /* SuperSPARC II 1.x, MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, @@ -514,7 +512,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "LEON2", .iu_version = 0xf2000000, - .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ + .fpu_version = 4 << FSR_VER_SHIFT, /* FPU version 4 (Meiko) */ .mmu_version = 0xf2000000, .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0x007ffff0, @@ -527,7 +525,7 @@ static const sparc_def_t sparc_defs[] = { { .name = "LEON3", .iu_version = 0xf3000000, - .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ + .fpu_version = 4 << FSR_VER_SHIFT, /* FPU version 4 (Meiko) */ .mmu_version = 0xf3000000, .mmu_bm = 0x00000000, .mmu_ctpr_mask = 0xfffffffc, @@ -576,9 +574,10 @@ void sparc_cpu_list(void) { unsigned int i; + qemu_printf("Available CPU types:\n"); for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) { - qemu_printf("Sparc %16s IU " TARGET_FMT_lx - " FPU %08x MMU %08x NWINS %d ", + qemu_printf(" %-20s (IU " TARGET_FMT_lx + " FPU %08x MMU %08x NWINS %d) ", sparc_defs[i].name, sparc_defs[i].iu_version, sparc_defs[i].fpu_version, @@ -613,8 +612,7 @@ static void cpu_print_cc(FILE *f, uint32_t cc) static void sparc_cpu_dump_state(CPUState *cs, FILE *f, int flags) 
{ - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); int i, x; qemu_fprintf(f, "pc: " TARGET_FMT_lx " npc: " TARGET_FMT_lx "\n", env->pc, @@ -670,7 +668,7 @@ static void sparc_cpu_dump_state(CPUState *cs, FILE *f, int flags) env->cansave, env->canrestore, env->otherwin, env->wstate, env->cleanwin, env->nwindows - 1 - env->cwp); qemu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx " fprs: %016x\n", - env->fsr, env->y, env->fprs); + cpu_get_fsr(env), env->y, env->fprs); #else qemu_fprintf(f, "psr: %08x (icc: ", cpu_get_psr(env)); @@ -679,7 +677,7 @@ static void sparc_cpu_dump_state(CPUState *cs, FILE *f, int flags) env->psrps ? 'P' : '-', env->psret ? 'E' : '-', env->wim); qemu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx "\n", - env->fsr, env->y); + cpu_get_fsr(env), env->y); #endif qemu_fprintf(f, "\n"); } @@ -711,11 +709,36 @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs, static bool sparc_cpu_has_work(CPUState *cs) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; - return (cs->interrupt_request & CPU_INTERRUPT_HARD) && - cpu_interrupts_enabled(env); + cpu_interrupts_enabled(cpu_env(cs)); +} + +static int sparc_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + CPUSPARCState *env = cpu_env(cs); + +#ifndef TARGET_SPARC64 + if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */ + return MMU_PHYS_IDX; + } else { + return env->psrs; + } +#else + /* IMMU or DMMU disabled. */ + if (ifetch + ? (env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0 + : (env->lsu & DMMU_E) == 0) { + return MMU_PHYS_IDX; + } else if (cpu_hypervisor_mode(env)) { + return MMU_PHYS_IDX; + } else if (env->tl > 0) { + return MMU_NUCLEUS_IDX; + } else if (cpu_supervisor_mode(env)) { + return MMU_KERNEL_IDX; + } else { + return MMU_USER_IDX; + } +#endif } static char *sparc_cpu_type_name(const char *cpu_model) @@ -749,8 +772,7 @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp) CPUState *cs = CPU(dev); SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(dev); Error *local_err = NULL; - SPARCCPU *cpu = SPARC_CPU(dev); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); #if defined(CONFIG_USER_ONLY) /* We are emulating the kernel, which will trap and emulate float128. 
*/ @@ -758,7 +780,6 @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp) #endif env->version = env->def.iu_version; - env->fsr = env->def.fpu_version; env->nwindows = env->def.nwindows; #if !defined(TARGET_SPARC64) env->mmuregs[0] |= env->def.mmu_version; @@ -770,6 +791,7 @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp) env->version |= env->def.maxtl << 8; env->version |= env->def.nwindows - 1; #endif + cpu_put_fsr(env, 0); cpu_exec_realizefn(cs, &local_err); if (local_err != NULL) { @@ -874,7 +896,7 @@ static const struct SysemuCPUOps sparc_sysemu_ops = { #ifdef CONFIG_TCG #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps sparc_tcg_ops = { +static const TCGCPUOps sparc_tcg_ops = { .initialize = sparc_tcg_init, .synchronize_from_tb = sparc_cpu_synchronize_from_tb, .restore_state_to_opc = sparc_restore_state_to_opc, @@ -906,6 +928,7 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = sparc_cpu_class_by_name; cc->parse_features = sparc_cpu_parse_features; cc->has_work = sparc_cpu_has_work; + cc->mmu_index = sparc_cpu_mmu_index; cc->dump_state = sparc_cpu_dump_state; #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) cc->memory_rw_debug = sparc_cpu_memory_rw_debug; diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index 6999a10a401..f3cdd17c629 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -31,8 +31,10 @@ #if !defined(TARGET_SPARC64) #define TARGET_DPREGS 16 +#define TARGET_FCCREGS 1 #else #define TARGET_DPREGS 32 +#define TARGET_FCCREGS 4 #endif /*#define EXCP_INTERRUPT 0x100*/ @@ -176,6 +178,7 @@ enum { #define FSR_DZM (1ULL << 24) #define FSR_NXM (1ULL << 23) #define FSR_TEM_MASK (FSR_NVM | FSR_OFM | FSR_UFM | FSR_DZM | FSR_NXM) +#define FSR_TEM_SHIFT 23 #define FSR_NVA (1ULL << 9) #define FSR_OFA (1ULL << 8) @@ -183,6 +186,7 @@ enum { #define FSR_DZA (1ULL << 6) #define FSR_NXA (1ULL << 5) #define FSR_AEXC_MASK (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA) +#define FSR_AEXC_SHIFT 5 #define FSR_NVC (1ULL << 4) #define FSR_OFC (1ULL << 3) @@ -191,31 +195,22 @@ enum { #define FSR_NXC (1ULL << 0) #define FSR_CEXC_MASK (FSR_NVC | FSR_OFC | FSR_UFC | FSR_DZC | FSR_NXC) +#define FSR_VER_SHIFT 17 +#define FSR_VER_MASK (7 << FSR_VER_SHIFT) + #define FSR_FTT2 (1ULL << 16) #define FSR_FTT1 (1ULL << 15) #define FSR_FTT0 (1ULL << 14) #define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0) -#ifdef TARGET_SPARC64 -#define FSR_FTT_NMASK 0xfffffffffffe3fffULL -#define FSR_FTT_CEXC_NMASK 0xfffffffffffe3fe0ULL -#define FSR_LDFSR_OLDMASK 0x0000003f000fc000ULL -#define FSR_LDXFSR_MASK 0x0000003fcfc00fffULL -#define FSR_LDXFSR_OLDMASK 0x00000000000fc000ULL -#else -#define FSR_FTT_NMASK 0xfffe3fffULL -#define FSR_FTT_CEXC_NMASK 0xfffe3fe0ULL -#define FSR_LDFSR_OLDMASK 0x000fc000ULL -#endif -#define FSR_LDFSR_MASK 0xcfc00fffULL #define FSR_FTT_IEEE_EXCP (1ULL << 14) #define FSR_FTT_UNIMPFPOP (3ULL << 14) #define FSR_FTT_SEQ_ERROR (4ULL << 14) #define FSR_FTT_INVAL_FPR (6ULL << 14) -#define FSR_FCC1_SHIFT 11 -#define FSR_FCC1 (1ULL << FSR_FCC1_SHIFT) -#define FSR_FCC0_SHIFT 10 -#define FSR_FCC0 (1ULL << FSR_FCC0_SHIFT) +#define FSR_FCC0_SHIFT 10 +#define FSR_FCC1_SHIFT 32 +#define FSR_FCC2_SHIFT 34 +#define FSR_FCC3_SHIFT 36 /* MMU */ #define MMU_E (1<<0) @@ -461,7 +456,11 @@ struct CPUArchState { target_ulong cond; /* conditional branch result (XXX: save it in a temporary register when possible) */ - target_ulong fsr; /* FPU state register */ + /* FPU State Register, in parts */ + uint32_t fsr; /* rm, tem, 
aexc */ + uint32_t fsr_cexc_ftt; /* cexc, ftt */ + uint32_t fcc[TARGET_FCCREGS]; /* fcc* */ + CPU_DoubleU fpr[TARGET_DPREGS]; /* floating point registers */ uint32_t cwp; /* index of current register window (extracted from PSR) */ @@ -509,8 +508,6 @@ struct CPUArchState { uint64_t mmubpregs[4]; uint64_t prom_addr; #endif - /* temporary float registers */ - float128 qt0, qt1; float_status fp_status; #if defined(TARGET_SPARC64) #define MAXTL_MAX 8 @@ -548,10 +545,9 @@ struct CPUArchState { #endif sparc_def_t def; - void *irq_manager; - void (*qemu_irq_ack)(CPUSPARCState *env, void *irq_manager, int intno); - - /* Leon3 cache control */ + /* Leon3 */ + DeviceState *irq_manager; + void (*qemu_irq_ack)(CPUSPARCState *env, int intno); uint32_t cache_control; }; @@ -619,7 +615,9 @@ void sparc_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data); -/* cpu-exec.c */ +/* fop_helper.c */ +target_ulong cpu_get_fsr(CPUSPARCState *); +void cpu_put_fsr(CPUSPARCState *, target_ulong); /* win_helper.c */ target_ulong cpu_get_psr(CPUSPARCState *env1); @@ -708,34 +706,6 @@ static inline int cpu_supervisor_mode(CPUSPARCState *env1) } #endif -static inline int cpu_mmu_index(CPUSPARCState *env, bool ifetch) -{ -#if defined(CONFIG_USER_ONLY) - return MMU_USER_IDX; -#elif !defined(TARGET_SPARC64) - if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */ - return MMU_PHYS_IDX; - } else { - return env->psrs; - } -#else - /* IMMU or DMMU disabled. */ - if (ifetch - ? (env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0 - : (env->lsu & DMMU_E) == 0) { - return MMU_PHYS_IDX; - } else if (cpu_hypervisor_mode(env)) { - return MMU_PHYS_IDX; - } else if (env->tl > 0) { - return MMU_NUCLEUS_IDX; - } else if (cpu_supervisor_mode(env)) { - return MMU_KERNEL_IDX; - } else { - return MMU_USER_IDX; - } -#endif -} - static inline int cpu_interrupts_enabled(CPUSPARCState *env1) { #if !defined (TARGET_SPARC64) @@ -783,7 +753,7 @@ static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, vaddr *pc, uint32_t flags; *pc = env->pc; *cs_base = env->npc; - flags = cpu_mmu_index(env, false); + flags = cpu_mmu_index(env_cpu(env), false); #ifndef CONFIG_USER_ONLY if (cpu_supervisor_mode(env)) { flags |= TB_FLAG_SUPER; diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c index 0f8aa3abcd0..1205a599ef4 100644 --- a/target/sparc/fop_helper.c +++ b/target/sparc/fop_helper.c @@ -23,13 +23,32 @@ #include "exec/helper-proto.h" #include "fpu/softfloat.h" -#define QT0 (env->qt0) -#define QT1 (env->qt1) +static inline float128 f128_in(Int128 i) +{ + union { + Int128 i; + float128 f; + } u; + + u.i = i; + return u.f; +} + +static inline Int128 f128_ret(float128 f) +{ + union { + Int128 i; + float128 f; + } u; -static target_ulong do_check_ieee_exceptions(CPUSPARCState *env, uintptr_t ra) + u.f = f; + return u.i; +} + +static void check_ieee_exceptions(CPUSPARCState *env, uintptr_t ra) { target_ulong status = get_float_exception_flags(&env->fp_status); - target_ulong fsr = env->fsr; + uint32_t cexc = 0; if (unlikely(status)) { /* Keep exception flags clear for next time. 
*/ @@ -37,333 +56,384 @@ static target_ulong do_check_ieee_exceptions(CPUSPARCState *env, uintptr_t ra) /* Copy IEEE 754 flags into FSR */ if (status & float_flag_invalid) { - fsr |= FSR_NVC; + cexc |= FSR_NVC; } if (status & float_flag_overflow) { - fsr |= FSR_OFC; + cexc |= FSR_OFC; } if (status & float_flag_underflow) { - fsr |= FSR_UFC; + cexc |= FSR_UFC; } if (status & float_flag_divbyzero) { - fsr |= FSR_DZC; + cexc |= FSR_DZC; } if (status & float_flag_inexact) { - fsr |= FSR_NXC; + cexc |= FSR_NXC; } - if ((fsr & FSR_CEXC_MASK) & ((fsr & FSR_TEM_MASK) >> 23)) { - CPUState *cs = env_cpu(env); - - /* Unmasked exception, generate a trap. Note that while - the helper is marked as NO_WG, we can get away with - writing to cpu state along the exception path, since - TCG generated code will never see the write. */ - env->fsr = fsr | FSR_FTT_IEEE_EXCP; - cs->exception_index = TT_FP_EXCP; - cpu_loop_exit_restore(cs, ra); - } else { - /* Accumulate exceptions */ - fsr |= (fsr & FSR_CEXC_MASK) << 5; + if (cexc & (env->fsr >> FSR_TEM_SHIFT)) { + /* Unmasked exception, generate an IEEE trap. */ + env->fsr_cexc_ftt = cexc | FSR_FTT_IEEE_EXCP; + cpu_raise_exception_ra(env, TT_FP_EXCP, ra); } + + /* Accumulate exceptions */ + env->fsr |= cexc << FSR_AEXC_SHIFT; } - return fsr; + /* No trap, so FTT is cleared. */ + env->fsr_cexc_ftt = cexc; } -target_ulong helper_check_ieee_exceptions(CPUSPARCState *env) +float32 helper_fadds(CPUSPARCState *env, float32 src1, float32 src2) { - return do_check_ieee_exceptions(env, GETPC()); + float32 ret = float32_add(src1, src2, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -#define F_HELPER(name, p) void helper_f##name##p(CPUSPARCState *env) +float32 helper_fsubs(CPUSPARCState *env, float32 src1, float32 src2) +{ + float32 ret = float32_sub(src1, src2, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; +} -#define F_BINOP(name) \ - float32 helper_f ## name ## s (CPUSPARCState *env, float32 src1, \ - float32 src2) \ - { \ - return float32_ ## name (src1, src2, &env->fp_status); \ - } \ - float64 helper_f ## name ## d (CPUSPARCState * env, float64 src1,\ - float64 src2) \ - { \ - return float64_ ## name (src1, src2, &env->fp_status); \ - } \ - F_HELPER(name, q) \ - { \ - QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \ - } +float32 helper_fmuls(CPUSPARCState *env, float32 src1, float32 src2) +{ + float32 ret = float32_mul(src1, src2, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; +} -F_BINOP(add); -F_BINOP(sub); -F_BINOP(mul); -F_BINOP(div); -#undef F_BINOP +float32 helper_fdivs(CPUSPARCState *env, float32 src1, float32 src2) +{ + float32 ret = float32_div(src1, src2, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; +} -float64 helper_fsmuld(CPUSPARCState *env, float32 src1, float32 src2) +float64 helper_faddd(CPUSPARCState *env, float64 src1, float64 src2) { - return float64_mul(float32_to_float64(src1, &env->fp_status), - float32_to_float64(src2, &env->fp_status), - &env->fp_status); + float64 ret = float64_add(src1, src2, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -void helper_fdmulq(CPUSPARCState *env, float64 src1, float64 src2) +float64 helper_fsubd(CPUSPARCState *env, float64 src1, float64 src2) { - QT0 = float128_mul(float64_to_float128(src1, &env->fp_status), - float64_to_float128(src2, &env->fp_status), - &env->fp_status); + float64 ret = float64_sub(src1, src2, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return 
ret; } -float32 helper_fnegs(float32 src) +float64 helper_fmuld(CPUSPARCState *env, float64 src1, float64 src2) { - return float32_chs(src); + float64 ret = float64_mul(src1, src2, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -#ifdef TARGET_SPARC64 -float64 helper_fnegd(float64 src) +float64 helper_fdivd(CPUSPARCState *env, float64 src1, float64 src2) { - return float64_chs(src); + float64 ret = float64_div(src1, src2, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -F_HELPER(neg, q) +Int128 helper_faddq(CPUSPARCState *env, Int128 src1, Int128 src2) { - QT0 = float128_chs(QT1); + float128 ret = float128_add(f128_in(src1), f128_in(src2), &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return f128_ret(ret); +} + +Int128 helper_fsubq(CPUSPARCState *env, Int128 src1, Int128 src2) +{ + float128 ret = float128_sub(f128_in(src1), f128_in(src2), &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return f128_ret(ret); +} + +Int128 helper_fmulq(CPUSPARCState *env, Int128 src1, Int128 src2) +{ + float128 ret = float128_mul(f128_in(src1), f128_in(src2), &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return f128_ret(ret); +} + +Int128 helper_fdivq(CPUSPARCState *env, Int128 src1, Int128 src2) +{ + float128 ret = float128_div(f128_in(src1), f128_in(src2), &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return f128_ret(ret); +} + +float64 helper_fsmuld(CPUSPARCState *env, float32 src1, float32 src2) +{ + float64 ret = float64_mul(float32_to_float64(src1, &env->fp_status), + float32_to_float64(src2, &env->fp_status), + &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; +} + +Int128 helper_fdmulq(CPUSPARCState *env, float64 src1, float64 src2) +{ + float128 ret = float128_mul(float64_to_float128(src1, &env->fp_status), + float64_to_float128(src2, &env->fp_status), + &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return f128_ret(ret); } -#endif /* Integer to float conversion. 
*/ float32 helper_fitos(CPUSPARCState *env, int32_t src) { - return int32_to_float32(src, &env->fp_status); + float32 ret = int32_to_float32(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } float64 helper_fitod(CPUSPARCState *env, int32_t src) { - return int32_to_float64(src, &env->fp_status); + float64 ret = int32_to_float64(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -void helper_fitoq(CPUSPARCState *env, int32_t src) +Int128 helper_fitoq(CPUSPARCState *env, int32_t src) { - QT0 = int32_to_float128(src, &env->fp_status); + float128 ret = int32_to_float128(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return f128_ret(ret); } #ifdef TARGET_SPARC64 float32 helper_fxtos(CPUSPARCState *env, int64_t src) { - return int64_to_float32(src, &env->fp_status); + float32 ret = int64_to_float32(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } float64 helper_fxtod(CPUSPARCState *env, int64_t src) { - return int64_to_float64(src, &env->fp_status); + float64 ret = int64_to_float64(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -void helper_fxtoq(CPUSPARCState *env, int64_t src) +Int128 helper_fxtoq(CPUSPARCState *env, int64_t src) { - QT0 = int64_to_float128(src, &env->fp_status); + float128 ret = int64_to_float128(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return f128_ret(ret); } #endif -#undef F_HELPER /* floating point conversion */ float32 helper_fdtos(CPUSPARCState *env, float64 src) { - return float64_to_float32(src, &env->fp_status); + float32 ret = float64_to_float32(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } float64 helper_fstod(CPUSPARCState *env, float32 src) { - return float32_to_float64(src, &env->fp_status); + float64 ret = float32_to_float64(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -float32 helper_fqtos(CPUSPARCState *env) +float32 helper_fqtos(CPUSPARCState *env, Int128 src) { - return float128_to_float32(QT1, &env->fp_status); + float32 ret = float128_to_float32(f128_in(src), &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -void helper_fstoq(CPUSPARCState *env, float32 src) +Int128 helper_fstoq(CPUSPARCState *env, float32 src) { - QT0 = float32_to_float128(src, &env->fp_status); + float128 ret = float32_to_float128(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return f128_ret(ret); } -float64 helper_fqtod(CPUSPARCState *env) +float64 helper_fqtod(CPUSPARCState *env, Int128 src) { - return float128_to_float64(QT1, &env->fp_status); + float64 ret = float128_to_float64(f128_in(src), &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -void helper_fdtoq(CPUSPARCState *env, float64 src) +Int128 helper_fdtoq(CPUSPARCState *env, float64 src) { - QT0 = float64_to_float128(src, &env->fp_status); + float128 ret = float64_to_float128(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return f128_ret(ret); } /* Float to integer conversion. 
*/ int32_t helper_fstoi(CPUSPARCState *env, float32 src) { - return float32_to_int32_round_to_zero(src, &env->fp_status); + int32_t ret = float32_to_int32_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } int32_t helper_fdtoi(CPUSPARCState *env, float64 src) { - return float64_to_int32_round_to_zero(src, &env->fp_status); + int32_t ret = float64_to_int32_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -int32_t helper_fqtoi(CPUSPARCState *env) +int32_t helper_fqtoi(CPUSPARCState *env, Int128 src) { - return float128_to_int32_round_to_zero(QT1, &env->fp_status); + int32_t ret = float128_to_int32_round_to_zero(f128_in(src), + &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } #ifdef TARGET_SPARC64 int64_t helper_fstox(CPUSPARCState *env, float32 src) { - return float32_to_int64_round_to_zero(src, &env->fp_status); + int64_t ret = float32_to_int64_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } int64_t helper_fdtox(CPUSPARCState *env, float64 src) { - return float64_to_int64_round_to_zero(src, &env->fp_status); + int64_t ret = float64_to_int64_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -int64_t helper_fqtox(CPUSPARCState *env) +int64_t helper_fqtox(CPUSPARCState *env, Int128 src) { - return float128_to_int64_round_to_zero(QT1, &env->fp_status); + int64_t ret = float128_to_int64_round_to_zero(f128_in(src), + &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } #endif -float32 helper_fabss(float32 src) +float32 helper_fsqrts(CPUSPARCState *env, float32 src) { - return float32_abs(src); + float32 ret = float32_sqrt(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -#ifdef TARGET_SPARC64 -float64 helper_fabsd(float64 src) +float64 helper_fsqrtd(CPUSPARCState *env, float64 src) { - return float64_abs(src); + float64 ret = float64_sqrt(src, &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return ret; } -void helper_fabsq(CPUSPARCState *env) +Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src) { - QT0 = float128_abs(QT1); + float128 ret = float128_sqrt(f128_in(src), &env->fp_status); + check_ieee_exceptions(env, GETPC()); + return f128_ret(ret); } -#endif -float32 helper_fsqrts(CPUSPARCState *env, float32 src) +static uint32_t finish_fcmp(CPUSPARCState *env, FloatRelation r, uintptr_t ra) { - return float32_sqrt(src, &env->fp_status); + check_ieee_exceptions(env, ra); + + /* + * FCC values: + * 0 = + * 1 < + * 2 > + * 3 unordered + */ + switch (r) { + case float_relation_equal: + return 0; + case float_relation_less: + return 1; + case float_relation_greater: + return 2; + case float_relation_unordered: + env->fsr |= FSR_NVA; + return 3; + } + g_assert_not_reached(); } -float64 helper_fsqrtd(CPUSPARCState *env, float64 src) +uint32_t helper_fcmps(CPUSPARCState *env, float32 src1, float32 src2) { - return float64_sqrt(src, &env->fp_status); -} - -void helper_fsqrtq(CPUSPARCState *env) -{ - QT0 = float128_sqrt(QT1, &env->fp_status); -} - -#define GEN_FCMP(name, size, reg1, reg2, FS, E) \ - target_ulong glue(helper_, name) (CPUSPARCState *env) \ - { \ - FloatRelation ret; \ - target_ulong fsr; \ - if (E) { \ - ret = glue(size, _compare)(reg1, reg2, &env->fp_status); \ - } else { \ - ret = glue(size, _compare_quiet)(reg1, reg2, \ - &env->fp_status); \ - } \ - fsr = do_check_ieee_exceptions(env, GETPC()); \ - switch (ret) { \ - case 
float_relation_unordered: \ - fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \ - fsr |= FSR_NVA; \ - break; \ - case float_relation_less: \ - fsr &= ~(FSR_FCC1) << FS; \ - fsr |= FSR_FCC0 << FS; \ - break; \ - case float_relation_greater: \ - fsr &= ~(FSR_FCC0) << FS; \ - fsr |= FSR_FCC1 << FS; \ - break; \ - default: \ - fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \ - break; \ - } \ - return fsr; \ - } -#define GEN_FCMP_T(name, size, FS, E) \ - target_ulong glue(helper_, name)(CPUSPARCState *env, size src1, size src2)\ - { \ - FloatRelation ret; \ - target_ulong fsr; \ - if (E) { \ - ret = glue(size, _compare)(src1, src2, &env->fp_status); \ - } else { \ - ret = glue(size, _compare_quiet)(src1, src2, \ - &env->fp_status); \ - } \ - fsr = do_check_ieee_exceptions(env, GETPC()); \ - switch (ret) { \ - case float_relation_unordered: \ - fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \ - break; \ - case float_relation_less: \ - fsr &= ~(FSR_FCC1 << FS); \ - fsr |= FSR_FCC0 << FS; \ - break; \ - case float_relation_greater: \ - fsr &= ~(FSR_FCC0 << FS); \ - fsr |= FSR_FCC1 << FS; \ - break; \ - default: \ - fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \ - break; \ - } \ - return fsr; \ - } + FloatRelation r = float32_compare_quiet(src1, src2, &env->fp_status); + return finish_fcmp(env, r, GETPC()); +} -GEN_FCMP_T(fcmps, float32, 0, 0); -GEN_FCMP_T(fcmpd, float64, 0, 0); +uint32_t helper_fcmpes(CPUSPARCState *env, float32 src1, float32 src2) +{ + FloatRelation r = float32_compare(src1, src2, &env->fp_status); + return finish_fcmp(env, r, GETPC()); +} -GEN_FCMP_T(fcmpes, float32, 0, 1); -GEN_FCMP_T(fcmped, float64, 0, 1); +uint32_t helper_fcmpd(CPUSPARCState *env, float64 src1, float64 src2) +{ + FloatRelation r = float64_compare_quiet(src1, src2, &env->fp_status); + return finish_fcmp(env, r, GETPC()); +} -GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0); -GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1); +uint32_t helper_fcmped(CPUSPARCState *env, float64 src1, float64 src2) +{ + FloatRelation r = float64_compare(src1, src2, &env->fp_status); + return finish_fcmp(env, r, GETPC()); +} -#ifdef TARGET_SPARC64 -GEN_FCMP_T(fcmps_fcc1, float32, 22, 0); -GEN_FCMP_T(fcmpd_fcc1, float64, 22, 0); -GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0); +uint32_t helper_fcmpq(CPUSPARCState *env, Int128 src1, Int128 src2) +{ + FloatRelation r = float128_compare_quiet(f128_in(src1), f128_in(src2), + &env->fp_status); + return finish_fcmp(env, r, GETPC()); +} -GEN_FCMP_T(fcmps_fcc2, float32, 24, 0); -GEN_FCMP_T(fcmpd_fcc2, float64, 24, 0); -GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0); +uint32_t helper_fcmpeq(CPUSPARCState *env, Int128 src1, Int128 src2) +{ + FloatRelation r = float128_compare(f128_in(src1), f128_in(src2), + &env->fp_status); + return finish_fcmp(env, r, GETPC()); +} -GEN_FCMP_T(fcmps_fcc3, float32, 26, 0); -GEN_FCMP_T(fcmpd_fcc3, float64, 26, 0); -GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0); +target_ulong cpu_get_fsr(CPUSPARCState *env) +{ + target_ulong fsr = env->fsr | env->fsr_cexc_ftt; -GEN_FCMP_T(fcmpes_fcc1, float32, 22, 1); -GEN_FCMP_T(fcmped_fcc1, float64, 22, 1); -GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1); + fsr |= env->fcc[0] << FSR_FCC0_SHIFT; +#ifdef TARGET_SPARC64 + fsr |= (uint64_t)env->fcc[1] << FSR_FCC1_SHIFT; + fsr |= (uint64_t)env->fcc[2] << FSR_FCC2_SHIFT; + fsr |= (uint64_t)env->fcc[3] << FSR_FCC3_SHIFT; +#endif -GEN_FCMP_T(fcmpes_fcc2, float32, 24, 1); -GEN_FCMP_T(fcmped_fcc2, float64, 24, 1); -GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1); + /* VER is kept completely separate until re-assembly. 
*/ + fsr |= env->def.fpu_version; -GEN_FCMP_T(fcmpes_fcc3, float32, 26, 1); -GEN_FCMP_T(fcmped_fcc3, float64, 26, 1); -GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1); -#endif -#undef GEN_FCMP_T -#undef GEN_FCMP + return fsr; +} + +target_ulong helper_get_fsr(CPUSPARCState *env) +{ + return cpu_get_fsr(env); +} -static void set_fsr(CPUSPARCState *env, target_ulong fsr) +static void set_fsr_nonsplit(CPUSPARCState *env, target_ulong fsr) { int rnd_mode; + env->fsr = fsr & (FSR_RD_MASK | FSR_TEM_MASK | FSR_AEXC_MASK); + switch (fsr & FSR_RD_MASK) { case FSR_RD_NEAREST: rnd_mode = float_round_nearest_even; @@ -382,7 +452,23 @@ static void set_fsr(CPUSPARCState *env, target_ulong fsr) set_float_rounding_mode(rnd_mode, &env->fp_status); } -void helper_set_fsr(CPUSPARCState *env, target_ulong fsr) +void cpu_put_fsr(CPUSPARCState *env, target_ulong fsr) +{ + env->fsr_cexc_ftt = fsr & (FSR_CEXC_MASK | FSR_FTT_MASK); + + env->fcc[0] = extract32(fsr, FSR_FCC0_SHIFT, 2); +#ifdef TARGET_SPARC64 + env->fcc[1] = extract64(fsr, FSR_FCC1_SHIFT, 2); + env->fcc[2] = extract64(fsr, FSR_FCC2_SHIFT, 2); + env->fcc[3] = extract64(fsr, FSR_FCC3_SHIFT, 2); +#endif + + set_fsr_nonsplit(env, fsr); +} + +void helper_set_fsr_nofcc_noftt(CPUSPARCState *env, uint32_t fsr) { - set_fsr(env, fsr); + env->fsr_cexc_ftt &= FSR_FTT_MASK; + env->fsr_cexc_ftt |= fsr & FSR_CEXC_MASK; + set_fsr_nonsplit(env, fsr); } diff --git a/target/sparc/gdbstub.c b/target/sparc/gdbstub.c index a1c8fdc4d55..07ea81ab5f1 100644 --- a/target/sparc/gdbstub.c +++ b/target/sparc/gdbstub.c @@ -29,8 +29,7 @@ int sparc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); if (n < 8) { /* g0..g7 */ @@ -64,7 +63,7 @@ int sparc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) case 69: return gdb_get_rega(mem_buf, env->npc); case 70: - return gdb_get_rega(mem_buf, env->fsr); + return gdb_get_rega(mem_buf, cpu_get_fsr(env)); case 71: return gdb_get_rega(mem_buf, 0); /* csr */ default: @@ -94,7 +93,7 @@ int sparc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) ((env->pstate & 0xfff) << 8) | cpu_get_cwp64(env)); case 83: - return gdb_get_regl(mem_buf, env->fsr); + return gdb_get_regl(mem_buf, cpu_get_fsr(env)); case 84: return gdb_get_regl(mem_buf, env->fprs); case 85: @@ -156,7 +155,7 @@ int sparc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) env->npc = tmp; break; case 70: - env->fsr = tmp; + cpu_put_fsr(env, tmp); break; default: return 0; @@ -191,7 +190,7 @@ int sparc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) cpu_put_cwp64(env, tmp & 0xff); break; case 83: - env->fsr = tmp; + cpu_put_fsr(env, tmp); break; case 84: env->fprs = tmp; diff --git a/target/sparc/helper.c b/target/sparc/helper.c index bd10b60e4bf..2247e243b5e 100644 --- a/target/sparc/helper.c +++ b/target/sparc/helper.c @@ -212,4 +212,20 @@ void helper_power_down(CPUSPARCState *env) env->npc = env->pc + 4; cpu_loop_exit(cs); } + +target_ulong helper_rdasr17(CPUSPARCState *env) +{ + CPUState *cs = env_cpu(env); + target_ulong val; + + /* + * TODO: There are many more fields to be filled, + * some of which are writable. 
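(Illustrative aside, not part of the patch.) cpu_get_fsr()/cpu_put_fsr() above re-assemble and split %fsr from the new per-field storage; the condition codes live in fcc[0..3] and are re-inserted at FSR_FCC0_SHIFT (bit 10) and, on sparc64, at bits 32, 34 and 36. Below is a standalone round-trip sketch of just that packing; pack_fcc(), unpack_fcc() and fcc_shift[] are names invented here.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Round-trip of the %fsr condition-code fields only: fcc0 sits at bit 10,
 * fcc1..fcc3 (sparc64 only) at bits 32, 34 and 36, two bits each, matching
 * the FSR_FCC*_SHIFT values introduced by the patch.
 */
static const int fcc_shift[4] = { 10, 32, 34, 36 };

static uint64_t pack_fcc(const uint32_t fcc[4])
{
    uint64_t fsr = 0;

    for (int i = 0; i < 4; i++) {
        fsr |= (uint64_t)(fcc[i] & 3) << fcc_shift[i];
    }
    return fsr;
}

static void unpack_fcc(uint64_t fsr, uint32_t fcc[4])
{
    for (int i = 0; i < 4; i++) {
        fcc[i] = (fsr >> fcc_shift[i]) & 3;     /* each %fccN is 2 bits wide */
    }
}

int main(void)
{
    uint32_t in[4] = { 1, 3, 0, 2 }, out[4];

    unpack_fcc(pack_fcc(in), out);
    for (int i = 0; i < 4; i++) {
        assert(in[i] == out[i]);
    }
    printf("fcc round-trip ok, fsr = %#llx\n",
           (unsigned long long)pack_fcc(in));
    return 0;
}
```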
+ */ + val = env->def.nwindows - 1; /* [4:0] NWIN */ + val |= 1 << 8; /* [8] V8 */ + val |= (cs->cpu_index) << 28; /* [31:28] INDEX */ + + return val; +} #endif diff --git a/target/sparc/helper.h b/target/sparc/helper.h index 55eff662834..b8087d0d2b8 100644 --- a/target/sparc/helper.h +++ b/target/sparc/helper.h @@ -2,6 +2,7 @@ DEF_HELPER_1(rett, void, env) DEF_HELPER_2(wrpsr, void, env, tl) DEF_HELPER_1(rdpsr, tl, env) +DEF_HELPER_1(rdasr17, tl, env) DEF_HELPER_1(power_down, void, env) #else DEF_HELPER_FLAGS_2(wrpil, TCG_CALL_NO_RWG, void, env, tl) @@ -31,91 +32,67 @@ DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_WG, i64, env, tl, tl) DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_WG, i64, env, tl, tl) DEF_HELPER_3(taddcctv, tl, env, tl, tl) DEF_HELPER_3(tsubcctv, tl, env, tl, tl) +#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) +DEF_HELPER_FLAGS_3(ld_code, TCG_CALL_NO_WG, i64, env, tl, i32) +#endif #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) DEF_HELPER_FLAGS_4(ld_asi, TCG_CALL_NO_WG, i64, env, tl, int, i32) DEF_HELPER_FLAGS_5(st_asi, TCG_CALL_NO_WG, void, env, tl, i64, int, i32) #endif -DEF_HELPER_FLAGS_1(check_ieee_exceptions, TCG_CALL_NO_WG, tl, env) -DEF_HELPER_FLAGS_2(set_fsr, TCG_CALL_NO_RWG, void, env, tl) -DEF_HELPER_FLAGS_1(fabss, TCG_CALL_NO_RWG_SE, f32, f32) -DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_RWG, f32, env, f32) -DEF_HELPER_FLAGS_2(fsqrtd, TCG_CALL_NO_RWG, f64, env, f64) -DEF_HELPER_FLAGS_3(fcmps, TCG_CALL_NO_WG, tl, env, f32, f32) -DEF_HELPER_FLAGS_3(fcmpd, TCG_CALL_NO_WG, tl, env, f64, f64) -DEF_HELPER_FLAGS_3(fcmpes, TCG_CALL_NO_WG, tl, env, f32, f32) -DEF_HELPER_FLAGS_3(fcmped, TCG_CALL_NO_WG, tl, env, f64, f64) -DEF_HELPER_FLAGS_1(fsqrtq, TCG_CALL_NO_RWG, void, env) -DEF_HELPER_FLAGS_1(fcmpq, TCG_CALL_NO_WG, tl, env) -DEF_HELPER_FLAGS_1(fcmpeq, TCG_CALL_NO_WG, tl, env) -#ifdef TARGET_SPARC64 -DEF_HELPER_FLAGS_1(fabsd, TCG_CALL_NO_RWG_SE, f64, f64) -DEF_HELPER_FLAGS_3(fcmps_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32) -DEF_HELPER_FLAGS_3(fcmps_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32) -DEF_HELPER_FLAGS_3(fcmps_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32) -DEF_HELPER_FLAGS_3(fcmpd_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64) -DEF_HELPER_FLAGS_3(fcmpd_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64) -DEF_HELPER_FLAGS_3(fcmpd_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64) -DEF_HELPER_FLAGS_3(fcmpes_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32) -DEF_HELPER_FLAGS_3(fcmpes_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32) -DEF_HELPER_FLAGS_3(fcmpes_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32) -DEF_HELPER_FLAGS_3(fcmped_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64) -DEF_HELPER_FLAGS_3(fcmped_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64) -DEF_HELPER_FLAGS_3(fcmped_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64) -DEF_HELPER_FLAGS_1(fabsq, TCG_CALL_NO_RWG, void, env) -DEF_HELPER_FLAGS_1(fcmpq_fcc1, TCG_CALL_NO_WG, tl, env) -DEF_HELPER_FLAGS_1(fcmpq_fcc2, TCG_CALL_NO_WG, tl, env) -DEF_HELPER_FLAGS_1(fcmpq_fcc3, TCG_CALL_NO_WG, tl, env) -DEF_HELPER_FLAGS_1(fcmpeq_fcc1, TCG_CALL_NO_WG, tl, env) -DEF_HELPER_FLAGS_1(fcmpeq_fcc2, TCG_CALL_NO_WG, tl, env) -DEF_HELPER_FLAGS_1(fcmpeq_fcc3, TCG_CALL_NO_WG, tl, env) -#endif +DEF_HELPER_FLAGS_1(get_fsr, TCG_CALL_NO_WG_SE, tl, env) +DEF_HELPER_FLAGS_2(set_fsr_nofcc_noftt, TCG_CALL_NO_RWG, void, env, i32) +DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_WG, f32, env, f32) +DEF_HELPER_FLAGS_2(fsqrtd, TCG_CALL_NO_WG, f64, env, f64) +DEF_HELPER_FLAGS_2(fsqrtq, TCG_CALL_NO_WG, i128, env, i128) +DEF_HELPER_FLAGS_3(fcmps, TCG_CALL_NO_WG, i32, env, f32, f32) +DEF_HELPER_FLAGS_3(fcmpes, TCG_CALL_NO_WG, i32, env, 
f32, f32) +DEF_HELPER_FLAGS_3(fcmpd, TCG_CALL_NO_WG, i32, env, f64, f64) +DEF_HELPER_FLAGS_3(fcmped, TCG_CALL_NO_WG, i32, env, f64, f64) +DEF_HELPER_FLAGS_3(fcmpq, TCG_CALL_NO_WG, i32, env, i128, i128) +DEF_HELPER_FLAGS_3(fcmpeq, TCG_CALL_NO_WG, i32, env, i128, i128) DEF_HELPER_2(raise_exception, noreturn, env, int) -#define F_HELPER_0_1(name) \ - DEF_HELPER_FLAGS_1(f ## name, TCG_CALL_NO_RWG, void, env) -DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_RWG, f64, env, f64, f64) -DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_RWG, f64, env, f64, f64) -DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_RWG, f64, env, f64, f64) -DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_RWG, f64, env, f64, f64) -F_HELPER_0_1(addq) -F_HELPER_0_1(subq) -F_HELPER_0_1(mulq) -F_HELPER_0_1(divq) +DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64) +DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64) +DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64) +DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64) + +DEF_HELPER_FLAGS_3(faddq, TCG_CALL_NO_WG, i128, env, i128, i128) +DEF_HELPER_FLAGS_3(fsubq, TCG_CALL_NO_WG, i128, env, i128, i128) +DEF_HELPER_FLAGS_3(fmulq, TCG_CALL_NO_WG, i128, env, i128, i128) +DEF_HELPER_FLAGS_3(fdivq, TCG_CALL_NO_WG, i128, env, i128, i128) -DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_RWG, f32, env, f32, f32) -DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_RWG, f32, env, f32, f32) -DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_RWG, f32, env, f32, f32) -DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_RWG, f32, env, f32, f32) +DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32) +DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32) +DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32) +DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32) -DEF_HELPER_FLAGS_3(fsmuld, TCG_CALL_NO_RWG, f64, env, f32, f32) -DEF_HELPER_FLAGS_3(fdmulq, TCG_CALL_NO_RWG, void, env, f64, f64) +DEF_HELPER_FLAGS_3(fsmuld, TCG_CALL_NO_WG, f64, env, f32, f32) +DEF_HELPER_FLAGS_3(fdmulq, TCG_CALL_NO_WG, i128, env, f64, f64) -DEF_HELPER_FLAGS_1(fnegs, TCG_CALL_NO_RWG_SE, f32, f32) -DEF_HELPER_FLAGS_2(fitod, TCG_CALL_NO_RWG_SE, f64, env, s32) -DEF_HELPER_FLAGS_2(fitoq, TCG_CALL_NO_RWG, void, env, s32) +DEF_HELPER_FLAGS_2(fitod, TCG_CALL_NO_WG, f64, env, s32) +DEF_HELPER_FLAGS_2(fitoq, TCG_CALL_NO_WG, i128, env, s32) -DEF_HELPER_FLAGS_2(fitos, TCG_CALL_NO_RWG, f32, env, s32) +DEF_HELPER_FLAGS_2(fitos, TCG_CALL_NO_WG, f32, env, s32) #ifdef TARGET_SPARC64 -DEF_HELPER_FLAGS_1(fnegd, TCG_CALL_NO_RWG_SE, f64, f64) -DEF_HELPER_FLAGS_1(fnegq, TCG_CALL_NO_RWG, void, env) -DEF_HELPER_FLAGS_2(fxtos, TCG_CALL_NO_RWG, f32, env, s64) -DEF_HELPER_FLAGS_2(fxtod, TCG_CALL_NO_RWG, f64, env, s64) -DEF_HELPER_FLAGS_2(fxtoq, TCG_CALL_NO_RWG, void, env, s64) +DEF_HELPER_FLAGS_2(fxtos, TCG_CALL_NO_WG, f32, env, s64) +DEF_HELPER_FLAGS_2(fxtod, TCG_CALL_NO_WG, f64, env, s64) +DEF_HELPER_FLAGS_2(fxtoq, TCG_CALL_NO_WG, i128, env, s64) #endif -DEF_HELPER_FLAGS_2(fdtos, TCG_CALL_NO_RWG, f32, env, f64) -DEF_HELPER_FLAGS_2(fstod, TCG_CALL_NO_RWG, f64, env, f32) -DEF_HELPER_FLAGS_1(fqtos, TCG_CALL_NO_RWG, f32, env) -DEF_HELPER_FLAGS_2(fstoq, TCG_CALL_NO_RWG, void, env, f32) -DEF_HELPER_FLAGS_1(fqtod, TCG_CALL_NO_RWG, f64, env) -DEF_HELPER_FLAGS_2(fdtoq, TCG_CALL_NO_RWG, void, env, f64) -DEF_HELPER_FLAGS_2(fstoi, TCG_CALL_NO_RWG, s32, env, f32) -DEF_HELPER_FLAGS_2(fdtoi, TCG_CALL_NO_RWG, s32, env, f64) -DEF_HELPER_FLAGS_1(fqtoi, TCG_CALL_NO_RWG, s32, env) +DEF_HELPER_FLAGS_2(fdtos, TCG_CALL_NO_WG, f32, env, f64) +DEF_HELPER_FLAGS_2(fstod, 
TCG_CALL_NO_WG, f64, env, f32) +DEF_HELPER_FLAGS_2(fqtos, TCG_CALL_NO_WG, f32, env, i128) +DEF_HELPER_FLAGS_2(fstoq, TCG_CALL_NO_WG, i128, env, f32) +DEF_HELPER_FLAGS_2(fqtod, TCG_CALL_NO_WG, f64, env, i128) +DEF_HELPER_FLAGS_2(fdtoq, TCG_CALL_NO_WG, i128, env, f64) +DEF_HELPER_FLAGS_2(fstoi, TCG_CALL_NO_WG, s32, env, f32) +DEF_HELPER_FLAGS_2(fdtoi, TCG_CALL_NO_WG, s32, env, f64) +DEF_HELPER_FLAGS_2(fqtoi, TCG_CALL_NO_WG, s32, env, i128) #ifdef TARGET_SPARC64 -DEF_HELPER_FLAGS_2(fstox, TCG_CALL_NO_RWG, s64, env, f32) -DEF_HELPER_FLAGS_2(fdtox, TCG_CALL_NO_RWG, s64, env, f64) -DEF_HELPER_FLAGS_1(fqtox, TCG_CALL_NO_RWG, s64, env) +DEF_HELPER_FLAGS_2(fstox, TCG_CALL_NO_WG, s64, env, f32) +DEF_HELPER_FLAGS_2(fdtox, TCG_CALL_NO_WG, s64, env, f64) +DEF_HELPER_FLAGS_2(fqtox, TCG_CALL_NO_WG, s64, env, i128) DEF_HELPER_FLAGS_2(fpmerge, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(fmul8x16, TCG_CALL_NO_RWG_SE, i64, i64, i64) @@ -141,6 +118,5 @@ VIS_CMPHELPER(cmpeq) VIS_CMPHELPER(cmple) VIS_CMPHELPER(cmpne) #endif -#undef F_HELPER_0_1 #undef VIS_HELPER #undef VIS_CMPHELPER diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c index 15636135822..6b7d65b0314 100644 --- a/target/sparc/int32_helper.c +++ b/target/sparc/int32_helper.c @@ -70,7 +70,7 @@ void cpu_check_irqs(CPUSPARCState *env) CPUState *cs; /* We should be holding the BQL before we mess with IRQs */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (env->pil_in && (env->interrupt_index == 0 || (env->interrupt_index & ~15) == TT_EXTINT)) { @@ -99,8 +99,7 @@ void cpu_check_irqs(CPUSPARCState *env) void sparc_cpu_do_interrupt(CPUState *cs) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); int cwp, intno = cs->exception_index; if (qemu_loglevel_mask(CPU_LOG_INT)) { @@ -160,7 +159,7 @@ void sparc_cpu_do_interrupt(CPUState *cs) #if !defined(CONFIG_USER_ONLY) /* IRQ acknowledgment */ if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) { - env->qemu_irq_ack(env, env->irq_manager, intno); + env->qemu_irq_ack(env, intno); } #endif } diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c index 1b4155f5f37..bd14c7a0db9 100644 --- a/target/sparc/int64_helper.c +++ b/target/sparc/int64_helper.c @@ -69,7 +69,7 @@ void cpu_check_irqs(CPUSPARCState *env) (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER)); /* We should be holding the BQL before we mess with IRQs */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */ if (env->ivec_status & 0x20) { @@ -130,8 +130,7 @@ void cpu_check_irqs(CPUSPARCState *env) void sparc_cpu_do_interrupt(CPUState *cs) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); int intno = cs->exception_index; trap_state *tsptr; @@ -267,9 +266,9 @@ static bool do_modify_softint(CPUSPARCState *env, uint32_t value) env->softint = value; #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif return true; diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c index 09066d54878..2846a86cc4e 100644 --- a/target/sparc/ldst_helper.c +++ b/target/sparc/ldst_helper.c @@ -66,9 +66,6 @@ #endif #endif -#define QT0 (env->qt0) -#define QT1 (env->qt1) - #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) /* Calculates TSB pointer value for fault 
page size * UltraSPARC IIi has fixed sizes (8k or 64k) for the page pointers @@ -421,8 +418,7 @@ static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr, bool is_write, bool is_exec, int is_asi, unsigned size, uintptr_t retaddr) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); int fault_type; #ifdef DEBUG_UNASSIGNED @@ -483,8 +479,7 @@ static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr, bool is_write, bool is_exec, int is_asi, unsigned size, uintptr_t retaddr) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); #ifdef DEBUG_UNASSIGNED printf("Unassigned mem access to " HWADDR_FMT_plx " from " TARGET_FMT_lx @@ -590,7 +585,6 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, #if defined(DEBUG_MXCC) || defined(DEBUG_ASI) uint32_t last_addr = addr; #endif - MemOpIdx oi; do_check_align(env, addr, size - 1, GETPC()); switch (asi) { @@ -689,24 +683,6 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */ case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */ break; - case ASI_KERNELTXT: /* Supervisor code access */ - oi = make_memop_idx(memop, cpu_mmu_index(env, true)); - switch (size) { - case 1: - ret = cpu_ldb_code_mmu(env, addr, oi, GETPC()); - break; - case 2: - ret = cpu_ldw_code_mmu(env, addr, oi, GETPC()); - break; - default: - case 4: - ret = cpu_ldl_code_mmu(env, addr, oi, GETPC()); - break; - case 8: - ret = cpu_ldq_code_mmu(env, addr, oi, GETPC()); - break; - } - break; case ASI_M_TXTC_TAG: /* SparcStation 5 I-cache tag */ case ASI_M_TXTC_DATA: /* SparcStation 5 I-cache data */ case ASI_M_DATAC_TAG: /* SparcStation 5 D-cache tag */ @@ -784,7 +760,6 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, case 0x4c: /* SuperSPARC MMU Breakpoint Action */ ret = env->mmubpaction; break; - case ASI_USERTXT: /* User code access, XXX */ default: sparc_raise_mmu_fault(cs, addr, false, false, asi, size, GETPC()); ret = 0; @@ -792,6 +767,8 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, case ASI_USERDATA: /* User data access */ case ASI_KERNELDATA: /* Supervisor data access */ + case ASI_USERTXT: /* User code access */ + case ASI_KERNELTXT: /* Supervisor code access */ case ASI_P: /* Implicit primary context data access (v9 only?) 
*/ case ASI_M_BYPASS: /* MMU passthrough */ case ASI_LEON_BYPASS: /* LEON MMU passthrough */ @@ -1166,6 +1143,49 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, #endif } +uint64_t helper_ld_code(CPUSPARCState *env, target_ulong addr, uint32_t oi) +{ + MemOp mop = get_memop(oi); + uintptr_t ra = GETPC(); + uint64_t ret; + + switch (mop & MO_SIZE) { + case MO_8: + ret = cpu_ldb_code_mmu(env, addr, oi, ra); + if (mop & MO_SIGN) { + ret = (int8_t)ret; + } + break; + case MO_16: + ret = cpu_ldw_code_mmu(env, addr, oi, ra); + if ((mop & MO_BSWAP) != MO_TE) { + ret = bswap16(ret); + } + if (mop & MO_SIGN) { + ret = (int16_t)ret; + } + break; + case MO_32: + ret = cpu_ldl_code_mmu(env, addr, oi, ra); + if ((mop & MO_BSWAP) != MO_TE) { + ret = bswap32(ret); + } + if (mop & MO_SIGN) { + ret = (int32_t)ret; + } + break; + case MO_64: + ret = cpu_ldq_code_mmu(env, addr, oi, ra); + if ((mop & MO_BSWAP) != MO_TE) { + ret = bswap64(ret); + } + break; + default: + g_assert_not_reached(); + } + return ret; +} + #endif /* CONFIG_USER_ONLY */ #else /* TARGET_SPARC64 */ diff --git a/target/sparc/machine.c b/target/sparc/machine.c index 44dfc07014f..48e0cf22f30 100644 --- a/target/sparc/machine.c +++ b/target/sparc/machine.c @@ -10,7 +10,7 @@ static const VMStateDescription vmstate_cpu_timer = { .name = "cpu_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(frequency, CPUTimer), VMSTATE_UINT32(disabled, CPUTimer), VMSTATE_UINT64(disabled_mask, CPUTimer), @@ -29,7 +29,7 @@ static const VMStateDescription vmstate_trap_state = { .name = "trap_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tpc, trap_state), VMSTATE_UINT64(tnpc, trap_state), VMSTATE_UINT64(tstate, trap_state), @@ -42,7 +42,7 @@ static const VMStateDescription vmstate_tlb_entry = { .name = "tlb_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tag, SparcTLBEntry), VMSTATE_UINT64(tte, SparcTLBEntry), VMSTATE_END_OF_LIST() @@ -83,6 +83,32 @@ static const VMStateInfo vmstate_psr = { .put = put_psr, }; +static int get_fsr(QEMUFile *f, void *opaque, size_t size, + const VMStateField *field) +{ + SPARCCPU *cpu = opaque; + target_ulong val = qemu_get_betl(f); + + cpu_put_fsr(&cpu->env, val); + return 0; +} + +static int put_fsr(QEMUFile *f, void *opaque, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + SPARCCPU *cpu = opaque; + target_ulong val = cpu_get_fsr(&cpu->env); + + qemu_put_betl(f, val); + return 0; +} + +static const VMStateInfo vmstate_fsr = { + .name = "fsr", + .get = get_fsr, + .put = put_fsr, +}; + #ifdef TARGET_SPARC64 static int get_xcc(QEMUFile *f, void *opaque, size_t size, const VMStateField *field) @@ -147,7 +173,7 @@ const VMStateDescription vmstate_sparc_cpu = { .version_id = SPARC_VMSTATE_VER, .minimum_version_id = SPARC_VMSTATE_VER, .pre_save = cpu_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gregs, SPARCCPU, 8), VMSTATE_UINT32(env.nwindows, SPARCCPU), VMSTATE_VARRAY_MULTIPLY(env.regbase, SPARCCPU, env.nwindows, 16, @@ -157,7 +183,6 @@ const VMStateDescription vmstate_sparc_cpu = { VMSTATE_UINTTL(env.npc, SPARCCPU), VMSTATE_UINTTL(env.y, SPARCCPU), { - .name = "psr", .version_id = 0, .size = sizeof(uint32_t), @@ -165,7 +190,14 @@ const VMStateDescription vmstate_sparc_cpu = { .flags = 
VMS_SINGLE, .offset = 0, }, - VMSTATE_UINTTL(env.fsr, SPARCCPU), + { + .name = "fsr", + .version_id = 0, + .size = sizeof(target_ulong), + .info = &vmstate_fsr, + .flags = VMS_SINGLE, + .offset = 0, + }, VMSTATE_UINTTL(env.tbr, SPARCCPU), VMSTATE_INT32(env.interrupt_index, SPARCCPU), VMSTATE_UINT32(env.pil_in, SPARCCPU), diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c index 453498c6704..ad1591d9fdc 100644 --- a/target/sparc/mmu_helper.c +++ b/target/sparc/mmu_helper.c @@ -206,8 +206,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); CPUTLBEntryFull full = {}; target_ulong vaddr; int error_code = 0, access_index; @@ -391,8 +390,7 @@ void dump_mmu(CPUSPARCState *env) int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address, uint8_t *buf, int len, bool is_write) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); target_ulong addr = address; int i; int len1; @@ -580,7 +578,7 @@ static int get_physical_address_data(CPUSPARCState *env, CPUTLBEntryFull *full, int do_fault = 0; if (TTE_IS_IE(env->dtlb[i].tte)) { - full->attrs.byte_swap = true; + full->tlb_fill_flags |= TLB_BSWAP; } /* access ok? */ @@ -759,8 +757,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); CPUTLBEntryFull full = {}; int error_code = 0, access_index; @@ -898,10 +895,9 @@ hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); hwaddr phys_addr; - int mmu_idx = cpu_mmu_index(env, false); + int mmu_idx = cpu_mmu_index(cs, false); if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { @@ -916,8 +912,7 @@ G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, int mmu_idx, uintptr_t retaddr) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); #ifdef TARGET_SPARC64 env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type); diff --git a/target/sparc/translate.c b/target/sparc/translate.c index 93872995597..571b3e3f038 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -37,15 +37,14 @@ #ifdef TARGET_SPARC64 # define gen_helper_rdpsr(D, E) qemu_build_not_reached() +# define gen_helper_rdasr17(D, E) qemu_build_not_reached() # define gen_helper_rett(E) qemu_build_not_reached() # define gen_helper_power_down(E) qemu_build_not_reached() # define gen_helper_wrpsr(E, S) qemu_build_not_reached() #else # define gen_helper_clear_softint(E, S) qemu_build_not_reached() # define gen_helper_done(E) qemu_build_not_reached() -# define gen_helper_fabsd(D, S) qemu_build_not_reached() # define gen_helper_flushw(E) qemu_build_not_reached() -# define gen_helper_fnegd(D, S) qemu_build_not_reached() # define gen_helper_rdccr(D, E) qemu_build_not_reached() # define gen_helper_rdcwp(D, E) qemu_build_not_reached() # define gen_helper_restored(E) qemu_build_not_reached() @@ -61,7 +60,6 @@ # define gen_helper_write_softint(E, S) qemu_build_not_reached() # 
define gen_helper_wrpil(E, S) qemu_build_not_reached() # define gen_helper_wrpstate(E, S) qemu_build_not_reached() -# define gen_helper_fabsq ({ qemu_build_not_reached(); NULL; }) # define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; }) # define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; }) # define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; }) @@ -79,7 +77,6 @@ # define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; }) # define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; }) # define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; }) -# define gen_helper_fnegq ({ qemu_build_not_reached(); NULL; }) # define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; }) # define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; }) # define gen_helper_fstox ({ qemu_build_not_reached(); NULL; }) @@ -87,8 +84,6 @@ # define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; }) # define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; }) # define gen_helper_pdist ({ qemu_build_not_reached(); NULL; }) -# define FSR_LDXFSR_MASK 0 -# define FSR_LDXFSR_OLDMASK 0 # define MAXTL_MASK 0 #endif @@ -103,7 +98,7 @@ /* global register indexes */ static TCGv_ptr cpu_regwptr; -static TCGv cpu_fsr, cpu_pc, cpu_npc; +static TCGv cpu_pc, cpu_npc; static TCGv cpu_regs[32]; static TCGv cpu_y; static TCGv cpu_tbr; @@ -134,6 +129,7 @@ static TCGv cpu_gsr; /* Floating point registers */ static TCGv_i64 cpu_fpr[TARGET_DPREGS]; +static TCGv_i32 cpu_fcc[TARGET_FCCREGS]; #define env_field_offsetof(X) offsetof(CPUSPARCState, X) #ifdef TARGET_SPARC64 @@ -246,11 +242,6 @@ static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v) gen_update_fprs_dirty(dc, dst); } -static TCGv_i32 gen_dest_fpr_F(DisasContext *dc) -{ - return tcg_temp_new_i32(); -} - static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src) { src = DFPREG(src); @@ -269,28 +260,20 @@ static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst) return cpu_fpr[DFPREG(dst) / 2]; } -static void gen_op_load_fpr_QT0(unsigned int src) +static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src) { - tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, ll.upper)); - tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, ll.lower)); -} + TCGv_i128 ret = tcg_temp_new_i128(); -static void gen_op_load_fpr_QT1(unsigned int src) -{ - tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) + - offsetof(CPU_QuadU, ll.upper)); - tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) + - offsetof(CPU_QuadU, ll.lower)); + src = QFPREG(src); + tcg_gen_concat_i64_i128(ret, cpu_fpr[src / 2 + 1], cpu_fpr[src / 2]); + return ret; } -static void gen_op_store_QT0_fpr(unsigned int dst) +static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v) { - tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, ll.upper)); - tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) + - offsetof(CPU_QuadU, ll.lower)); + dst = DFPREG(dst); + tcg_gen_extr_i128_i64(cpu_fpr[dst / 2 + 1], cpu_fpr[dst / 2], v); + gen_update_fprs_dirty(dc, dst); } /* moves */ @@ -506,6 +489,7 @@ static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2) static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2) { TCGv zero = tcg_constant_tl(0); + TCGv one = tcg_constant_tl(1); TCGv t_src1 = tcg_temp_new(); TCGv t_src2 = 
tcg_temp_new(); TCGv t0 = tcg_temp_new(); @@ -517,8 +501,7 @@ static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2) * if (!(env->y & 1)) * src2 = 0; */ - tcg_gen_andi_tl(t0, cpu_y, 0x1); - tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2); + tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2); /* * b2 = src1 & 1; @@ -736,159 +719,6 @@ static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2) #endif } -// 1 -static void gen_op_eval_ba(TCGv dst) -{ - tcg_gen_movi_tl(dst, 1); -} - -// 0 -static void gen_op_eval_bn(TCGv dst) -{ - tcg_gen_movi_tl(dst, 0); -} - -/* - FPSR bit field FCC1 | FCC0: - 0 = - 1 < - 2 > - 3 unordered -*/ -static void gen_mov_reg_FCC0(TCGv reg, TCGv src, - unsigned int fcc_offset) -{ - tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset); - tcg_gen_andi_tl(reg, reg, 0x1); -} - -static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset) -{ - tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset); - tcg_gen_andi_tl(reg, reg, 0x1); -} - -// !0: FCC0 | FCC1 -static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - TCGv t0 = tcg_temp_new(); - gen_mov_reg_FCC0(dst, src, fcc_offset); - gen_mov_reg_FCC1(t0, src, fcc_offset); - tcg_gen_or_tl(dst, dst, t0); -} - -// 1 or 2: FCC0 ^ FCC1 -static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - TCGv t0 = tcg_temp_new(); - gen_mov_reg_FCC0(dst, src, fcc_offset); - gen_mov_reg_FCC1(t0, src, fcc_offset); - tcg_gen_xor_tl(dst, dst, t0); -} - -// 1 or 3: FCC0 -static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - gen_mov_reg_FCC0(dst, src, fcc_offset); -} - -// 1: FCC0 & !FCC1 -static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - TCGv t0 = tcg_temp_new(); - gen_mov_reg_FCC0(dst, src, fcc_offset); - gen_mov_reg_FCC1(t0, src, fcc_offset); - tcg_gen_andc_tl(dst, dst, t0); -} - -// 2 or 3: FCC1 -static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - gen_mov_reg_FCC1(dst, src, fcc_offset); -} - -// 2: !FCC0 & FCC1 -static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - TCGv t0 = tcg_temp_new(); - gen_mov_reg_FCC0(dst, src, fcc_offset); - gen_mov_reg_FCC1(t0, src, fcc_offset); - tcg_gen_andc_tl(dst, t0, dst); -} - -// 3: FCC0 & FCC1 -static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - TCGv t0 = tcg_temp_new(); - gen_mov_reg_FCC0(dst, src, fcc_offset); - gen_mov_reg_FCC1(t0, src, fcc_offset); - tcg_gen_and_tl(dst, dst, t0); -} - -// 0: !(FCC0 | FCC1) -static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - TCGv t0 = tcg_temp_new(); - gen_mov_reg_FCC0(dst, src, fcc_offset); - gen_mov_reg_FCC1(t0, src, fcc_offset); - tcg_gen_or_tl(dst, dst, t0); - tcg_gen_xori_tl(dst, dst, 0x1); -} - -// 0 or 3: !(FCC0 ^ FCC1) -static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - TCGv t0 = tcg_temp_new(); - gen_mov_reg_FCC0(dst, src, fcc_offset); - gen_mov_reg_FCC1(t0, src, fcc_offset); - tcg_gen_xor_tl(dst, dst, t0); - tcg_gen_xori_tl(dst, dst, 0x1); -} - -// 0 or 2: !FCC0 -static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - gen_mov_reg_FCC0(dst, src, fcc_offset); - tcg_gen_xori_tl(dst, dst, 0x1); -} - -// !1: !(FCC0 & !FCC1) -static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - TCGv t0 = tcg_temp_new(); - gen_mov_reg_FCC0(dst, src, fcc_offset); - gen_mov_reg_FCC1(t0, src, fcc_offset); - tcg_gen_andc_tl(dst, dst, t0); - 
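One step of MULScc above gates the multiplicand on bit 0 of %y ("if (!(y & 1)) src2 = 0"); the patch expresses that with a single test-and-move (TCG_COND_TSTEQ) instead of andi plus compare-with-zero. A standalone, branch-free C model of the same gating (not QEMU code, names invented for illustration):

#include <assert.h>
#include <stdint.h>

static uint32_t gate_on_y_lsb(uint32_t src2, uint32_t y)
{
    /* mask is all-ones when bit 0 of y is set, zero otherwise */
    uint32_t mask = -(y & 1u);
    return src2 & mask;
}

int main(void)
{
    assert(gate_on_y_lsb(0xdeadbeef, 1) == 0xdeadbeef);
    assert(gate_on_y_lsb(0xdeadbeef, 0) == 0);
    assert(gate_on_y_lsb(0x1234, 0xffff) == 0x1234);  /* only bit 0 matters */
    return 0;
}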
tcg_gen_xori_tl(dst, dst, 0x1); -} - -// 0 or 1: !FCC1 -static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - gen_mov_reg_FCC1(dst, src, fcc_offset); - tcg_gen_xori_tl(dst, dst, 0x1); -} - -// !2: !(!FCC0 & FCC1) -static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - TCGv t0 = tcg_temp_new(); - gen_mov_reg_FCC0(dst, src, fcc_offset); - gen_mov_reg_FCC1(t0, src, fcc_offset); - tcg_gen_andc_tl(dst, t0, dst); - tcg_gen_xori_tl(dst, dst, 0x1); -} - -// !3: !(FCC0 & FCC1) -static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset) -{ - TCGv t0 = tcg_temp_new(); - gen_mov_reg_FCC0(dst, src, fcc_offset); - gen_mov_reg_FCC1(t0, src, fcc_offset); - tcg_gen_and_tl(dst, dst, t0); - tcg_gen_xori_tl(dst, dst, 0x1); -} - static void finishing_insn(DisasContext *dc) { /* @@ -1113,80 +943,62 @@ static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond) { - unsigned int offset; - TCGv r_dst; - - /* For now we still generate a straight boolean result. */ - cmp->cond = TCG_COND_NE; - cmp->c1 = r_dst = tcg_temp_new(); - cmp->c2 = 0; - - switch (cc) { - default: - case 0x0: - offset = 0; - break; - case 0x1: - offset = 32 - 10; - break; - case 0x2: - offset = 34 - 10; - break; - case 0x3: - offset = 36 - 10; - break; - } + TCGv_i32 fcc = cpu_fcc[cc]; + TCGv_i32 c1 = fcc; + int c2 = 0; + TCGCond tcond; - switch (cond) { - case 0x0: - gen_op_eval_bn(r_dst); - break; - case 0x1: - gen_op_eval_fbne(r_dst, cpu_fsr, offset); - break; - case 0x2: - gen_op_eval_fblg(r_dst, cpu_fsr, offset); - break; - case 0x3: - gen_op_eval_fbul(r_dst, cpu_fsr, offset); - break; - case 0x4: - gen_op_eval_fbl(r_dst, cpu_fsr, offset); - break; - case 0x5: - gen_op_eval_fbug(r_dst, cpu_fsr, offset); - break; - case 0x6: - gen_op_eval_fbg(r_dst, cpu_fsr, offset); - break; - case 0x7: - gen_op_eval_fbu(r_dst, cpu_fsr, offset); - break; - case 0x8: - gen_op_eval_ba(r_dst); + /* + * FCC values: + * 0 = + * 1 < + * 2 > + * 3 unordered + */ + switch (cond & 7) { + case 0x0: /* fbn */ + tcond = TCG_COND_NEVER; break; - case 0x9: - gen_op_eval_fbe(r_dst, cpu_fsr, offset); + case 0x1: /* fbne : !0 */ + tcond = TCG_COND_NE; break; - case 0xa: - gen_op_eval_fbue(r_dst, cpu_fsr, offset); + case 0x2: /* fblg : 1 or 2 */ + /* fcc in {1,2} - 1 -> fcc in {0,1} */ + c1 = tcg_temp_new_i32(); + tcg_gen_addi_i32(c1, fcc, -1); + c2 = 1; + tcond = TCG_COND_LEU; break; - case 0xb: - gen_op_eval_fbge(r_dst, cpu_fsr, offset); + case 0x3: /* fbul : 1 or 3 */ + c1 = tcg_temp_new_i32(); + tcg_gen_andi_i32(c1, fcc, 1); + tcond = TCG_COND_NE; break; - case 0xc: - gen_op_eval_fbuge(r_dst, cpu_fsr, offset); + case 0x4: /* fbl : 1 */ + c2 = 1; + tcond = TCG_COND_EQ; break; - case 0xd: - gen_op_eval_fble(r_dst, cpu_fsr, offset); + case 0x5: /* fbug : 2 or 3 */ + c2 = 2; + tcond = TCG_COND_GEU; break; - case 0xe: - gen_op_eval_fbule(r_dst, cpu_fsr, offset); + case 0x6: /* fbg : 2 */ + c2 = 2; + tcond = TCG_COND_EQ; break; - case 0xf: - gen_op_eval_fbo(r_dst, cpu_fsr, offset); + case 0x7: /* fbu : 3 */ + c2 = 3; + tcond = TCG_COND_EQ; break; } + if (cond & 8) { + tcond = tcg_invert_cond(tcond); + } + + cmp->cond = tcond; + cmp->c2 = c2; + cmp->c1 = tcg_temp_new(); + tcg_gen_extu_i32_tl(cmp->c1, c1); } static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src) @@ -1216,7 +1028,8 @@ static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src) static void gen_op_clear_ieee_excp_and_FTT(void) { - 
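The rewritten gen_fcompare folds the old per-condition FCC0/FCC1 boolean evaluators into one integer comparison on the 2-bit fcc value (0 '=', 1 '<', 2 '>', 3 'unordered'), with bit 3 of the condition selecting the inverted test. The standalone sketch below (plain C, not QEMU API; the accept[] table and function names are invented for illustration) cross-checks that mapping against the original truth table:

#include <assert.h>
#include <stdio.h>

/* Which fcc values each of the 8 base conditions accepts, as a bitmask. */
static const unsigned char accept[8] = {
    /* fbn  */ 0x0,   /* never   */
    /* fbne */ 0xe,   /* 1, 2, 3 */
    /* fblg */ 0x6,   /* 1 or 2  */
    /* fbul */ 0xa,   /* 1 or 3  */
    /* fbl  */ 0x2,   /* 1       */
    /* fbug */ 0xc,   /* 2 or 3  */
    /* fbg  */ 0x4,   /* 2       */
    /* fbu  */ 0x8,   /* 3       */
};

static int reference(unsigned cond, unsigned fcc)
{
    int r = (accept[cond & 7] >> fcc) & 1;
    return (cond & 8) ? !r : r;          /* conditions 8..15 are the negations */
}

/* The same predicate as a single comparison, mirroring gen_fcompare. */
static int as_comparison(unsigned cond, unsigned fcc)
{
    int r = 0;
    switch (cond & 7) {
    case 0: r = 0; break;                /* never               */
    case 1: r = fcc != 0; break;         /* anything but '='    */
    case 2: r = (fcc - 1u) <= 1u; break; /* {1,2} as a range    */
    case 3: r = (fcc & 1) != 0; break;   /* {1,3} via bit 0     */
    case 4: r = fcc == 1; break;
    case 5: r = fcc >= 2u; break;        /* {2,3}               */
    case 6: r = fcc == 2; break;
    case 7: r = fcc == 3; break;
    }
    return (cond & 8) ? !r : r;
}

int main(void)
{
    for (unsigned cond = 0; cond < 16; cond++) {
        for (unsigned fcc = 0; fcc < 4; fcc++) {
            assert(reference(cond, fcc) == as_comparison(cond, fcc));
        }
    }
    printf("all 64 cases match\n");
    return 0;
}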
tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK); + tcg_gen_st_i32(tcg_constant_i32(0), tcg_env, + offsetof(CPUSPARCState, fsr_cexc_ftt)); } static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src) @@ -1228,13 +1041,13 @@ static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src) static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src) { gen_op_clear_ieee_excp_and_FTT(); - gen_helper_fnegs(dst, src); + tcg_gen_xori_i32(dst, src, 1u << 31); } static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src) { gen_op_clear_ieee_excp_and_FTT(); - gen_helper_fabss(dst, src); + tcg_gen_andi_i32(dst, src, ~(1u << 31)); } static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src) @@ -1246,161 +1059,44 @@ static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src) static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src) { gen_op_clear_ieee_excp_and_FTT(); - gen_helper_fnegd(dst, src); + tcg_gen_xori_i64(dst, src, 1ull << 63); } static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src) { gen_op_clear_ieee_excp_and_FTT(); - gen_helper_fabsd(dst, src); + tcg_gen_andi_i64(dst, src, ~(1ull << 63)); } -#ifdef TARGET_SPARC64 -static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) +static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src) { - switch (fccno) { - case 0: - gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 1: - gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 2: - gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 3: - gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - } -} + TCGv_i64 l = tcg_temp_new_i64(); + TCGv_i64 h = tcg_temp_new_i64(); -static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) -{ - switch (fccno) { - case 0: - gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 1: - gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 2: - gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 3: - gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - } -} - -static void gen_op_fcmpq(int fccno) -{ - switch (fccno) { - case 0: - gen_helper_fcmpq(cpu_fsr, tcg_env); - break; - case 1: - gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env); - break; - case 2: - gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env); - break; - case 3: - gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env); - break; - } -} - -static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) -{ - switch (fccno) { - case 0: - gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 1: - gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 2: - gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 3: - gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - } + tcg_gen_extr_i128_i64(l, h, src); + tcg_gen_xori_i64(h, h, 1ull << 63); + tcg_gen_concat_i64_i128(dst, l, h); } -static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) +static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src) { - switch (fccno) { - case 0: - gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 1: - gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 2: - gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - case 3: - gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2); - break; - } -} - -static void gen_op_fcmpeq(int fccno) -{ - switch (fccno) { - case 0: - gen_helper_fcmpeq(cpu_fsr, tcg_env); - break; - case 1: - gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env); - break; - case 2: - gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env); - break; - case 3: - 
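The fneg/fabs helpers can be replaced by inline xori/andi because IEEE negation and absolute value are pure sign-bit operations and never raise exceptions; the quad variants do the same on bit 63 of the high half. A minimal standalone illustration of the sign-bit trick (the function names echo the helpers but this is not QEMU code):

#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <string.h>

static float fnegs(float f)
{
    uint32_t bits;
    memcpy(&bits, &f, sizeof(bits));
    bits ^= UINT32_C(1) << 31;          /* flip the sign bit */
    memcpy(&f, &bits, sizeof(bits));
    return f;
}

static double fabsd(double d)
{
    uint64_t bits;
    memcpy(&bits, &d, sizeof(bits));
    bits &= ~(UINT64_C(1) << 63);       /* clear the sign bit */
    memcpy(&d, &bits, sizeof(bits));
    return d;
}

int main(void)
{
    assert(fnegs(1.5f) == -1.5f);
    assert(fnegs(-0.0f) == 0.0f && !signbit(fnegs(-0.0f)));
    assert(fabsd(-2.25) == 2.25);
    assert(fabsd(-INFINITY) == INFINITY);
    return 0;
}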
gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env); - break; - } -} + TCGv_i64 l = tcg_temp_new_i64(); + TCGv_i64 h = tcg_temp_new_i64(); -#else - -static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2) -{ - gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2); + tcg_gen_extr_i128_i64(l, h, src); + tcg_gen_andi_i64(h, h, ~(1ull << 63)); + tcg_gen_concat_i64_i128(dst, l, h); } -static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) +static void gen_op_fpexception_im(DisasContext *dc, int ftt) { - gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2); -} - -static void gen_op_fcmpq(int fccno) -{ - gen_helper_fcmpq(cpu_fsr, tcg_env); -} - -static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2) -{ - gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2); -} - -static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) -{ - gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2); -} - -static void gen_op_fcmpeq(int fccno) -{ - gen_helper_fcmpeq(cpu_fsr, tcg_env); -} -#endif - -static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags) -{ - tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK); - tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags); + /* + * CEXC is only set when succesfully completing an FPop, + * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception. + * Thus we can simply store FTT into this field. + */ + tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env, + offsetof(CPUSPARCState, fsr_cexc_ftt)); gen_exception(dc, TT_FP_EXCP); } @@ -1421,6 +1117,7 @@ typedef enum { GET_ASI_EXCP, GET_ASI_DIRECT, GET_ASI_DTWINX, + GET_ASI_CODE, GET_ASI_BLOCK, GET_ASI_SHORT, GET_ASI_BCOPY, @@ -1463,14 +1160,22 @@ static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop) || (asi == ASI_USERDATA && (dc->def->features & CPU_FEATURE_CASA))) { switch (asi) { - case ASI_USERDATA: /* User data access */ + case ASI_USERDATA: /* User data access */ mem_idx = MMU_USER_IDX; type = GET_ASI_DIRECT; break; - case ASI_KERNELDATA: /* Supervisor data access */ + case ASI_KERNELDATA: /* Supervisor data access */ mem_idx = MMU_KERNEL_IDX; type = GET_ASI_DIRECT; break; + case ASI_USERTXT: /* User text access */ + mem_idx = MMU_USER_IDX; + type = GET_ASI_CODE; + break; + case ASI_KERNELTXT: /* Supervisor text access */ + mem_idx = MMU_KERNEL_IDX; + type = GET_ASI_CODE; + break; case ASI_M_BYPASS: /* MMU passthrough */ case ASI_LEON_BYPASS: /* LEON MMU passthrough */ mem_idx = MMU_PHYS_IDX; @@ -1683,6 +1388,21 @@ static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr) case GET_ASI_DIRECT: tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN); break; + + case GET_ASI_CODE: +#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) + { + MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx); + TCGv_i64 t64 = tcg_temp_new_i64(); + + gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi)); + tcg_gen_trunc_i64_tl(dst, t64); + } + break; +#else + g_assert_not_reached(); +#endif + default: { TCGv_i32 r_asi = tcg_constant_i32(da->asi); @@ -1727,28 +1447,35 @@ static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr) case GET_ASI_BCOPY: assert(TARGET_LONG_BITS == 32); - /* Copy 32 bytes from the address in SRC to ADDR. */ - /* ??? The original qemu code suggests 4-byte alignment, dropping - the low bits, but the only place I can see this used is in the - Linux kernel with 32 byte alignment, which would make more sense - as a cacheline-style operation. */ + /* + * Copy 32 bytes from the address in SRC to ADDR. 
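For the new GET_ASI_CODE path, the translator packs the memory operation and MMU index into one constant and the runtime helper unpacks it, applying the size mask, byte swap and sign extension to the value it loaded. The sketch below models that post-processing; the flag layout here is invented for the example and does not match QEMU's MemOp/MemOpIdx encoding:

#include <assert.h>
#include <stdint.h>

enum {
    SZ_MASK = 0x3,       /* 0: 8-bit, 1: 16-bit, 2: 32-bit, 3: 64-bit */
    F_SIGN  = 1u << 2,   /* sign-extend the result                    */
    F_BSWAP = 1u << 3,   /* loaded value is in the other endianness   */
};

static uint32_t make_opidx(uint32_t mop, uint32_t mmu_idx)
{
    return (mmu_idx << 8) | mop;        /* both fields in one immediate */
}

static uint64_t fixup_loaded(uint32_t opidx, uint64_t raw)
{
    uint32_t mop = opidx & 0xff;        /* mmu index (opidx >> 8) not needed here */

    switch (mop & SZ_MASK) {
    case 0:                             /* byte loads have nothing to swap */
        raw &= 0xff;
        if (mop & F_SIGN)  raw = (uint64_t)(int8_t)raw;
        break;
    case 1:
        raw &= 0xffff;
        if (mop & F_BSWAP) raw = __builtin_bswap16((uint16_t)raw);
        if (mop & F_SIGN)  raw = (uint64_t)(int16_t)raw;
        break;
    case 2:
        raw &= 0xffffffff;
        if (mop & F_BSWAP) raw = __builtin_bswap32((uint32_t)raw);
        if (mop & F_SIGN)  raw = (uint64_t)(int32_t)raw;
        break;
    default:
        if (mop & F_BSWAP) raw = __builtin_bswap64(raw);
        break;
    }
    return raw;
}

int main(void)
{
    uint32_t oi = make_opidx(1 | F_BSWAP | F_SIGN, /* mmu_idx */ 2);
    /* 0x80ff swapped is 0xff80, then sign-extended to ...ffff80 */
    assert(fixup_loaded(oi, 0x80ff) == UINT64_C(0xffffffffffffff80));
    return 0;
}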
+ * + * From Ross RT625 hyperSPARC manual, section 4.6: + * "Block Copy and Block Fill will work only on cache line boundaries." + * + * It does not specify if an unaliged address is truncated or trapped. + * Previous qemu behaviour was to truncate to 4 byte alignment, which + * is obviously wrong. The only place I can see this used is in the + * Linux kernel which begins with page alignment, advancing by 32, + * so is always aligned. Assume truncation as the simpler option. + * + * Since the loads and stores are paired, allow the copy to happen + * in the host endianness. The copy need not be atomic. + */ { + MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR; TCGv saddr = tcg_temp_new(); TCGv daddr = tcg_temp_new(); - TCGv four = tcg_constant_tl(4); - TCGv_i32 tmp = tcg_temp_new_i32(); - int i; - - tcg_gen_andi_tl(saddr, src, -4); - tcg_gen_andi_tl(daddr, addr, -4); - for (i = 0; i < 32; i += 4) { - /* Since the loads and stores are paired, allow the - copy to happen in the host endianness. */ - tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL); - tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL); - tcg_gen_add_tl(saddr, saddr, four); - tcg_gen_add_tl(daddr, daddr, four); - } + TCGv_i128 tmp = tcg_temp_new_i128(); + + tcg_gen_andi_tl(saddr, src, -32); + tcg_gen_andi_tl(daddr, addr, -32); + tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop); + tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop); + tcg_gen_addi_tl(saddr, saddr, 16); + tcg_gen_addi_tl(daddr, daddr, 16); + tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop); + tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop); } break; @@ -1866,7 +1593,7 @@ static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size, memop |= MO_ALIGN_4; switch (size) { case MO_32: - d32 = gen_dest_fpr_F(dc); + d32 = tcg_temp_new_i32(); tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop); gen_store_fpr_F(dc, rd, d32); break; @@ -1931,7 +1658,7 @@ static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size, case MO_32: d64 = tcg_temp_new_i64(); gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop); - d32 = gen_dest_fpr_F(dc); + d32 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(d32, d64); gen_store_fpr_F(dc, rd, d32); break; @@ -2088,6 +1815,26 @@ static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd) } break; + case GET_ASI_CODE: +#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) + { + MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx); + TCGv_i64 tmp = tcg_temp_new_i64(); + + gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi)); + + /* See above. */ + if ((da->memop & MO_BSWAP) == MO_TE) { + tcg_gen_extr_i64_tl(lo, hi, tmp); + } else { + tcg_gen_extr_i64_tl(hi, lo, tmp); + } + } + break; +#else + g_assert_not_reached(); +#endif + default: /* ??? In theory we've handled all of the ASIs that are valid for ldda, and this should raise DAE_invalid_asi. However, @@ -2165,23 +1912,22 @@ static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd) case GET_ASI_BFILL: assert(TARGET_LONG_BITS == 32); - /* Store 32 bytes of T64 to ADDR. */ - /* ??? The original qemu code suggests 8-byte alignment, dropping - the low bits, but the only place I can see this used is in the - Linux kernel with 32 byte alignment, which would make more sense - as a cacheline-style operation. */ + /* + * Store 32 bytes of [rd:rd+1] to ADDR. + * See comments for GET_ASI_COPY above. 
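The shape of the new BCOPY lowering, in plain C: both addresses are truncated to the 32-byte cache line and the line moves as two 16-byte pieces. This is only a host-memory model of the semantics; the real code issues the accesses through the guest MMU with tcg_gen_qemu_ld/st_i128.

#include <assert.h>
#include <stdint.h>
#include <string.h>

static void bcopy_line(uint8_t *mem, uintptr_t dst, uintptr_t src)
{
    src &= ~(uintptr_t)31;              /* assume truncation, as argued above */
    dst &= ~(uintptr_t)31;
    for (int i = 0; i < 32; i += 16) {  /* two 16-byte halves */
        memcpy(mem + dst + i, mem + src + i, 16);
    }
}

int main(void)
{
    uint8_t ram[128];
    for (int i = 0; i < 64; i++) ram[i] = (uint8_t)i;
    memset(ram + 64, 0, 64);

    bcopy_line(ram, /* dst */ 70, /* src */ 5);   /* both round down */
    assert(memcmp(ram + 64, ram + 0, 32) == 0);
    return 0;
}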
+ */ { - TCGv_i64 t64 = tcg_temp_new_i64(); - TCGv d_addr = tcg_temp_new(); - TCGv eight = tcg_constant_tl(8); - int i; - - tcg_gen_concat_tl_i64(t64, lo, hi); - tcg_gen_andi_tl(d_addr, addr, -8); - for (i = 0; i < 32; i += 8) { - tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop); - tcg_gen_add_tl(d_addr, d_addr, eight); - } + MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR; + TCGv_i64 t8 = tcg_temp_new_i64(); + TCGv_i128 t16 = tcg_temp_new_i128(); + TCGv daddr = tcg_temp_new(); + + tcg_gen_concat_tl_i64(t8, lo, hi); + tcg_gen_concat_i64_i128(t16, t8, t8); + tcg_gen_andi_tl(daddr, addr, -32); + tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop); + tcg_gen_addi_tl(daddr, daddr, 16); + tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop); } break; @@ -2222,7 +1968,7 @@ static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs) s1 = gen_load_fpr_F(dc, rs); s2 = gen_load_fpr_F(dc, rd); - dst = gen_dest_fpr_F(dc); + dst = tcg_temp_new_i32(); zero = tcg_constant_i32(0); tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2); @@ -2681,16 +2427,8 @@ static bool trans_RDY(DisasContext *dc, arg_RDY *a) static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst) { - uint32_t val; - - /* - * TODO: There are many more fields to be filled, - * some of which are writable. - */ - val = dc->def->nwindows - 1; /* [4:0] NWIN */ - val |= 1 << 8; /* [8] V8 */ - - return tcg_constant_tl(val); + gen_helper_rdasr17(dst, tcg_env); + return dst; } TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config) @@ -4383,38 +4121,75 @@ static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a) return true; } -static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop, - target_ulong new_mask, target_ulong old_mask) +static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a) { - TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + TCGv_i32 tmp; + if (addr == NULL) { return false; } if (gen_trap_ifnofpu(dc)) { return true; } - tmp = tcg_temp_new(); - tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN); - tcg_gen_andi_tl(tmp, tmp, new_mask); - tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask); - tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp); - gen_helper_set_fsr(tcg_env, cpu_fsr); + + tmp = tcg_temp_new_i32(); + tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN); + + tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2); + /* LDFSR does not change FCC[1-3]. 
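RDASR17 now goes through a runtime helper so the per-CPU index can be folded into the value. A tiny standalone sketch of the %asr17 field layout used above ([4:0] NWIN, [8] V8, [31:28] INDEX); the function name is invented for illustration:

#include <assert.h>
#include <stdint.h>

static uint32_t asr17_compose(unsigned nwindows, unsigned cpu_index)
{
    uint32_t val = (nwindows - 1) & 0x1f;   /* [4:0]   NWIN  */
    val |= 1u << 8;                         /* [8]     V8    */
    val |= (uint32_t)cpu_index << 28;       /* [31:28] INDEX */
    return val;
}

int main(void)
{
    uint32_t v = asr17_compose(8, 3);
    assert((v & 0x1f) == 7);    /* NWIN = nwindows - 1 */
    assert(v & (1u << 8));      /* V8 set              */
    assert((v >> 28) == 3);     /* this CPU's index    */
    return 0;
}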
*/ + + gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp); return advance_pc(dc); } -TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK) -TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK) +static bool trans_LDXFSR(DisasContext *dc, arg_r_r_ri *a) +{ +#ifdef TARGET_SPARC64 + TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + TCGv_i64 t64; + TCGv_i32 lo, hi; + + if (addr == NULL) { + return false; + } + if (gen_trap_ifnofpu(dc)) { + return true; + } + + t64 = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN); + + lo = tcg_temp_new_i32(); + hi = cpu_fcc[3]; + tcg_gen_extr_i64_i32(lo, hi, t64); + tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2); + tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2); + tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2); + tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2); + + gen_helper_set_fsr_nofcc_noftt(tcg_env, lo); + return advance_pc(dc); +#else + return false; +#endif +} static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop) { TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + TCGv fsr; + if (addr == NULL) { return false; } if (gen_trap_ifnofpu(dc)) { return true; } - tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN); + + fsr = tcg_temp_new(); + gen_helper_get_fsr(fsr, tcg_env); + tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN); return advance_pc(dc); } @@ -4491,7 +4266,7 @@ static bool do_fd(DisasContext *dc, arg_r_r *a, return true; } - dst = gen_dest_fpr_F(dc); + dst = tcg_temp_new_i32(); src = gen_load_fpr_D(dc, a->rs); func(dst, src); gen_store_fpr_F(dc, a->rd, dst); @@ -4510,10 +4285,8 @@ static bool do_env_ff(DisasContext *dc, arg_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); tmp = gen_load_fpr_F(dc, a->rs); func(tmp, tcg_env, tmp); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); gen_store_fpr_F(dc, a->rd, tmp); return advance_pc(dc); } @@ -4532,11 +4305,9 @@ static bool do_env_fd(DisasContext *dc, arg_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); - dst = gen_dest_fpr_F(dc); + dst = tcg_temp_new_i32(); src = gen_load_fpr_D(dc, a->rs); func(dst, tcg_env, src); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); gen_store_fpr_F(dc, a->rd, dst); return advance_pc(dc); } @@ -4576,11 +4347,9 @@ static bool do_env_dd(DisasContext *dc, arg_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); dst = gen_dest_fpr_D(dc, a->rd); src = gen_load_fpr_D(dc, a->rs); func(dst, tcg_env, src); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); gen_store_fpr_D(dc, a->rd, dst); return advance_pc(dc); } @@ -4599,11 +4368,9 @@ static bool do_env_df(DisasContext *dc, arg_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); dst = gen_dest_fpr_D(dc, a->rd); src = gen_load_fpr_F(dc, a->rs); func(dst, tcg_env, src); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); gen_store_fpr_D(dc, a->rd, dst); return advance_pc(dc); } @@ -4612,32 +4379,11 @@ TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod) TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod) TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox) -static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a) -{ - int rd, rs; - - if (!avail_64(dc)) { - return false; - } - if (gen_trap_ifnofpu(dc)) { - return true; - } - if (gen_trap_float128(dc)) { - return true; - } - - gen_op_clear_ieee_excp_and_FTT(); - rd = QFPREG(a->rd); - rs = QFPREG(a->rs); - tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]); - 
tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]); - gen_update_fprs_dirty(dc, rd); - return advance_pc(dc); -} - static bool do_qq(DisasContext *dc, arg_r_r *a, - void (*func)(TCGv_env)) + void (*func)(TCGv_i128, TCGv_i128)) { + TCGv_i128 t; + if (gen_trap_ifnofpu(dc)) { return true; } @@ -4646,19 +4392,21 @@ static bool do_qq(DisasContext *dc, arg_r_r *a, } gen_op_clear_ieee_excp_and_FTT(); - gen_op_load_fpr_QT1(QFPREG(a->rs)); - func(tcg_env); - gen_op_store_QT0_fpr(QFPREG(a->rd)); - gen_update_fprs_dirty(dc, QFPREG(a->rd)); + t = gen_load_fpr_Q(dc, a->rs); + func(t, t); + gen_store_fpr_Q(dc, a->rd, t); return advance_pc(dc); } -TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq) -TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq) +TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128) +TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq) +TRANS(FABSq, 64, do_qq, a, gen_op_fabsq) static bool do_env_qq(DisasContext *dc, arg_r_r *a, - void (*func)(TCGv_env)) + void (*func)(TCGv_i128, TCGv_env, TCGv_i128)) { + TCGv_i128 t; + if (gen_trap_ifnofpu(dc)) { return true; } @@ -4666,20 +4414,18 @@ static bool do_env_qq(DisasContext *dc, arg_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); - gen_op_load_fpr_QT1(QFPREG(a->rs)); - func(tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - gen_op_store_QT0_fpr(QFPREG(a->rd)); - gen_update_fprs_dirty(dc, QFPREG(a->rd)); + t = gen_load_fpr_Q(dc, a->rs); + func(t, tcg_env, t); + gen_store_fpr_Q(dc, a->rd, t); return advance_pc(dc); } TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq) static bool do_env_fq(DisasContext *dc, arg_r_r *a, - void (*func)(TCGv_i32, TCGv_env)) + void (*func)(TCGv_i32, TCGv_env, TCGv_i128)) { + TCGv_i128 src; TCGv_i32 dst; if (gen_trap_ifnofpu(dc)) { @@ -4689,11 +4435,9 @@ static bool do_env_fq(DisasContext *dc, arg_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); - gen_op_load_fpr_QT1(QFPREG(a->rs)); - dst = gen_dest_fpr_F(dc); - func(dst, tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + src = gen_load_fpr_Q(dc, a->rs); + dst = tcg_temp_new_i32(); + func(dst, tcg_env, src); gen_store_fpr_F(dc, a->rd, dst); return advance_pc(dc); } @@ -4702,8 +4446,9 @@ TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos) TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi) static bool do_env_dq(DisasContext *dc, arg_r_r *a, - void (*func)(TCGv_i64, TCGv_env)) + void (*func)(TCGv_i64, TCGv_env, TCGv_i128)) { + TCGv_i128 src; TCGv_i64 dst; if (gen_trap_ifnofpu(dc)) { @@ -4713,11 +4458,9 @@ static bool do_env_dq(DisasContext *dc, arg_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); - gen_op_load_fpr_QT1(QFPREG(a->rs)); + src = gen_load_fpr_Q(dc, a->rs); dst = gen_dest_fpr_D(dc, a->rd); - func(dst, tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); + func(dst, tcg_env, src); gen_store_fpr_D(dc, a->rd, dst); return advance_pc(dc); } @@ -4726,9 +4469,10 @@ TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod) TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox) static bool do_env_qf(DisasContext *dc, arg_r_r *a, - void (*func)(TCGv_env, TCGv_i32)) + void (*func)(TCGv_i128, TCGv_env, TCGv_i32)) { TCGv_i32 src; + TCGv_i128 dst; if (gen_trap_ifnofpu(dc)) { return true; @@ -4737,11 +4481,10 @@ static bool do_env_qf(DisasContext *dc, arg_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); src = gen_load_fpr_F(dc, a->rs); - func(tcg_env, src); - gen_op_store_QT0_fpr(QFPREG(a->rd)); - gen_update_fprs_dirty(dc, QFPREG(a->rd)); + dst = tcg_temp_new_i128(); + func(dst, tcg_env, src); + gen_store_fpr_Q(dc, a->rd, dst); 
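With do_qq and friends the translator moves a SPARC quad register pair around as one 128-bit value built from two 64-bit halves, and negation/abs only touch the most significant bit of the high half. A minimal model with an explicit hi/lo pair (standalone illustration, not the TCG i128 API; 0x3fff... is 1.0 in binary128):

#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t hi, lo; } quad;   /* hi = even register of the pair */

static quad quad_neg(quad q)
{
    q.hi ^= UINT64_C(1) << 63;              /* flip the f128 sign bit  */
    return q;
}

static quad quad_abs(quad q)
{
    q.hi &= ~(UINT64_C(1) << 63);           /* clear the f128 sign bit */
    return q;
}

int main(void)
{
    quad one = { .hi = UINT64_C(0x3fff000000000000), .lo = 0 };
    quad minus_one = quad_neg(one);

    assert(minus_one.hi == UINT64_C(0xbfff000000000000));
    assert(minus_one.lo == 0);
    assert(quad_abs(minus_one).hi == one.hi);
    return 0;
}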
return advance_pc(dc); } @@ -4749,9 +4492,10 @@ TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq) TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq) static bool do_env_qd(DisasContext *dc, arg_r_r *a, - void (*func)(TCGv_env, TCGv_i64)) + void (*func)(TCGv_i128, TCGv_env, TCGv_i64)) { TCGv_i64 src; + TCGv_i128 dst; if (gen_trap_ifnofpu(dc)) { return true; @@ -4760,11 +4504,10 @@ static bool do_env_qd(DisasContext *dc, arg_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); src = gen_load_fpr_D(dc, a->rs); - func(tcg_env, src); - gen_op_store_QT0_fpr(QFPREG(a->rd)); - gen_update_fprs_dirty(dc, QFPREG(a->rd)); + dst = tcg_temp_new_i128(); + func(dst, tcg_env, src); + gen_store_fpr_Q(dc, a->rd, dst); return advance_pc(dc); } @@ -4809,11 +4552,9 @@ static bool do_env_fff(DisasContext *dc, arg_r_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); src1 = gen_load_fpr_F(dc, a->rs1); src2 = gen_load_fpr_F(dc, a->rs2); func(src1, tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); gen_store_fpr_F(dc, a->rd, src1); return advance_pc(dc); } @@ -4904,12 +4645,10 @@ static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); dst = gen_dest_fpr_D(dc, a->rd); src1 = gen_load_fpr_D(dc, a->rs1); src2 = gen_load_fpr_D(dc, a->rs2); func(dst, tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); gen_store_fpr_D(dc, a->rd, dst); return advance_pc(dc); } @@ -4931,12 +4670,10 @@ static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a) return raise_unimpfpop(dc); } - gen_op_clear_ieee_excp_and_FTT(); dst = gen_dest_fpr_D(dc, a->rd); src1 = gen_load_fpr_F(dc, a->rs1); src2 = gen_load_fpr_F(dc, a->rs2); gen_helper_fsmuld(dst, tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); gen_store_fpr_D(dc, a->rd, dst); return advance_pc(dc); } @@ -4962,8 +4699,10 @@ static bool do_dddd(DisasContext *dc, arg_r_r_r *a, TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist) static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a, - void (*func)(TCGv_env)) + void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128)) { + TCGv_i128 src1, src2; + if (gen_trap_ifnofpu(dc)) { return true; } @@ -4971,13 +4710,10 @@ static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a, return true; } - gen_op_clear_ieee_excp_and_FTT(); - gen_op_load_fpr_QT0(QFPREG(a->rs1)); - gen_op_load_fpr_QT1(QFPREG(a->rs2)); - func(tcg_env); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - gen_op_store_QT0_fpr(QFPREG(a->rd)); - gen_update_fprs_dirty(dc, QFPREG(a->rd)); + src1 = gen_load_fpr_Q(dc, a->rs1); + src2 = gen_load_fpr_Q(dc, a->rs2); + func(src1, tcg_env, src1, src2); + gen_store_fpr_Q(dc, a->rd, src1); return advance_pc(dc); } @@ -4989,6 +4725,7 @@ TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq) static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a) { TCGv_i64 src1, src2; + TCGv_i128 dst; if (gen_trap_ifnofpu(dc)) { return true; @@ -4997,13 +4734,11 @@ static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a) return true; } - gen_op_clear_ieee_excp_and_FTT(); src1 = gen_load_fpr_D(dc, a->rs1); src2 = gen_load_fpr_D(dc, a->rs2); - gen_helper_fdmulq(tcg_env, src1, src2); - gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env); - gen_op_store_QT0_fpr(QFPREG(a->rd)); - gen_update_fprs_dirty(dc, QFPREG(a->rd)); + dst = tcg_temp_new_i128(); + gen_helper_fdmulq(dst, tcg_env, src1, src2); + gen_store_fpr_Q(dc, a->rd, dst); return advance_pc(dc); } @@ -5086,13 +4821,12 @@ static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e) return 
true; } - gen_op_clear_ieee_excp_and_FTT(); src1 = gen_load_fpr_F(dc, a->rs1); src2 = gen_load_fpr_F(dc, a->rs2); if (e) { - gen_op_fcmpes(a->cc, src1, src2); + gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2); } else { - gen_op_fcmps(a->cc, src1, src2); + gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2); } return advance_pc(dc); } @@ -5111,13 +4845,12 @@ static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e) return true; } - gen_op_clear_ieee_excp_and_FTT(); src1 = gen_load_fpr_D(dc, a->rs1); src2 = gen_load_fpr_D(dc, a->rs2); if (e) { - gen_op_fcmped(a->cc, src1, src2); + gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2); } else { - gen_op_fcmpd(a->cc, src1, src2); + gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2); } return advance_pc(dc); } @@ -5127,6 +4860,8 @@ TRANS(FCMPEd, ALL, do_fcmpd, a, true) static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e) { + TCGv_i128 src1, src2; + if (avail_32(dc) && a->cc != 0) { return false; } @@ -5137,13 +4872,12 @@ static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e) return true; } - gen_op_clear_ieee_excp_and_FTT(); - gen_op_load_fpr_QT0(QFPREG(a->rs1)); - gen_op_load_fpr_QT1(QFPREG(a->rs2)); + src1 = gen_load_fpr_Q(dc, a->rs1); + src2 = gen_load_fpr_Q(dc, a->rs2); if (e) { - gen_op_fcmpeq(a->cc); + gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2); } else { - gen_op_fcmpq(a->cc); + gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2); } return advance_pc(dc); } @@ -5154,13 +4888,12 @@ TRANS(FCMPEq, ALL, do_fcmpq, a, true) static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); - CPUSPARCState *env = cpu_env(cs); int bound; dc->pc = dc->base.pc_first; dc->npc = (target_ulong)dc->base.tb->cs_base; dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK; - dc->def = &env->def; + dc->def = &cpu_env(cs)->def; dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags); dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags); #ifndef CONFIG_USER_ONLY @@ -5210,10 +4943,9 @@ static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); - CPUSPARCState *env = cpu_env(cs); unsigned int insn; - insn = translator_ldl(env, &dc->base, dc->pc); + insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc); dc->base.pc_next += 4; if (!decode(dc, insn)) { @@ -5327,7 +5059,7 @@ static const TranslatorOps sparc_tr_ops = { }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc = {}; @@ -5349,6 +5081,18 @@ void sparc_tcg_init(void) "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", }; + static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = { +#ifdef TARGET_SPARC64 + { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" }, + { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" }, + { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" }, + { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" }, + { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" }, +#else + { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" }, +#endif + }; + static const struct { TCGv *ptr; int off; const char *name; } rtl[] = { #ifdef TARGET_SPARC64 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" }, @@ -5360,7 +5104,6 @@ void sparc_tcg_init(void) { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), 
"icc_Z" }, { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" }, { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" }, - { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" }, { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" }, { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" }, { &cpu_y, offsetof(CPUSPARCState, y), "y" }, @@ -5373,6 +5116,10 @@ void sparc_tcg_init(void) offsetof(CPUSPARCState, regwptr), "regwptr"); + for (i = 0; i < ARRAY_SIZE(r32); ++i) { + *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name); + } + for (i = 0; i < ARRAY_SIZE(rtl); ++i) { *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name); } @@ -5395,19 +5142,13 @@ void sparc_tcg_init(void) offsetof(CPUSPARCState, fpr[i]), fregnames[i]); } - -#ifdef TARGET_SPARC64 - cpu_fprs = tcg_global_mem_new_i32(tcg_env, - offsetof(CPUSPARCState, fprs), "fprs"); -#endif } void sparc_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) { - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; + CPUSPARCState *env = cpu_env(cs); target_ulong pc = data[0]; target_ulong npc = data[1]; diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c index 16d1c70fe71..b53fc9ce940 100644 --- a/target/sparc/win_helper.c +++ b/target/sparc/win_helper.c @@ -179,9 +179,9 @@ void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr) cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC()); } else { /* cpu_put_psr may trigger interrupts, hence BQL */ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_put_psr(env, new_psr); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -407,9 +407,9 @@ void helper_wrpstate(CPUSPARCState *env, target_ulong new_state) #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif } @@ -422,9 +422,9 @@ void helper_wrpil(CPUSPARCState *env, target_ulong new_pil) env->psrpil = new_pil; if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif } @@ -451,9 +451,9 @@ void helper_done(CPUSPARCState *env) #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif } @@ -480,9 +480,9 @@ void helper_retry(CPUSPARCState *env) #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif } diff --git a/target/target-common.c b/target/target-common.c new file mode 100644 index 00000000000..903b10cfe4b --- /dev/null +++ b/target/target-common.c @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#include "qemu/osdep.h" + +#include "cpu.h" +#include "exec/target_page.h" + +int qemu_target_page_mask(void) +{ + return TARGET_PAGE_MASK; +} diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c index 034e01c1891..a9af73aeb58 100644 --- a/target/tricore/cpu.c +++ b/target/tricore/cpu.c @@ -36,52 +36,38 @@ static const gchar *tricore_gdb_arch_name(CPUState *cs) static void tricore_cpu_set_pc(CPUState *cs, vaddr value) { - TriCoreCPU *cpu = TRICORE_CPU(cs); - CPUTriCoreState *env = &cpu->env; - - env->PC = value & ~(target_ulong)1; + cpu_env(cs)->PC = value & ~(target_ulong)1; } static vaddr tricore_cpu_get_pc(CPUState *cs) { - TriCoreCPU *cpu = TRICORE_CPU(cs); - CPUTriCoreState 
*env = &cpu->env; - - return env->PC; + return cpu_env(cs)->PC; } static void tricore_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { - TriCoreCPU *cpu = TRICORE_CPU(cs); - CPUTriCoreState *env = &cpu->env; - tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); - env->PC = tb->pc; + cpu_env(cs)->PC = tb->pc; } static void tricore_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) { - TriCoreCPU *cpu = TRICORE_CPU(cs); - CPUTriCoreState *env = &cpu->env; - - env->PC = data[0]; + cpu_env(cs)->PC = data[0]; } static void tricore_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(obj); - TriCoreCPU *cpu = TRICORE_CPU(s); - TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(cpu); - CPUTriCoreState *env = &cpu->env; + CPUState *cs = CPU(obj); + TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(obj); if (tcc->parent_phases.hold) { tcc->parent_phases.hold(obj); } - cpu_state_reset(env); + cpu_state_reset(cpu_env(cs)); } static bool tricore_cpu_has_work(CPUState *cs) @@ -89,6 +75,11 @@ static bool tricore_cpu_has_work(CPUState *cs) return true; } +static int tricore_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return 0; +} + static void tricore_cpu_realizefn(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); @@ -132,9 +123,7 @@ static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(TRICORE_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_TRICORE_CPU)) { - return NULL; - } + return oc; } @@ -175,7 +164,7 @@ static const struct SysemuCPUOps tricore_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps tricore_tcg_ops = { +static const TCGCPUOps tricore_tcg_ops = { .initialize = tricore_tcg_init, .synchronize_from_tb = tricore_cpu_synchronize_from_tb, .restore_state_to_opc = tricore_restore_state_to_opc, @@ -196,6 +185,7 @@ static void tricore_cpu_class_init(ObjectClass *c, void *data) &mcc->parent_phases); cc->class_by_name = tricore_cpu_class_by_name; cc->has_work = tricore_cpu_has_work; + cc->mmu_index = tricore_cpu_mmu_index; cc->gdb_read_register = tricore_cpu_gdb_read_register; cc->gdb_write_register = tricore_cpu_gdb_write_register; diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h index de3ab53a837..220af69fc25 100644 --- a/target/tricore/cpu.h +++ b/target/tricore/cpu.h @@ -246,15 +246,6 @@ void fpu_set_state(CPUTriCoreState *env); #define MMU_USER_IDX 2 -void tricore_cpu_list(void); - -#define cpu_list tricore_cpu_list - -static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch) -{ - return 0; -} - #include "exec/cpu-all.h" FIELD(TB_FLAGS, PRIV, 0, 2) diff --git a/target/tricore/gdbstub.c b/target/tricore/gdbstub.c index e8f8e5e6ea0..f9309c5e277 100644 --- a/target/tricore/gdbstub.c +++ b/target/tricore/gdbstub.c @@ -106,8 +106,7 @@ static void tricore_cpu_gdb_write_csfr(CPUTriCoreState *env, int n, int tricore_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - TriCoreCPU *cpu = TRICORE_CPU(cs); - CPUTriCoreState *env = &cpu->env; + CPUTriCoreState *env = cpu_env(cs); if (n < 16) { /* data registers */ return gdb_get_reg32(mem_buf, env->gpr_d[n]); @@ -121,8 +120,7 @@ int tricore_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int tricore_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - TriCoreCPU *cpu = TRICORE_CPU(cs); - CPUTriCoreState *env = &cpu->env; + CPUTriCoreState *env = cpu_env(cs); uint32_t tmp; tmp = ldl_p(mem_buf); 
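The gdbstub exchanges register values as raw bytes, and the ldl_p-style accessors reassemble them in the target's byte order before they land in gpr_d[n]. A standalone sketch of that reassembly, assuming a little-endian target such as TriCore (helper name invented for the example):

#include <assert.h>
#include <stdint.h>

static uint32_t ldl_le(const uint8_t *buf)
{
    return (uint32_t)buf[0]
         | (uint32_t)buf[1] << 8
         | (uint32_t)buf[2] << 16
         | (uint32_t)buf[3] << 24;
}

int main(void)
{
    const uint8_t payload[4] = { 0x78, 0x56, 0x34, 0x12 };
    assert(ldl_le(payload) == 0x12345678);   /* value written into the register */
    return 0;
}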
diff --git a/target/tricore/helper.c b/target/tricore/helper.c index 7e5da3cb23e..76bd2263708 100644 --- a/target/tricore/helper.c +++ b/target/tricore/helper.c @@ -48,7 +48,7 @@ hwaddr tricore_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) TriCoreCPU *cpu = TRICORE_CPU(cs); hwaddr phys_addr; int prot; - int mmu_idx = cpu_mmu_index(&cpu->env, false); + int mmu_idx = cpu_mmu_index(cs, false); if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, MMU_DATA_LOAD, mmu_idx)) { @@ -67,8 +67,7 @@ bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType rw, int mmu_idx, bool probe, uintptr_t retaddr) { - TriCoreCPU *cpu = TRICORE_CPU(cs); - CPUTriCoreState *env = &cpu->env; + CPUTriCoreState *env = cpu_env(cs); hwaddr physical; int prot; int ret = 0; @@ -77,9 +76,9 @@ bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size, ret = get_physical_address(env, &physical, &prot, address, rw, mmu_idx); - qemu_log_mask(CPU_LOG_MMU, "%s address=" TARGET_FMT_lx " ret %d physical " + qemu_log_mask(CPU_LOG_MMU, "%s address=0x%" VADDR_PRIx " ret %d physical " HWADDR_FMT_plx " prot %d\n", - __func__, (target_ulong)address, ret, physical, prot); + __func__, address, ret, physical, prot); if (ret == TLBRET_MATCH) { tlb_set_page(cs, address & TARGET_PAGE_MASK, @@ -96,28 +95,6 @@ bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } } -static void tricore_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - const char *typename; - char *name; - - typename = object_class_get_name(oc); - name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_TRICORE_CPU)); - qemu_printf(" %s\n", name); - g_free(name); -} - -void tricore_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list_sorted(TYPE_TRICORE_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, tricore_cpu_list_entry, NULL); - g_slist_free(list); -} - void fpu_set_state(CPUTriCoreState *env) { switch (extract32(env->PSW, 24, 2)) { diff --git a/target/tricore/translate.c b/target/tricore/translate.c index 66553d1be01..c45e1d992e5 100644 --- a/target/tricore/translate.c +++ b/target/tricore/translate.c @@ -95,8 +95,7 @@ enum { void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - TriCoreCPU *cpu = TRICORE_CPU(cs); - CPUTriCoreState *env = &cpu->env; + CPUTriCoreState *env = cpu_env(cs); uint32_t psw; int i; @@ -8355,7 +8354,7 @@ static void tricore_tr_init_disas_context(DisasContextBase *dcbase, { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPUTriCoreState *env = cpu_env(cs); - ctx->mem_idx = cpu_mmu_index(env, false); + ctx->mem_idx = cpu_mmu_index(cs, false); uint32_t tb_flags = (uint32_t)ctx->base.tb->flags; ctx->priv = FIELD_EX32(tb_flags, TB_FLAGS, PRIV); @@ -8472,7 +8471,7 @@ static const TranslatorOps tricore_tr_ops = { void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext ctx; translator_loop(cs, tb, max_insns, pc, host_pc, diff --git a/target/xtensa/Kconfig b/target/xtensa/Kconfig index a3c8dc7f6d7..5e46049262d 100644 --- a/target/xtensa/Kconfig +++ b/target/xtensa/Kconfig @@ -1,2 +1,3 @@ config XTENSA bool + select SEMIHOSTING diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index e20fe87bf25..875cf843c93 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -74,6 +74,11 @@ static bool xtensa_cpu_has_work(CPUState *cs) #endif } +static int xtensa_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return 
xtensa_get_cring(cpu_env(cs)); +} + #ifdef CONFIG_USER_ONLY static bool abi_call0; @@ -90,10 +95,9 @@ bool xtensa_abi_call0(void) static void xtensa_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(obj); - XtensaCPU *cpu = XTENSA_CPU(s); - XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(cpu); - CPUXtensaState *env = &cpu->env; + CPUState *cs = CPU(obj); + XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(obj); + CPUXtensaState *env = cpu_env(cs); bool dfpu = xtensa_option_enabled(env->config, XTENSA_OPTION_DFP_COPROCESSOR); @@ -127,7 +131,7 @@ static void xtensa_cpu_reset_hold(Object *obj) #ifndef CONFIG_USER_ONLY reset_mmu(env); - s->halted = env->runstall; + cs->halted = env->runstall; #endif set_no_signaling_nans(!dfpu, &env->fp_status); set_use_first_nan(!dfpu, &env->fp_status); @@ -141,9 +145,7 @@ static ObjectClass *xtensa_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(XTENSA_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc == NULL || !object_class_dynamic_cast(oc, TYPE_XTENSA_CPU)) { - return NULL; - } + return oc; } @@ -224,7 +226,7 @@ static const struct SysemuCPUOps xtensa_sysemu_ops = { #include "hw/core/tcg-cpu-ops.h" -static const struct TCGCPUOps xtensa_tcg_ops = { +static const TCGCPUOps xtensa_tcg_ops = { .initialize = xtensa_translate_init, .debug_excp_handler = xtensa_breakpoint_handler, .restore_state_to_opc = xtensa_restore_state_to_opc, @@ -235,6 +237,7 @@ static const struct TCGCPUOps xtensa_tcg_ops = { .do_interrupt = xtensa_cpu_do_interrupt, .do_transaction_failed = xtensa_cpu_do_transaction_failed, .do_unaligned_access = xtensa_cpu_do_unaligned_access, + .debug_check_breakpoint = xtensa_debug_check_breakpoint, #endif /* !CONFIG_USER_ONLY */ }; @@ -253,6 +256,7 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = xtensa_cpu_class_by_name; cc->has_work = xtensa_cpu_has_work; + cc->mmu_index = xtensa_cpu_mmu_index; cc->dump_state = xtensa_cpu_dump_state; cc->set_pc = xtensa_cpu_set_pc; cc->get_pc = xtensa_cpu_get_pc; diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h index dd817293065..6b8d0636d27 100644 --- a/target/xtensa/cpu.h +++ b/target/xtensa/cpu.h @@ -229,6 +229,7 @@ enum { #define MAX_NCCOMPARE 3 #define MAX_TLB_WAY_SIZE 8 #define MAX_NDBREAK 2 +#define MAX_NIBREAK 2 #define MAX_NMEMORY 4 #define MAX_MPU_FOREGROUND_SEGMENTS 32 @@ -547,6 +548,8 @@ struct CPUArchState { /* Watchpoints for DBREAK registers */ struct CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK]; + /* Breakpoints for IBREAK registers */ + struct CPUBreakpoint *cpu_breakpoint[MAX_NIBREAK]; }; /** @@ -590,6 +593,7 @@ void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr); hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +bool xtensa_debug_check_breakpoint(CPUState *cs); #endif void xtensa_cpu_dump_state(CPUState *cpu, FILE *f, int flags); void xtensa_count_regs(const XtensaConfig *config, @@ -600,8 +604,6 @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); -#define cpu_list xtensa_cpu_list - #define CPU_RESOLVING_TYPE TYPE_XTENSA_CPU #if TARGET_BIG_ENDIAN @@ -626,7 +628,6 @@ void check_interrupts(CPUXtensaState *s); void xtensa_irq_init(CPUXtensaState *env); qemu_irq *xtensa_get_extints(CPUXtensaState *env); qemu_irq xtensa_get_runstall(CPUXtensaState *env); -void xtensa_cpu_list(void); void 
xtensa_sync_window_from_phys(CPUXtensaState *env); void xtensa_sync_phys_from_window(CPUXtensaState *env); void xtensa_rotate_window(CPUXtensaState *env, uint32_t delta); @@ -712,11 +713,6 @@ static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env) /* MMU modes definitions */ #define MMU_USER_IDX 3 -static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch) -{ - return xtensa_get_cring(env); -} - #define XTENSA_TBFLAG_RING_MASK 0x3 #define XTENSA_TBFLAG_EXCM 0x4 #define XTENSA_TBFLAG_LITBASE 0x8 diff --git a/target/xtensa/dbg_helper.c b/target/xtensa/dbg_helper.c index 3e0c9e8e8be..5546c82ecda 100644 --- a/target/xtensa/dbg_helper.c +++ b/target/xtensa/dbg_helper.c @@ -33,27 +33,21 @@ #include "exec/exec-all.h" #include "exec/address-spaces.h" -static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr) -{ - uint32_t paddr; - uint32_t page_size; - unsigned access; - int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0, - &paddr, &page_size, &access); - if (ret == 0) { - tb_invalidate_phys_addr(&address_space_memory, paddr, - MEMTXATTRS_UNSPECIFIED); - } -} - void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v) { + CPUState *cs = env_cpu(env); uint32_t change = v ^ env->sregs[IBREAKENABLE]; unsigned i; for (i = 0; i < env->config->nibreak; ++i) { if (change & (1 << i)) { - tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]); + if (v & (1 << i)) { + cpu_breakpoint_insert(cs, env->sregs[IBREAKA + i], + BP_CPU, &env->cpu_breakpoint[i]); + } else { + cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[i]); + env->cpu_breakpoint[i] = NULL; + } } } env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1); @@ -62,12 +56,31 @@ void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v) void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v) { if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) { - tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]); - tb_invalidate_virtual_addr(env, v); + CPUState *cs = env_cpu(env); + + cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[i]); + cpu_breakpoint_insert(cs, v, BP_CPU, &env->cpu_breakpoint[i]); } env->sregs[IBREAKA + i] = v; } +bool xtensa_debug_check_breakpoint(CPUState *cs) +{ + CPUXtensaState *env = cpu_env(cs); + unsigned int i; + + if (xtensa_get_cintlevel(env) >= env->config->debug_level) { + return false; + } + for (i = 0; i < env->config->nibreak; ++i) { + if (env->sregs[IBREAKENABLE] & (1 << i) && + env->sregs[IBREAKA + i] == env->pc) { + return true; + } + } + return false; +} + static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka, uint32_t dbreakc) { diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c index 91354884f7e..0514c2c1f32 100644 --- a/target/xtensa/exc_helper.c +++ b/target/xtensa/exc_helper.c @@ -105,9 +105,9 @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel) env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | (intlevel << PS_INTLEVEL_SHIFT); - qemu_mutex_lock_iothread(); + bql_lock(); check_interrupts(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (env->pending_irq_level) { cpu_loop_exit(cpu); @@ -120,9 +120,9 @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel) void HELPER(check_interrupts)(CPUXtensaState *env) { - qemu_mutex_lock_iothread(); + bql_lock(); check_interrupts(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(intset)(CPUXtensaState *env, uint32_t v) @@ -205,8 +205,7 @@ static void 
handle_interrupt(CPUXtensaState *env) /* Called from cpu_handle_interrupt with BQL held */ void xtensa_cpu_do_interrupt(CPUState *cs) { - XtensaCPU *cpu = XTENSA_CPU(cs); - CPUXtensaState *env = &cpu->env; + CPUXtensaState *env = cpu_env(cs); if (cs->exception_index == EXC_IRQ) { qemu_log_mask(CPU_LOG_INT, diff --git a/target/xtensa/gdbstub.c b/target/xtensa/gdbstub.c index 4b3bfb7e59c..4748fb65321 100644 --- a/target/xtensa/gdbstub.c +++ b/target/xtensa/gdbstub.c @@ -65,8 +65,7 @@ void xtensa_count_regs(const XtensaConfig *config, int xtensa_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - XtensaCPU *cpu = XTENSA_CPU(cs); - CPUXtensaState *env = &cpu->env; + CPUXtensaState *env = cpu_env(cs); const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n; #ifdef CONFIG_USER_ONLY int num_regs = env->config->gdb_regmap.num_core_regs; @@ -120,8 +119,7 @@ int xtensa_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int xtensa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - XtensaCPU *cpu = XTENSA_CPU(cs); - CPUXtensaState *env = &cpu->env; + CPUXtensaState *env = cpu_env(cs); uint32_t tmp; const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n; #ifdef CONFIG_USER_ONLY diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c index dbeb97a953c..ca214b948a9 100644 --- a/target/xtensa/helper.c +++ b/target/xtensa/helper.c @@ -217,8 +217,7 @@ static uint32_t check_hw_breakpoints(CPUXtensaState *env) void xtensa_breakpoint_handler(CPUState *cs) { - XtensaCPU *cpu = XTENSA_CPU(cs); - CPUXtensaState *env = &cpu->env; + CPUXtensaState *env = cpu_env(cs); if (cs->watchpoint_hit) { if (cs->watchpoint_hit->flags & BP_CPU) { @@ -231,15 +230,18 @@ void xtensa_breakpoint_handler(CPUState *cs) } cpu_loop_exit_noexc(cs); } - } -} - -void xtensa_cpu_list(void) -{ - XtensaConfigList *core = xtensa_cores; - qemu_printf("Available CPUs:\n"); - for (; core; core = core->next) { - qemu_printf(" %s\n", core->config->name); + } else { + if (cpu_breakpoint_test(cs, env->pc, BP_GDB) + || !cpu_breakpoint_test(cs, env->pc, BP_CPU)) { + return; + } + if (env->sregs[ICOUNT] == 0xffffffff && + xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) { + debug_exception_env(env, DEBUGCAUSE_IC); + } else { + debug_exception_env(env, DEBUGCAUSE_IB); + } + cpu_loop_exit_noexc(cs); } } @@ -263,8 +265,7 @@ bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - XtensaCPU *cpu = XTENSA_CPU(cs); - CPUXtensaState *env = &cpu->env; + CPUXtensaState *env = cpu_env(cs); uint32_t paddr; uint32_t page_size; unsigned access; @@ -294,8 +295,7 @@ void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr) { - XtensaCPU *cpu = XTENSA_CPU(cs); - CPUXtensaState *env = &cpu->env; + CPUXtensaState *env = cpu_env(cs); cpu_restore_state(cs, retaddr); HELPER(exception_cause_vaddr)(env, env->pc, diff --git a/target/xtensa/mmu_helper.c b/target/xtensa/mmu_helper.c index 2fda4e887cc..47063b0a57b 100644 --- a/target/xtensa/mmu_helper.c +++ b/target/xtensa/mmu_helper.c @@ -66,7 +66,7 @@ void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr) * only the side-effects (ie any MMU or other exception) */ probe_access(env, vaddr, 1, MMU_INST_FETCH, - cpu_mmu_index(env, true), GETPC()); + cpu_mmu_index(env_cpu(env), true), GETPC()); } void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v) diff --git a/target/xtensa/translate.c 
b/target/xtensa/translate.c index de899405994..b206d57fc4c 100644 --- a/target/xtensa/translate.c +++ b/target/xtensa/translate.c @@ -1123,27 +1123,13 @@ static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc) return xtensa_op0_insn_len(dc, b0); } -static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc) -{ - unsigned i; - - for (i = 0; i < dc->config->nibreak; ++i) { - if ((env->sregs[IBREAKENABLE] & (1 << i)) && - env->sregs[IBREAKA + i] == dc->pc) { - gen_debug_exception(dc, DEBUGCAUSE_IB); - break; - } - } -} - static void xtensa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); - CPUXtensaState *env = cpu_env(cpu); uint32_t tb_flags = dc->base.tb->flags; - dc->config = env->config; + dc->config = cpu_env(cpu)->config; dc->pc = dc->base.pc_first; dc->ring = tb_flags & XTENSA_TBFLAG_RING_MASK; dc->cring = (tb_flags & XTENSA_TBFLAG_EXCM) ? 0 : dc->ring; @@ -1205,10 +1191,6 @@ static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) gen_set_label(label); } - if (dc->debug) { - gen_ibreak_check(env, dc); - } - disas_xtensa_insn(env, dc); if (dc->icount) { @@ -1256,7 +1238,7 @@ static const TranslatorOps xtensa_translator_ops = { }; void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - target_ulong pc, void *host_pc) + vaddr pc, void *host_pc) { DisasContext dc = {}; translator_loop(cpu, tb, max_insns, pc, host_pc, @@ -1265,8 +1247,7 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - XtensaCPU *cpu = XTENSA_CPU(cs); - CPUXtensaState *env = &cpu->env; + CPUXtensaState *env = cpu_env(cs); xtensa_isa isa = env->config->isa; int i, j; diff --git a/tcg/aarch64/tcg-target-con-set.h b/tcg/aarch64/tcg-target-con-set.h index 3fdee26a3d6..44fcc1206e0 100644 --- a/tcg/aarch64/tcg-target-con-set.h +++ b/tcg/aarch64/tcg-target-con-set.h @@ -10,7 +10,7 @@ * tcg-target-con-str.h; the constraint combination is inclusive or. 
*/ C_O0_I1(r) -C_O0_I2(r, rA) +C_O0_I2(r, rC) C_O0_I2(rZ, r) C_O0_I2(w, r) C_O0_I3(rZ, rZ, r) @@ -22,6 +22,7 @@ C_O1_I2(r, 0, rZ) C_O1_I2(r, r, r) C_O1_I2(r, r, rA) C_O1_I2(r, r, rAL) +C_O1_I2(r, r, rC) C_O1_I2(r, r, ri) C_O1_I2(r, r, rL) C_O1_I2(r, rZ, rZ) @@ -31,6 +32,6 @@ C_O1_I2(w, w, wN) C_O1_I2(w, w, wO) C_O1_I2(w, w, wZ) C_O1_I3(w, w, w, w) -C_O1_I4(r, r, rA, rZ, rZ) +C_O1_I4(r, r, rC, rZ, rZ) C_O2_I1(r, r, r) C_O2_I4(r, r, rZ, rZ, rA, rMZ) diff --git a/tcg/aarch64/tcg-target-con-str.h b/tcg/aarch64/tcg-target-con-str.h index fb1a845b4f0..48e1722c683 100644 --- a/tcg/aarch64/tcg-target-con-str.h +++ b/tcg/aarch64/tcg-target-con-str.h @@ -16,6 +16,7 @@ REGS('w', ALL_VECTOR_REGS) * CONST(letter, TCG_CT_CONST_* bit set) */ CONST('A', TCG_CT_CONST_AIMM) +CONST('C', TCG_CT_CONST_CMP) CONST('L', TCG_CT_CONST_LIMM) CONST('M', TCG_CT_CONST_MONE) CONST('O', TCG_CT_CONST_ORRI) diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index a3efa1e67a5..56fc9cb9e00 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -105,6 +105,18 @@ static bool reloc_pc19(tcg_insn_unit *src_rw, const tcg_insn_unit *target) return false; } +static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target) +{ + const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); + ptrdiff_t offset = target - src_rx; + + if (offset == sextract64(offset, 0, 14)) { + *src_rw = deposit32(*src_rw, 5, 14, offset); + return true; + } + return false; +} + static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { @@ -115,6 +127,8 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, return reloc_pc26(code_ptr, (const tcg_insn_unit *)value); case R_AARCH64_CONDBR19: return reloc_pc19(code_ptr, (const tcg_insn_unit *)value); + case R_AARCH64_TSTBR14: + return reloc_pc14(code_ptr, (const tcg_insn_unit *)value); default: g_assert_not_reached(); } @@ -126,6 +140,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, #define TCG_CT_CONST_MONE 0x800 #define TCG_CT_CONST_ORRI 0x1000 #define TCG_CT_CONST_ANDI 0x2000 +#define TCG_CT_CONST_CMP 0x4000 #define ALL_GENERAL_REGS 0xffffffffu #define ALL_VECTOR_REGS 0xffffffff00000000ull @@ -270,7 +285,8 @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8) } } -static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece) { if (ct & TCG_CT_CONST) { return 1; @@ -278,6 +294,15 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) if (type == TCG_TYPE_I32) { val = (int32_t)val; } + + if (ct & TCG_CT_CONST_CMP) { + if (is_tst_cond(cond)) { + ct |= TCG_CT_CONST_LIMM; + } else { + ct |= TCG_CT_CONST_AIMM; + } + } + if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) { return 1; } @@ -344,6 +369,9 @@ static const enum aarch64_cond_code tcg_cond_to_aarch64[] = { [TCG_COND_GTU] = COND_HI, [TCG_COND_GEU] = COND_HS, [TCG_COND_LEU] = COND_LS, + /* bit test */ + [TCG_COND_TSTEQ] = COND_EQ, + [TCG_COND_TSTNE] = COND_NE, }; typedef enum { @@ -366,6 +394,10 @@ typedef enum { /* Conditional branch (immediate). */ I3202_B_C = 0x54000000, + /* Test and branch (immediate). */ + I3205_TBZ = 0x36000000, + I3205_TBNZ = 0x37000000, + /* Unconditional branch (immediate). 
*/ I3206_B = 0x14000000, I3206_BL = 0x94000000, @@ -646,6 +678,14 @@ static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn, tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5); } +static void tcg_out_insn_3205(TCGContext *s, AArch64Insn insn, + TCGReg rt, int imm6, int imm14) +{ + insn |= (imm6 & 0x20) << (31 - 5); + insn |= (imm6 & 0x1f) << 19; + tcg_out32(s, insn | (imm14 & 0x3fff) << 5 | rt); +} + static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26) { tcg_out32(s, insn | (imm26 & 0x03ffffff)); @@ -1341,19 +1381,25 @@ static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd, tcg_out_bfm(s, ext, rd, rn, a, b); } -static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a, +static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGCond cond, TCGReg a, tcg_target_long b, bool const_b) { - if (const_b) { - /* Using CMP or CMN aliases. */ - if (b >= 0) { + if (is_tst_cond(cond)) { + if (!const_b) { + tcg_out_insn(s, 3510, ANDS, ext, TCG_REG_XZR, a, b); + } else { + tcg_out_logicali(s, I3404_ANDSI, ext, TCG_REG_XZR, a, b); + } + } else { + if (!const_b) { + tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b); + } else if (b >= 0) { + tcg_debug_assert(is_aimm(b)); tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b); } else { + tcg_debug_assert(is_aimm(-b)); tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b); } - } else { - /* Using CMP alias SUBS wzr, Wn, Wm */ - tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b); } } @@ -1394,30 +1440,76 @@ static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l) static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a, TCGArg b, bool b_const, TCGLabel *l) { - intptr_t offset; - bool need_cmp; - - if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) { - need_cmp = false; - } else { - need_cmp = true; - tcg_out_cmp(s, ext, a, b, b_const); + int tbit = -1; + bool need_cmp = true; + + switch (c) { + case TCG_COND_EQ: + case TCG_COND_NE: + /* cmp xN,0; b.ne L -> cbnz xN,L */ + if (b_const && b == 0) { + need_cmp = false; + } + break; + case TCG_COND_LT: + case TCG_COND_GE: + /* cmp xN,0; b.mi L -> tbnz xN,63,L */ + if (b_const && b == 0) { + c = (c == TCG_COND_LT ? TCG_COND_TSTNE : TCG_COND_TSTEQ); + tbit = ext ? 
63 : 31; + need_cmp = false; + } + break; + case TCG_COND_TSTEQ: + case TCG_COND_TSTNE: + /* tst xN,0xffffffff; b.ne L -> cbnz wN,L */ + if (b_const && b == UINT32_MAX) { + c = tcg_tst_eqne_cond(c); + ext = TCG_TYPE_I32; + need_cmp = false; + break; + } + /* tst xN,1< tbnz xN,B,L */ + if (b_const && is_power_of_2(b)) { + tbit = ctz64(b); + need_cmp = false; + } + break; + default: + break; } - if (!l->has_value) { + if (need_cmp) { + tcg_out_cmp(s, ext, c, a, b, b_const); tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0); - offset = tcg_in32(s) >> 5; - } else { - offset = tcg_pcrel_diff(s, l->u.value_ptr) >> 2; - tcg_debug_assert(offset == sextract64(offset, 0, 19)); + tcg_out_insn(s, 3202, B_C, c, 0); + return; } - if (need_cmp) { - tcg_out_insn(s, 3202, B_C, c, offset); - } else if (c == TCG_COND_EQ) { - tcg_out_insn(s, 3201, CBZ, ext, a, offset); + if (tbit >= 0) { + tcg_out_reloc(s, s->code_ptr, R_AARCH64_TSTBR14, l, 0); + switch (c) { + case TCG_COND_TSTEQ: + tcg_out_insn(s, 3205, TBZ, a, tbit, 0); + break; + case TCG_COND_TSTNE: + tcg_out_insn(s, 3205, TBNZ, a, tbit, 0); + break; + default: + g_assert_not_reached(); + } } else { - tcg_out_insn(s, 3201, CBNZ, ext, a, offset); + tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0); + switch (c) { + case TCG_COND_EQ: + tcg_out_insn(s, 3201, CBZ, ext, a, 0); + break; + case TCG_COND_NE: + tcg_out_insn(s, 3201, CBNZ, ext, a, 0); + break; + default: + g_assert_not_reached(); + } } } @@ -1574,7 +1666,7 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d, } else { AArch64Insn sel = I3506_CSEL; - tcg_out_cmp(s, ext, a0, 0, 1); + tcg_out_cmp(s, ext, TCG_COND_NE, a0, 0, 1); tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP0, a1); if (const_b) { @@ -1719,7 +1811,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, addr_adj, compare_mask); /* Perform the address comparison. */ - tcg_out_cmp(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 0); + tcg_out_cmp(s, addr_type, TCG_COND_NE, TCG_REG_TMP0, TCG_REG_TMP2, 0); /* If not equal, we jump to the slow path. */ ldst->label_ptr[0] = s->code_ptr; @@ -2275,7 +2367,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_setcond_i64: - tcg_out_cmp(s, ext, a1, a2, c2); + tcg_out_cmp(s, ext, args[3], a1, a2, c2); /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */ tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR, TCG_REG_XZR, tcg_invert_cond(args[3])); @@ -2285,7 +2377,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_negsetcond_i64: - tcg_out_cmp(s, ext, a1, a2, c2); + tcg_out_cmp(s, ext, args[3], a1, a2, c2); /* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond). 
*/ tcg_out_insn(s, 3506, CSINV, ext, a0, TCG_REG_XZR, TCG_REG_XZR, tcg_invert_cond(args[3])); @@ -2295,7 +2387,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_movcond_i64: - tcg_out_cmp(s, ext, a1, a2, c2); + tcg_out_cmp(s, ext, args[5], a1, a2, c2); tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]); break; @@ -2895,11 +2987,13 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_add_i64: case INDEX_op_sub_i32: case INDEX_op_sub_i64: + return C_O1_I2(r, r, rA); + case INDEX_op_setcond_i32: case INDEX_op_setcond_i64: case INDEX_op_negsetcond_i32: case INDEX_op_negsetcond_i64: - return C_O1_I2(r, r, rA); + return C_O1_I2(r, r, rC); case INDEX_op_mul_i32: case INDEX_op_mul_i64: @@ -2949,11 +3043,11 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: - return C_O0_I2(r, rA); + return C_O0_I2(r, rC); case INDEX_op_movcond_i32: case INDEX_op_movcond_i64: - return C_O1_I4(r, r, rA, rZ, rZ); + return C_O1_I4(r, r, rC, rZ, rZ); case INDEX_op_qemu_ld_a32_i32: case INDEX_op_qemu_ld_a64_i32: diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h index 33f15a564ab..85d5746e475 100644 --- a/tcg/aarch64/tcg-target.h +++ b/tcg/aarch64/tcg-target.h @@ -55,7 +55,11 @@ typedef enum { #define TCG_TARGET_CALL_STACK_OFFSET 0 #define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL #define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN +#ifdef CONFIG_DARWIN +# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL +#else +# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN +#endif #define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL #define have_lse (cpuinfo & CPUINFO_LSE) @@ -138,6 +142,8 @@ typedef enum { #define TCG_TARGET_HAS_qemu_ldst_i128 1 #endif +#define TCG_TARGET_HAS_tst 1 + #define TCG_TARGET_HAS_v64 1 #define TCG_TARGET_HAS_v128 1 #define TCG_TARGET_HAS_v256 0 diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index c9a47b7ea14..6a04c73c76d 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -501,7 +501,8 @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8) * mov operand2: values represented with x << (2 * y), x < 0x100 * add, sub, eor...: ditto */ -static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece) { if (ct & TCG_CT_CONST) { return 1; @@ -1190,6 +1191,33 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) } } +static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a, + TCGArg b, int b_const) +{ + if (!is_tst_cond(cond)) { + tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const); + return cond; + } + + cond = tcg_tst_eqne_cond(cond); + if (b_const) { + int imm12 = encode_imm(b); + + /* + * The compare constraints allow rIN, but TST does not support N. + * Be prepared to load the constant into a scratch register. 
+ */ + if (imm12 >= 0) { + tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12); + return cond; + } + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b); + b = TCG_REG_TMP; + } + tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0)); + return cond; +} + static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args, const int *const_args) { @@ -1217,6 +1245,13 @@ static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args, tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl); return cond; + case TCG_COND_TSTEQ: + case TCG_COND_TSTNE: + /* Similar, but with TST instead of CMP. */ + tcg_out_dat_rI(s, COND_AL, ARITH_TST, 0, ah, bh, const_bh); + tcg_out_dat_rI(s, COND_EQ, ARITH_TST, 0, al, bl, const_bl); + return tcg_tst_eqne_cond(cond); + case TCG_COND_LT: case TCG_COND_GE: /* We perform a double-word subtraction and examine the result. @@ -1808,9 +1843,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, /* Constraints mean that v2 is always in the same register as dest, * so we only need to do "if condition passed, move v1 to dest". */ - tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, - args[1], args[2], const_args[2]); - tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV, + c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]); + tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV, ARITH_MVN, args[0], 0, args[3], const_args[3]); break; case INDEX_op_add_i32: @@ -1960,25 +1994,21 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_brcond_i32: - tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, - args[0], args[1], const_args[1]); - tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], - arg_label(args[3])); + c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]); + tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3])); break; case INDEX_op_setcond_i32: - tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, - args[1], args[2], const_args[2]); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]], + c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]); + tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])], + tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], ARITH_MOV, args[0], 0, 0); break; case INDEX_op_negsetcond_i32: - tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, - args[1], args[2], const_args[2]); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]], + c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]); + tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MVN, args[0], 0, 0); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])], + tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], ARITH_MOV, args[0], 0, 0); break; diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h index a712cc80adf..a43875cb09a 100644 --- a/tcg/arm/tcg-target.h +++ b/tcg/arm/tcg-target.h @@ -125,6 +125,8 @@ extern bool use_neon_instructions; #define TCG_TARGET_HAS_qemu_ldst_i128 0 +#define TCG_TARGET_HAS_tst 1 + #define TCG_TARGET_HAS_v64 use_neon_instructions #define TCG_TARGET_HAS_v128 use_neon_instructions #define TCG_TARGET_HAS_v256 0 diff --git a/accel/tcg/debuginfo.c b/tcg/debuginfo.c similarity index 98% rename from accel/tcg/debuginfo.c rename to tcg/debuginfo.c index 71c66d04d12..3753f7ef67c 100644 --- a/accel/tcg/debuginfo.c +++ b/tcg/debuginfo.c @@ -6,11 +6,10 @@ #include "qemu/osdep.h" #include "qemu/lockable.h" +#include "tcg/debuginfo.h" #include -#include "debuginfo.h" - static 
QemuMutex lock; static Dwfl *dwfl; static const Dwfl_Callbacks dwfl_callbacks = { diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h index 7d00a7dde80..e24241cfa2d 100644 --- a/tcg/i386/tcg-target-con-set.h +++ b/tcg/i386/tcg-target-con-set.h @@ -20,7 +20,7 @@ C_O0_I2(L, L) C_O0_I2(qi, r) C_O0_I2(re, r) C_O0_I2(ri, r) -C_O0_I2(r, re) +C_O0_I2(r, reT) C_O0_I2(s, L) C_O0_I2(x, r) C_O0_I3(L, L, L) @@ -34,7 +34,7 @@ C_O1_I1(r, r) C_O1_I1(x, r) C_O1_I1(x, x) C_O1_I2(q, 0, qi) -C_O1_I2(q, r, re) +C_O1_I2(q, r, reT) C_O1_I2(r, 0, ci) C_O1_I2(r, 0, r) C_O1_I2(r, 0, re) @@ -50,7 +50,7 @@ C_N1_I2(r, r, r) C_N1_I2(r, r, rW) C_O1_I3(x, 0, x, x) C_O1_I3(x, x, x, x) -C_O1_I4(r, r, re, r, 0) +C_O1_I4(r, r, reT, r, 0) C_O1_I4(r, r, r, ri, ri) C_O2_I1(r, r, L) C_O2_I2(a, d, a, r) diff --git a/tcg/i386/tcg-target-con-str.h b/tcg/i386/tcg-target-con-str.h index 95a30e58cd2..cc22db227ba 100644 --- a/tcg/i386/tcg-target-con-str.h +++ b/tcg/i386/tcg-target-con-str.h @@ -28,5 +28,6 @@ REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st8_i32 data */ */ CONST('e', TCG_CT_CONST_S32) CONST('I', TCG_CT_CONST_I32) +CONST('T', TCG_CT_CONST_TST) CONST('W', TCG_CT_CONST_WSZ) CONST('Z', TCG_CT_CONST_U32) diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index a83f8aab304..c6ba4986236 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -132,6 +132,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define TCG_CT_CONST_U32 0x200 #define TCG_CT_CONST_I32 0x400 #define TCG_CT_CONST_WSZ 0x800 +#define TCG_CT_CONST_TST 0x1000 /* Registers used with L constraint, which are the first argument registers on x86_64, and two random call clobbered registers on @@ -195,13 +196,15 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, } /* test if a constant matches the constraint */ -static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece) { if (ct & TCG_CT_CONST) { return 1; } if (type == TCG_TYPE_I32) { - if (ct & (TCG_CT_CONST_S32 | TCG_CT_CONST_U32 | TCG_CT_CONST_I32)) { + if (ct & (TCG_CT_CONST_S32 | TCG_CT_CONST_U32 | + TCG_CT_CONST_I32 | TCG_CT_CONST_TST)) { return 1; } } else { @@ -214,6 +217,17 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) { return 1; } + /* + * This will be used in combination with TCG_CT_CONST_S32, + * so "normal" TESTQ is already matched. Also accept: + * TESTQ -> TESTL (uint32_t) + * TESTQ -> BT (is_power_of_2) + */ + if ((ct & TCG_CT_CONST_TST) + && is_tst_cond(cond) + && (val == (uint32_t)val || is_power_of_2(val))) { + return 1; + } } if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { return 1; @@ -244,6 +258,7 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) #define P_VEXL 0x80000 /* Set VEX.L = 1 */ #define P_EVEX 0x100000 /* Requires EVEX encoding */ +#define OPC_ARITH_EbIb (0x80) #define OPC_ARITH_EvIz (0x81) #define OPC_ARITH_EvIb (0x83) #define OPC_ARITH_GvEv (0x03) /* ... 
plus (ARITH_FOO << 3) */ @@ -394,6 +409,7 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) #define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16) #define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2) #define OPC_SHRD_Ib (0xac | P_EXT) +#define OPC_TESTB (0x84) #define OPC_TESTL (0x85) #define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3) #define OPC_UD2 (0x0b | P_EXT) @@ -440,6 +456,12 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) #define OPC_GRP3_Ev (0xf7) #define OPC_GRP5 (0xff) #define OPC_GRP14 (0x73 | P_EXT | P_DATA16) +#define OPC_GRPBT (0xba | P_EXT) + +#define OPC_GRPBT_BT 4 +#define OPC_GRPBT_BTS 5 +#define OPC_GRPBT_BTR 6 +#define OPC_GRPBT_BTC 7 /* Group 1 opcode extensions for 0x80-0x83. These are also used as modifiers for OPC_ARITH. */ @@ -504,6 +526,8 @@ static const uint8_t tcg_cond_to_jcc[] = { [TCG_COND_GEU] = JCC_JAE, [TCG_COND_LEU] = JCC_JBE, [TCG_COND_GTU] = JCC_JA, + [TCG_COND_TSTEQ] = JCC_JE, + [TCG_COND_TSTNE] = JCC_JNE, }; #if TCG_TARGET_REG_BITS == 64 @@ -1316,23 +1340,41 @@ static void tgen_arithi(TCGContext *s, int c, int r0, c &= 7; } - /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce - partial flags update stalls on Pentium4 and are not recommended - by current Intel optimization manuals. */ - if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) { - int is_inc = (c == ARITH_ADD) ^ (val < 0); - if (TCG_TARGET_REG_BITS == 64) { - /* The single-byte increment encodings are re-tasked as the - REX prefixes. Use the MODRM encoding. */ - tcg_out_modrm(s, OPC_GRP5 + rexw, - (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0); - } else { - tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0); + switch (c) { + case ARITH_ADD: + case ARITH_SUB: + if (!cf) { + /* + * ??? While INC is 2 bytes shorter than ADDL $1, they also induce + * partial flags update stalls on Pentium4 and are not recommended + * by current Intel optimization manuals. + */ + if (val == 1 || val == -1) { + int is_inc = (c == ARITH_ADD) ^ (val < 0); + if (TCG_TARGET_REG_BITS == 64) { + /* + * The single-byte increment encodings are re-tasked + * as the REX prefixes. Use the MODRM encoding. + */ + tcg_out_modrm(s, OPC_GRP5 + rexw, + (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0); + } else { + tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0); + } + return; + } + if (val == 128) { + /* + * Facilitate using an 8-bit immediate. Carry is inverted + * by this transformation, so do it only if cf == 0. 
+ */ + c ^= ARITH_ADD ^ ARITH_SUB; + val = -128; + } } - return; - } + break; - if (c == ARITH_AND) { + case ARITH_AND: if (TCG_TARGET_REG_BITS == 64) { if (val == 0xffffffffu) { tcg_out_ext32u(s, r0, r0); @@ -1351,6 +1393,17 @@ static void tgen_arithi(TCGContext *s, int c, int r0, tcg_out_ext16u(s, r0, r0); return; } + break; + + case ARITH_OR: + case ARITH_XOR: + if (val >= 0x80 && val <= 0xff + && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) { + tcg_out_modrm(s, OPC_ARITH_EbIb + P_REXB_RM, c, r0); + tcg_out8(s, val); + return; + } + break; } if (val == (int8_t)val) { @@ -1418,27 +1471,101 @@ static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, bool small) } } -static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2, - int const_arg2, int rexw) +static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1, + TCGArg arg2, int const_arg2, int rexw) { - if (const_arg2) { - if (arg2 == 0) { - /* test r, r */ + int jz, js; + + if (!is_tst_cond(cond)) { + if (!const_arg2) { + tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2); + } else if (arg2 == 0) { tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1); } else { + tcg_debug_assert(!rexw || arg2 == (int32_t)arg2); tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0); } - } else { - tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2); + return tcg_cond_to_jcc[cond]; + } + + jz = tcg_cond_to_jcc[cond]; + js = (cond == TCG_COND_TSTNE ? JCC_JS : JCC_JNS); + + if (!const_arg2) { + tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg2); + return jz; + } + + if (arg2 <= 0xff && (TCG_TARGET_REG_BITS == 64 || arg1 < 4)) { + if (arg2 == 0x80) { + tcg_out_modrm(s, OPC_TESTB | P_REXB_R, arg1, arg1); + return js; + } + if (arg2 == 0xff) { + tcg_out_modrm(s, OPC_TESTB | P_REXB_R, arg1, arg1); + return jz; + } + tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, arg1); + tcg_out8(s, arg2); + return jz; + } + + if ((arg2 & ~0xff00) == 0 && arg1 < 4) { + if (arg2 == 0x8000) { + tcg_out_modrm(s, OPC_TESTB, arg1 + 4, arg1 + 4); + return js; + } + if (arg2 == 0xff00) { + tcg_out_modrm(s, OPC_TESTB, arg1 + 4, arg1 + 4); + return jz; + } + tcg_out_modrm(s, OPC_GRP3_Eb, EXT3_TESTi, arg1 + 4); + tcg_out8(s, arg2 >> 8); + return jz; + } + + if (arg2 == 0xffff) { + tcg_out_modrm(s, OPC_TESTL | P_DATA16, arg1, arg1); + return jz; } + if (arg2 == 0xffffffffu) { + tcg_out_modrm(s, OPC_TESTL, arg1, arg1); + return jz; + } + + if (is_power_of_2(rexw ? arg2 : (uint32_t)arg2)) { + int jc = (cond == TCG_COND_TSTNE ? JCC_JB : JCC_JAE); + int sh = ctz64(arg2); + + rexw = (sh & 32 ? 
P_REXW : 0); + if ((sh & 31) == 31) { + tcg_out_modrm(s, OPC_TESTL | rexw, arg1, arg1); + return js; + } else { + tcg_out_modrm(s, OPC_GRPBT | rexw, OPC_GRPBT_BT, arg1); + tcg_out8(s, sh); + return jc; + } + } + + if (rexw) { + if (arg2 == (uint32_t)arg2) { + rexw = 0; + } else { + tcg_debug_assert(arg2 == (int32_t)arg2); + } + } + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_TESTi, arg1); + tcg_out32(s, arg2); + return jz; } static void tcg_out_brcond(TCGContext *s, int rexw, TCGCond cond, TCGArg arg1, TCGArg arg2, int const_arg2, TCGLabel *label, bool small) { - tcg_out_cmp(s, arg1, arg2, const_arg2, rexw); - tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small); + int jcc = tcg_out_cmp(s, cond, arg1, arg2, const_arg2, rexw); + tcg_out_jxx(s, jcc, label, small); } #if TCG_TARGET_REG_BITS == 32 @@ -1447,18 +1574,21 @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args, { TCGLabel *label_next = gen_new_label(); TCGLabel *label_this = arg_label(args[5]); + TCGCond cond = args[4]; - switch(args[4]) { + switch (cond) { case TCG_COND_EQ: - tcg_out_brcond(s, 0, TCG_COND_NE, args[0], args[2], const_args[2], - label_next, 1); - tcg_out_brcond(s, 0, TCG_COND_EQ, args[1], args[3], const_args[3], + case TCG_COND_TSTEQ: + tcg_out_brcond(s, 0, tcg_invert_cond(cond), + args[0], args[2], const_args[2], label_next, 1); + tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3], label_this, small); break; case TCG_COND_NE: - tcg_out_brcond(s, 0, TCG_COND_NE, args[0], args[2], const_args[2], + case TCG_COND_TSTNE: + tcg_out_brcond(s, 0, cond, args[0], args[2], const_args[2], label_this, small); - tcg_out_brcond(s, 0, TCG_COND_NE, args[1], args[3], const_args[3], + tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3], label_this, small); break; case TCG_COND_LT: @@ -1530,6 +1660,7 @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond, { bool inv = false; bool cleared; + int jcc; switch (cond) { case TCG_COND_NE: @@ -1566,7 +1697,7 @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond, * We can then use NEG or INC to produce the desired result. * This is always smaller than the SETCC expansion. */ - tcg_out_cmp(s, arg1, arg2, const_arg2, rexw); + tcg_out_cmp(s, TCG_COND_LTU, arg1, arg2, const_arg2, rexw); /* X - X - C = -C = (C ? -1 : 0) */ tgen_arithr(s, ARITH_SBB + (neg ? 
rexw : 0), dest, dest); @@ -1613,8 +1744,8 @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond, cleared = true; } - tcg_out_cmp(s, arg1, arg2, const_arg2, rexw); - tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest); + jcc = tcg_out_cmp(s, cond, arg1, arg2, const_arg2, rexw); + tcg_out_modrm(s, OPC_SETCC | jcc, 0, dest); if (!cleared) { tcg_out_ext8u(s, dest, dest); @@ -1668,14 +1799,14 @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args, } #endif -static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw, +static void tcg_out_cmov(TCGContext *s, int jcc, int rexw, TCGReg dest, TCGReg v1) { if (have_cmov) { - tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1); + tcg_out_modrm(s, OPC_CMOVCC | jcc | rexw, dest, v1); } else { TCGLabel *over = gen_new_label(); - tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1); + tcg_out_jxx(s, jcc ^ 1, over, 1); tcg_out_mov(s, TCG_TYPE_I32, dest, v1); tcg_out_label(s, over); } @@ -1685,8 +1816,8 @@ static void tcg_out_movcond(TCGContext *s, int rexw, TCGCond cond, TCGReg dest, TCGReg c1, TCGArg c2, int const_c2, TCGReg v1) { - tcg_out_cmp(s, c1, c2, const_c2, rexw); - tcg_out_cmov(s, cond, rexw, dest, v1); + int jcc = tcg_out_cmp(s, cond, c1, c2, const_c2, rexw); + tcg_out_cmov(s, jcc, rexw, dest, v1); } static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1, @@ -1698,12 +1829,12 @@ static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1, tcg_debug_assert(arg2 == (rexw ? 64 : 32)); } else { tcg_debug_assert(dest != arg2); - tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2); + tcg_out_cmov(s, JCC_JB, rexw, dest, arg2); } } else { tcg_debug_assert(dest != arg2); tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1); - tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2); + tcg_out_cmov(s, JCC_JE, rexw, dest, arg2); } } @@ -1716,7 +1847,7 @@ static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1, tcg_debug_assert(arg2 == (rexw ? 64 : 32)); } else { tcg_debug_assert(dest != arg2); - tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2); + tcg_out_cmov(s, JCC_JB, rexw, dest, arg2); } } else { tcg_debug_assert(!const_a2); @@ -1728,8 +1859,8 @@ static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1, tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0); /* Since we have destroyed the flags from BSR, we have to re-test. */ - tcg_out_cmp(s, arg1, 0, 1, rexw); - tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2); + int jcc = tcg_out_cmp(s, TCG_COND_EQ, arg1, 0, 1, rexw); + tcg_out_cmov(s, jcc, rexw, dest, arg2); } } @@ -1794,23 +1925,6 @@ static void tcg_out_nopn(TCGContext *s, int n) tcg_out8(s, 0x90); } -/* Test register R vs immediate bits I, setting Z flag for EQ/NE. */ -static void __attribute__((unused)) -tcg_out_testi(TCGContext *s, TCGReg r, uint32_t i) -{ - /* - * This is used for testing alignment, so we can usually use testb. - * For i686, we have to use testl for %esi/%edi. 
- */ - if (i <= 0xff && (TCG_TARGET_REG_BITS == 64 || r < 4)) { - tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, r); - tcg_out8(s, i); - } else { - tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, r); - tcg_out32(s, i); - } -} - typedef struct { TCGReg base; int index; @@ -2071,16 +2185,17 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0, offsetof(CPUTLBEntry, addend)); } else if (a_mask) { - ldst = new_ldst_label(s); + int jcc; + ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; ldst->addrlo_reg = addrlo; ldst->addrhi_reg = addrhi; - tcg_out_testi(s, addrlo, a_mask); /* jne slow_path */ - tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); + jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addrlo, a_mask, true, false); + tcg_out_opc(s, OPC_JCC_long + jcc, 0, 0, 0); ldst->label_ptr[0] = s->code_ptr; s->code_ptr += 4; } @@ -2226,9 +2341,10 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, } else { TCGLabel *l1 = gen_new_label(); TCGLabel *l2 = gen_new_label(); + int jcc; - tcg_out_testi(s, h.base, 15); - tcg_out_jxx(s, JCC_JNE, l1, true); + jcc = tcg_out_cmp(s, TCG_COND_TSTNE, h.base, 15, true, false); + tcg_out_jxx(s, jcc, l1, true); tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_VxWx + h.seg, TCG_TMP_VEC, 0, @@ -2354,9 +2470,10 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, } else { TCGLabel *l1 = gen_new_label(); TCGLabel *l2 = gen_new_label(); + int jcc; - tcg_out_testi(s, h.base, 15); - tcg_out_jxx(s, JCC_JNE, l1, true); + jcc = tcg_out_cmp(s, TCG_COND_TSTNE, h.base, 15, true, false); + tcg_out_jxx(s, jcc, l1, true); tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_WxVx + h.seg, TCG_TMP_VEC, 0, @@ -3343,7 +3460,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: - return C_O0_I2(r, re); + return C_O0_I2(r, reT); case INDEX_op_bswap16_i32: case INDEX_op_bswap16_i64: @@ -3391,11 +3508,11 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_setcond_i64: case INDEX_op_negsetcond_i32: case INDEX_op_negsetcond_i64: - return C_O1_I2(q, r, re); + return C_O1_I2(q, r, reT); case INDEX_op_movcond_i32: case INDEX_op_movcond_i64: - return C_O1_I4(r, r, re, r, 0); + return C_O1_I4(r, r, reT, r, 0); case INDEX_op_div2_i32: case INDEX_op_div2_i64: diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h index fa34deec47b..a10d4e1fcec 100644 --- a/tcg/i386/tcg-target.h +++ b/tcg/i386/tcg-target.h @@ -198,6 +198,8 @@ typedef enum { #define TCG_TARGET_HAS_qemu_ldst_i128 \ (TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA)) +#define TCG_TARGET_HAS_tst 1 + /* We do not support older SSE systems, only beginning with AVX1. 
*/ #define TCG_TARGET_HAS_v64 have_avx1 #define TCG_TARGET_HAS_v128 have_avx1 diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index dcf02054580..69c5b8ac4f6 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -183,7 +183,8 @@ static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) } /* test if a constant matches the constraint */ -static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece) { if (ct & TCG_CT_CONST) { return true; diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h index 9c70ebfefc8..fede627bf74 100644 --- a/tcg/loongarch64/tcg-target.h +++ b/tcg/loongarch64/tcg-target.h @@ -169,6 +169,8 @@ typedef enum { #define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX) +#define TCG_TARGET_HAS_tst 0 + #define TCG_TARGET_HAS_v64 0 #define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_LSX) #define TCG_TARGET_HAS_v256 0 diff --git a/tcg/meson.build b/tcg/meson.build index 895a11d3fa2..8251589fd4e 100644 --- a/tcg/meson.build +++ b/tcg/meson.build @@ -22,7 +22,12 @@ if get_option('tcg_interpreter') tcg_ss.add(files('tci.c')) endif -tcg_ss = tcg_ss.apply(config_targetos, strict: false) +tcg_ss.add(when: libdw, if_true: files('debuginfo.c')) +if host_os == 'linux' + tcg_ss.add(files('perf.c')) +endif + +tcg_ss = tcg_ss.apply({}) libtcg_user = static_library('tcg_user', tcg_ss.sources() + genh, diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index 8328dbdecc9..3b5b5c6d5ba 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -188,7 +188,8 @@ static bool is_p2m1(tcg_target_long val) } /* test if a constant matches the constraint */ -static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece) { if (ct & TCG_CT_CONST) { return 1; diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h index b98ffae1d0a..a996aa171dc 100644 --- a/tcg/mips/tcg-target.h +++ b/tcg/mips/tcg-target.h @@ -194,6 +194,8 @@ extern bool use_mips32r2_instructions; #define TCG_TARGET_HAS_qemu_ldst_i128 0 +#define TCG_TARGET_HAS_tst 0 + #define TCG_TARGET_DEFAULT_MO 0 #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS diff --git a/tcg/optimize.c b/tcg/optimize.c index f2d01654c59..2e9e5725a9c 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -124,11 +124,22 @@ static inline bool ts_is_const(TCGTemp *ts) return ts_info(ts)->is_const; } +static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val) +{ + TempOptInfo *ti = ts_info(ts); + return ti->is_const && ti->val == val; +} + static inline bool arg_is_const(TCGArg arg) { return ts_is_const(arg_temp(arg)); } +static inline bool arg_is_const_val(TCGArg arg, uint64_t val) +{ + return ts_is_const_val(arg_temp(arg), val); +} + static inline bool ts_is_copy(TCGTemp *ts) { return ts_info(ts)->next_copy != ts; @@ -353,6 +364,13 @@ static TCGArg arg_new_constant(OptContext *ctx, uint64_t val) return temp_arg(ts); } +static TCGArg arg_new_temp(OptContext *ctx) +{ + TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB); + init_ts_info(ctx, ts); + return temp_arg(ts); +} + static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) { TCGTemp *dst_ts = arg_temp(dst); @@ -614,9 +632,15 @@ static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c) 
return x <= y; case TCG_COND_GTU: return x > y; - default: - g_assert_not_reached(); + case TCG_COND_TSTEQ: + return (x & y) == 0; + case TCG_COND_TSTNE: + return (x & y) != 0; + case TCG_COND_ALWAYS: + case TCG_COND_NEVER: + break; } + g_assert_not_reached(); } static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c) @@ -642,12 +666,18 @@ static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c) return x <= y; case TCG_COND_GTU: return x > y; - default: - g_assert_not_reached(); + case TCG_COND_TSTEQ: + return (x & y) == 0; + case TCG_COND_TSTNE: + return (x & y) != 0; + case TCG_COND_ALWAYS: + case TCG_COND_NEVER: + break; } + g_assert_not_reached(); } -static bool do_constant_folding_cond_eq(TCGCond c) +static int do_constant_folding_cond_eq(TCGCond c) { switch (c) { case TCG_COND_GT: @@ -662,9 +692,14 @@ static bool do_constant_folding_cond_eq(TCGCond c) case TCG_COND_LEU: case TCG_COND_EQ: return 1; - default: - g_assert_not_reached(); + case TCG_COND_TSTEQ: + case TCG_COND_TSTNE: + return -1; + case TCG_COND_ALWAYS: + case TCG_COND_NEVER: + break; } + g_assert_not_reached(); } /* @@ -689,11 +724,13 @@ static int do_constant_folding_cond(TCGType type, TCGArg x, } } else if (args_are_copies(x, y)) { return do_constant_folding_cond_eq(c); - } else if (arg_is_const(y) && arg_info(y)->val == 0) { + } else if (arg_is_const_val(y, 0)) { switch (c) { case TCG_COND_LTU: + case TCG_COND_TSTNE: return 0; case TCG_COND_GEU: + case TCG_COND_TSTEQ: return 1; default: return -1; @@ -702,43 +739,6 @@ static int do_constant_folding_cond(TCGType type, TCGArg x, return -1; } -/* - * Return -1 if the condition can't be simplified, - * and the result of the condition (0 or 1) if it can. - */ -static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) -{ - TCGArg al = p1[0], ah = p1[1]; - TCGArg bl = p2[0], bh = p2[1]; - - if (arg_is_const(bl) && arg_is_const(bh)) { - tcg_target_ulong blv = arg_info(bl)->val; - tcg_target_ulong bhv = arg_info(bh)->val; - uint64_t b = deposit64(blv, 32, 32, bhv); - - if (arg_is_const(al) && arg_is_const(ah)) { - tcg_target_ulong alv = arg_info(al)->val; - tcg_target_ulong ahv = arg_info(ah)->val; - uint64_t a = deposit64(alv, 32, 32, ahv); - return do_constant_folding_cond_64(a, b, c); - } - if (b == 0) { - switch (c) { - case TCG_COND_LTU: - return 0; - case TCG_COND_GEU: - return 1; - default: - break; - } - } - } - if (args_are_copies(al, bl) && args_are_copies(ah, bh)) { - return do_constant_folding_cond_eq(c); - } - return -1; -} - /** * swap_commutative: * @dest: TCGArg of the destination argument, or NO_DEST. @@ -785,6 +785,166 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2) return false; } +/* + * Return -1 if the condition can't be simplified, + * and the result of the condition (0 or 1) if it can. 
+ */ +static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest, + TCGArg *p1, TCGArg *p2, TCGArg *pcond) +{ + TCGCond cond; + bool swap; + int r; + + swap = swap_commutative(dest, p1, p2); + cond = *pcond; + if (swap) { + *pcond = cond = tcg_swap_cond(cond); + } + + r = do_constant_folding_cond(ctx->type, *p1, *p2, cond); + if (r >= 0) { + return r; + } + if (!is_tst_cond(cond)) { + return -1; + } + + /* + * TSTNE x,x -> NE x,0 + * TSTNE x,-1 -> NE x,0 + */ + if (args_are_copies(*p1, *p2) || arg_is_const_val(*p2, -1)) { + *p2 = arg_new_constant(ctx, 0); + *pcond = tcg_tst_eqne_cond(cond); + return -1; + } + + /* TSTNE x,sign -> LT x,0 */ + if (arg_is_const_val(*p2, (ctx->type == TCG_TYPE_I32 + ? INT32_MIN : INT64_MIN))) { + *p2 = arg_new_constant(ctx, 0); + *pcond = tcg_tst_ltge_cond(cond); + return -1; + } + + /* Expand to AND with a temporary if no backend support. */ + if (!TCG_TARGET_HAS_tst) { + TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32 + ? INDEX_op_and_i32 : INDEX_op_and_i64); + TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3); + TCGArg tmp = arg_new_temp(ctx); + + op2->args[0] = tmp; + op2->args[1] = *p1; + op2->args[2] = *p2; + + *p1 = tmp; + *p2 = arg_new_constant(ctx, 0); + *pcond = tcg_tst_eqne_cond(cond); + } + return -1; +} + +static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args) +{ + TCGArg al, ah, bl, bh; + TCGCond c; + bool swap; + int r; + + swap = swap_commutative2(args, args + 2); + c = args[4]; + if (swap) { + args[4] = c = tcg_swap_cond(c); + } + + al = args[0]; + ah = args[1]; + bl = args[2]; + bh = args[3]; + + if (arg_is_const(bl) && arg_is_const(bh)) { + tcg_target_ulong blv = arg_info(bl)->val; + tcg_target_ulong bhv = arg_info(bh)->val; + uint64_t b = deposit64(blv, 32, 32, bhv); + + if (arg_is_const(al) && arg_is_const(ah)) { + tcg_target_ulong alv = arg_info(al)->val; + tcg_target_ulong ahv = arg_info(ah)->val; + uint64_t a = deposit64(alv, 32, 32, ahv); + + r = do_constant_folding_cond_64(a, b, c); + if (r >= 0) { + return r; + } + } + + if (b == 0) { + switch (c) { + case TCG_COND_LTU: + case TCG_COND_TSTNE: + return 0; + case TCG_COND_GEU: + case TCG_COND_TSTEQ: + return 1; + default: + break; + } + } + + /* TSTNE x,-1 -> NE x,0 */ + if (b == -1 && is_tst_cond(c)) { + args[3] = args[2] = arg_new_constant(ctx, 0); + args[4] = tcg_tst_eqne_cond(c); + return -1; + } + + /* TSTNE x,sign -> LT x,0 */ + if (b == INT64_MIN && is_tst_cond(c)) { + /* bl must be 0, so copy that to bh */ + args[3] = bl; + args[4] = tcg_tst_ltge_cond(c); + return -1; + } + } + + if (args_are_copies(al, bl) && args_are_copies(ah, bh)) { + r = do_constant_folding_cond_eq(c); + if (r >= 0) { + return r; + } + + /* TSTNE x,x -> NE x,0 */ + if (is_tst_cond(c)) { + args[3] = args[2] = arg_new_constant(ctx, 0); + args[4] = tcg_tst_eqne_cond(c); + return -1; + } + } + + /* Expand to AND with a temporary if no backend support. 
*/ + if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) { + TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3); + TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3); + TCGArg t1 = arg_new_temp(ctx); + TCGArg t2 = arg_new_temp(ctx); + + op1->args[0] = t1; + op1->args[1] = al; + op1->args[2] = bl; + op2->args[0] = t2; + op2->args[1] = ah; + op2->args[2] = bh; + + args[0] = t1; + args[1] = t2; + args[3] = args[2] = arg_new_constant(ctx, 0); + args[4] = tcg_tst_eqne_cond(c); + } + return -1; +} + static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args) { for (int i = 0; i < nb_args; i++) { @@ -954,7 +1114,7 @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx) /* If the binary operation has first argument @i, fold to @i. */ static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i) { - if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) { + if (arg_is_const_val(op->args[1], i)) { return tcg_opt_gen_movi(ctx, op, op->args[0], i); } return false; @@ -963,7 +1123,7 @@ static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i) /* If the binary operation has first argument @i, fold to NOT. */ static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i) { - if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) { + if (arg_is_const_val(op->args[1], i)) { return fold_to_not(ctx, op, 2); } return false; @@ -972,7 +1132,7 @@ static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i) /* If the binary operation has second argument @i, fold to @i. */ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i) { - if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { + if (arg_is_const_val(op->args[2], i)) { return tcg_opt_gen_movi(ctx, op, op->args[0], i); } return false; @@ -981,7 +1141,7 @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i) /* If the binary operation has second argument @i, fold to identity. */ static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i) { - if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { + if (arg_is_const_val(op->args[2], i)) { return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); } return false; @@ -990,7 +1150,7 @@ static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i) /* If the binary operation has second argument @i, fold to NOT. 
*/ static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i) { - if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { + if (arg_is_const_val(op->args[2], i)) { return fold_to_not(ctx, op, 1); } return false; @@ -1182,14 +1342,8 @@ static bool fold_andc(OptContext *ctx, TCGOp *op) static bool fold_brcond(OptContext *ctx, TCGOp *op) { - TCGCond cond = op->args[2]; - int i; - - if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) { - op->args[2] = cond = tcg_swap_cond(cond); - } - - i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond); + int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0], + &op->args[1], &op->args[2]); if (i == 0) { tcg_op_remove(ctx->tcg, op); return true; @@ -1203,15 +1357,13 @@ static bool fold_brcond(OptContext *ctx, TCGOp *op) static bool fold_brcond2(OptContext *ctx, TCGOp *op) { - TCGCond cond = op->args[4]; - TCGArg label = op->args[5]; + TCGCond cond; + TCGArg label; int i, inv = 0; - if (swap_commutative2(&op->args[0], &op->args[2])) { - op->args[4] = cond = tcg_swap_cond(cond); - } - - i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond); + i = do_constant_folding_cond2(ctx, op, &op->args[0]); + cond = op->args[4]; + label = op->args[5]; if (i >= 0) { goto do_brcond_const; } @@ -1223,8 +1375,8 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op) * Simplify LT/GE comparisons vs zero to a single compare * vs the high word of the input. */ - if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 && - arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) { + if (arg_is_const_val(op->args[2], 0) && + arg_is_const_val(op->args[3], 0)) { goto do_brcond_high; } break; @@ -1252,24 +1404,37 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op) case 0: goto do_brcond_const; case 1: - op->opc = INDEX_op_brcond_i32; - op->args[1] = op->args[2]; - op->args[2] = cond; - op->args[3] = label; - break; + goto do_brcond_low; + } + break; + + case TCG_COND_TSTEQ: + case TCG_COND_TSTNE: + if (arg_is_const_val(op->args[2], 0)) { + goto do_brcond_high; + } + if (arg_is_const_val(op->args[3], 0)) { + goto do_brcond_low; } break; default: break; + do_brcond_low: + op->opc = INDEX_op_brcond_i32; + op->args[1] = op->args[2]; + op->args[2] = cond; + op->args[3] = label; + return fold_brcond(ctx, op); + do_brcond_high: op->opc = INDEX_op_brcond_i32; op->args[0] = op->args[1]; op->args[1] = op->args[3]; op->args[2] = cond; op->args[3] = label; - break; + return fold_brcond(ctx, op); do_brcond_const: if (i == 0) { @@ -1448,9 +1613,7 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op) } /* Inserting a value into zero at offset 0. */ - if (arg_is_const(op->args[1]) - && arg_info(op->args[1])->val == 0 - && op->args[3] == 0) { + if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) { uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]); op->opc = and_opc; @@ -1461,8 +1624,7 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op) } /* Inserting zero into a value. 
*/ - if (arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == 0) { + if (arg_is_const_val(op->args[2], 0)) { uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0); op->opc = and_opc; @@ -1687,21 +1849,18 @@ static bool fold_mov(OptContext *ctx, TCGOp *op) static bool fold_movcond(OptContext *ctx, TCGOp *op) { - TCGCond cond = op->args[5]; int i; - if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { - op->args[5] = cond = tcg_swap_cond(cond); - } /* * Canonicalize the "false" input reg to match the destination reg so * that the tcg backend can implement a "move if true" operation. */ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { - op->args[5] = cond = tcg_invert_cond(cond); + op->args[5] = tcg_invert_cond(op->args[5]); } - i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); + i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1], + &op->args[2], &op->args[5]); if (i >= 0) { return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]); } @@ -1715,6 +1874,7 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op) uint64_t tv = arg_info(op->args[3])->val; uint64_t fv = arg_info(op->args[4])->val; TCGOpcode opc, negopc = 0; + TCGCond cond = op->args[5]; switch (ctx->type) { case TCG_TYPE_I32: @@ -1830,16 +1990,10 @@ static bool fold_nand(OptContext *ctx, TCGOp *op) return false; } -static bool fold_neg(OptContext *ctx, TCGOp *op) +static bool fold_neg_no_const(OptContext *ctx, TCGOp *op) { - uint64_t z_mask; - - if (fold_const1(ctx, op)) { - return true; - } - /* Set to 1 all bits to the left of the rightmost. */ - z_mask = arg_info(op->args[1])->z_mask; + uint64_t z_mask = arg_info(op->args[1])->z_mask; ctx->z_mask = -(z_mask & -z_mask); /* @@ -1850,6 +2004,11 @@ static bool fold_neg(OptContext *ctx, TCGOp *op) return true; } +static bool fold_neg(OptContext *ctx, TCGOp *op) +{ + return fold_const1(ctx, op) || fold_neg_no_const(ctx, op); +} + static bool fold_nor(OptContext *ctx, TCGOp *op) { if (fold_const2_commutative(ctx, op) || @@ -1940,19 +2099,108 @@ static bool fold_remainder(OptContext *ctx, TCGOp *op) return false; } -static bool fold_setcond(OptContext *ctx, TCGOp *op) +static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg) { + TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc; + TCGOpcode uext_opc = 0, sext_opc = 0; TCGCond cond = op->args[3]; - int i; + TCGArg ret, src1, src2; + TCGOp *op2; + uint64_t val; + int sh; + bool inv; + + if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) { + return; + } + + src2 = op->args[2]; + val = arg_info(src2)->val; + if (!is_power_of_2(val)) { + return; + } + sh = ctz64(val); + + switch (ctx->type) { + case TCG_TYPE_I32: + and_opc = INDEX_op_and_i32; + sub_opc = INDEX_op_sub_i32; + xor_opc = INDEX_op_xor_i32; + shr_opc = INDEX_op_shr_i32; + neg_opc = INDEX_op_neg_i32; + if (TCG_TARGET_extract_i32_valid(sh, 1)) { + uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0; + sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0; + } + break; + case TCG_TYPE_I64: + and_opc = INDEX_op_and_i64; + sub_opc = INDEX_op_sub_i64; + xor_opc = INDEX_op_xor_i64; + shr_opc = INDEX_op_shr_i64; + neg_opc = INDEX_op_neg_i64; + if (TCG_TARGET_extract_i64_valid(sh, 1)) { + uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0; + sext_opc = TCG_TARGET_HAS_sextract_i64 ? 
INDEX_op_sextract_i64 : 0; + } + break; + default: + g_assert_not_reached(); + } + + ret = op->args[0]; + src1 = op->args[1]; + inv = cond == TCG_COND_TSTEQ; + + if (sh && sext_opc && neg && !inv) { + op->opc = sext_opc; + op->args[1] = src1; + op->args[2] = sh; + op->args[3] = 1; + return; + } else if (sh && uext_opc) { + op->opc = uext_opc; + op->args[1] = src1; + op->args[2] = sh; + op->args[3] = 1; + } else { + if (sh) { + op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3); + op2->args[0] = ret; + op2->args[1] = src1; + op2->args[2] = arg_new_constant(ctx, sh); + src1 = ret; + } + op->opc = and_opc; + op->args[1] = src1; + op->args[2] = arg_new_constant(ctx, 1); + } - if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { - op->args[3] = cond = tcg_swap_cond(cond); + if (neg && inv) { + op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3); + op2->args[0] = ret; + op2->args[1] = ret; + op2->args[2] = arg_new_constant(ctx, 1); + } else if (inv) { + op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3); + op2->args[0] = ret; + op2->args[1] = ret; + op2->args[2] = arg_new_constant(ctx, 1); + } else if (neg) { + op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2); + op2->args[0] = ret; + op2->args[1] = ret; } +} - i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); +static bool fold_setcond(OptContext *ctx, TCGOp *op) +{ + int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1], + &op->args[2], &op->args[3]); if (i >= 0) { return tcg_opt_gen_movi(ctx, op, op->args[0], i); } + fold_setcond_tst_pow2(ctx, op, false); ctx->z_mask = 1; ctx->s_mask = smask_from_zmask(1); @@ -1961,34 +2209,25 @@ static bool fold_setcond(OptContext *ctx, TCGOp *op) static bool fold_negsetcond(OptContext *ctx, TCGOp *op) { - TCGCond cond = op->args[3]; - int i; - - if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { - op->args[3] = cond = tcg_swap_cond(cond); - } - - i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); + int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1], + &op->args[2], &op->args[3]); if (i >= 0) { return tcg_opt_gen_movi(ctx, op, op->args[0], -i); } + fold_setcond_tst_pow2(ctx, op, true); /* Value is {0,-1} so all bits are repetitions of the sign. */ ctx->s_mask = -1; return false; } - static bool fold_setcond2(OptContext *ctx, TCGOp *op) { - TCGCond cond = op->args[5]; + TCGCond cond; int i, inv = 0; - if (swap_commutative2(&op->args[1], &op->args[3])) { - op->args[5] = cond = tcg_swap_cond(cond); - } - - i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond); + i = do_constant_folding_cond2(ctx, op, &op->args[1]); + cond = op->args[5]; if (i >= 0) { goto do_setcond_const; } @@ -2000,8 +2239,8 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) * Simplify LT/GE comparisons vs zero to a single compare * vs the high word of the input. 
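fold_setcond_tst_pow2() above rewrites a setcond/negsetcond whose constant is a single bit into an extract (or shift plus and), with an optional invert or negate step. A small sketch of the identity it exploits, assuming the mask is exactly 1 << sh (function names below are illustrative only):

    #include <stdint.h>

    /* setcond TSTNE x, (1 << sh)  ->  bit sh of x, i.e. extract(x, sh, 1) */
    static uint64_t setcond_tstne_pow2(uint64_t x, unsigned sh)
    {
        return (x >> sh) & 1;
    }

    /* setcond TSTEQ x, (1 << sh)  ->  the same bit, then xor 1 */
    static uint64_t setcond_tsteq_pow2(uint64_t x, unsigned sh)
    {
        return ((x >> sh) & 1) ^ 1;
    }

    /* negsetcond TSTNE  ->  sextract(x, sh, 1); the TSTEQ form subtracts 1
     * from the extracted bit instead. */
    static int64_t negsetcond_tstne_pow2(uint64_t x, unsigned sh)
    {
        return -(int64_t)((x >> sh) & 1);
    }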
*/ - if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 && - arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) { + if (arg_is_const_val(op->args[3], 0) && + arg_is_const_val(op->args[4], 0)) { goto do_setcond_high; } break; @@ -2029,22 +2268,35 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) case 0: goto do_setcond_const; case 1: - op->args[2] = op->args[3]; - op->args[3] = cond; - op->opc = INDEX_op_setcond_i32; - break; + goto do_setcond_low; + } + break; + + case TCG_COND_TSTEQ: + case TCG_COND_TSTNE: + if (arg_is_const_val(op->args[2], 0)) { + goto do_setcond_high; + } + if (arg_is_const_val(op->args[4], 0)) { + goto do_setcond_low; } break; default: break; + do_setcond_low: + op->args[2] = op->args[3]; + op->args[3] = cond; + op->opc = INDEX_op_setcond_i32; + return fold_setcond(ctx, op); + do_setcond_high: op->args[1] = op->args[2]; op->args[2] = op->args[4]; op->args[3] = cond; op->opc = INDEX_op_setcond_i32; - break; + return fold_setcond(ctx, op); } ctx->z_mask = 1; @@ -2123,7 +2375,7 @@ static bool fold_shift(OptContext *ctx, TCGOp *op) * will not reduced the number of input sign repetitions. */ sign = (s_mask & -s_mask) >> 1; - if (!(z_mask & sign)) { + if (sign && !(z_mask & sign)) { ctx->s_mask = s_mask; } break; @@ -2165,7 +2417,7 @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op) if (have_neg) { op->opc = neg_op; op->args[1] = op->args[2]; - return fold_neg(ctx, op); + return fold_neg_no_const(ctx, op); } return false; } diff --git a/accel/tcg/perf.c b/tcg/perf.c similarity index 97% rename from accel/tcg/perf.c rename to tcg/perf.c index cd1aa99a7ee..412a987d956 100644 --- a/accel/tcg/perf.c +++ b/tcg/perf.c @@ -10,13 +10,13 @@ #include "qemu/osdep.h" #include "elf.h" -#include "exec/exec-all.h" +#include "exec/target_page.h" +#include "exec/translation-block.h" #include "qemu/timer.h" +#include "tcg/debuginfo.h" +#include "tcg/perf.h" #include "tcg/tcg.h" -#include "debuginfo.h" -#include "perf.h" - static FILE *safe_fopen_w(const char *path) { int saved_errno; @@ -335,11 +335,7 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb, /* FIXME: This replicates the restore_state_to_opc() logic. */ q[insn].address = gen_insn_data[insn * start_words + 0]; if (tb_cflags(tb) & CF_PCREL) { - q[insn].address |= (guest_pc & TARGET_PAGE_MASK); - } else { -#if defined(TARGET_I386) - q[insn].address -= tb->cs_base; -#endif + q[insn].address |= (guest_pc & qemu_target_page_mask()); } q[insn].flags = DEBUGINFO_SYMBOL | (jitdump ? 
DEBUGINFO_LINE : 0); } diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h index cb47b29452e..9f99bde505b 100644 --- a/tcg/ppc/tcg-target-con-set.h +++ b/tcg/ppc/tcg-target-con-set.h @@ -11,7 +11,7 @@ */ C_O0_I1(r) C_O0_I2(r, r) -C_O0_I2(r, ri) +C_O0_I2(r, rC) C_O0_I2(v, r) C_O0_I3(r, r, r) C_O0_I3(o, m, r) @@ -26,13 +26,14 @@ C_O1_I2(r, rI, ri) C_O1_I2(r, rI, rT) C_O1_I2(r, r, r) C_O1_I2(r, r, ri) +C_O1_I2(r, r, rC) C_O1_I2(r, r, rI) C_O1_I2(r, r, rT) C_O1_I2(r, r, rU) C_O1_I2(r, r, rZW) C_O1_I2(v, v, v) C_O1_I3(v, v, v, v) -C_O1_I4(r, r, ri, rZ, rZ) +C_O1_I4(r, r, rC, rZ, rZ) C_O1_I4(r, r, r, ri, ri) C_O2_I1(r, r, r) C_N1O1_I1(o, m, r) diff --git a/tcg/ppc/tcg-target-con-str.h b/tcg/ppc/tcg-target-con-str.h index 20846901de9..16b687216e0 100644 --- a/tcg/ppc/tcg-target-con-str.h +++ b/tcg/ppc/tcg-target-con-str.h @@ -16,6 +16,7 @@ REGS('v', ALL_VECTOR_REGS) * Define constraint letters for constants: * CONST(letter, TCG_CT_CONST_* bit set) */ +CONST('C', TCG_CT_CONST_CMP) CONST('I', TCG_CT_CONST_S16) CONST('M', TCG_CT_CONST_MONE) CONST('T', TCG_CT_CONST_S32) diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index 54816967bca..7f3829beeb8 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -92,11 +92,13 @@ #define SZR (TCG_TARGET_REG_BITS / 8) #define TCG_CT_CONST_S16 0x100 +#define TCG_CT_CONST_U16 0x200 #define TCG_CT_CONST_S32 0x400 #define TCG_CT_CONST_U32 0x800 #define TCG_CT_CONST_ZERO 0x1000 #define TCG_CT_CONST_MONE 0x2000 #define TCG_CT_CONST_WSZ 0x4000 +#define TCG_CT_CONST_CMP 0x8000 #define ALL_GENERAL_REGS 0xffffffffu #define ALL_VECTOR_REGS 0xffffffff00000000ull @@ -281,31 +283,78 @@ static bool reloc_pc34(tcg_insn_unit *src_rw, const tcg_insn_unit *target) return false; } +static bool mask_operand(uint32_t c, int *mb, int *me); +static bool mask64_operand(uint64_t c, int *mb, int *me); + /* test if a constant matches the constraint */ -static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) +static bool tcg_target_const_match(int64_t sval, int ct, + TCGType type, TCGCond cond, int vece) { + uint64_t uval = sval; + int mb, me; + if (ct & TCG_CT_CONST) { return 1; } - /* The only 32-bit constraint we use aside from - TCG_CT_CONST is TCG_CT_CONST_S16. */ if (type == TCG_TYPE_I32) { - val = (int32_t)val; + uval = (uint32_t)sval; + sval = (int32_t)sval; + } + + if (ct & TCG_CT_CONST_CMP) { + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_NE: + ct |= TCG_CT_CONST_S16 | TCG_CT_CONST_U16; + break; + case TCG_COND_LT: + case TCG_COND_GE: + case TCG_COND_LE: + case TCG_COND_GT: + ct |= TCG_CT_CONST_S16; + break; + case TCG_COND_LTU: + case TCG_COND_GEU: + case TCG_COND_LEU: + case TCG_COND_GTU: + ct |= TCG_CT_CONST_U16; + break; + case TCG_COND_TSTEQ: + case TCG_COND_TSTNE: + if ((uval & ~0xffff) == 0 || (uval & ~0xffff0000ull) == 0) { + return 1; + } + if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32 + ? 
mask_operand(uval, &mb, &me) + : mask64_operand(uval << clz64(uval), &mb, &me)) { + return 1; + } + return 0; + default: + g_assert_not_reached(); + } } - if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { + if ((ct & TCG_CT_CONST_S16) && sval == (int16_t)sval) { + return 1; + } + if ((ct & TCG_CT_CONST_U16) && uval == (uint16_t)uval) { return 1; - } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { + } + if ((ct & TCG_CT_CONST_S32) && sval == (int32_t)sval) { return 1; - } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { + } + if ((ct & TCG_CT_CONST_U32) && uval == (uint32_t)uval) { return 1; - } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + } + if ((ct & TCG_CT_CONST_ZERO) && sval == 0) { return 1; - } else if ((ct & TCG_CT_CONST_MONE) && val == -1) { + } + if ((ct & TCG_CT_CONST_MONE) && sval == -1) { return 1; - } else if ((ct & TCG_CT_CONST_WSZ) - && val == (type == TCG_TYPE_I32 ? 32 : 64)) { + } + if ((ct & TCG_CT_CONST_WSZ) && sval == (type == TCG_TYPE_I32 ? 32 : 64)) { return 1; } return 0; @@ -669,31 +718,35 @@ enum { CR_SO }; -static const uint32_t tcg_to_bc[] = { - [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE, - [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE, - [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE, - [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE, - [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE, - [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE, - [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE, - [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE, - [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE, - [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE, +static const uint32_t tcg_to_bc[16] = { + [TCG_COND_EQ] = BC | BI(0, CR_EQ) | BO_COND_TRUE, + [TCG_COND_NE] = BC | BI(0, CR_EQ) | BO_COND_FALSE, + [TCG_COND_TSTEQ] = BC | BI(0, CR_EQ) | BO_COND_TRUE, + [TCG_COND_TSTNE] = BC | BI(0, CR_EQ) | BO_COND_FALSE, + [TCG_COND_LT] = BC | BI(0, CR_LT) | BO_COND_TRUE, + [TCG_COND_GE] = BC | BI(0, CR_LT) | BO_COND_FALSE, + [TCG_COND_LE] = BC | BI(0, CR_GT) | BO_COND_FALSE, + [TCG_COND_GT] = BC | BI(0, CR_GT) | BO_COND_TRUE, + [TCG_COND_LTU] = BC | BI(0, CR_LT) | BO_COND_TRUE, + [TCG_COND_GEU] = BC | BI(0, CR_LT) | BO_COND_FALSE, + [TCG_COND_LEU] = BC | BI(0, CR_GT) | BO_COND_FALSE, + [TCG_COND_GTU] = BC | BI(0, CR_GT) | BO_COND_TRUE, }; /* The low bit here is set if the RA and RB fields must be inverted. 
*/ -static const uint32_t tcg_to_isel[] = { - [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ), - [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1, - [TCG_COND_LT] = ISEL | BC_(7, CR_LT), - [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1, - [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1, - [TCG_COND_GT] = ISEL | BC_(7, CR_GT), - [TCG_COND_LTU] = ISEL | BC_(7, CR_LT), - [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1, - [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1, - [TCG_COND_GTU] = ISEL | BC_(7, CR_GT), +static const uint32_t tcg_to_isel[16] = { + [TCG_COND_EQ] = ISEL | BC_(0, CR_EQ), + [TCG_COND_NE] = ISEL | BC_(0, CR_EQ) | 1, + [TCG_COND_TSTEQ] = ISEL | BC_(0, CR_EQ), + [TCG_COND_TSTNE] = ISEL | BC_(0, CR_EQ) | 1, + [TCG_COND_LT] = ISEL | BC_(0, CR_LT), + [TCG_COND_GE] = ISEL | BC_(0, CR_LT) | 1, + [TCG_COND_LE] = ISEL | BC_(0, CR_GT) | 1, + [TCG_COND_GT] = ISEL | BC_(0, CR_GT), + [TCG_COND_LTU] = ISEL | BC_(0, CR_LT), + [TCG_COND_GEU] = ISEL | BC_(0, CR_LT) | 1, + [TCG_COND_LEU] = ISEL | BC_(0, CR_GT) | 1, + [TCG_COND_GTU] = ISEL | BC_(0, CR_GT), }; static bool patch_reloc(tcg_insn_unit *code_ptr, int type, @@ -838,19 +891,31 @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) return true; } -static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs, - int sh, int mb) +static void tcg_out_rld_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs, + int sh, int mb, bool rc) { tcg_debug_assert(TCG_TARGET_REG_BITS == 64); sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1); mb = MB64((mb >> 5) | ((mb << 1) & 0x3f)); - tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb); + tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb | rc); +} + +static void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs, + int sh, int mb) +{ + tcg_out_rld_rc(s, op, ra, rs, sh, mb, false); +} + +static void tcg_out_rlw_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs, + int sh, int mb, int me, bool rc) +{ + tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me) | rc); } -static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs, - int sh, int mb, int me) +static void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs, + int sh, int mb, int me) { - tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me)); + tcg_out_rlw_rc(s, op, ra, rs, sh, mb, me, false); } static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) @@ -1668,6 +1733,50 @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, return false; } +/* + * Set dest non-zero if and only if (arg1 & arg2) is non-zero. + * If RC, then also set RC0. + */ +static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2, + bool const_arg2, TCGType type, bool rc) +{ + int mb, me; + + if (!const_arg2) { + tcg_out32(s, AND | SAB(arg1, dest, arg2) | rc); + return; + } + + if (type == TCG_TYPE_I32) { + arg2 = (uint32_t)arg2; + } else if (arg2 == (uint32_t)arg2) { + type = TCG_TYPE_I32; + } + + if ((arg2 & ~0xffff) == 0) { + tcg_out32(s, ANDI | SAI(arg1, dest, arg2)); + return; + } + if ((arg2 & ~0xffff0000ull) == 0) { + tcg_out32(s, ANDIS | SAI(arg1, dest, arg2 >> 16)); + return; + } + if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { + if (mask_operand(arg2, &mb, &me)) { + tcg_out_rlw_rc(s, RLWINM, dest, arg1, 0, mb, me, rc); + return; + } + } else { + int sh = clz64(arg2); + if (mask64_operand(arg2 << sh, &mb, &me)) { + tcg_out_rld_rc(s, RLDICR, dest, arg1, sh, me, rc); + return; + } + } + /* Constraints should satisfy this. 
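tcg_out_test() above only ever sees constants that the new 'C' constraint accepted, so exactly one encoding applies; mask_operand() and mask64_operand() are the backend's existing rlwinm/rldicr mask checks. A rough sketch of the selection, with a hypothetical classifier name used purely for illustration:

    #include <stdint.h>

    /* Which instruction the constant path of tcg_out_test() would pick. */
    static const char *ppc_test_insn_for_mask(uint64_t m, int is_i32)
    {
        if ((m & ~(uint64_t)0xffff) == 0) {
            return "andi.";        /* mask confined to bits 0..15 */
        }
        if ((m & ~(uint64_t)0xffff0000) == 0) {
            return "andis.";       /* mask confined to bits 16..31 */
        }
        /* otherwise the constraint guaranteed a contiguous run of bits */
        return is_i32 ? "rlwinm." : "rldicr.";
    }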
*/ + g_assert_not_reached(); +} + static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, int const_arg2, int cr, TCGType type) { @@ -1676,7 +1785,10 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); - /* Simplify the comparisons below wrt CMPI. */ + /* + * Simplify the comparisons below wrt CMPI. + * All of the tests are 16-bit, so a 32-bit sign extend always works. + */ if (type == TCG_TYPE_I32) { arg2 = (int32_t)arg2; } @@ -1699,6 +1811,12 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, imm = 0; break; + case TCG_COND_TSTEQ: + case TCG_COND_TSTNE: + tcg_debug_assert(cr == 0); + tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, true); + return; + case TCG_COND_LT: case TCG_COND_GE: case TCG_COND_LE: @@ -1826,7 +1944,7 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, if (have_isa_3_10) { tcg_insn_unit bi, opc; - tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); + tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type); /* Re-use tcg_to_bc for BI and BO_COND_{TRUE,FALSE}. */ bi = tcg_to_bc[cond] & (0x1f << 16); @@ -1879,7 +1997,7 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, if (have_isel) { int isel, tab; - tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); + tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type); isel = tcg_to_isel[cond]; @@ -1909,6 +2027,16 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, tcg_out_setcond_ne0(s, type, arg0, arg1, neg); break; + case TCG_COND_TSTEQ: + tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, false); + tcg_out_setcond_eq0(s, type, arg0, TCG_REG_R0, neg); + break; + + case TCG_COND_TSTNE: + tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, false); + tcg_out_setcond_ne0(s, type, arg0, TCG_REG_R0, neg); + break; + case TCG_COND_LE: case TCG_COND_LEU: inv = true; @@ -1945,22 +2073,28 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, } } -static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l) +static void tcg_out_bc(TCGContext *s, TCGCond cond, int bd) +{ + tcg_out32(s, tcg_to_bc[cond] | bd); +} + +static void tcg_out_bc_lab(TCGContext *s, TCGCond cond, TCGLabel *l) { + int bd = 0; if (l->has_value) { - bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr); + bd = reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr); } else { tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0); } - tcg_out32(s, bc); + tcg_out_bc(s, cond, bd); } static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGArg arg1, TCGArg arg2, int const_arg2, TCGLabel *l, TCGType type) { - tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); - tcg_out_bc(s, tcg_to_bc[cond], l); + tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type); + tcg_out_bc_lab(s, cond, l); } static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, @@ -1973,7 +2107,7 @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, return; } - tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type); + tcg_out_cmp(s, cond, c1, c2, const_c2, 0, type); if (have_isel) { int isel = tcg_to_isel[cond]; @@ -2002,7 +2136,7 @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, } } /* Branch forward over one insn */ - tcg_out32(s, tcg_to_bc[cond] | 8); + tcg_out_bc(s, cond, 8); if (v2 == 0) { tcg_out_movi(s, type, dest, 0); } else { @@ -2017,17 +2151,17 @@ static void tcg_out_cntxz(TCGContext 
*s, TCGType type, uint32_t opc, if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) { tcg_out32(s, opc | RA(a0) | RS(a1)); } else { - tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type); + tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 0, type); /* Note that the only other valid constant for a2 is 0. */ if (have_isel) { tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1)); tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0)); } else if (!const_a2 && a0 == a2) { - tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8); + tcg_out_bc(s, TCG_COND_EQ, 8); tcg_out32(s, opc | RA(a0) | RS(a1)); } else { tcg_out32(s, opc | RA(a0) | RS(a1)); - tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8); + tcg_out_bc(s, TCG_COND_NE, 8); if (const_a2) { tcg_out_movi(s, type, a0, 0); } else { @@ -2072,7 +2206,22 @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args, do_equality: tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32); tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32); - tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ)); + tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ)); + break; + + case TCG_COND_TSTEQ: + case TCG_COND_TSTNE: + if (blconst) { + tcg_out_andi32(s, TCG_REG_R0, al, bl); + } else { + tcg_out32(s, AND | SAB(al, TCG_REG_R0, bl)); + } + if (bhconst) { + tcg_out_andi32(s, TCG_REG_TMP1, ah, bh); + } else { + tcg_out32(s, AND | SAB(ah, TCG_REG_TMP1, bh)); + } + tcg_out32(s, OR | SAB(TCG_REG_R0, TCG_REG_R0, TCG_REG_TMP1) | 1); break; case TCG_COND_LT: @@ -2090,8 +2239,8 @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args, tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32); tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32); - tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2)); - tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ)); + tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2)); + tcg_out32(s, CROR | BT(0, CR_EQ) | BA(6, bit1) | BB(0, CR_EQ)); break; default: @@ -2103,15 +2252,15 @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args, const int *const_args) { tcg_out_cmp2(s, args + 1, const_args + 1); - tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7)); - tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31); + tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(0)); + tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31); } -static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args, - const int *const_args) +static void tcg_out_brcond2(TCGContext *s, const TCGArg *args, + const int *const_args) { tcg_out_cmp2(s, args, const_args); - tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5])); + tcg_out_bc_lab(s, TCG_COND_EQ, arg_label(args[5])); } static void tcg_out_mb(TCGContext *s, TCGArg a0) @@ -2435,17 +2584,17 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2, 0, 6, TCG_TYPE_I32); - /* Combine comparisons into cr7. */ - tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ)); + /* Combine comparisons into cr0. */ + tcg_out32(s, CRAND | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ)); } else { - /* Full comparison into cr7. */ + /* Full comparison into cr0. */ tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, - 0, 7, addr_type); + 0, 0, addr_type); } /* Load a pointer into the current opcode w/conditional branch-link. 
*/ ldst->label_ptr[0] = s->code_ptr; - tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); + tcg_out_bc(s, TCG_COND_NE, LK); h->base = TCG_REG_TMP1; } else { @@ -3979,8 +4128,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_sar_i32: case INDEX_op_rotl_i32: case INDEX_op_rotr_i32: - case INDEX_op_setcond_i32: - case INDEX_op_negsetcond_i32: case INDEX_op_and_i64: case INDEX_op_andc_i64: case INDEX_op_shl_i64: @@ -3988,8 +4135,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_sar_i64: case INDEX_op_rotl_i64: case INDEX_op_rotr_i64: - case INDEX_op_setcond_i64: - case INDEX_op_negsetcond_i64: return C_O1_I2(r, r, ri); case INDEX_op_mul_i32: @@ -4033,11 +4178,16 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: - return C_O0_I2(r, ri); - + return C_O0_I2(r, rC); + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + case INDEX_op_negsetcond_i32: + case INDEX_op_negsetcond_i64: + return C_O1_I2(r, r, rC); case INDEX_op_movcond_i32: case INDEX_op_movcond_i64: - return C_O1_I4(r, r, ri, rZ, rZ); + return C_O1_I4(r, r, rC, rZ, rZ); + case INDEX_op_deposit_i32: case INDEX_op_deposit_i64: return C_O1_I2(r, 0, rZ); diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index 5295e4f9abd..04a7aba4d3a 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -143,6 +143,8 @@ typedef enum { #define TCG_TARGET_HAS_qemu_ldst_i128 \ (TCG_TARGET_REG_BITS == 64 && have_isa_2_07) +#define TCG_TARGET_HAS_tst 1 + /* * While technically Altivec could support V64, it has no 64-bit store * instruction and substituting two 32-bit stores makes the generated diff --git a/tcg/region.c b/tcg/region.c index 86692455c00..478ec051c4b 100644 --- a/tcg/region.c +++ b/tcg/region.c @@ -584,7 +584,9 @@ static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp) buf_rx = mmap(NULL, size, host_prot_read_exec(), MAP_SHARED, fd, 0); if (buf_rx == MAP_FAILED) { - goto fail_rx; + error_setg_errno(errp, errno, + "failed to map shared memory for execute"); + goto fail; } close(fd); @@ -594,12 +596,8 @@ static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp) return PROT_READ | PROT_WRITE; - fail_rx: - error_setg_errno(errp, errno, "failed to map shared memory for execute"); fail: - if (buf_rx != MAP_FAILED) { - munmap(buf_rx, size); - } + /* buf_rx is always equal to MAP_FAILED here and does not require cleanup */ if (buf_rw) { munmap(buf_rw, size); } diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 34e10e77d98..639363039b1 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -145,7 +145,8 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define sextreg sextract64 /* test if a constant matches the constraint */ -static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece) { if (ct & TCG_CT_CONST) { return 1; diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index a4edc3dc74f..2c1b680b934 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -158,6 +158,8 @@ extern bool have_zbb; #define TCG_TARGET_HAS_qemu_ldst_i128 0 +#define TCG_TARGET_HAS_tst 0 + #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_NEED_LDST_LABELS diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h index 9a420374999..f75955eaa87 100644 --- 
a/tcg/s390x/tcg-target-con-set.h +++ b/tcg/s390x/tcg-target-con-set.h @@ -15,7 +15,7 @@ C_O0_I1(r) C_O0_I2(r, r) C_O0_I2(r, ri) -C_O0_I2(r, rA) +C_O0_I2(r, rC) C_O0_I2(v, r) C_O0_I3(o, m, r) C_O1_I1(r, r) @@ -27,7 +27,7 @@ C_O1_I2(r, 0, rI) C_O1_I2(r, 0, rJ) C_O1_I2(r, r, r) C_O1_I2(r, r, ri) -C_O1_I2(r, r, rA) +C_O1_I2(r, r, rC) C_O1_I2(r, r, rI) C_O1_I2(r, r, rJ) C_O1_I2(r, r, rK) @@ -39,10 +39,10 @@ C_O1_I2(v, v, r) C_O1_I2(v, v, v) C_O1_I3(v, v, v, v) C_O1_I4(r, r, ri, rI, r) -C_O1_I4(r, r, rA, rI, r) +C_O1_I4(r, r, rC, rI, r) C_O2_I1(o, m, r) C_O2_I2(o, m, 0, r) C_O2_I2(o, m, r, r) C_O2_I3(o, m, 0, 1, r) C_N1_O1_I4(r, r, 0, 1, ri, r) -C_N1_O1_I4(r, r, 0, 1, rA, r) +C_N1_O1_I4(r, r, 0, 1, rJU, r) diff --git a/tcg/s390x/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h index 25675b449e3..745f6c0df51 100644 --- a/tcg/s390x/tcg-target-con-str.h +++ b/tcg/s390x/tcg-target-con-str.h @@ -16,10 +16,11 @@ REGS('o', 0xaaaa) /* odd numbered general regs */ * Define constraint letters for constants: * CONST(letter, TCG_CT_CONST_* bit set) */ -CONST('A', TCG_CT_CONST_S33) +CONST('C', TCG_CT_CONST_CMP) CONST('I', TCG_CT_CONST_S16) CONST('J', TCG_CT_CONST_S32) CONST('K', TCG_CT_CONST_P32) CONST('N', TCG_CT_CONST_INV) CONST('R', TCG_CT_CONST_INVRISBG) +CONST('U', TCG_CT_CONST_U32) CONST('Z', TCG_CT_CONST_ZERO) diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc index 7f6b84aa2ce..ad587325fc8 100644 --- a/tcg/s390x/tcg-target.c.inc +++ b/tcg/s390x/tcg-target.c.inc @@ -30,11 +30,12 @@ #define TCG_CT_CONST_S16 (1 << 8) #define TCG_CT_CONST_S32 (1 << 9) -#define TCG_CT_CONST_S33 (1 << 10) +#define TCG_CT_CONST_U32 (1 << 10) #define TCG_CT_CONST_ZERO (1 << 11) #define TCG_CT_CONST_P32 (1 << 12) #define TCG_CT_CONST_INV (1 << 13) #define TCG_CT_CONST_INVRISBG (1 << 14) +#define TCG_CT_CONST_CMP (1 << 15) #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16) #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) @@ -111,6 +112,9 @@ typedef enum S390Opcode { RI_OILH = 0xa50a, RI_OILL = 0xa50b, RI_TMLL = 0xa701, + RI_TMLH = 0xa700, + RI_TMHL = 0xa703, + RI_TMHH = 0xa702, RIEb_CGRJ = 0xec64, RIEb_CLGRJ = 0xec65, @@ -403,10 +407,15 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define S390_CC_NEVER 0 #define S390_CC_ALWAYS 15 +#define S390_TM_EQ 8 /* CC == 0 */ +#define S390_TM_NE 7 /* CC in {1,2,3} */ + /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */ -static const uint8_t tcg_cond_to_s390_cond[] = { +static const uint8_t tcg_cond_to_s390_cond[16] = { [TCG_COND_EQ] = S390_CC_EQ, [TCG_COND_NE] = S390_CC_NE, + [TCG_COND_TSTEQ] = S390_CC_EQ, + [TCG_COND_TSTNE] = S390_CC_NE, [TCG_COND_LT] = S390_CC_LT, [TCG_COND_LE] = S390_CC_LE, [TCG_COND_GT] = S390_CC_GT, @@ -420,9 +429,11 @@ static const uint8_t tcg_cond_to_s390_cond[] = { /* Condition codes that result from a LOAD AND TEST. Here, we have no unsigned instruction variation, however since the test is vs zero we can re-map the outcomes appropriately. */ -static const uint8_t tcg_cond_to_ltr_cond[] = { +static const uint8_t tcg_cond_to_ltr_cond[16] = { [TCG_COND_EQ] = S390_CC_EQ, [TCG_COND_NE] = S390_CC_NE, + [TCG_COND_TSTEQ] = S390_CC_ALWAYS, + [TCG_COND_TSTNE] = S390_CC_NEVER, [TCG_COND_LT] = S390_CC_LT, [TCG_COND_LE] = S390_CC_LE, [TCG_COND_GT] = S390_CC_GT, @@ -538,42 +549,74 @@ static bool risbg_mask(uint64_t c) } /* Test if a constant matches the constraint. 
*/ -static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece) { + uint64_t uval = val; + if (ct & TCG_CT_CONST) { - return 1; + return true; } - if (type == TCG_TYPE_I32) { + uval = (uint32_t)val; val = (int32_t)val; } - /* The following are mutually exclusive. */ - if (ct & TCG_CT_CONST_S16) { - return val == (int16_t)val; - } else if (ct & TCG_CT_CONST_S32) { - return val == (int32_t)val; - } else if (ct & TCG_CT_CONST_S33) { - return val >= -0xffffffffll && val <= 0xffffffffll; - } else if (ct & TCG_CT_CONST_ZERO) { - return val == 0; + if (ct & TCG_CT_CONST_CMP) { + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_NE: + ct |= TCG_CT_CONST_S32 | TCG_CT_CONST_U32; /* CGFI or CLGFI */ + break; + case TCG_COND_LT: + case TCG_COND_GE: + case TCG_COND_LE: + case TCG_COND_GT: + ct |= TCG_CT_CONST_S32; /* CGFI */ + break; + case TCG_COND_LTU: + case TCG_COND_GEU: + case TCG_COND_LEU: + case TCG_COND_GTU: + ct |= TCG_CT_CONST_U32; /* CLGFI */ + break; + case TCG_COND_TSTNE: + case TCG_COND_TSTEQ: + if (is_const_p16(uval) >= 0) { + return true; /* TMxx */ + } + if (risbg_mask(uval)) { + return true; /* RISBG */ + } + break; + default: + g_assert_not_reached(); + } + } + + if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { + return true; + } + if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { + return true; + } + if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { + return true; + } + if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return true; } if (ct & TCG_CT_CONST_INV) { val = ~val; } - /* - * Note that is_const_p16 is a subset of is_const_p32, - * so we don't need both constraints. - */ if ((ct & TCG_CT_CONST_P32) && is_const_p32(val) >= 0) { return true; } if ((ct & TCG_CT_CONST_INVRISBG) && risbg_mask(~val)) { return true; } - - return 0; + return false; } /* Emit instructions according to the given instruction format. */ @@ -843,6 +886,9 @@ static const S390Opcode oi_insns[4] = { static const S390Opcode lif_insns[2] = { RIL_LLILF, RIL_LLIHF, }; +static const S390Opcode tm_insns[4] = { + RI_TMLL, RI_TMLH, RI_TMHL, RI_TMHH +}; /* load a register with an immediate value */ static void tcg_out_movi(TCGContext *s, TCGType type, @@ -1203,6 +1249,36 @@ static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, TCGCond inv_c = tcg_invert_cond(c); S390Opcode op; + if (is_tst_cond(c)) { + tcg_debug_assert(!need_carry); + + if (!c2const) { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, NRK, TCG_REG_R0, r1, c2); + } else { + tcg_out_insn(s, RRFa, NGRK, TCG_REG_R0, r1, c2); + } + goto exit; + } + + if (type == TCG_TYPE_I32) { + c2 = (uint32_t)c2; + } + + int i = is_const_p16(c2); + if (i >= 0) { + tcg_out_insn_RI(s, tm_insns[i], r1, c2 >> (i * 16)); + *inv_cc = c == TCG_COND_TSTEQ ? S390_TM_NE : S390_TM_EQ; + return *inv_cc ^ 15; + } + + if (risbg_mask(c2)) { + tgen_andi_risbg(s, TCG_REG_R0, r1, c2); + goto exit; + } + g_assert_not_reached(); + } + if (c2const) { if (c2 == 0) { if (!(is_unsigned && need_carry)) { @@ -1228,22 +1304,34 @@ static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, goto exit; } - /* - * Constraints are for a signed 33-bit operand, which is a - * convenient superset of this signed/unsigned test. - */ - if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2 : (TCGArg)(int32_t)c2)) { - op = (is_unsigned ? RIL_CLGFI : RIL_CGFI); - tcg_out_insn_RIL(s, op, r1, c2); - goto exit; + /* Should match TCG_CT_CONST_CMP. 
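In the TSTEQ/TSTNE constant path of tgen_cmp2() above, is_const_p16() decides whether a single TEST UNDER MASK instruction (TMLL/TMLH/TMHL/TMHH) covers the immediate, before falling back to risbg_mask() or a register AND. A plausible reading of that helper, assumed here for illustration (const_p16_index is not the real name):

    #include <stdint.h>

    /* Return the index of the only non-zero 16-bit halfword, or -1.
     * Index 0..3 selects TMLL, TMLH, TMHL, TMHH respectively. */
    static int const_p16_index(uint64_t v)
    {
        for (int i = 0; i < 4; i++) {
            uint64_t h = (uint64_t)0xffff << (i * 16);
            if (v != 0 && (v & ~h) == 0) {
                return i;
            }
        }
        return -1;
    }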
*/ + switch (c) { + case TCG_COND_LT: + case TCG_COND_GE: + case TCG_COND_LE: + case TCG_COND_GT: + tcg_debug_assert(c2 == (int32_t)c2); + op = RIL_CGFI; + break; + case TCG_COND_EQ: + case TCG_COND_NE: + if (c2 == (int32_t)c2) { + op = RIL_CGFI; + break; + } + /* fall through */ + case TCG_COND_LTU: + case TCG_COND_GEU: + case TCG_COND_LEU: + case TCG_COND_GTU: + tcg_debug_assert(c2 == (uint32_t)c2); + op = RIL_CLGFI; + break; + default: + g_assert_not_reached(); } - - /* Load everything else into a register. */ - tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, c2); - c2 = TCG_TMP0; - } - - if (type == TCG_TYPE_I32) { + tcg_out_insn_RIL(s, op, r1, c2); + } else if (type == TCG_TYPE_I32) { op = (is_unsigned ? RR_CLR : RR_CR); tcg_out_insn_RR(s, op, r1, c2); } else { @@ -1516,46 +1604,49 @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, TCGArg c2, int c2const, TCGLabel *l) { int cc; - bool is_unsigned = is_unsigned_cond(c); - bool in_range; - S390Opcode opc; - cc = tcg_cond_to_s390_cond[c]; + if (!is_tst_cond(c)) { + bool is_unsigned = is_unsigned_cond(c); + bool in_range; + S390Opcode opc; - if (!c2const) { - opc = (type == TCG_TYPE_I32 - ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ) - : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ)); - tgen_compare_branch(s, opc, cc, r1, c2, l); - return; - } + cc = tcg_cond_to_s390_cond[c]; - /* - * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field. - * If the immediate we've been given does not fit that range, we'll - * fall back to separate compare and branch instructions using the - * larger comparison range afforded by COMPARE IMMEDIATE. - */ - if (type == TCG_TYPE_I32) { - if (is_unsigned) { - opc = RIEc_CLIJ; - in_range = (uint32_t)c2 == (uint8_t)c2; - } else { - opc = RIEc_CIJ; - in_range = (int32_t)c2 == (int8_t)c2; + if (!c2const) { + opc = (type == TCG_TYPE_I32 + ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ) + : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ)); + tgen_compare_branch(s, opc, cc, r1, c2, l); + return; } - } else { - if (is_unsigned) { - opc = RIEc_CLGIJ; - in_range = (uint64_t)c2 == (uint8_t)c2; + + /* + * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field. + * If the immediate we've been given does not fit that range, we'll + * fall back to separate compare and branch instructions using the + * larger comparison range afforded by COMPARE IMMEDIATE. + */ + if (type == TCG_TYPE_I32) { + if (is_unsigned) { + opc = RIEc_CLIJ; + in_range = (uint32_t)c2 == (uint8_t)c2; + } else { + opc = RIEc_CIJ; + in_range = (int32_t)c2 == (int8_t)c2; + } } else { - opc = RIEc_CGIJ; - in_range = (int64_t)c2 == (int8_t)c2; + if (is_unsigned) { + opc = RIEc_CLGIJ; + in_range = (uint64_t)c2 == (uint8_t)c2; + } else { + opc = RIEc_CGIJ; + in_range = (int64_t)c2 == (int8_t)c2; + } + } + if (in_range) { + tgen_compare_imm_branch(s, opc, cc, r1, c2, l); + return; } - } - if (in_range) { - tgen_compare_imm_branch(s, opc, cc, r1, c2, l); - return; } cc = tgen_cmp(s, type, c, r1, c2, c2const, false); @@ -1834,11 +1925,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->oi = oi; ldst->addrlo_reg = addr_reg; - /* We are expecting a_bits to max out at 7, much lower than TMLL. 
*/ tcg_debug_assert(a_mask <= 0xffff); tcg_out_insn(s, RI, TMLL, addr_reg, a_mask); - tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */ + tcg_out16(s, RI_BRC | (S390_TM_NE << 4)); ldst->label_ptr[0] = s->code_ptr++; } @@ -1919,7 +2009,7 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi, l2 = gen_new_label(); tcg_out_insn(s, RI, TMLL, addr_reg, 15); - tgen_branch(s, 7, l1); /* CC in {1,2,3} */ + tgen_branch(s, S390_TM_NE, l1); } tcg_debug_assert(!need_bswap); @@ -3136,7 +3226,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) return C_O1_I2(r, r, ri); case INDEX_op_setcond_i64: case INDEX_op_negsetcond_i64: - return C_O1_I2(r, r, rA); + return C_O1_I2(r, r, rC); case INDEX_op_clz_i64: return C_O1_I2(r, r, rI); @@ -3186,7 +3276,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_brcond_i32: return C_O0_I2(r, ri); case INDEX_op_brcond_i64: - return C_O0_I2(r, rA); + return C_O0_I2(r, rC); case INDEX_op_bswap16_i32: case INDEX_op_bswap16_i64: @@ -3239,7 +3329,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_movcond_i32: return C_O1_I4(r, r, ri, rI, r); case INDEX_op_movcond_i64: - return C_O1_I4(r, r, rA, rI, r); + return C_O1_I4(r, r, rC, rI, r); case INDEX_op_div2_i32: case INDEX_op_div2_i64: @@ -3258,7 +3348,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_add2_i64: case INDEX_op_sub2_i64: - return C_N1_O1_I4(r, r, 0, 1, rA, r); + return C_N1_O1_I4(r, r, 0, 1, rJU, r); case INDEX_op_st_vec: return C_O0_I2(v, r); diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h index e69b0d2dddb..ae448c3a3a3 100644 --- a/tcg/s390x/tcg-target.h +++ b/tcg/s390x/tcg-target.h @@ -138,6 +138,8 @@ extern uint64_t s390_facilities[3]; #define TCG_TARGET_HAS_qemu_ldst_i128 1 +#define TCG_TARGET_HAS_tst 1 + #define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR) #define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR) #define TCG_TARGET_HAS_v256 0 diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index a91defd0ac1..176c98740bc 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -322,7 +322,8 @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type, } /* test if a constant matches the constraint */ -static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece) { if (ct & TCG_CT_CONST) { return 1; @@ -606,9 +607,11 @@ static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1, uns ? 
ARITH_UDIV : ARITH_SDIV); } -static const uint8_t tcg_cond_to_bcond[] = { +static const uint8_t tcg_cond_to_bcond[16] = { [TCG_COND_EQ] = COND_E, [TCG_COND_NE] = COND_NE, + [TCG_COND_TSTEQ] = COND_E, + [TCG_COND_TSTNE] = COND_NE, [TCG_COND_LT] = COND_L, [TCG_COND_GE] = COND_GE, [TCG_COND_LE] = COND_LE, @@ -619,7 +622,7 @@ static const uint8_t tcg_cond_to_bcond[] = { [TCG_COND_GTU] = COND_GU, }; -static const uint8_t tcg_cond_to_rcond[] = { +static const uint8_t tcg_cond_to_rcond[16] = { [TCG_COND_EQ] = RCOND_Z, [TCG_COND_NE] = RCOND_NZ, [TCG_COND_LT] = RCOND_LZ, @@ -645,15 +648,17 @@ static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l) tcg_out_bpcc0(s, scond, flags, off19); } -static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const) +static void tcg_out_cmp(TCGContext *s, TCGCond cond, + TCGReg c1, int32_t c2, int c2const) { - tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC); + tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, + is_tst_cond(cond) ? ARITH_ANDCC : ARITH_SUBCC); } static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1, int32_t arg2, int const_arg2, TCGLabel *l) { - tcg_out_cmp(s, arg1, arg2, const_arg2); + tcg_out_cmp(s, cond, arg1, arg2, const_arg2); tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l); tcg_out_nop(s); } @@ -670,7 +675,7 @@ static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, int32_t c2, int c2const, int32_t v1, int v1const) { - tcg_out_cmp(s, c1, c2, c2const); + tcg_out_cmp(s, cond, c1, c2, c2const); tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const); } @@ -678,7 +683,8 @@ static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1, int32_t arg2, int const_arg2, TCGLabel *l) { /* For 64-bit signed comparisons vs zero, we can avoid the compare. */ - if (arg2 == 0 && !is_unsigned_cond(cond)) { + int rcond = tcg_cond_to_rcond[cond]; + if (arg2 == 0 && rcond) { int off16 = 0; if (l->has_value) { @@ -687,19 +693,18 @@ static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1, tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0); } tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1) - | INSN_COND(tcg_cond_to_rcond[cond]) | off16); + | INSN_COND(rcond) | off16); } else { - tcg_out_cmp(s, arg1, arg2, const_arg2); + tcg_out_cmp(s, cond, arg1, arg2, const_arg2); tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l); } tcg_out_nop(s); } -static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, +static void tcg_out_movr(TCGContext *s, int rcond, TCGReg ret, TCGReg c1, int32_t v1, int v1const) { - tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) - | (tcg_cond_to_rcond[cond] << 10) + tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) | (rcond << 10) | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1))); } @@ -710,11 +715,11 @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, /* For 64-bit signed comparisons vs zero, we can avoid the compare. Note that the immediate range is one bit smaller, so we must check for that as well. 
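On sparc64 the new conditions reduce to choosing the flag-setting instruction in tcg_out_cmp() above: ANDCC instead of SUBCC, after which the existing E/NE entries of tcg_cond_to_bcond apply unchanged. A tiny sketch of the flag semantics this depends on (the function name is illustrative only):

    #include <stdbool.h>
    #include <stdint.h>

    /* "andcc a, b, %g0" sets the Z flag exactly when (a & b) == 0, so TSTEQ
     * branches with COND_E and TSTNE with COND_NE. */
    static bool sparc_andcc_sets_z(uint64_t a, uint64_t b)
    {
        return (a & b) == 0;
    }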
*/ - if (c2 == 0 && !is_unsigned_cond(cond) - && (!v1const || check_fit_i32(v1, 10))) { - tcg_out_movr(s, cond, ret, c1, v1, v1const); + int rcond = tcg_cond_to_rcond[cond]; + if (c2 == 0 && rcond && (!v1const || check_fit_i32(v1, 10))) { + tcg_out_movr(s, rcond, ret, c1, v1, v1const); } else { - tcg_out_cmp(s, c1, c2, c2const); + tcg_out_cmp(s, cond, c1, c2, c2const); tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const); } } @@ -742,6 +747,15 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU); break; + case TCG_COND_TSTEQ: + case TCG_COND_TSTNE: + /* Transform to inequality vs zero. */ + tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_AND); + c1 = TCG_REG_G0; + c2 = TCG_REG_T1, c2const = 0; + cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU); + break; + case TCG_COND_GTU: case TCG_COND_LEU: /* If we don't need to load a constant into a register, we can @@ -758,13 +772,13 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, /* FALLTHRU */ default: - tcg_out_cmp(s, c1, c2, c2const); + tcg_out_cmp(s, cond, c1, c2, c2const); tcg_out_movi_s13(s, ret, 0); tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1); return; } - tcg_out_cmp(s, c1, c2, c2const); + tcg_out_cmp(s, cond, c1, c2, c2const); if (cond == TCG_COND_LTU) { if (neg) { /* 0 - 0 - C = -C = (C ? -1 : 0) */ @@ -787,6 +801,8 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, int32_t c2, int c2const, bool neg) { + int rcond; + if (use_vis3_instructions && !neg) { switch (cond) { case TCG_COND_NE: @@ -796,7 +812,7 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, c2 = c1, c2const = 0, c1 = TCG_REG_G0; /* FALLTHRU */ case TCG_COND_LTU: - tcg_out_cmp(s, c1, c2, c2const); + tcg_out_cmp(s, cond, c1, c2, c2const); tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC); return; default: @@ -806,11 +822,12 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, /* For 64-bit signed comparisons vs zero, we can avoid the compare if the input does not overlap the output. */ - if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) { + rcond = tcg_cond_to_rcond[cond]; + if (c2 == 0 && rcond && c1 != ret) { tcg_out_movi_s13(s, ret, 0); - tcg_out_movr(s, cond, ret, c1, neg ? -1 : 1, 1); + tcg_out_movr(s, rcond, ret, c1, neg ? -1 : 1, 1); } else { - tcg_out_cmp(s, c1, c2, c2const); + tcg_out_cmp(s, cond, c1, c2, c2const); tcg_out_movi_s13(s, ret, 0); tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? 
-1 : 1, 1); } @@ -1098,7 +1115,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_movi_s32(s, TCG_REG_T3, compare_mask); tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND); } - tcg_out_cmp(s, TCG_REG_T2, TCG_REG_T3, 0); + tcg_out_cmp(s, TCG_COND_NE, TCG_REG_T2, TCG_REG_T3, 0); ldst = new_ldst_label(s); ldst->is_ld = is_ld; diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h index f8cf145266f..a18906a14ec 100644 --- a/tcg/sparc64/tcg-target.h +++ b/tcg/sparc64/tcg-target.h @@ -149,6 +149,8 @@ extern bool use_vis3_instructions; #define TCG_TARGET_HAS_qemu_ldst_i128 0 +#define TCG_TARGET_HAS_tst 1 + #define TCG_AREG0 TCG_REG_I0 #define TCG_TARGET_DEFAULT_MO (0) diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h index 6c9d9e48db0..9b0d982f659 100644 --- a/tcg/tcg-internal.h +++ b/tcg/tcg-internal.h @@ -83,6 +83,8 @@ static inline TCGv_i64 TCGV128_HIGH(TCGv_i128 t) bool tcg_target_has_memory_bswap(MemOp memop); +TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind); + /* * Locate or create a read-only temporary that is a constant. * This kind of temporary need not be freed, but for convenience diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c index 2d09a2979d0..d62bd8696d4 100644 --- a/tcg/tcg-op-ldst.c +++ b/tcg/tcg-op-ldst.c @@ -130,23 +130,30 @@ static void tcg_gen_req_mo(TCGBar type) } } +static TCGv_i64 preserve_addr(TCGTemp *addr) +{ + /* Save a copy of the vaddr for use after a load. 
*/ - TCGv_i64 temp = tcg_temp_ebb_new_i64(); - if (tcg_ctx->addr_type == TCG_TYPE_I32) { - tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr)); - } else { - tcg_gen_mov_i64(temp, temp_tcgv_i64(addr)); - } - return temp; + return preserve_addr(addr); } #endif return NULL; } +*/ static void plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi, @@ -182,14 +189,7 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr, MemOpIdx orig_oi, oi; TCGv_i64 copy_addr; TCGOpcode opc; - TCGv_i64 load_size, mmu_idx, addr_loc; - - /* Temporary bugfix - * On some architectures, the addr and val(ret) params are, for some reasons, - * getting optimized after the concrete load operation (ret => addr) - * This trick ensure that the addr value is preseved */ - addr_loc = tcg_temp_new_i64(); - tcg_gen_mov_i64(addr_loc, temp_tcgv_i64(addr)); + TCGv_i64 load_size, mmu_idx; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0); @@ -204,7 +204,7 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr, oi = make_memop_idx(memop, idx); } - copy_addr = plugin_maybe_preserve_addr(addr); + copy_addr = preserve_addr(addr); if (tcg_ctx->addr_type == TCG_TYPE_I32) { opc = INDEX_op_qemu_ld_a32_i32; } else { @@ -218,7 +218,7 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr, load_size = tcg_constant_i64(1 << (memop & MO_SIZE)); mmu_idx = tcg_constant_i64(idx); gen_helper_sym_load_guest_i32(tcgv_i32_expr(val), tcg_env, - addr_loc, tcgv_i64_expr(addr_loc), + copy_addr, tcgv_i64_expr(copy_addr), load_size, mmu_idx); if ((orig_memop ^ memop) & MO_BSWAP) { @@ -344,7 +344,7 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr, oi = make_memop_idx(memop, idx); } - copy_addr = plugin_maybe_preserve_addr(addr); + copy_addr = preserve_addr(addr); if (tcg_ctx->addr_type == TCG_TYPE_I32) { opc = INDEX_op_qemu_ld_a32_i64; } else { @@ -358,7 +358,7 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr, mmu_idx = tcg_constant_i64(idx); load_size = tcg_constant_i64(1 << (memop & MO_SIZE)); gen_helper_sym_load_guest_i64(tcgv_i64_expr(val), tcg_env, - temp_tcgv_i64(addr), tcgv_i64_expr(temp_tcgv_i64(addr)), + copy_addr, tcgv_i64_expr(copy_addr), load_size, mmu_idx); if ((orig_memop ^ memop) & MO_BSWAP) { diff --git a/tcg/tcg.c b/tcg/tcg.c index 6e5b0f19847..c244eb679e9 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -55,7 +55,7 @@ #include "tcg/tcg-ldst.h" #include "tcg/tcg-temp-internal.h" #include "tcg-internal.h" -#include "accel/tcg/perf.h" +#include "tcg/perf.h" #ifdef CONFIG_USER_ONLY #include "exec/user/guest-base.h" #endif @@ -173,7 +173,8 @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target, const TCGHelperInfo *info); static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot); -static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece); +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece); #ifdef TCG_TARGET_NEED_LDST_LABELS static int tcg_out_ldst_finalize(TCGContext *s); #endif @@ -1521,6 +1522,7 @@ void tcg_func_start(TCGContext *s) QTAILQ_INIT(&s->ops); QTAILQ_INIT(&s->free_ops); + s->emit_before_op = NULL; QSIMPLEQ_INIT(&s->labels); tcg_debug_assert(s->addr_type == TCG_TYPE_I32 || @@ -1700,7 +1702,7 @@ TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t off, const char *name) return temp_tcgv_ptr(ts); } -static TCGTemp 
*tcg_temp_new_internal(TCGType type, TCGTempKind kind) +TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind) { TCGContext *s = tcg_ctx; TCGTemp *ts, *ts_expr; @@ -2436,7 +2438,11 @@ static void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, TCGTemp **args) op->args[pi++] = (uintptr_t)info; tcg_debug_assert(pi == total_args); - QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link); + if (tcg_ctx->emit_before_op) { + QTAILQ_INSERT_BEFORE(tcg_ctx->emit_before_op, op, link); + } else { + QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link); + } tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free)); for (i = 0; i < n_extend; ++i) { @@ -2622,7 +2628,9 @@ static const char * const cond_name[] = [TCG_COND_LTU] = "ltu", [TCG_COND_GEU] = "geu", [TCG_COND_LEU] = "leu", - [TCG_COND_GTU] = "gtu" + [TCG_COND_GTU] = "gtu", + [TCG_COND_TSTEQ] = "tsteq", + [TCG_COND_TSTNE] = "tstne", }; static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] = @@ -3352,7 +3360,12 @@ static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs) TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs) { TCGOp *op = tcg_op_alloc(opc, nargs); - QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link); + + if (tcg_ctx->emit_before_op) { + QTAILQ_INSERT_BEFORE(tcg_ctx->emit_before_op, op, link); + } else { + QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link); + } return op; } @@ -4924,6 +4937,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) TCGTemp *ts; TCGArg new_args[TCG_MAX_OP_ARGS]; int const_args[TCG_MAX_OP_ARGS]; + TCGCond op_cond; nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; @@ -4936,6 +4950,33 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) i_allocated_regs = s->reserved_regs; o_allocated_regs = s->reserved_regs; + switch (op->opc) { + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + op_cond = op->args[2]; + break; + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + case INDEX_op_negsetcond_i32: + case INDEX_op_negsetcond_i64: + case INDEX_op_cmp_vec: + op_cond = op->args[3]; + break; + case INDEX_op_brcond2_i32: + op_cond = op->args[4]; + break; + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: + case INDEX_op_setcond2_i32: + case INDEX_op_cmpsel_vec: + op_cond = op->args[5]; + break; + default: + /* No condition within opcode. 
*/ + op_cond = TCG_COND_ALWAYS; + break; + } + /* satisfy input constraints */ for (k = 0; k < nb_iargs; k++) { TCGRegSet i_preferred_regs, i_required_regs; @@ -4949,7 +4990,8 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) ts = arg_temp(arg); if (ts->val_type == TEMP_VAL_CONST - && tcg_target_const_match(ts->val, ts->type, arg_ct->ct, TCGOP_VECE(op))) { + && tcg_target_const_match(ts->val, arg_ct->ct, ts->type, + op_cond, TCGOP_VECE(op))) { /* constant is OK for instruction */ const_args[i] = 1; new_args[i] = ts->val; diff --git a/tcg/tci.c b/tcg/tci.c index 3cc851b7bdf..39adcb7d82e 100644 --- a/tcg/tci.c +++ b/tcg/tci.c @@ -228,6 +228,12 @@ static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition) case TCG_COND_GTU: result = (u0 > u1); break; + case TCG_COND_TSTEQ: + result = (u0 & u1) == 0; + break; + case TCG_COND_TSTNE: + result = (u0 & u1) != 0; + break; default: g_assert_not_reached(); } @@ -270,6 +276,12 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) case TCG_COND_GTU: result = (u0 > u1); break; + case TCG_COND_TSTEQ: + result = (u0 & u1) == 0; + break; + case TCG_COND_TSTNE: + result = (u0 & u1) != 0; + break; default: g_assert_not_reached(); } @@ -1041,6 +1053,8 @@ static const char *str_c(TCGCond c) [TCG_COND_GEU] = "geu", [TCG_COND_LEU] = "leu", [TCG_COND_GTU] = "gtu", + [TCG_COND_TSTEQ] = "tsteq", + [TCG_COND_TSTNE] = "tstne", }; assert((unsigned)c < ARRAY_SIZE(cond)); diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc index 461f4b47ffc..c740864b96d 100644 --- a/tcg/tci/tcg-target.c.inc +++ b/tcg/tci/tcg-target.c.inc @@ -913,7 +913,8 @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, } /* Test if a constant matches the constraint. */ -static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece) { return ct & TCG_CT_CONST; } diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h index 2a13816c8e4..a076f401d21 100644 --- a/tcg/tci/tcg-target.h +++ b/tcg/tci/tcg-target.h @@ -117,6 +117,8 @@ #define TCG_TARGET_HAS_qemu_ldst_i128 0 +#define TCG_TARGET_HAS_tst 1 + /* Number of registers available. */ #define TCG_TARGET_NB_REGS 16 diff --git a/tests/avocado/acpi-bits/bits-tests/smbios.py2 b/tests/avocado/acpi-bits/bits-tests/smbios.py2 index fc623de072a..5868a7137a7 100644 --- a/tests/avocado/acpi-bits/bits-tests/smbios.py2 +++ b/tests/avocado/acpi-bits/bits-tests/smbios.py2 @@ -1060,7 +1060,7 @@ class EventLogDescriptor(unpack.Struct): 0x16: 'Log Area Reset/Cleared', 0x17: 'System boot', xrange(0x18, 0x7F): 'Unused, available for assignment', - xrange(0x80, 0xFE): 'Availalbe for system- and OEM-specific assignments', + xrange(0x80, 0xFE): 'Available for system- and OEM-specific assignments', 0xFF: 'End of log' } yield 'log_type', u.unpack_one('B'), unpack.format_table("{}", _event_log_type_descriptors) diff --git a/tests/avocado/acpi-bits/bits-tests/smilatency.py2 b/tests/avocado/acpi-bits/bits-tests/smilatency.py2 new file mode 100644 index 00000000000..405af67e190 --- /dev/null +++ b/tests/avocado/acpi-bits/bits-tests/smilatency.py2 @@ -0,0 +1,107 @@ +# Copyright (c) 2015, Intel Corporation +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This script runs only from the biosbits VM. + +"""SMI latency test.""" + +import bits +from collections import namedtuple +import testsuite +import time +import usb + +def register_tests(): + pass +# testsuite.add_test("SMI latency test", smi_latency); +# testsuite.add_test("SMI latency test with USB disabled via BIOS handoff", test_with_usb_disabled, runall=False); + +def smi_latency(): + MSR_SMI_COUNT = 0x34 + + print "Warning: touching the keyboard can affect the results of this test." + + tsc_per_sec = bits.tsc_per_sec() + tsc_per_usec = tsc_per_sec / (1000 * 1000) + bins = [long(tsc_per_usec * 10**i) for i in range(9)] + bin_descs = [ + "0 < t <= 1us", + "1us < t <= 10us", + "10us < t <= 100us", + "100us < t <= 1ms", + "1ms < t <= 10ms", + "10ms < t <= 100ms", + "100ms < t <= 1s ", + "1s < t <= 10s ", + "10s < t <= 100s ", + "100s < t ", + ] + + print "Starting test. Wait here, I will be back in 15 seconds." 
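    # Illustrative sketch only, not part of the BITS API: the bins above are
    # decade thresholds expressed in TSC ticks (1us, 10us, ... up to 100s), so
    # classifying a single raw latency sample would look roughly like this,
    # using a hypothetical helper name:
    #
    #   def classify(latency_tsc, bins, bin_descs):
    #       for limit, desc in zip(bins, bin_descs):
    #           if latency_tsc <= limit:
    #               return desc
    #       return bin_descs[-1]   # slower than the largest threshold
    #
    #   classify(5 * tsc_per_usec, bins, bin_descs)  # -> "1us < t <= 10us"
    #
    # The real histogram is built inside bits.smi_latency() below; the test
    # then only asserts that the worst observed latency stays within 150us.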
+ (max_latency, smi_count_delta, bins) = bits.smi_latency(long(15 * tsc_per_sec), bins) + BinType = namedtuple('BinType', ("max", "total", "count", "times")) + bins = [BinType(*b) for b in bins] + + testsuite.test("SMI latency < 150us to minimize risk of OS timeouts", max_latency / tsc_per_usec <= 150) + if not testsuite.show_detail(): + return + + for bin, desc in zip(bins, bin_descs): + if bin.count == 0: + continue + testsuite.print_detail("{}; average = {}; count = {}".format(desc, bits.format_tsc(bin.total/bin.count), bin.count)) + deltas = (bits.format_tsc(t2 - t1) for t1,t2 in zip(bin.times, bin.times[1:])) + testsuite.print_detail(" Times between first few observations: {}".format(" ".join("{:>6}".format(delta) for delta in deltas))) + + if smi_count_delta is not None: + testsuite.print_detail("{} SMI detected using MSR_SMI_COUNT (MSR {:#x})".format(smi_count_delta, MSR_SMI_COUNT)) + + testsuite.print_detail("Summary of impact: observed maximum latency = {}".format(bits.format_tsc(max_latency))) + +def test_with_usb_disabled(): + if usb.handoff_to_os(): + smi_latency() + +def average_io_smi(port, value, count): + def f(): + tsc_start = bits.rdtsc() + bits.outb(port, value) + return bits.rdtsc() - tsc_start + counts = [f() for i in range(count)] + return sum(counts)/len(counts) + +def time_io_smi(port=0xb2, value=0, count=1000): + count_for_estimate = 10 + start = time.time() + average_io_smi(port, value, count_for_estimate) + avg10 = time.time() - start + estimate = avg10 * count/count_for_estimate + if estimate > 1: + print "Running test, estimated time: {}s".format(int(estimate)) + average = average_io_smi(port, value, count) + print "Average of {} SMIs (via outb, port={:#x}, value={:#x}): {}".format(count, port, value, bits.format_tsc(average)) diff --git a/tests/avocado/boot_linux.py b/tests/avocado/boot_linux.py index 7c4769904e3..cdce4cbcba0 100644 --- a/tests/avocado/boot_linux.py +++ b/tests/avocado/boot_linux.py @@ -93,18 +93,25 @@ class BootLinuxPPC64(LinuxTest): timeout = 360 - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - + @skipUnless(os.getenv('SPEED') == 'slow', 'runtime limited') def test_pseries_tcg(self): """ :avocado: tags=machine:pseries :avocado: tags=accel:tcg - :avocado: tags=flaky """ self.require_accelerator("tcg") self.vm.add_args("-accel", "tcg") self.launch_and_wait(set_up_ssh_connection=False) + def test_pseries_kvm(self): + """ + :avocado: tags=machine:pseries + :avocado: tags=accel:kvm + """ + self.require_accelerator("kvm") + self.vm.add_args("-accel", "kvm") + self.vm.add_args("-machine", "cap-ccf-assist=off") + self.launch_and_wait(set_up_ssh_connection=False) class BootLinuxS390X(LinuxTest): """ @@ -113,13 +120,11 @@ class BootLinuxS390X(LinuxTest): timeout = 240 - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - + @skipUnless(os.getenv('SPEED') == 'slow', 'runtime limited') def test_s390_ccw_virtio_tcg(self): """ :avocado: tags=machine:s390-ccw-virtio :avocado: tags=accel:tcg - :avocado: tags=flaky """ self.require_accelerator("tcg") self.vm.add_args("-accel", "tcg") diff --git a/tests/avocado/boot_linux_console.py b/tests/avocado/boot_linux_console.py index 3f0180e1f8e..989b65111c0 100644 --- a/tests/avocado/boot_linux_console.py +++ b/tests/avocado/boot_linux_console.py @@ -501,6 +501,103 @@ def test_arm_raspi2_initrd(self): # Wait for VM to shut down gracefully self.vm.wait() + def test_arm_raspi4(self): + """ + :avocado: tags=arch:aarch64 + :avocado: tags=machine:raspi4b + 
:avocado: tags=device:pl011 + :avocado: tags=accel:tcg + :avocado: tags=rpi4b + + The kernel can be rebuilt using the kernel source referenced + and following the instructions on the on: + https://www.raspberrypi.org/documentation/linux/kernel/building.md + """ + + deb_url = ('http://archive.raspberrypi.org/debian/' + 'pool/main/r/raspberrypi-firmware/' + 'raspberrypi-kernel_1.20230106-1_arm64.deb') + deb_hash = '08dc55696535b18a6d4fe6fa10d4c0d905cbb2ed' + deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) + kernel_path = self.extract_from_deb(deb_path, '/boot/kernel8.img') + dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2711-rpi-4-b.dtb') + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'earlycon=pl011,mmio32,0xfe201000 ' + + 'console=ttyAMA0,115200 ' + + 'root=/dev/mmcblk1p2 rootwait ' + + 'dwc_otg.fiq_fsm_enable=0') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-append', kernel_command_line) + # When PCI is supported we can add a USB controller: + # '-device', 'qemu-xhci,bus=pcie.1,id=xhci', + # '-device', 'usb-kbd,bus=xhci.0', + self.vm.launch() + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + # When USB is enabled we can look for this + # console_pattern = 'Product: QEMU USB Keyboard' + # self.wait_for_console_pattern(console_pattern) + console_pattern = 'Waiting for root device' + self.wait_for_console_pattern(console_pattern) + + + def test_arm_raspi4_initrd(self): + """ + :avocado: tags=arch:aarch64 + :avocado: tags=machine:raspi4b + :avocado: tags=device:pl011 + :avocado: tags=accel:tcg + :avocado: tags=rpi4b + + The kernel can be rebuilt using the kernel source referenced + and following the instructions on the on: + https://www.raspberrypi.org/documentation/linux/kernel/building.md + """ + deb_url = ('http://archive.raspberrypi.org/debian/' + 'pool/main/r/raspberrypi-firmware/' + 'raspberrypi-kernel_1.20230106-1_arm64.deb') + deb_hash = '08dc55696535b18a6d4fe6fa10d4c0d905cbb2ed' + deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) + kernel_path = self.extract_from_deb(deb_path, '/boot/kernel8.img') + dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2711-rpi-4-b.dtb') + + initrd_url = ('https://github.com/groeck/linux-build-test/raw/' + '86b2be1384d41c8c388e63078a847f1e1c4cb1de/rootfs/' + 'arm64/rootfs.cpio.gz') + initrd_hash = 'f3d4f9fa92a49aa542f1b44d34be77bbf8ca5b9d' + initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) + initrd_path = os.path.join(self.workdir, 'rootfs.cpio') + archive.gzip_uncompress(initrd_path_gz, initrd_path) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'earlycon=pl011,mmio32,0xfe201000 ' + + 'console=ttyAMA0,115200 ' + + 'panic=-1 noreboot ' + + 'dwc_otg.fiq_fsm_enable=0') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-no-reboot') + # When PCI is supported we can add a USB controller: + # '-device', 'qemu-xhci,bus=pcie.1,id=xhci', + # '-device', 'usb-kbd,bus=xhci.0', + self.vm.launch() + self.wait_for_console_pattern('Boot successful.') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'BCM2835') + exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', + 'cprman@7e101000') + exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted') + # TODO: Raspberry Pi4 doesn't shut down properly with recent kernels + # Wait for VM to shut down 
gracefully + #self.vm.wait() + def test_arm_exynos4210_initrd(self): """ :avocado: tags=arch:arm @@ -1368,7 +1465,8 @@ def do_test_ppc64_powernv(self, proc): self.wait_for_console_pattern("CPU: " + proc + " generation processor") self.wait_for_console_pattern("zImage starting: loaded") self.wait_for_console_pattern("Run /init as init process") - self.wait_for_console_pattern("Creating 1 MTD partitions") + # Device detection output driven by udev probing is sometimes cut off + # from console output, suspect S14silence-console init script. def test_ppc_powernv8(self): """ @@ -1386,6 +1484,14 @@ def test_ppc_powernv9(self): """ self.do_test_ppc64_powernv('P9') + def test_ppc_powernv10(self): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:powernv10 + :avocado: tags=accel:tcg + """ + self.do_test_ppc64_powernv('P10') + def test_ppc_g3beige(self): """ :avocado: tags=arch:ppc diff --git a/tests/avocado/kvm_xen_guest.py b/tests/avocado/kvm_xen_guest.py index 5391283113e..f8cb458d5db 100644 --- a/tests/avocado/kvm_xen_guest.py +++ b/tests/avocado/kvm_xen_guest.py @@ -59,7 +59,7 @@ def common_vm_setup(self): def run_and_check(self): self.vm.add_args('-kernel', self.kernel_path, '-append', self.kernel_params, - '-drive', f"file={self.rootfs},if=none,format=raw,id=drv0", + '-drive', f"file={self.rootfs},if=none,snapshot=on,format=raw,id=drv0", '-device', 'xen-disk,drive=drv0,vdev=xvda', '-device', 'virtio-net-pci,netdev=unet', '-netdev', 'user,id=unet,hostfwd=:127.0.0.1:0-:22') diff --git a/tests/avocado/machine_aarch64_sbsaref.py b/tests/avocado/machine_aarch64_sbsaref.py index 528c7d2934a..98c76c1ff70 100644 --- a/tests/avocado/machine_aarch64_sbsaref.py +++ b/tests/avocado/machine_aarch64_sbsaref.py @@ -1,6 +1,6 @@ # Functional test that boots a Linux kernel and checks the console # -# SPDX-FileCopyrightText: 2023 Linaro Ltd. +# SPDX-FileCopyrightText: 2023-2024 Linaro Ltd. 
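# Note on the firmware hunks below (illustrative, not part of this header):
# the flash images move to SHA256 asset hashes, which with Avocado's
# fetch_asset() only requires passing the algorithm explicitly, e.g.:
#
#   path = self.fetch_asset(url, asset_hash=sha256_digest, algorithm='sha256')
#
# where url and sha256_digest stand for the codelinaro.org artifact URL and
# its digest as listed in the hunks that follow.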
# SPDX-FileContributor: Philippe Mathieu-Daudé # SPDX-FileContributor: Marcin Juszkiewicz # @@ -32,34 +32,36 @@ def fetch_firmware(self): """ Flash volumes generated using: - - Fedora GNU Toolchain version 13.2.1 20230728 (Red Hat 13.2.1-1) + Toolchain from Debian: + aarch64-linux-gnu-gcc (Debian 12.2.0-14) 12.2.0 - - Trusted Firmware-A - https://github.com/ARM-software/arm-trusted-firmware/tree/7c3ff62d + Used components: + + - Trusted Firmware 2.10.2 + - Tianocore EDK2 stable202402 + - Tianocore EDK2-platforms commit 085c2fb - - Tianocore EDK II - https://github.com/tianocore/edk2/tree/0f9283429dd4 - https://github.com/tianocore/edk2/tree/ad1c0394b177 - https://github.com/tianocore/edk2-platforms/tree/d03a60523a60 """ # Secure BootRom (TF-A code) fs0_xz_url = ( - "https://fileserver.linaro.org/s/rE43RJyTfxPtBkc/" - "download/SBSA_FLASH0.fd.xz" + "https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/" + "20240313-116475/edk2/SBSA_FLASH0.fd.xz" ) - fs0_xz_hash = "cdb8e4ffdaaa79292b7b465693f9e5fae6b7062d" - tar_xz_path = self.fetch_asset(fs0_xz_url, asset_hash=fs0_xz_hash) + fs0_xz_hash = "637593749cc307dea7dc13265c32e5d020267552f22b18a31850b8429fc5e159" + tar_xz_path = self.fetch_asset(fs0_xz_url, asset_hash=fs0_xz_hash, + algorithm='sha256') archive.extract(tar_xz_path, self.workdir) fs0_path = os.path.join(self.workdir, "SBSA_FLASH0.fd") # Non-secure rom (UEFI and EFI variables) fs1_xz_url = ( - "https://fileserver.linaro.org/s/AGWPDXbcqJTKS4R/" - "download/SBSA_FLASH1.fd.xz" + "https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/" + "20240313-116475/edk2/SBSA_FLASH1.fd.xz" ) - fs1_xz_hash = "411155ae6984334714dff08d5d628178e790c875" - tar_xz_path = self.fetch_asset(fs1_xz_url, asset_hash=fs1_xz_hash) + fs1_xz_hash = "cb0a5e8cf5e303c5d3dc106cfd5943ffe9714b86afddee7164c69ee1dd41991c" + tar_xz_path = self.fetch_asset(fs1_xz_url, asset_hash=fs1_xz_hash, + algorithm='sha256') archive.extract(tar_xz_path, self.workdir) fs1_path = os.path.join(self.workdir, "SBSA_FLASH1.fd") @@ -96,15 +98,15 @@ def test_sbsaref_edk2_firmware(self): # AP Trusted ROM wait_for_console_pattern(self, "Booting Trusted Firmware") - wait_for_console_pattern(self, "BL1: v2.9(release):v2.9") + wait_for_console_pattern(self, "BL1: v2.10.2(release):") wait_for_console_pattern(self, "BL1: Booting BL2") # Trusted Boot Firmware - wait_for_console_pattern(self, "BL2: v2.9(release)") + wait_for_console_pattern(self, "BL2: v2.10.2(release)") wait_for_console_pattern(self, "Booting BL31") # EL3 Runtime Software - wait_for_console_pattern(self, "BL31: v2.9(release)") + wait_for_console_pattern(self, "BL31: v2.10.2(release)") # Non-trusted Firmware wait_for_console_pattern(self, "UEFI firmware (version 1.0") @@ -130,10 +132,6 @@ def boot_alpine_linux(self, cpu): cpu, "-drive", f"file={iso_path},format=raw", - "-device", - "virtio-rng-pci,rng=rng0", - "-object", - "rng-random,id=rng0,filename=/dev/urandom", ) self.vm.launch() @@ -142,18 +140,36 @@ def boot_alpine_linux(self, cpu): def test_sbsaref_alpine_linux_cortex_a57(self): """ :avocado: tags=cpu:cortex-a57 + :avocado: tags=os:linux """ self.boot_alpine_linux("cortex-a57") def test_sbsaref_alpine_linux_neoverse_n1(self): """ :avocado: tags=cpu:neoverse-n1 + :avocado: tags=os:linux """ self.boot_alpine_linux("neoverse-n1") + def test_sbsaref_alpine_linux_max_pauth_off(self): + """ + :avocado: tags=cpu:max + :avocado: tags=os:linux + """ + self.boot_alpine_linux("max,pauth=off") + + def test_sbsaref_alpine_linux_max_pauth_impdef(self): + """ + :avocado: 
tags=cpu:max + :avocado: tags=os:linux + """ + self.boot_alpine_linux("max,pauth-impdef=on") + + @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') def test_sbsaref_alpine_linux_max(self): """ :avocado: tags=cpu:max + :avocado: tags=os:linux """ self.boot_alpine_linux("max") @@ -177,10 +193,6 @@ def boot_openbsd73(self, cpu): cpu, "-drive", f"file={img_path},format=raw", - "-device", - "virtio-rng-pci,rng=rng0", - "-object", - "rng-random,id=rng0,filename=/dev/urandom", ) self.vm.launch() @@ -191,18 +203,36 @@ def boot_openbsd73(self, cpu): def test_sbsaref_openbsd73_cortex_a57(self): """ :avocado: tags=cpu:cortex-a57 + :avocado: tags=os:openbsd """ self.boot_openbsd73("cortex-a57") def test_sbsaref_openbsd73_neoverse_n1(self): """ :avocado: tags=cpu:neoverse-n1 + :avocado: tags=os:openbsd """ self.boot_openbsd73("neoverse-n1") + def test_sbsaref_openbsd73_max_pauth_off(self): + """ + :avocado: tags=cpu:max + :avocado: tags=os:openbsd + """ + self.boot_openbsd73("max,pauth=off") + + @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') + def test_sbsaref_openbsd73_max_pauth_impdef(self): + """ + :avocado: tags=cpu:max + :avocado: tags=os:openbsd + """ + self.boot_openbsd73("max,pauth-impdef=on") + + @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') def test_sbsaref_openbsd73_max(self): """ :avocado: tags=cpu:max + :avocado: tags=os:openbsd """ self.boot_openbsd73("max") - diff --git a/tests/avocado/machine_aspeed.py b/tests/avocado/machine_aspeed.py index 6fa5459a07b..cec01814245 100644 --- a/tests/avocado/machine_aspeed.py +++ b/tests/avocado/machine_aspeed.py @@ -155,6 +155,7 @@ def do_test_arm_aspeed_buildroot_start(self, image, cpu_id, pattern='Aspeed EVB' time.sleep(0.1) exec_command(self, 'root') time.sleep(0.1) + exec_command(self, "passw0rd") def do_test_arm_aspeed_buildroot_poweroff(self): exec_command_and_wait_for_pattern(self, 'poweroff', @@ -167,14 +168,14 @@ def test_arm_ast2500_evb_buildroot(self): """ image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' - 'images/ast2500-evb/buildroot-2022.11-2-g15d3648df9/flash.img') - image_hash = ('f96d11db521fe7a2787745e9e391225deeeec3318ee0fc07c8b799b8833dd474') + 'images/ast2500-evb/buildroot-2023.11/flash.img') + image_hash = ('c23db6160cf77d0258397eb2051162c8473a56c441417c52a91ba217186e715f') image_path = self.fetch_asset(image_url, asset_hash=image_hash, algorithm='sha256') self.vm.add_args('-device', 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test'); - self.do_test_arm_aspeed_buildroot_start(image_path, '0x0') + self.do_test_arm_aspeed_buildroot_start(image_path, '0x0', 'Aspeed AST2500 EVB') exec_command_and_wait_for_pattern(self, 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device', @@ -195,8 +196,8 @@ def test_arm_ast2600_evb_buildroot(self): """ image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' - 'images/ast2600-evb/buildroot-2022.11-2-g15d3648df9/flash.img') - image_hash = ('e598d86e5ea79671ca8b59212a326c911bc8bea728dec1a1f5390d717a28bb8b') + 'images/ast2600-evb/buildroot-2023.11/flash.img') + image_hash = ('b62808daef48b438d0728ee07662290490ecfa65987bb91294cafb1bb7ad1a68') image_path = self.fetch_asset(image_url, asset_hash=image_hash, algorithm='sha256') @@ -206,17 +207,17 @@ def test_arm_ast2600_evb_buildroot(self): 'ds1338,bus=aspeed.i2c.bus.3,address=0x32'); self.vm.add_args('-device', 'i2c-echo,bus=aspeed.i2c.bus.3,address=0x42'); - self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00') + 
self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00', 'Aspeed AST2600 EVB') exec_command_and_wait_for_pattern(self, 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device', 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d'); exec_command_and_wait_for_pattern(self, - 'cat /sys/class/hwmon/hwmon0/temp1_input', '0') + 'cat /sys/class/hwmon/hwmon1/temp1_input', '0') self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test', property='temperature', value=18000); exec_command_and_wait_for_pattern(self, - 'cat /sys/class/hwmon/hwmon0/temp1_input', '18000') + 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000') exec_command_and_wait_for_pattern(self, 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-3/device/new_device', @@ -261,7 +262,6 @@ def test_arm_ast2600_evb_buildroot_tpm(self): self.vm.add_args('-device', 'tpm-tis-i2c,tpmdev=tpm0,bus=aspeed.i2c.bus.12,address=0x2e') self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00', 'Aspeed AST2600 EVB') - exec_command(self, "passw0rd") exec_command_and_wait_for_pattern(self, 'echo tpm_tis_i2c 0x2e > /sys/bus/i2c/devices/i2c-12/new_device', diff --git a/tests/avocado/machine_microblaze.py b/tests/avocado/machine_microblaze.py index 8d0efff30d2..807709cd11e 100644 --- a/tests/avocado/machine_microblaze.py +++ b/tests/avocado/machine_microblaze.py @@ -5,6 +5,8 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. +import time +from avocado_qemu import exec_command, exec_command_and_wait_for_pattern from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado.utils import archive @@ -33,3 +35,27 @@ def test_microblaze_s3adsp1800(self): # The kernel sometimes gets stuck after the "This architecture ..." # message, that's why we don't test for a later string here. This # needs some investigation by a microblaze wizard one day... + + def test_microblazeel_s3adsp1800(self): + """ + :avocado: tags=arch:microblazeel + :avocado: tags=machine:petalogix-s3adsp1800 + """ + + self.require_netdev('user') + tar_url = ('http://www.qemu-advent-calendar.org/2023/download/' + 'day13.tar.gz') + tar_hash = '6623d5fff5f84cfa8f34e286f32eff6a26546f44' + file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) + archive.extract(file_path, self.workdir) + self.vm.set_console() + self.vm.add_args('-kernel', self.workdir + '/day13/xmaton.bin') + self.vm.add_args('-nic', 'user,tftp=' + self.workdir + '/day13/') + self.vm.launch() + wait_for_console_pattern(self, 'QEMU Advent Calendar 2023') + time.sleep(0.1) + exec_command(self, 'root') + time.sleep(0.1) + exec_command_and_wait_for_pattern(self, + 'tftp -g -r xmaton.png 10.0.2.2 ; md5sum xmaton.png', + '821cd3cab8efd16ad6ee5acc3642a8ea') diff --git a/tests/avocado/mem-addr-space-check.py b/tests/avocado/mem-addr-space-check.py index 363c3f12a64..af019969c06 100644 --- a/tests/avocado/mem-addr-space-check.py +++ b/tests/avocado/mem-addr-space-check.py @@ -165,7 +165,7 @@ def test_phybits_low_tcg_q35_70_amd(self): For q35-7.0 machines, "above 4G" memory starts are 4G. pci64_hole size is 32 GiB. Since TCG_PHYS_ADDR_BITS is defined to be 40, TCG emulated CPUs have maximum of 1 TiB (1024 GiB) of - directly addressible memory. + directly addressable memory. Hence, maxmem value at most can be 1024 GiB - 4 GiB - 1 GiB per slot for alignment - 32 GiB + 0.5 GiB which is equal to 987.5 GiB. 
Setting the value to 988 GiB should @@ -190,7 +190,7 @@ def test_phybits_low_tcg_q35_71_amd(self): AMD_HT_START is defined to be at 1012 GiB. So for q35 machines version > 7.0 and AMD cpus, instead of 1024 GiB limit for 40 bit processor address space, it has to be 1012 GiB , that is 12 GiB - less than the case above in order to accomodate HT hole. + less than the case above in order to accommodate HT hole. Make sure QEMU fails when maxmem size is 976 GiB (12 GiB less than 988 GiB). """ @@ -297,7 +297,7 @@ def test_phybits_ok_tcg_q35_71_amd_41bits(self): :avocado: tags=arch:x86_64 AMD processor with 41 bits. Max cpu hw address = 2 TiB. - Same as above but by setting maxram beween 976 GiB and 992 Gib, + Same as above but by setting maxram between 976 GiB and 992 Gib, QEMU should start fine. """ self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41', diff --git a/tests/avocado/migration.py b/tests/avocado/migration.py index 09b62f813eb..be6234b3c24 100644 --- a/tests/avocado/migration.py +++ b/tests/avocado/migration.py @@ -123,7 +123,6 @@ class PPC64(MigrationTest): """ :avocado: tags=arch:ppc64 :avocado: tags=machine:pseries - :avocado: tags=cpu:power9_v2.0 """ def test_migration_with_tcp_localhost(self): diff --git a/tests/avocado/ppc_hv_tests.py b/tests/avocado/ppc_hv_tests.py new file mode 100644 index 00000000000..bf8822bb97b --- /dev/null +++ b/tests/avocado/ppc_hv_tests.py @@ -0,0 +1,206 @@ +# Tests that specifically try to exercise hypervisor features of the +# target machines. powernv supports the Power hypervisor ISA, and +# pseries supports the nested-HV hypervisor spec. +# +# Copyright (c) 2023 IBM Corporation +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from avocado import skipIf, skipUnless +from avocado.utils import archive +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern, exec_command +import os +import time +import subprocess +from datetime import datetime + +deps = ["xorriso"] # dependent tools needed in the test setup/box. + +def which(tool): + """ looks up the full path for @tool, returns None if not found + or if @tool does not have executable permissions. + """ + paths=os.getenv('PATH') + for p in paths.split(os.path.pathsep): + p = os.path.join(p, tool) + if os.path.exists(p) and os.access(p, os.X_OK): + return p + return None + +def missing_deps(): + """ returns True if any of the test dependent tools are absent. + """ + for dep in deps: + if which(dep) is None: + return True + return False + +# Alpine is a light weight distro that supports QEMU. These tests boot +# that on the machine then run a QEMU guest inside it in KVM mode, +# that runs the same Alpine distro image. +# QEMU packages are downloaded and installed on each test. That's not a +# large download, but it may be more polite to create qcow2 image with +# QEMU already installed and use that. +# XXX: The order of these tests seems to matter, see git blame. 
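# For illustration only (assumed invocation, not taken from this patch): the
# decorators below gate the whole class on the dependency check above plus
# three environment switches, so a run that actually exercises these tests
# would look roughly like:
#
#   QEMU_TEST_FLAKY_TESTS=1 AVOCADO_ALLOW_LARGE_STORAGE=1 SPEED=slow \
#       avocado run tests/avocado/ppc_hv_tests.py
#
# missing_deps() only checks that each tool in deps (currently xorriso) is
# present on $PATH and executable; it does not check versions.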
+@skipIf(missing_deps(), 'dependencies (%s) not installed' % ','.join(deps)) +@skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test sometimes gets stuck due to console handling problem') +@skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited') +@skipUnless(os.getenv('SPEED') == 'slow', 'runtime limited') +class HypervisorTest(QemuSystemTest): + + timeout = 1000 + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 ' + panic_message = 'Kernel panic - not syncing' + good_message = 'VFS: Cannot open root device' + + def extract_from_iso(self, iso, path): + """ + Extracts a file from an iso file into the test workdir + + :param iso: path to the iso file + :param path: path within the iso file of the file to be extracted + :returns: path of the extracted file + """ + filename = os.path.basename(path) + + cwd = os.getcwd() + os.chdir(self.workdir) + + with open(filename, "w") as outfile: + cmd = "xorriso -osirrox on -indev %s -cpx %s %s" % (iso, path, filename) + subprocess.run(cmd.split(), + stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + + os.chdir(cwd) + + # Return complete path to extracted file. Because callers to + # extract_from_iso() specify 'path' with a leading slash, it is + # necessary to use os.path.relpath() as otherwise os.path.join() + # interprets it as an absolute path and drops the self.workdir part. + return os.path.normpath(os.path.join(self.workdir, filename)) + + def setUp(self): + super().setUp() + + iso_url = ('https://dl-cdn.alpinelinux.org/alpine/v3.18/releases/ppc64le/alpine-standard-3.18.4-ppc64le.iso') + + # Alpine use sha256 so I recalculated this myself + iso_sha256 = 'c26b8d3e17c2f3f0fed02b4b1296589c2390e6d5548610099af75300edd7b3ff' + iso_path = self.fetch_asset(iso_url, asset_hash=iso_sha256, + algorithm = "sha256") + + self.iso_path = iso_path + self.vmlinuz = self.extract_from_iso(iso_path, '/boot/vmlinuz-lts') + self.initramfs = self.extract_from_iso(iso_path, '/boot/initramfs-lts') + + def do_start_alpine(self): + self.vm.set_console() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + self.vm.add_args("-kernel", self.vmlinuz) + self.vm.add_args("-initrd", self.initramfs) + self.vm.add_args("-smp", "4", "-m", "2g") + self.vm.add_args("-drive", f"file={self.iso_path},format=raw,if=none,id=drive0") + + self.vm.launch() + wait_for_console_pattern(self, 'Welcome to Alpine Linux 3.18') + exec_command(self, 'root') + wait_for_console_pattern(self, 'localhost login:') + wait_for_console_pattern(self, 'You may change this message by editing /etc/motd.') + # If the time is wrong, SSL certificates can fail. + exec_command(self, 'date -s "' + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S' + '"')) + exec_command(self, 'setup-alpine -qe') + wait_for_console_pattern(self, 'Updating repository indexes... 
done.') + + def do_stop_alpine(self): + exec_command(self, 'poweroff') + wait_for_console_pattern(self, 'alpine:~#') + self.vm.wait() + + def do_setup_kvm(self): + exec_command(self, 'echo http://dl-cdn.alpinelinux.org/alpine/v3.18/main > /etc/apk/repositories') + wait_for_console_pattern(self, 'alpine:~#') + exec_command(self, 'echo http://dl-cdn.alpinelinux.org/alpine/v3.18/community >> /etc/apk/repositories') + wait_for_console_pattern(self, 'alpine:~#') + exec_command(self, 'apk update') + wait_for_console_pattern(self, 'alpine:~#') + exec_command(self, 'apk add qemu-system-ppc64') + wait_for_console_pattern(self, 'alpine:~#') + exec_command(self, 'modprobe kvm-hv') + wait_for_console_pattern(self, 'alpine:~#') + + # This uses the host's block device as the source file for guest block + # device for install media. This is a bit hacky but allows reuse of the + # iso without having a passthrough filesystem configured. + def do_test_kvm(self, hpt=False): + if hpt: + append = 'disable_radix' + else: + append = '' + exec_command(self, 'qemu-system-ppc64 -nographic -smp 2 -m 1g ' + '-machine pseries,x-vof=on,accel=kvm ' + '-machine cap-cfpc=broken,cap-sbbc=broken,' + 'cap-ibs=broken,cap-ccf-assist=off ' + '-drive file=/dev/nvme0n1,format=raw,readonly=on ' + '-initrd /media/nvme0n1/boot/initramfs-lts ' + '-kernel /media/nvme0n1/boot/vmlinuz-lts ' + '-append \'usbcore.nousb ' + append + '\'') + # Alpine 3.18 kernel seems to crash in XHCI USB driver. + wait_for_console_pattern(self, 'Welcome to Alpine Linux 3.18') + exec_command(self, 'root') + wait_for_console_pattern(self, 'localhost login:') + wait_for_console_pattern(self, 'You may change this message by editing /etc/motd.') + exec_command(self, 'poweroff >& /dev/null') + wait_for_console_pattern(self, 'localhost:~#') + wait_for_console_pattern(self, 'reboot: Power down') + time.sleep(1) + exec_command(self, '') + wait_for_console_pattern(self, 'alpine:~#') + + def test_hv_pseries(self): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:pseries + :avocado: tags=accel:tcg + """ + self.require_accelerator("tcg") + self.vm.add_args("-accel", "tcg,thread=multi") + self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0') + self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on") + self.do_start_alpine() + self.do_setup_kvm() + self.do_test_kvm() + self.do_stop_alpine() + + def test_hv_pseries_kvm(self): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:pseries + :avocado: tags=accel:kvm + """ + self.require_accelerator("kvm") + self.vm.add_args("-accel", "kvm") + self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0') + self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on,cap-ccf-assist=off") + self.do_start_alpine() + self.do_setup_kvm() + self.do_test_kvm() + self.do_stop_alpine() + + def test_hv_powernv(self): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:powernv + :avocado: tags=accel:tcg + """ + self.require_accelerator("tcg") + self.vm.add_args("-accel", "tcg,thread=multi") + self.vm.add_args('-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234,drive=drive0', + '-device', 'e1000e,netdev=net0,mac=C0:FF:EE:00:00:02,bus=pcie.0,addr=0x0', + '-netdev', 'user,id=net0,hostfwd=::20022-:22,hostname=alpine') + self.do_start_alpine() + self.do_setup_kvm() + self.do_test_kvm() + self.do_test_kvm(True) + self.do_stop_alpine() diff --git a/tests/avocado/ppc_powernv.py b/tests/avocado/ppc_powernv.py index d0e5c07bde1..4342941d5db 100644 --- a/tests/avocado/ppc_powernv.py +++ b/tests/avocado/ppc_powernv.py @@ -12,11 
+12,11 @@ class powernvMachine(QemuSystemTest): timeout = 90 - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 ' panic_message = 'Kernel panic - not syncing' good_message = 'VFS: Cannot open root device' - def do_test_linux_boot(self): + def do_test_linux_boot(self, command_line = KERNEL_COMMON_COMMAND_LINE): self.require_accelerator("tcg") kernel_url = ('https://archives.fedoraproject.org/pub/archive' '/fedora-secondary/releases/29/Everything/ppc64le/os' @@ -25,9 +25,8 @@ def do_test_linux_boot(self): kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=hvc0' self.vm.add_args('-kernel', kernel_path, - '-append', kernel_command_line) + '-append', command_line) self.vm.launch() def test_linux_boot(self): @@ -54,6 +53,22 @@ def test_linux_smp_boot(self): wait_for_console_pattern(self, console_pattern, self.panic_message) wait_for_console_pattern(self, self.good_message, self.panic_message) + def test_linux_smp_hpt_boot(self): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:powernv + :avocado: tags=accel:tcg + """ + + self.vm.add_args('-smp', '4') + self.do_test_linux_boot(self.KERNEL_COMMON_COMMAND_LINE + + 'disable_radix') + console_pattern = 'smp: Brought up 1 node, 4 CPUs' + wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu', + self.panic_message) + wait_for_console_pattern(self, console_pattern, self.panic_message) + wait_for_console_pattern(self, self.good_message, self.panic_message) + def test_linux_smt_boot(self): """ :avocado: tags=arch:ppc64 diff --git a/tests/avocado/ppc_pseries.py b/tests/avocado/ppc_pseries.py index a8311e65554..74aaa4ac4ad 100644 --- a/tests/avocado/ppc_pseries.py +++ b/tests/avocado/ppc_pseries.py @@ -12,11 +12,11 @@ class pseriesMachine(QemuSystemTest): timeout = 90 - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 ' panic_message = 'Kernel panic - not syncing' good_message = 'VFS: Cannot open root device' - def do_test_ppc64_linux_boot(self): + def do_test_ppc64_linux_boot(self, kernel_command_line = KERNEL_COMMON_COMMAND_LINE): kernel_url = ('https://archives.fedoraproject.org/pub/archive' '/fedora-secondary/releases/29/Everything/ppc64le/os' '/ppc/ppc64/vmlinuz') @@ -24,7 +24,6 @@ def do_test_ppc64_linux_boot(self): kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=hvc0' self.vm.add_args('-kernel', kernel_path, '-append', kernel_command_line) self.vm.launch() @@ -62,6 +61,21 @@ def test_ppc64_linux_smp_boot(self): wait_for_console_pattern(self, console_pattern, self.panic_message) wait_for_console_pattern(self, self.good_message, self.panic_message) + def test_ppc64_linux_hpt_smp_boot(self): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:pseries + """ + + self.vm.add_args('-smp', '4') + self.do_test_ppc64_linux_boot(self.KERNEL_COMMON_COMMAND_LINE + + 'disable_radix') + console_pattern = 'smp: Brought up 1 node, 4 CPUs' + wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu', + self.panic_message) + wait_for_console_pattern(self, console_pattern, self.panic_message) + wait_for_console_pattern(self, self.good_message, self.panic_message) + def test_ppc64_linux_smt_boot(self): """ :avocado: tags=arch:ppc64 diff --git a/tests/avocado/replay_kernel.py b/tests/avocado/replay_kernel.py index 
c37afa662c2..10d99403a4c 100644 --- a/tests/avocado/replay_kernel.py +++ b/tests/avocado/replay_kernel.py @@ -82,7 +82,23 @@ def run_rr(self, kernel_path, kernel_command_line, console_pattern, class ReplayKernelNormal(ReplayKernelBase): - # See https://gitlab.com/qemu-project/qemu/-/issues/2010 + def test_i386_pc(self): + """ + :avocado: tags=arch:i386 + :avocado: tags=machine:pc + """ + kernel_url = ('https://storage.tuxboot.com/20230331/i386/bzImage') + kernel_hash = 'a3e5b32a354729e65910f5a1ffcda7c14a6c12a55e8213fb86e277f1b76ed956' + kernel_path = self.fetch_asset(kernel_url, + asset_hash=kernel_hash, + algorithm = "sha256") + + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' + console_pattern = 'VFS: Cannot open root device' + + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) + + # See https://gitlab.com/qemu-project/qemu/-/issues/2094 @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test sometimes gets stuck') def test_x86_64_pc(self): """ @@ -119,8 +135,6 @@ def test_mips_malta(self): self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) - # See https://gitlab.com/qemu-project/qemu/-/issues/2013 - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') def test_mips64el_malta(self): """ This test requires the ar tool to extract "data.tar.gz" from @@ -136,7 +150,6 @@ def test_mips64el_malta(self): :avocado: tags=arch:mips64el :avocado: tags=machine:malta - :avocado: tags=flaky """ deb_url = ('http://snapshot.debian.org/archive/debian/' '20130217T032700Z/pool/main/l/linux-2.6/' @@ -184,13 +197,10 @@ def test_arm_virt(self): self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1) - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - def test_arm_cubieboard_initrd(self): """ :avocado: tags=arch:arm :avocado: tags=machine:cubieboard - :avocado: tags=flaky """ deb_url = ('https://apt.armbian.com/pool/main/l/' 'linux-5.10.16-sunxi/linux-image-current-sunxi_21.02.2_armhf.deb') @@ -338,7 +348,6 @@ def test_m68k_mcf5208evb(self): file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'sanity-clause.elf') - @skip("Test currently broken") # Console stuck as of 5.2-rc1 def test_microblaze_s3adsp1800(self): """ :avocado: tags=arch:microblaze @@ -373,7 +382,6 @@ def test_or1k_sim(self): file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'vmlinux') - @skip("nios2 emulation is buggy under record/replay") def test_nios2_10m50(self): """ :avocado: tags=arch:nios2 diff --git a/tests/avocado/replay_linux.py b/tests/avocado/replay_linux.py index 270ccc1eae8..f3a43dc98c2 100644 --- a/tests/avocado/replay_linux.py +++ b/tests/avocado/replay_linux.py @@ -48,12 +48,15 @@ def vm_add_disk(self, vm, path, id, device): bus_string = '' if self.bus: bus_string = ',bus=%s.%d' % (self.bus, id,) - vm.add_args('-drive', 'file=%s,snapshot,id=disk%s,if=none' % (path, id)) + vm.add_args('-drive', 'file=%s,snapshot=on,id=disk%s,if=none' % (path, id)) vm.add_args('-drive', 'driver=blkreplay,id=disk%s-rr,if=none,image=disk%s' % (id, id)) vm.add_args('-device', '%s,drive=disk%s-rr%s' % (device, id, bus_string)) + def vm_add_cdrom(self, vm, path, id, device): + vm.add_args('-drive', 'file=%s,id=disk%s,if=none,media=cdrom' % (path, id)) + def launch_and_wait(self, record, args, shift): self.require_netdev('user') vm = self.get_vm() @@ -65,7 +68,7 @@ def launch_and_wait(self, record, args, shift): if args: 
vm.add_args(*args) self.vm_add_disk(vm, self.boot_path, 0, self.hdd) - self.vm_add_disk(vm, self.cloudinit_path, 1, self.cd) + self.vm_add_cdrom(vm, self.cloudinit_path, 1, self.cd) logger = logging.getLogger('replay') if record: logger.info('recording the execution...') @@ -94,7 +97,7 @@ def launch_and_wait(self, record, args, shift): else: vm.event_wait('SHUTDOWN', self.timeout) vm.wait() - logger.info('successfully fihished the replay') + logger.info('successfully finished the replay') elapsed = time.time() - start_time logger.info('elapsed time %.2f sec' % elapsed) return elapsed diff --git a/tests/avocado/reverse_debugging.py b/tests/avocado/reverse_debugging.py index 4cce5a55982..92855a02a54 100644 --- a/tests/avocado/reverse_debugging.py +++ b/tests/avocado/reverse_debugging.py @@ -191,7 +191,7 @@ def reverse_debugging(self, shift=7, args=None): self.check_pc(g, steps[-1]) logger.info('successfully reached %x' % steps[-1]) - logger.info('exitting gdb and qemu') + logger.info('exiting gdb and qemu') vm.shutdown() class ReverseDebugging_X86_64(ReverseDebugging): diff --git a/tests/bench/meson.build b/tests/bench/meson.build index 3c799dbd983..7e76338a52d 100644 --- a/tests/bench/meson.build +++ b/tests/bench/meson.build @@ -3,9 +3,9 @@ qht_bench = executable('qht-bench', sources: 'qht-bench.c', dependencies: [qemuutil]) -qtree_bench = executable('qtree-bench', - sources: 'qtree-bench.c', - dependencies: [qemuutil]) +executable('qtree-bench', + sources: 'qtree-bench.c', + dependencies: [qemuutil]) executable('atomic_add-bench', sources: files('atomic_add-bench.c'), diff --git a/tests/data/acpi/q35/IVRS.ivrs b/tests/data/acpi/q35/IVRS.ivrs index 17611202e53..7f9e91aabc0 100644 Binary files a/tests/data/acpi/q35/IVRS.ivrs and b/tests/data/acpi/q35/IVRS.ivrs differ diff --git a/tests/data/acpi/q35/SSDT.dimmpxm b/tests/data/acpi/q35/SSDT.dimmpxm index 70f133412f5..9ea4e0d0cea 100644 Binary files a/tests/data/acpi/q35/SSDT.dimmpxm and b/tests/data/acpi/q35/SSDT.dimmpxm differ diff --git a/tests/data/acpi/virt/FACP b/tests/data/acpi/virt/FACP index ac05c35a694..da0c3644cc4 100644 Binary files a/tests/data/acpi/virt/FACP and b/tests/data/acpi/virt/FACP differ diff --git a/tests/data/acpi/virt/GTDT b/tests/data/acpi/virt/GTDT index 6f8cb9b8f30..7f330e04d14 100644 Binary files a/tests/data/acpi/virt/GTDT and b/tests/data/acpi/virt/GTDT differ diff --git a/tests/data/smbios/type11_blob b/tests/data/smbios/type11_blob new file mode 100644 index 00000000000..1d8fea4b0c6 Binary files /dev/null and b/tests/data/smbios/type11_blob differ diff --git a/tests/data/smbios/type11_blob.legacy b/tests/data/smbios/type11_blob.legacy new file mode 100644 index 00000000000..aef463aab90 Binary files /dev/null and b/tests/data/smbios/type11_blob.legacy differ diff --git a/tests/docker/dockerfiles/debian-hexagon-cross.docker b/tests/docker/dockerfiles/debian-hexagon-cross.docker index 7c38d7c9e46..60bd8faa20c 100644 --- a/tests/docker/dockerfiles/debian-hexagon-cross.docker +++ b/tests/docker/dockerfiles/debian-hexagon-cross.docker @@ -38,9 +38,9 @@ RUN apt-get update && \ RUN /usr/bin/pip3 install tomli ENV TOOLCHAIN_INSTALL /opt -ENV TOOLCHAIN_RELEASE 16.0.0 +ENV TOOLCHAIN_RELEASE 12.Dec.2023 ENV TOOLCHAIN_BASENAME "clang+llvm-${TOOLCHAIN_RELEASE}-cross-hexagon-unknown-linux-musl" -ENV TOOLCHAIN_URL https://codelinaro.jfrog.io/artifactory/codelinaro-toolchain-for-hexagon/v${TOOLCHAIN_RELEASE}/${TOOLCHAIN_BASENAME}.tar.xz +ENV TOOLCHAIN_URL 
https://codelinaro.jfrog.io/artifactory/codelinaro-toolchain-for-hexagon/${TOOLCHAIN_RELEASE}/${TOOLCHAIN_BASENAME}.tar.xz ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" RUN curl -#SL "$TOOLCHAIN_URL" | tar -xJC "$TOOLCHAIN_INSTALL" diff --git a/tests/docker/dockerfiles/fedora-win32-cross.docker b/tests/docker/dockerfiles/fedora-win32-cross.docker deleted file mode 100644 index 08799219f98..00000000000 --- a/tests/docker/dockerfiles/fedora-win32-cross.docker +++ /dev/null @@ -1,111 +0,0 @@ -# THIS FILE WAS AUTO-GENERATED -# -# $ lcitool dockerfile --layers all --cross-arch mingw32 fedora-38 qemu -# -# https://gitlab.com/libvirt/libvirt-ci - -FROM registry.fedoraproject.org/fedora:38 - -RUN dnf install -y nosync && \ - printf '#!/bin/sh\n\ -if test -d /usr/lib64\n\ -then\n\ - export LD_PRELOAD=/usr/lib64/nosync/nosync.so\n\ -else\n\ - export LD_PRELOAD=/usr/lib/nosync/nosync.so\n\ -fi\n\ -exec "$@"\n' > /usr/bin/nosync && \ - chmod +x /usr/bin/nosync && \ - nosync dnf update -y && \ - nosync dnf install -y \ - bash \ - bc \ - bison \ - bzip2 \ - ca-certificates \ - ccache \ - ctags \ - dbus-daemon \ - diffutils \ - findutils \ - flex \ - gcc \ - gcovr \ - git \ - glib2-devel \ - glibc-langpack-en \ - hostname \ - llvm \ - make \ - meson \ - mtools \ - ninja-build \ - nmap-ncat \ - openssh-clients \ - pcre-static \ - python3 \ - python3-PyYAML \ - python3-numpy \ - python3-opencv \ - python3-pillow \ - python3-pip \ - python3-sphinx \ - python3-sphinx_rtd_theme \ - sed \ - socat \ - sparse \ - spice-protocol \ - swtpm \ - tar \ - tesseract \ - tesseract-langpack-eng \ - util-linux \ - which \ - xorriso \ - zstd && \ - nosync dnf autoremove -y && \ - nosync dnf clean all -y - -ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" -ENV LANG "en_US.UTF-8" -ENV MAKE "/usr/bin/make" -ENV NINJA "/usr/bin/ninja" -ENV PYTHON "/usr/bin/python3" - -RUN nosync dnf install -y \ - mingw32-SDL2 \ - mingw32-SDL2_image \ - mingw32-bzip2 \ - mingw32-curl \ - mingw32-gcc \ - mingw32-gcc-c++ \ - mingw32-gettext \ - mingw32-glib2 \ - mingw32-gnutls \ - mingw32-gtk3 \ - mingw32-libepoxy \ - mingw32-libgcrypt \ - mingw32-libjpeg-turbo \ - mingw32-libpng \ - mingw32-libtasn1 \ - mingw32-nettle \ - mingw32-nsis \ - mingw32-pixman \ - mingw32-pkg-config && \ - nosync dnf clean all -y && \ - rpm -qa | sort > /packages.txt && \ - mkdir -p /usr/libexec/ccache-wrappers && \ - ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/i686-w64-mingw32-c++ && \ - ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/i686-w64-mingw32-cc && \ - ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/i686-w64-mingw32-g++ && \ - ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/i686-w64-mingw32-gcc - -ENV ABI "i686-w64-mingw32" -ENV MESON_OPTS "--cross-file=/usr/share/mingw/toolchain-mingw32.meson" -ENV QEMU_CONFIGURE_OPTS --cross-prefix=i686-w64-mingw32- -ENV DEF_TARGET_LIST i386-softmmu -# As a final step configure the user (if env is defined) -ARG USER -ARG UID -RUN if [ "${USER}" ]; then \ - id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/fp/meson.build b/tests/fp/meson.build index cbc17392d67..114b4b483ea 100644 --- a/tests/fp/meson.build +++ b/tests/fp/meson.build @@ -1,9 +1,9 @@ -if 'CONFIG_TCG' not in config_all +if 'CONFIG_TCG' not in config_all_accel subdir_done() endif # There are namespace pollution issues on Windows, due to osdep.h # bringing in Windows headers that define a FLOAT128 type. 
-if targetos == 'windows' +if host_os == 'windows' subdir_done() endif @@ -124,7 +124,7 @@ test('fp-test-mulAdd', fptest, # no fptest_rounding_args args: fptest_args + ['f16_mulAdd', 'f32_mulAdd', 'f64_mulAdd', 'f128_mulAdd'], - suite: ['softfloat-slow', 'softfloat-ops-slow', 'slow'], timeout: 90) + suite: ['softfloat-slow', 'softfloat-ops-slow', 'slow'], timeout: 180) executable( 'fp-bench', diff --git a/tests/guest-debug/run-test.py b/tests/guest-debug/run-test.py index b13b27d4b19..368ff8a8903 100755 --- a/tests/guest-debug/run-test.py +++ b/tests/guest-debug/run-test.py @@ -97,7 +97,12 @@ def log(output, msg): sleep(1) log(output, "GDB CMD: %s" % (gdb_cmd)) - result = subprocess.call(gdb_cmd, shell=True, stdout=output, stderr=stderr) + gdb_env = dict(os.environ) + gdb_pythonpath = gdb_env.get("PYTHONPATH", "").split(os.pathsep) + gdb_pythonpath.append(os.path.dirname(os.path.realpath(__file__))) + gdb_env["PYTHONPATH"] = os.pathsep.join(gdb_pythonpath) + result = subprocess.call(gdb_cmd, shell=True, stdout=output, stderr=stderr, + env=gdb_env) # A result of greater than 128 indicates a fatal signal (likely a # crash due to gdb internal failure). That's a problem for GDB and diff --git a/tests/guest-debug/test_gdbstub.py b/tests/guest-debug/test_gdbstub.py new file mode 100644 index 00000000000..7f71d34da16 --- /dev/null +++ b/tests/guest-debug/test_gdbstub.py @@ -0,0 +1,60 @@ +"""Helper functions for gdbstub testing + +""" +from __future__ import print_function +import gdb +import os +import sys +import traceback + +fail_count = 0 + + +def report(cond, msg): + """Report success/fail of a test""" + if cond: + print("PASS: {}".format(msg)) + else: + print("FAIL: {}".format(msg)) + global fail_count + fail_count += 1 + + +def main(test, expected_arch=None): + """Run a test function + + This runs as the script it sourced (via -x, via run-test.py).""" + try: + inferior = gdb.selected_inferior() + arch = inferior.architecture() + print("ATTACHED: {}".format(arch.name())) + if expected_arch is not None: + report(arch.name() == expected_arch, + "connected to {}".format(expected_arch)) + except (gdb.error, AttributeError): + print("SKIP: not connected") + exit(0) + + if gdb.parse_and_eval("$pc") == 0: + print("SKIP: PC not set") + exit(0) + + try: + test() + except: + print("GDB Exception:") + traceback.print_exc(file=sys.stdout) + global fail_count + fail_count += 1 + if "QEMU_TEST_INTERACTIVE" in os.environ: + import code + code.InteractiveConsole(locals=globals()).interact() + raise + + try: + gdb.execute("kill") + except gdb.error: + pass + + print("All tests complete: {} failures".format(fail_count)) + exit(fail_count) diff --git a/tests/lcitool/refresh b/tests/lcitool/refresh index 0c93557ad67..fe7692c5006 100755 --- a/tests/lcitool/refresh +++ b/tests/lcitool/refresh @@ -192,11 +192,6 @@ try: trailer=cross_build("s390x-linux-gnu-", "s390x-softmmu,s390x-linux-user")) - generate_dockerfile("fedora-win32-cross", "fedora-38", - cross="mingw32", - trailer=cross_build("i686-w64-mingw32-", - "i386-softmmu")) - generate_dockerfile("fedora-win64-cross", "fedora-38", cross="mingw64", trailer=cross_build("x86_64-w64-mingw32-", diff --git a/tests/meson.build b/tests/meson.build index 9996a293fbb..0a6f96f8f84 100644 --- a/tests/meson.build +++ b/tests/meson.build @@ -68,7 +68,7 @@ test_deps = { 'test-qht-par': qht_bench, } -if have_tools and have_vhost_user and targetos == 'linux' +if have_tools and have_vhost_user and host_os == 'linux' executable('vhost-user-bridge', sources: 
files('vhost-user-bridge.c'), dependencies: [qemuutil, vhost_user]) @@ -76,7 +76,7 @@ endif subdir('decode') -if 'CONFIG_TCG' in config_all +if 'CONFIG_TCG' in config_all_accel subdir('fp') endif diff --git a/tests/migration/i386/Makefile b/tests/migration/i386/Makefile index 5c0324134a7..37a72ae353b 100644 --- a/tests/migration/i386/Makefile +++ b/tests/migration/i386/Makefile @@ -4,9 +4,10 @@ .PHONY: all clean all: a-b-bootblock.h -a-b-bootblock.h: x86.bootsect +a-b-bootblock.h: x86.bootsect x86.o echo "$$__note" > header.tmp xxd -i $< | sed -e 's/.*int.*//' >> header.tmp + nm x86.o | awk '{print "#define SYM_"$$3" 0x"$$1}' >> header.tmp mv header.tmp $@ x86.bootsect: x86.boot @@ -16,7 +17,7 @@ x86.boot: x86.o $(CROSS_PREFIX)objcopy -O binary $< $@ x86.o: a-b-bootblock.S - $(CROSS_PREFIX)gcc -m32 -march=i486 -c $< -o $@ + $(CROSS_PREFIX)gcc -I.. -m32 -march=i486 -c $< -o $@ clean: @rm -rf *.boot *.o *.bootsect diff --git a/tests/migration/i386/a-b-bootblock.S b/tests/migration/i386/a-b-bootblock.S index 6bb9999d601..6f39eb60511 100644 --- a/tests/migration/i386/a-b-bootblock.S +++ b/tests/migration/i386/a-b-bootblock.S @@ -9,6 +9,23 @@ # # Author: dgilbert@redhat.com +#include "migration-test.h" + +#define ACPI_ENABLE 0xf1 +#define ACPI_PORT_SMI_CMD 0xb2 +#define ACPI_PM_BASE 0x600 +#define PM1A_CNT_OFFSET 4 + +#define ACPI_SCI_ENABLE 0x0001 +#define ACPI_SLEEP_TYPE 0x0400 +#define ACPI_SLEEP_ENABLE 0x2000 +#define SLEEP (ACPI_SCI_ENABLE + ACPI_SLEEP_TYPE + ACPI_SLEEP_ENABLE) + +#define LOW_ADDR X86_TEST_MEM_START +#define HIGH_ADDR X86_TEST_MEM_END + +/* Save the suspended status at an address that is not written in the loop. */ +#define suspended (X86_TEST_MEM_START + 4) .code16 .org 0x7c00 @@ -35,8 +52,8 @@ start: # at 0x7c00 ? mov %eax,%ds # Start from 1MB -.set TEST_MEM_START, (1024*1024) -.set TEST_MEM_END, (100*1024*1024) +.set TEST_MEM_START, X86_TEST_MEM_START +.set TEST_MEM_END, X86_TEST_MEM_END mov $65,%ax mov $0x3f8,%dx @@ -69,7 +86,30 @@ innerloop: mov $0x3f8,%dx outb %al,%dx - jmp mainloop + # should this test suspend? + mov (suspend_me),%eax + cmp $0,%eax + je mainloop + + # are we waking after suspend? do not suspend again. + mov $suspended,%eax + mov (%eax),%eax + cmp $1,%eax + je mainloop + + # enable acpi + mov $ACPI_ENABLE,%al + outb %al,$ACPI_PORT_SMI_CMD + + # suspend to ram + mov $suspended,%eax + movl $1,(%eax) + mov $SLEEP,%ax + mov $(ACPI_PM_BASE + PM1A_CNT_OFFSET),%dx + outw %ax,%dx + # not reached. The wakeup causes reset and restart at 0x7c00, and we + # do not save and restore registers as a real kernel would do. + # GDT magic from old (GPLv2) Grub startup.S .p2align 2 /* force 4-byte alignment */ @@ -95,6 +135,10 @@ gdtdesc: .word 0x27 /* limit */ .long gdt /* addr */ + /* test launcher can poke a 1 here to exercise suspend */ +suspend_me: + .int 0 + /* I'm a bootable disk */ .org 0x7dfe .byte 0x55 diff --git a/tests/migration/i386/a-b-bootblock.h b/tests/migration/i386/a-b-bootblock.h index 5b523917cef..c83f8711dbf 100644 --- a/tests/migration/i386/a-b-bootblock.h +++ b/tests/migration/i386/a-b-bootblock.h @@ -4,7 +4,7 @@ * the header and the assembler differences in your patch submission. 
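 *
 * Illustrative sketch, not generated output: the SYM_ defines appended below
 * come from the new `nm | awk` step in tests/migration/i386/Makefile, so a
 * qtest-based launcher can poke guest memory by symbol rather than by magic
 * offset, e.g. (assuming the usual libqtest helpers, names illustrative):
 *
 *   qtest_writel(qts, SYM_suspend_me, 1);   // ask the guest loop to suspend
 *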
*/ unsigned char x86_bootsect[] = { - 0xfa, 0x0f, 0x01, 0x16, 0x8c, 0x7c, 0x66, 0xb8, 0x01, 0x00, 0x00, 0x00, + 0xfa, 0x0f, 0x01, 0x16, 0xb8, 0x7c, 0x66, 0xb8, 0x01, 0x00, 0x00, 0x00, 0x0f, 0x22, 0xc0, 0x66, 0xea, 0x20, 0x7c, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0x92, 0x0c, 0x02, 0xe6, 0x92, 0xb8, 0x10, 0x00, 0x00, 0x00, 0x8e, 0xd8, 0x66, 0xb8, 0x41, @@ -13,13 +13,13 @@ unsigned char x86_bootsect[] = { 0x40, 0x06, 0x7c, 0xf1, 0xb8, 0x00, 0x00, 0x10, 0x00, 0xfe, 0x00, 0x05, 0x00, 0x10, 0x00, 0x00, 0x3d, 0x00, 0x00, 0x40, 0x06, 0x7c, 0xf2, 0xfe, 0xc3, 0x80, 0xe3, 0x3f, 0x75, 0xe6, 0x66, 0xb8, 0x42, 0x00, 0x66, 0xba, - 0xf8, 0x03, 0xee, 0xeb, 0xdb, 0x8d, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x9a, 0xcf, 0x00, - 0xff, 0xff, 0x00, 0x00, 0x00, 0x92, 0xcf, 0x00, 0x27, 0x00, 0x74, 0x7c, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf8, 0x03, 0xee, 0xa1, 0xbe, 0x7c, 0x00, 0x00, 0x83, 0xf8, 0x00, 0x74, + 0xd3, 0xb8, 0x04, 0x00, 0x10, 0x00, 0x8b, 0x00, 0x83, 0xf8, 0x01, 0x74, + 0xc7, 0xb0, 0xf1, 0xe6, 0xb2, 0xb8, 0x04, 0x00, 0x10, 0x00, 0xc7, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x66, 0xb8, 0x01, 0x24, 0x66, 0xba, 0x04, 0x06, + 0x66, 0xef, 0x66, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x00, 0x00, 0x00, 0x9a, 0xcf, 0x00, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x92, 0xcf, 0x00, 0x27, 0x00, 0xa0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -49,3 +49,13 @@ unsigned char x86_bootsect[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0xaa }; +#define SYM_do_zero 0x00007c3d +#define SYM_gdt 0x00007ca0 +#define SYM_gdtdesc 0x00007cb8 +#define SYM_innerloop 0x00007c51 +#define SYM_mainloop 0x00007c4c +#define SYM_pre_zero 0x00007c38 +#define SYM_start 0x00007c00 +#define SYM_suspend_me 0x00007cbe +#define SYM_TEST_MEM_END 0x06400000 +#define SYM_TEST_MEM_START 0x00100000 diff --git a/tests/plugin/bb.c b/tests/plugin/bb.c index df50d1fd3bc..36776dee1e1 100644 --- a/tests/plugin/bb.c +++ b/tests/plugin/bb.c @@ -17,27 +17,25 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; typedef struct { - GMutex lock; - int index; uint64_t bb_count; uint64_t insn_count; } CPUCount; -/* Used by the inline & linux-user counts */ -static bool do_inline; -static CPUCount inline_count; +static struct qemu_plugin_scoreboard *counts; +static qemu_plugin_u64 bb_count; +static qemu_plugin_u64 insn_count; +static bool do_inline; /* Dump running CPU total on idle? 
*/ static bool idle_report; -static GPtrArray *counts; -static int max_cpus; -static void gen_one_cpu_report(CPUCount *count, GString *report) +static void gen_one_cpu_report(CPUCount *count, GString *report, + unsigned int cpu_index) { if (count->bb_count) { g_string_append_printf(report, "CPU%d: " "bb's: %" PRIu64", insns: %" PRIu64 "\n", - count->index, + cpu_index, count->bb_count, count->insn_count); } } @@ -46,20 +44,23 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) { g_autoptr(GString) report = g_string_new(""); - if (do_inline || !max_cpus) { - g_string_printf(report, "bb's: %" PRIu64", insns: %" PRIu64 "\n", - inline_count.bb_count, inline_count.insn_count); - } else { - g_ptr_array_foreach(counts, (GFunc) gen_one_cpu_report, report); + for (int i = 0; i < qemu_plugin_num_vcpus(); ++i) { + CPUCount *count = qemu_plugin_scoreboard_find(counts, i); + gen_one_cpu_report(count, report, i); } + g_string_append_printf(report, "Total: " + "bb's: %" PRIu64", insns: %" PRIu64 "\n", + qemu_plugin_u64_sum(bb_count), + qemu_plugin_u64_sum(insn_count)); qemu_plugin_outs(report->str); + qemu_plugin_scoreboard_free(counts); } static void vcpu_idle(qemu_plugin_id_t id, unsigned int cpu_index) { - CPUCount *count = g_ptr_array_index(counts, cpu_index); + CPUCount *count = qemu_plugin_scoreboard_find(counts, cpu_index); g_autoptr(GString) report = g_string_new(""); - gen_one_cpu_report(count, report); + gen_one_cpu_report(count, report, cpu_index); if (report->len > 0) { g_string_prepend(report, "Idling "); @@ -69,14 +70,11 @@ static void vcpu_idle(qemu_plugin_id_t id, unsigned int cpu_index) static void vcpu_tb_exec(unsigned int cpu_index, void *udata) { - CPUCount *count = max_cpus ? - g_ptr_array_index(counts, cpu_index) : &inline_count; + CPUCount *count = qemu_plugin_scoreboard_find(counts, cpu_index); uintptr_t n_insns = (uintptr_t)udata; - g_mutex_lock(&count->lock); count->insn_count += n_insns; count->bb_count++; - g_mutex_unlock(&count->lock); } static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) @@ -84,11 +82,10 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) size_t n_insns = qemu_plugin_tb_n_insns(tb); if (do_inline) { - qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64, - &inline_count.bb_count, 1); - qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64, - &inline_count.insn_count, - n_insns); + qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + tb, QEMU_PLUGIN_INLINE_ADD_U64, bb_count, 1); + qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + tb, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, n_insns); } else { qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec, QEMU_PLUGIN_CB_NO_REGS, @@ -121,18 +118,10 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, } } - if (info->system_emulation && !do_inline) { - max_cpus = info->system.max_vcpus; - counts = g_ptr_array_new(); - for (i = 0; i < max_cpus; i++) { - CPUCount *count = g_new0(CPUCount, 1); - g_mutex_init(&count->lock); - count->index = i; - g_ptr_array_add(counts, count); - } - } else if (!do_inline) { - g_mutex_init(&inline_count.lock); - } + counts = qemu_plugin_scoreboard_new(sizeof(CPUCount)); + bb_count = qemu_plugin_scoreboard_u64_in_struct(counts, CPUCount, bb_count); + insn_count = qemu_plugin_scoreboard_u64_in_struct( + counts, CPUCount, insn_count); if (idle_report) { qemu_plugin_register_vcpu_idle_cb(id, vcpu_idle); diff --git a/tests/plugin/inline.c b/tests/plugin/inline.c new file mode 100644 index 
00000000000..0163e9b51c5 --- /dev/null +++ b/tests/plugin/inline.c @@ -0,0 +1,186 @@ +/* + * Copyright (C) 2023, Pierrick Bouvier + * + * Demonstrates and tests usage of inline ops. + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include <glib.h> +#include <stdint.h> +#include <stdio.h> + +#include <qemu-plugin.h> + +typedef struct { + uint64_t count_tb; + uint64_t count_tb_inline; + uint64_t count_insn; + uint64_t count_insn_inline; + uint64_t count_mem; + uint64_t count_mem_inline; +} CPUCount; + +static struct qemu_plugin_scoreboard *counts; +static qemu_plugin_u64 count_tb; +static qemu_plugin_u64 count_tb_inline; +static qemu_plugin_u64 count_insn; +static qemu_plugin_u64 count_insn_inline; +static qemu_plugin_u64 count_mem; +static qemu_plugin_u64 count_mem_inline; + +static uint64_t global_count_tb; +static uint64_t global_count_insn; +static uint64_t global_count_mem; +static unsigned int max_cpu_index; +static GMutex tb_lock; +static GMutex insn_lock; +static GMutex mem_lock; + +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; + +static void stats_insn(void) +{ + const uint64_t expected = global_count_insn; + const uint64_t per_vcpu = qemu_plugin_u64_sum(count_insn); + const uint64_t inl_per_vcpu = + qemu_plugin_u64_sum(count_insn_inline); + printf("insn: %" PRIu64 "\n", expected); + printf("insn: %" PRIu64 " (per vcpu)\n", per_vcpu); + printf("insn: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu); + g_assert(expected > 0); + g_assert(per_vcpu == expected); + g_assert(inl_per_vcpu == expected); +} + +static void stats_tb(void) +{ + const uint64_t expected = global_count_tb; + const uint64_t per_vcpu = qemu_plugin_u64_sum(count_tb); + const uint64_t inl_per_vcpu = + qemu_plugin_u64_sum(count_tb_inline); + printf("tb: %" PRIu64 "\n", expected); + printf("tb: %" PRIu64 " (per vcpu)\n", per_vcpu); + printf("tb: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu); + g_assert(expected > 0); + g_assert(per_vcpu == expected); + g_assert(inl_per_vcpu == expected); +} + +static void stats_mem(void) +{ + const uint64_t expected = global_count_mem; + const uint64_t per_vcpu = qemu_plugin_u64_sum(count_mem); + const uint64_t inl_per_vcpu = + qemu_plugin_u64_sum(count_mem_inline); + printf("mem: %" PRIu64 "\n", expected); + printf("mem: %" PRIu64 " (per vcpu)\n", per_vcpu); + printf("mem: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu); + g_assert(expected > 0); + g_assert(per_vcpu == expected); + g_assert(inl_per_vcpu == expected); +} + +static void plugin_exit(qemu_plugin_id_t id, void *udata) +{ + const unsigned int num_cpus = qemu_plugin_num_vcpus(); + g_assert(num_cpus == max_cpu_index + 1); + + for (int i = 0; i < num_cpus ; ++i) { + const uint64_t tb = qemu_plugin_u64_get(count_tb, i); + const uint64_t tb_inline = qemu_plugin_u64_get(count_tb_inline, i); + const uint64_t insn = qemu_plugin_u64_get(count_insn, i); + const uint64_t insn_inline = qemu_plugin_u64_get(count_insn_inline, i); + const uint64_t mem = qemu_plugin_u64_get(count_mem, i); + const uint64_t mem_inline = qemu_plugin_u64_get(count_mem_inline, i); + printf("cpu %d: tb (%" PRIu64 ", %" PRIu64 ") | " + "insn (%" PRIu64 ", %" PRIu64 ") | " + "mem (%" PRIu64 ", %" PRIu64 ")" + "\n", + i, tb, tb_inline, insn, insn_inline, mem, mem_inline); + g_assert(tb == tb_inline); + g_assert(insn == insn_inline); + g_assert(mem == mem_inline); + } + + stats_tb(); + stats_insn(); + stats_mem(); + + qemu_plugin_scoreboard_free(counts); +} + +static void vcpu_tb_exec(unsigned int cpu_index, void *udata) +{ + 
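+    /*
+     * Editor's note (descriptive comment, not part of the original patch):
+     * each exec callback below bumps a per-vCPU scoreboard counter and a
+     * plain global counter guarded by a mutex; plugin_exit() then checks
+     * that the per-vCPU sums match the global totals and that the inline
+     * counts match the callback counts.
+     */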
qemu_plugin_u64_add(count_tb, cpu_index, 1); + g_mutex_lock(&tb_lock); + max_cpu_index = MAX(max_cpu_index, cpu_index); + global_count_tb++; + g_mutex_unlock(&tb_lock); +} + +static void vcpu_insn_exec(unsigned int cpu_index, void *udata) +{ + qemu_plugin_u64_add(count_insn, cpu_index, 1); + g_mutex_lock(&insn_lock); + global_count_insn++; + g_mutex_unlock(&insn_lock); +} + +static void vcpu_mem_access(unsigned int cpu_index, + qemu_plugin_meminfo_t info, + uint64_t vaddr, + void *userdata) +{ + qemu_plugin_u64_add(count_mem, cpu_index, 1); + g_mutex_lock(&mem_lock); + global_count_mem++; + g_mutex_unlock(&mem_lock); +} + +static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) +{ + qemu_plugin_register_vcpu_tb_exec_cb( + tb, vcpu_tb_exec, QEMU_PLUGIN_CB_NO_REGS, 0); + qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + tb, QEMU_PLUGIN_INLINE_ADD_U64, count_tb_inline, 1); + + for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) { + struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx); + qemu_plugin_register_vcpu_insn_exec_cb( + insn, vcpu_insn_exec, QEMU_PLUGIN_CB_NO_REGS, 0); + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu( + insn, QEMU_PLUGIN_INLINE_ADD_U64, count_insn_inline, 1); + qemu_plugin_register_vcpu_mem_cb(insn, &vcpu_mem_access, + QEMU_PLUGIN_CB_NO_REGS, + QEMU_PLUGIN_MEM_RW, 0); + qemu_plugin_register_vcpu_mem_inline_per_vcpu( + insn, QEMU_PLUGIN_MEM_RW, + QEMU_PLUGIN_INLINE_ADD_U64, + count_mem_inline, 1); + } +} + +QEMU_PLUGIN_EXPORT +int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, + int argc, char **argv) +{ + counts = qemu_plugin_scoreboard_new(sizeof(CPUCount)); + count_tb = qemu_plugin_scoreboard_u64_in_struct( + counts, CPUCount, count_tb); + count_insn = qemu_plugin_scoreboard_u64_in_struct( + counts, CPUCount, count_insn); + count_mem = qemu_plugin_scoreboard_u64_in_struct( + counts, CPUCount, count_mem); + count_tb_inline = qemu_plugin_scoreboard_u64_in_struct( + counts, CPUCount, count_tb_inline); + count_insn_inline = qemu_plugin_scoreboard_u64_in_struct( + counts, CPUCount, count_insn_inline); + count_mem_inline = qemu_plugin_scoreboard_u64_in_struct( + counts, CPUCount, count_mem_inline); + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); + qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); + + return 0; +} diff --git a/tests/plugin/insn.c b/tests/plugin/insn.c index 5fd3017c2b3..5e0aa03223e 100644 --- a/tests/plugin/insn.c +++ b/tests/plugin/insn.c @@ -16,25 +16,21 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; -#define MAX_CPUS 8 /* lets not go nuts */ - -typedef struct { - uint64_t insn_count; -} InstructionCount; - -static InstructionCount counts[MAX_CPUS]; -static uint64_t inline_insn_count; +static qemu_plugin_u64 insn_count; static bool do_inline; static bool do_size; static GArray *sizes; +typedef struct { + uint64_t hits; + uint64_t last_hit; + uint64_t total_delta; +} MatchCount; + typedef struct { char *match_string; - uint64_t hits[MAX_CPUS]; - uint64_t last_hit[MAX_CPUS]; - uint64_t total_delta[MAX_CPUS]; - GPtrArray *history[MAX_CPUS]; + struct qemu_plugin_scoreboard *counts; /* MatchCount */ } Match; static GArray *matches; @@ -46,43 +42,61 @@ typedef struct { char *disas; } Instruction; -static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata) +/* + * Initialise a new vcpu with reading the register list + */ +static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index) { - unsigned int i = cpu_index % MAX_CPUS; - 
InstructionCount *c = &counts[i]; + g_autoptr(GArray) reg_list = qemu_plugin_get_registers(); + g_autoptr(GByteArray) reg_value = g_byte_array_new(); + + if (reg_list) { + for (int i = 0; i < reg_list->len; i++) { + qemu_plugin_reg_descriptor *rd = &g_array_index( + reg_list, qemu_plugin_reg_descriptor, i); + int count = qemu_plugin_read_register(rd->handle, reg_value); + g_assert(count > 0); + } + } +} + - c->insn_count++; +static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata) +{ + qemu_plugin_u64_add(insn_count, cpu_index, 1); } static void vcpu_insn_matched_exec_before(unsigned int cpu_index, void *udata) { - unsigned int i = cpu_index % MAX_CPUS; Instruction *insn = (Instruction *) udata; - Match *match = insn->match; + Match *insn_match = insn->match; + MatchCount *match = qemu_plugin_scoreboard_find(insn_match->counts, + cpu_index); + g_autoptr(GString) ts = g_string_new(""); insn->hits++; g_string_append_printf(ts, "0x%" PRIx64 ", '%s', %"PRId64 " hits", insn->vaddr, insn->disas, insn->hits); - uint64_t icount = counts[i].insn_count; - uint64_t delta = icount - match->last_hit[i]; + uint64_t icount = qemu_plugin_u64_get(insn_count, cpu_index); + uint64_t delta = icount - match->last_hit; - match->hits[i]++; - match->total_delta[i] += delta; + match->hits++; + match->total_delta += delta; g_string_append_printf(ts, - ", %"PRId64" match hits, " - "Δ+%"PRId64 " since last match," + " , cpu %u," + " %"PRId64" match hits," + " Δ+%"PRId64 " since last match," " %"PRId64 " avg insns/match\n", - match->hits[i], delta, - match->total_delta[i] / match->hits[i]); + cpu_index, + match->hits, delta, + match->total_delta / match->hits); - match->last_hit[i] = icount; + match->last_hit = icount; qemu_plugin_outs(ts->str); - - g_ptr_array_add(match->history[i], insn); } static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) @@ -94,8 +108,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); if (do_inline) { - qemu_plugin_register_vcpu_insn_exec_inline( - insn, QEMU_PLUGIN_INLINE_ADD_U64, &inline_insn_count, 1); + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu( + insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1); } else { uint64_t vaddr = qemu_plugin_insn_vaddr(insn); qemu_plugin_register_vcpu_insn_exec_cb( @@ -117,10 +131,9 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) * information about the instruction which we also need to * save if there is a hit. 
*/ - if (matches) { + if (matches->len) { char *insn_disas = qemu_plugin_insn_disas(insn); - int j; - for (j = 0; j < matches->len; j++) { + for (int j = 0; j < matches->len; j++) { Match *m = &g_array_index(matches, Match, j); if (g_str_has_prefix(insn_disas, m->match_string)) { Instruction *rec = g_new0(Instruction, 1); @@ -150,36 +163,33 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) "len %d bytes: %ld insns\n", i, *cnt); } } - } else if (do_inline) { - g_string_append_printf(out, "insns: %" PRIu64 "\n", inline_insn_count); } else { - uint64_t total_insns = 0; - for (i = 0; i < MAX_CPUS; i++) { - InstructionCount *c = &counts[i]; - if (c->insn_count) { - g_string_append_printf(out, "cpu %d insns: %" PRIu64 "\n", - i, c->insn_count); - total_insns += c->insn_count; - } + for (i = 0; i < qemu_plugin_num_vcpus(); i++) { + g_string_append_printf(out, "cpu %d insns: %" PRIu64 "\n", + i, qemu_plugin_u64_get(insn_count, i)); } g_string_append_printf(out, "total insns: %" PRIu64 "\n", - total_insns); + qemu_plugin_u64_sum(insn_count)); } qemu_plugin_outs(out->str); + + qemu_plugin_scoreboard_free(insn_count.score); + for (i = 0; i < matches->len; ++i) { + Match *m = &g_array_index(matches, Match, i); + g_free(m->match_string); + qemu_plugin_scoreboard_free(m->counts); + } + g_array_free(matches, TRUE); + g_array_free(sizes, TRUE); } /* Add a match to the array of matches */ static void parse_match(char *match) { - Match new_match = { .match_string = match }; - int i; - for (i = 0; i < MAX_CPUS; i++) { - new_match.history[i] = g_ptr_array_new(); - } - if (!matches) { - matches = g_array_new(false, true, sizeof(Match)); - } + Match new_match = { + .match_string = g_strdup(match), + .counts = qemu_plugin_scoreboard_new(sizeof(MatchCount)) }; g_array_append_val(matches, new_match); } @@ -187,6 +197,10 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, int argc, char **argv) { + matches = g_array_new(false, true, sizeof(Match)); + /* null terminated so 0 is not a special case */ + sizes = g_array_new(true, true, sizeof(unsigned long)); + for (int i = 0; i < argc; i++) { char *opt = argv[i]; g_auto(GStrv) tokens = g_strsplit(opt, "=", 2); @@ -208,10 +222,11 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, } } - if (do_size) { - sizes = g_array_new(true, true, sizeof(unsigned long)); - } + insn_count = qemu_plugin_scoreboard_u64( + qemu_plugin_scoreboard_new(sizeof(uint64_t))); + /* Register init, translation block and exit callbacks */ + qemu_plugin_register_vcpu_init_cb(id, vcpu_init); qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); return 0; diff --git a/tests/plugin/mem.c b/tests/plugin/mem.c index 44e91065ba7..b650dddcce1 100644 --- a/tests/plugin/mem.c +++ b/tests/plugin/mem.c @@ -16,9 +16,14 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; -static uint64_t inline_mem_count; -static uint64_t cb_mem_count; -static uint64_t io_count; +typedef struct { + uint64_t mem_count; + uint64_t io_count; +} CPUCount; + +static struct qemu_plugin_scoreboard *counts; +static qemu_plugin_u64 mem_count; +static qemu_plugin_u64 io_count; static bool do_inline, do_callback; static bool do_haddr; static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW; @@ -27,16 +32,16 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) { g_autoptr(GString) out = g_string_new(""); - if (do_inline) { - g_string_printf(out, "inline mem accesses: %" PRIu64 "\n", 
inline_mem_count); - } - if (do_callback) { - g_string_append_printf(out, "callback mem accesses: %" PRIu64 "\n", cb_mem_count); + if (do_inline || do_callback) { + g_string_printf(out, "mem accesses: %" PRIu64 "\n", + qemu_plugin_u64_sum(mem_count)); } if (do_haddr) { - g_string_append_printf(out, "io accesses: %" PRIu64 "\n", io_count); + g_string_append_printf(out, "io accesses: %" PRIu64 "\n", + qemu_plugin_u64_sum(io_count)); } qemu_plugin_outs(out->str); + qemu_plugin_scoreboard_free(counts); } static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, @@ -46,12 +51,12 @@ static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, struct qemu_plugin_hwaddr *hwaddr; hwaddr = qemu_plugin_get_hwaddr(meminfo, vaddr); if (qemu_plugin_hwaddr_is_io(hwaddr)) { - io_count++; + qemu_plugin_u64_add(io_count, cpu_index, 1); } else { - cb_mem_count++; + qemu_plugin_u64_add(mem_count, cpu_index, 1); } } else { - cb_mem_count++; + qemu_plugin_u64_add(mem_count, cpu_index, 1); } } @@ -64,9 +69,10 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); if (do_inline) { - qemu_plugin_register_vcpu_mem_inline(insn, rw, - QEMU_PLUGIN_INLINE_ADD_U64, - &inline_mem_count, 1); + qemu_plugin_register_vcpu_mem_inline_per_vcpu( + insn, rw, + QEMU_PLUGIN_INLINE_ADD_U64, + mem_count, 1); } if (do_callback) { qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem, @@ -117,6 +123,16 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, } } + if (do_inline && do_callback) { + fprintf(stderr, + "can't enable inline and callback counting at the same time\n"); + return -1; + } + + counts = qemu_plugin_scoreboard_new(sizeof(CPUCount)); + mem_count = qemu_plugin_scoreboard_u64_in_struct( + counts, CPUCount, mem_count); + io_count = qemu_plugin_scoreboard_u64_in_struct(counts, CPUCount, io_count); qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); return 0; diff --git a/tests/plugin/meson.build b/tests/plugin/meson.build index 28a929dbcc0..9eece5bab51 100644 --- a/tests/plugin/meson.build +++ b/tests/plugin/meson.build @@ -1,7 +1,7 @@ t = [] if get_option('plugins') - foreach i : ['bb', 'empty', 'insn', 'mem', 'syscall'] - if targetos == 'windows' + foreach i : ['bb', 'empty', 'inline', 'insn', 'mem', 'syscall'] + if host_os == 'windows' t += shared_module(i, files(i + '.c') + '../../contrib/plugins/win32_linker.c', include_directories: '../../include/qemu', link_depends: [win32_qemu_plugin_api_lib], diff --git a/tests/qapi-schema/doc-bad-alternate-member.err b/tests/qapi-schema/doc-bad-alternate-member.err index d7286bb57c4..1f6b7d2fe40 100644 --- a/tests/qapi-schema/doc-bad-alternate-member.err +++ b/tests/qapi-schema/doc-bad-alternate-member.err @@ -1 +1 @@ -doc-bad-alternate-member.json:3: documented members 'aa', 'bb' do not exist +doc-bad-alternate-member.json:7: documented members 'aa', 'bb' do not exist diff --git a/tests/qapi-schema/doc-bad-alternate-member.json b/tests/qapi-schema/doc-bad-alternate-member.json index fa4143da4c4..37593b6698c 100644 --- a/tests/qapi-schema/doc-bad-alternate-member.json +++ b/tests/qapi-schema/doc-bad-alternate-member.json @@ -2,6 +2,8 @@ ## # @AorB: +# @a: a +# @b: b # @aa: a # @bb: b ## diff --git a/tests/qapi-schema/doc-bad-boxed-command-arg.err b/tests/qapi-schema/doc-bad-boxed-command-arg.err index 7137af3ec92..d6793e7fd84 100644 --- a/tests/qapi-schema/doc-bad-boxed-command-arg.err +++ 
b/tests/qapi-schema/doc-bad-boxed-command-arg.err @@ -1 +1 @@ -doc-bad-boxed-command-arg.json:9: documented member 'a' does not exist +doc-bad-boxed-command-arg.json:11: documented member 'a' does not exist diff --git a/tests/qapi-schema/doc-bad-command-arg.err b/tests/qapi-schema/doc-bad-command-arg.err index 18ed076cef6..b76167ec60b 100644 --- a/tests/qapi-schema/doc-bad-command-arg.err +++ b/tests/qapi-schema/doc-bad-command-arg.err @@ -1 +1 @@ -doc-bad-command-arg.json:3: documented member 'b' does not exist +doc-bad-command-arg.json:6: documented member 'b' does not exist diff --git a/tests/qapi-schema/doc-bad-enum-member.err b/tests/qapi-schema/doc-bad-enum-member.err index 7efeb473635..0aa8d8e8e20 100644 --- a/tests/qapi-schema/doc-bad-enum-member.err +++ b/tests/qapi-schema/doc-bad-enum-member.err @@ -1 +1 @@ -doc-bad-enum-member.json:3: documented member 'a' does not exist +doc-bad-enum-member.json:5: documented member 'a' does not exist diff --git a/tests/qapi-schema/doc-bad-event-arg.err b/tests/qapi-schema/doc-bad-event-arg.err index d13cacf21f0..90527d5f827 100644 --- a/tests/qapi-schema/doc-bad-event-arg.err +++ b/tests/qapi-schema/doc-bad-event-arg.err @@ -1 +1 @@ -doc-bad-event-arg.json:3: documented member 'a' does not exist +doc-bad-event-arg.json:5: documented member 'a' does not exist diff --git a/tests/qapi-schema/doc-bad-feature.err b/tests/qapi-schema/doc-bad-feature.err index 49d1746c3d1..3166c6a305a 100644 --- a/tests/qapi-schema/doc-bad-feature.err +++ b/tests/qapi-schema/doc-bad-feature.err @@ -1 +1 @@ -doc-bad-feature.json:3: documented feature 'a' does not exist +doc-bad-feature.json:7: documented feature 'a' does not exist diff --git a/tests/qapi-schema/doc-bad-union-member.err b/tests/qapi-schema/doc-bad-union-member.err index 6dd2726a656..cdf1225cab3 100644 --- a/tests/qapi-schema/doc-bad-union-member.err +++ b/tests/qapi-schema/doc-bad-union-member.err @@ -1 +1 @@ -doc-bad-union-member.json:3: documented members 'a', 'b' do not exist +doc-bad-union-member.json:5: documented members 'a', 'b' do not exist diff --git a/tests/qapi-schema/doc-duplicate-features.err b/tests/qapi-schema/doc-duplicate-features.err new file mode 100644 index 00000000000..cadb2957a6d --- /dev/null +++ b/tests/qapi-schema/doc-duplicate-features.err @@ -0,0 +1 @@ +doc-duplicate-features.json:9:1: duplicated 'Features:' line diff --git a/tests/qapi-schema/doc-duplicate-features.json b/tests/qapi-schema/doc-duplicate-features.json new file mode 100644 index 00000000000..a4d559e7408 --- /dev/null +++ b/tests/qapi-schema/doc-duplicate-features.json @@ -0,0 +1,11 @@ +# Duplicate 'Features:' line + +## +# @foo: +# +# Features: +# @feat: mumble +# +# Features: +## +{ 'command': 'foo', 'features': ['feat'] } diff --git a/tests/qapi-schema/doc-duplicate-features.out b/tests/qapi-schema/doc-duplicate-features.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/qapi-schema/doc-duplicated-arg.err b/tests/qapi-schema/doc-duplicated-arg.err index 0d0d777a1f9..d876312734d 100644 --- a/tests/qapi-schema/doc-duplicated-arg.err +++ b/tests/qapi-schema/doc-duplicated-arg.err @@ -1 +1 @@ -doc-duplicated-arg.json:6:1: 'a' parameter name duplicated +doc-duplicated-arg.json:6: 'a' parameter name duplicated diff --git a/tests/qapi-schema/doc-duplicated-return.err b/tests/qapi-schema/doc-duplicated-return.err index fe97e3db8dc..503b916b256 100644 --- a/tests/qapi-schema/doc-duplicated-return.err +++ b/tests/qapi-schema/doc-duplicated-return.err @@ -1 +1 @@ 
-doc-duplicated-return.json:7:1: duplicated 'Returns' section +doc-duplicated-return.json:8: duplicated 'Returns' section diff --git a/tests/qapi-schema/doc-duplicated-return.json b/tests/qapi-schema/doc-duplicated-return.json index b44b5ae979b..4e1ec2ef42f 100644 --- a/tests/qapi-schema/doc-duplicated-return.json +++ b/tests/qapi-schema/doc-duplicated-return.json @@ -4,5 +4,6 @@ # @foo: # # Returns: 0 +# # Returns: 1 ## diff --git a/tests/qapi-schema/doc-duplicated-since.err b/tests/qapi-schema/doc-duplicated-since.err index abca141a2c6..a9b60c0c3de 100644 --- a/tests/qapi-schema/doc-duplicated-since.err +++ b/tests/qapi-schema/doc-duplicated-since.err @@ -1 +1 @@ -doc-duplicated-since.json:7:1: duplicated 'Since' section +doc-duplicated-since.json:8: duplicated 'Since' section diff --git a/tests/qapi-schema/doc-duplicated-since.json b/tests/qapi-schema/doc-duplicated-since.json index 343cd872cb5..2755f95719b 100644 --- a/tests/qapi-schema/doc-duplicated-since.json +++ b/tests/qapi-schema/doc-duplicated-since.json @@ -4,5 +4,6 @@ # @foo: # # Since: 0 +# # Since: 1 ## diff --git a/tests/qapi-schema/doc-empty-arg.err b/tests/qapi-schema/doc-empty-arg.err index 2d0f35f310f..83f4fc66d54 100644 --- a/tests/qapi-schema/doc-empty-arg.err +++ b/tests/qapi-schema/doc-empty-arg.err @@ -1 +1 @@ -doc-empty-arg.json:5:1: invalid parameter name +doc-empty-arg.json:5: invalid parameter name diff --git a/tests/qapi-schema/doc-empty-features.err b/tests/qapi-schema/doc-empty-features.err new file mode 100644 index 00000000000..2709a18d8f6 --- /dev/null +++ b/tests/qapi-schema/doc-empty-features.err @@ -0,0 +1 @@ +doc-empty-features.json:8:1: feature descriptions expected diff --git a/tests/qapi-schema/doc-empty-features.json b/tests/qapi-schema/doc-empty-features.json new file mode 100644 index 00000000000..06f814e45d7 --- /dev/null +++ b/tests/qapi-schema/doc-empty-features.json @@ -0,0 +1,10 @@ +# 'Features:' line not followed by feature descriptions + +## +# @foo: +# +# Features: +# +# not a description +## +{ 'command': 'foo' } diff --git a/tests/qapi-schema/doc-empty-features.out b/tests/qapi-schema/doc-empty-features.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/qapi-schema/doc-empty-section.err b/tests/qapi-schema/doc-empty-section.err index ba7ba701258..5f03a6d733f 100644 --- a/tests/qapi-schema/doc-empty-section.err +++ b/tests/qapi-schema/doc-empty-section.err @@ -1 +1 @@ -doc-empty-section.json:7:1: empty doc section 'Note' +doc-empty-section.json:6: text required after 'Note:' diff --git a/tests/qapi-schema/doc-good.json b/tests/qapi-schema/doc-good.json index 354dfdf4616..de38a386e8f 100644 --- a/tests/qapi-schema/doc-good.json +++ b/tests/qapi-schema/doc-good.json @@ -3,11 +3,15 @@ # # Positive QAPI doc comment tests -{ 'pragma': { 'doc-required': true } } +{ 'pragma': { + 'doc-required': true, + 'documentation-exceptions': [ 'Enum', 'Variant1', 'Alternate', 'cmd' ] } } ## # = Section -# +## + +## # == Subsection # # *with emphasis* @@ -73,8 +77,8 @@ # @Base: # # @base1: -# description starts on a new line, -# not indented +# description starts on a new line, +# minimally indented ## { 'struct': 'Base', 'data': { 'base1': 'Enum' }, 'if': { 'all': ['IFALL1', 'IFALL2'] } } @@ -150,22 +154,31 @@ # Features: # @cmd-feat1: a feature # @cmd-feat2: another feature +# # Note: @arg3 is undocumented +# # Returns: @Object +# +# Errors: some +# # TODO: frobnicate +# # Notes: # -# - Lorem ipsum dolor sit amet -# - Ut enim ad minim veniam +# - Lorem ipsum dolor sit amet +# - 
Ut enim ad minim veniam +# +# Duis aute irure dolor # -# Duis aute irure dolor # Example: # -# -> in -# <- out +# -> in +# <- out +# # Examples: -# - *verbatim* -# - {braces} +# - *verbatim* +# - {braces} +# # Since: 2.10 ## { 'command': 'cmd', @@ -176,14 +189,16 @@ ## # @cmd-boxed: # If you're bored enough to read this, go see a video of boxed cats +# # Features: # @cmd-feat1: a feature # @cmd-feat2: another feature +# # Example: # -# -> in +# -> in # -# <- out +# <- out ## { 'command': 'cmd-boxed', 'boxed': true, 'data': 'Object', diff --git a/tests/qapi-schema/doc-good.out b/tests/qapi-schema/doc-good.out index 24d9ea954db..716a9a41026 100644 --- a/tests/qapi-schema/doc-good.out +++ b/tests/qapi-schema/doc-good.out @@ -118,7 +118,7 @@ doc symbol=Base arg=base1 description starts on a new line, -not indented +minimally indented doc symbol=Variant1 body= A paragraph @@ -173,6 +173,8 @@ another feature @arg3 is undocumented section=Returns @Object + section=Errors +some section=TODO frobnicate section=Notes diff --git a/tests/qapi-schema/doc-good.txt b/tests/qapi-schema/doc-good.txt index b3b76bd43fd..847db70412d 100644 --- a/tests/qapi-schema/doc-good.txt +++ b/tests/qapi-schema/doc-good.txt @@ -44,7 +44,7 @@ Values ~~~~~~ "one" (**If: **"IFONE") - The _one_ {and only} + The _one_ {and only}, description on the same line "two" Not documented @@ -76,7 +76,7 @@ Members ~~~~~~~ "base1": "Enum" - the first member + description starts on a new line, minimally indented If @@ -90,7 +90,9 @@ If A paragraph -Another paragraph (but no "var": line) +Another paragraph + +"var1" is undocumented Members @@ -141,7 +143,8 @@ Members ~~~~~~~ "i": "int" - an integer "b" is undocumented + description starts on the same line remainder indented the same "b" + is undocumented "b": "boolean" Not documented @@ -172,10 +175,10 @@ Arguments ~~~~~~~~~ "arg1": "int" - the first argument + description starts on a new line, indented "arg2": "string" (optional) - the second argument + description starts on the same line remainder indented differently "arg3": "boolean" Not documented @@ -203,10 +206,10 @@ Returns "Object" -TODO -~~~~ +Errors +~~~~~~ -frobnicate +some Notes diff --git a/tests/qapi-schema/doc-invalid-return.err b/tests/qapi-schema/doc-invalid-return.err index 2ad89c59411..aafd57b135a 100644 --- a/tests/qapi-schema/doc-invalid-return.err +++ b/tests/qapi-schema/doc-invalid-return.err @@ -1 +1 @@ -doc-invalid-return.json:3: 'Returns:' is only valid for commands +doc-invalid-return.json:6: 'Returns' section is only valid for commands diff --git a/tests/qapi-schema/doc-invalid-return.json b/tests/qapi-schema/doc-invalid-return.json index 95e75839307..1aabef3482b 100644 --- a/tests/qapi-schema/doc-invalid-return.json +++ b/tests/qapi-schema/doc-invalid-return.json @@ -2,6 +2,7 @@ ## # @FOO: +# # Returns: blah ## { 'event': 'FOO' } diff --git a/tests/qapi-schema/doc-invalid-return2.err b/tests/qapi-schema/doc-invalid-return2.err new file mode 100644 index 00000000000..c3d0c7a4527 --- /dev/null +++ b/tests/qapi-schema/doc-invalid-return2.err @@ -0,0 +1 @@ +doc-invalid-return2.json:5: 'Returns' section, but command doesn't return anything diff --git a/tests/qapi-schema/doc-invalid-return2.json b/tests/qapi-schema/doc-invalid-return2.json new file mode 100644 index 00000000000..37883d4fea4 --- /dev/null +++ b/tests/qapi-schema/doc-invalid-return2.json @@ -0,0 +1,7 @@ +# Command doesn't return anything + +## +# @foo: +# Returns: blah +## +{ 'command': 'foo' } diff --git a/tests/qapi-schema/doc-invalid-return2.out 
b/tests/qapi-schema/doc-invalid-return2.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/qapi-schema/doc-non-first-section.err b/tests/qapi-schema/doc-non-first-section.err new file mode 100644 index 00000000000..eeced2bca71 --- /dev/null +++ b/tests/qapi-schema/doc-non-first-section.err @@ -0,0 +1 @@ +doc-non-first-section.json:5:1: '=' heading must come first in a comment block diff --git a/tests/qapi-schema/doc-non-first-section.json b/tests/qapi-schema/doc-non-first-section.json new file mode 100644 index 00000000000..1590876061d --- /dev/null +++ b/tests/qapi-schema/doc-non-first-section.json @@ -0,0 +1,6 @@ +# = section must be first line + +## +# +# = Not first +## diff --git a/tests/qapi-schema/doc-non-first-section.out b/tests/qapi-schema/doc-non-first-section.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/qapi-schema/meson.build b/tests/qapi-schema/meson.build index af085f745d6..0f479d93170 100644 --- a/tests/qapi-schema/meson.build +++ b/tests/qapi-schema/meson.build @@ -66,10 +66,12 @@ schemas = [ 'doc-bad-union-member.json', 'doc-before-include.json', 'doc-before-pragma.json', + 'doc-duplicate-features.json', 'doc-duplicated-arg.json', 'doc-duplicated-return.json', 'doc-duplicated-since.json', 'doc-empty-arg.json', + 'doc-empty-features.json', 'doc-empty-section.json', 'doc-empty-symbol.json', 'doc-good.json', @@ -77,6 +79,7 @@ schemas = [ 'doc-invalid-end.json', 'doc-invalid-end2.json', 'doc-invalid-return.json', + 'doc-invalid-return2.json', 'doc-invalid-section.json', 'doc-invalid-start.json', 'doc-missing-colon.json', @@ -273,15 +276,17 @@ if build_docs output: ['doc-good.txt.nocr'], input: qapi_doc_out[0], build_by_default: true, - command: [remove_cr, '@INPUT@'], - capture: true) + command: [remove_cr], + capture: true, + feed: true) qapi_doc_ref_nocr = custom_target('QAPI rST doc reference newline-sanitized', output: ['doc-good.ref.nocr'], input: files('doc-good.txt'), build_by_default: true, - command: [remove_cr, '@INPUT@'], - capture: true) + command: [remove_cr], + capture: true, + feed: true) test('QAPI rST doc', diff, args: ['-u', qapi_doc_ref_nocr[0], qapi_doc_out_nocr[0]], suite: ['qapi-schema', 'qapi-doc']) diff --git a/tests/qapi-schema/test-qapi.py b/tests/qapi-schema/test-qapi.py index 14f7b62a441..40095431aeb 100755 --- a/tests/qapi-schema/test-qapi.py +++ b/tests/qapi-schema/test-qapi.py @@ -130,7 +130,7 @@ def test_frontend(fname): for feat, section in doc.features.items(): print(' feature=%s\n%s' % (feat, section.text)) for section in doc.sections: - print(' section=%s\n%s' % (section.name, section.text)) + print(' section=%s\n%s' % (section.tag, section.text)) def open_test_result(dir_name, file_name, update): diff --git a/tests/qemu-iotests/033 b/tests/qemu-iotests/033 index da9133c44bc..4bc7a071bdf 100755 --- a/tests/qemu-iotests/033 +++ b/tests/qemu-iotests/033 @@ -123,9 +123,9 @@ do_test 512 "write -P 1 0 0x200" "$TEST_IMG" | _filter_qemu_io # next L2 table do_test 512 "write -P 1 $L2_COVERAGE 0x200" "$TEST_IMG" | _filter_qemu_io -# only interested in qcow2 here; also other formats might respond with -# "not supported" error message -if [ $IMGFMT = "qcow2" ]; then +# only interested in qcow2 with file protocol here; also other formats +# might respond with "not supported" error message +if [ $IMGFMT = "qcow2" ] && [ $IMGPROTO = "file" ]; then do_test 512 "truncate $L2_COVERAGE" "$TEST_IMG" | _filter_qemu_io fi diff --git a/tests/qemu-iotests/066 b/tests/qemu-iotests/066 index 
cf63144cb94..336d8565ddf 100755 --- a/tests/qemu-iotests/066 +++ b/tests/qemu-iotests/066 @@ -39,7 +39,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 # This tests qcow2-specific low-level functionality _supported_fmt qcow2 -_supported_proto generic +_supported_proto file # We need zero clusters and snapshots # (TODO: Consider splitting the snapshot part into a separate test # file, so this one runs with refcount_bits=1 and data_file) diff --git a/tests/qemu-iotests/114 b/tests/qemu-iotests/114 index de6fd327eea..dccc71008b5 100755 --- a/tests/qemu-iotests/114 +++ b/tests/qemu-iotests/114 @@ -38,7 +38,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 . ./common.filter _supported_fmt qcow2 -_supported_proto generic +_supported_proto file # At least OpenBSD doesn't seem to have truncate _supported_os Linux # qcow2.py does not work too well with external data files diff --git a/tests/qemu-iotests/130 b/tests/qemu-iotests/130 index 7257f096774..7af85d20a89 100755 --- a/tests/qemu-iotests/130 +++ b/tests/qemu-iotests/130 @@ -42,7 +42,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 . ./common.qemu _supported_fmt qcow2 -_supported_proto generic +_supported_proto file _supported_os Linux # We are going to use lazy-refcounts _unsupported_imgopts 'compat=0.10' diff --git a/tests/qemu-iotests/134 b/tests/qemu-iotests/134 index ded153c0b9f..b2c3c03f08b 100755 --- a/tests/qemu-iotests/134 +++ b/tests/qemu-iotests/134 @@ -38,7 +38,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 . ./common.filter _supported_fmt qcow qcow2 -_supported_proto generic +_supported_proto file size=128M diff --git a/tests/qemu-iotests/156 b/tests/qemu-iotests/156 index a9540bd80d2..97c2d86ce57 100755 --- a/tests/qemu-iotests/156 +++ b/tests/qemu-iotests/156 @@ -50,7 +50,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 . ./common.qemu _supported_fmt qcow2 qed -_supported_proto generic +_supported_proto file # Copying files around with cp does not work with external data files _unsupported_imgopts data_file diff --git a/tests/qemu-iotests/157 b/tests/qemu-iotests/157 index 0dc9ba68d20..aa2ebbfb4b4 100755 --- a/tests/qemu-iotests/157 +++ b/tests/qemu-iotests/157 @@ -40,6 +40,8 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 _supported_fmt generic _supported_proto file +_require_devices virtio-blk + do_run_qemu() { ( diff --git a/tests/qemu-iotests/158 b/tests/qemu-iotests/158 index a95878e4cee..3a9ad7eed03 100755 --- a/tests/qemu-iotests/158 +++ b/tests/qemu-iotests/158 @@ -38,7 +38,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 . 
./common.filter _supported_fmt qcow qcow2 -_supported_proto generic +_supported_proto file size=128M diff --git a/tests/qemu-iotests/176.out b/tests/qemu-iotests/176.out index 45e9153ef39..9c73ef2eea7 100644 --- a/tests/qemu-iotests/176.out +++ b/tests/qemu-iotests/176.out @@ -37,8 +37,8 @@ Offset Length File 0x7ffe0000 0x20000 TEST_DIR/t.IMGFMT.itmd 0x83400000 0x200 TEST_DIR/t.IMGFMT.itmd Snapshot list: -ID TAG -1 snap +ID TAG +1 snap === Test pass snapshot.1 === @@ -78,8 +78,8 @@ Offset Length File 0x7fff0000 0x10000 TEST_DIR/t.IMGFMT 0x83400000 0x200 TEST_DIR/t.IMGFMT Snapshot list: -ID TAG -1 snap +ID TAG +1 snap === Test pass snapshot.2 === @@ -119,8 +119,8 @@ Offset Length File 0x7fff0000 0x10000 TEST_DIR/t.IMGFMT 0x83400000 0x200 TEST_DIR/t.IMGFMT Snapshot list: -ID TAG -1 snap +ID TAG +1 snap === Test pass snapshot.3 === @@ -157,8 +157,8 @@ Offset Length File 0x7fff0000 0x10000 TEST_DIR/t.IMGFMT 0x83400000 0x200 TEST_DIR/t.IMGFMT Snapshot list: -ID TAG -1 snap +ID TAG +1 snap === Test pass bitmap.0 === diff --git a/tests/qemu-iotests/183.out b/tests/qemu-iotests/183.out index 51aa41c8885..8aef74a25d6 100644 --- a/tests/qemu-iotests/183.out +++ b/tests/qemu-iotests/183.out @@ -30,13 +30,13 @@ read 65536/65536 bytes at offset 0 'arguments': { 'uri': 'unix:SOCK_DIR/migrate', 'blk': true } } {"return": {}} { 'execute': 'query-status' } -{"return": {"status": "postmigrate", "singlestep": false, "running": false}} +{"return": {"status": "postmigrate", "running": false}} === Do some I/O on the destination === { 'execute': 'query-status' } {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "RESUME"} -{"return": {"status": "running", "singlestep": false, "running": true}} +{"return": {"status": "running", "running": true}} { 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io disk "read -P 0x55 0 64k"' } } diff --git a/tests/qemu-iotests/188 b/tests/qemu-iotests/188 index ce087d18739..2950b1dc31f 100755 --- a/tests/qemu-iotests/188 +++ b/tests/qemu-iotests/188 @@ -38,7 +38,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 . ./common.filter _supported_fmt qcow2 -_supported_proto generic +_supported_proto file _supported_os Linux _require_working_luks diff --git a/tests/qemu-iotests/189 b/tests/qemu-iotests/189 index 801494c6b97..008f73b07d0 100755 --- a/tests/qemu-iotests/189 +++ b/tests/qemu-iotests/189 @@ -38,7 +38,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 . ./common.filter _supported_fmt qcow2 -_supported_proto generic +_supported_proto file _supported_os Linux _require_working_luks diff --git a/tests/qemu-iotests/198 b/tests/qemu-iotests/198 index 1c93dea1f73..6ddeffddd2c 100755 --- a/tests/qemu-iotests/198 +++ b/tests/qemu-iotests/198 @@ -38,7 +38,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 . 
./common.filter _supported_fmt qcow2 -_supported_proto generic +_supported_proto file _supported_os Linux _require_working_luks diff --git a/tests/qemu-iotests/198.out b/tests/qemu-iotests/198.out index 805494916f1..62fb73fa3ea 100644 --- a/tests/qemu-iotests/198.out +++ b/tests/qemu-iotests/198.out @@ -39,6 +39,7 @@ Format specific information: compression type: COMPRESSION_TYPE encrypt: ivgen alg: plain64 + detached header: false hash alg: sha256 cipher alg: aes-256 uuid: 00000000-0000-0000-0000-000000000000 @@ -84,6 +85,7 @@ Format specific information: compression type: COMPRESSION_TYPE encrypt: ivgen alg: plain64 + detached header: false hash alg: sha256 cipher alg: aes-256 uuid: 00000000-0000-0000-0000-000000000000 diff --git a/tests/qemu-iotests/202 b/tests/qemu-iotests/202 index b784dcd791a..13304242e5c 100755 --- a/tests/qemu-iotests/202 +++ b/tests/qemu-iotests/202 @@ -21,7 +21,7 @@ # Check that QMP 'transaction' blockdev-snapshot-sync with multiple drives on a # single IOThread completes successfully. This particular command triggered a # hang due to recursive AioContext locking and BDRV_POLL_WHILE(). Protect -# against regressions. +# against regressions even though the AioContext lock no longer exists. import iotests diff --git a/tests/qemu-iotests/203 b/tests/qemu-iotests/203 index ab80fd0e44a..1ba878522b0 100755 --- a/tests/qemu-iotests/203 +++ b/tests/qemu-iotests/203 @@ -21,7 +21,8 @@ # Check that QMP 'migrate' with multiple drives on a single IOThread completes # successfully. This particular command triggered a hang in the source QEMU # process due to recursive AioContext locking in bdrv_invalidate_all() and -# BDRV_POLL_WHILE(). +# BDRV_POLL_WHILE(). Protect against regressions even though the AioContext +# lock no longer exists. 
import iotests diff --git a/tests/qemu-iotests/206.out b/tests/qemu-iotests/206.out index 7e956947772..979f00f9bf2 100644 --- a/tests/qemu-iotests/206.out +++ b/tests/qemu-iotests/206.out @@ -114,6 +114,7 @@ Format specific information: refcount bits: 16 encrypt: ivgen alg: plain64 + detached header: false hash alg: sha1 cipher alg: aes-128 uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX diff --git a/tests/qemu-iotests/210.out b/tests/qemu-iotests/210.out index 96d9f749dda..94b29b2120a 100644 --- a/tests/qemu-iotests/210.out +++ b/tests/qemu-iotests/210.out @@ -18,6 +18,7 @@ virtual size: 128 MiB (134217728 bytes) encrypted: yes Format specific information: ivgen alg: plain64 + detached header: false hash alg: sha256 cipher alg: aes-256 uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX @@ -70,6 +71,7 @@ virtual size: 64 MiB (67108864 bytes) encrypted: yes Format specific information: ivgen alg: plain64 + detached header: false hash alg: sha1 cipher alg: aes-128 uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX @@ -125,6 +127,7 @@ virtual size: 0 B (0 bytes) encrypted: yes Format specific information: ivgen alg: plain64 + detached header: false hash alg: sha256 cipher alg: aes-256 uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX @@ -195,6 +198,7 @@ virtual size: 0 B (0 bytes) encrypted: yes Format specific information: ivgen alg: plain64 + detached header: false hash alg: sha256 cipher alg: aes-256 uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX diff --git a/tests/qemu-iotests/227 b/tests/qemu-iotests/227 index 7e45a47ac61..eddaad679e2 100755 --- a/tests/qemu-iotests/227 +++ b/tests/qemu-iotests/227 @@ -40,6 +40,8 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 _supported_fmt generic _supported_proto file +_require_devices virtio-blk + do_run_qemu() { echo Testing: "$@" diff --git a/tests/qemu-iotests/234.out b/tests/qemu-iotests/234.out index 692976d1c6a..ac8b64350c3 100644 --- a/tests/qemu-iotests/234.out +++ b/tests/qemu-iotests/234.out @@ -15,8 +15,8 @@ Starting migration to B... {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} completed completed -{"return": {"running": false, "singlestep": false, "status": "postmigrate"}} -{"return": {"running": true, "singlestep": false, "status": "running"}} +{"return": {"running": false, "status": "postmigrate"}} +{"return": {"running": true, "status": "running"}} Add a second parent to drive0-file... {"return": {}} Restart A with -incoming and second parent... @@ -32,5 +32,5 @@ Starting migration back to A... 
{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} completed completed -{"return": {"running": true, "singlestep": false, "status": "running"}} -{"return": {"running": false, "singlestep": false, "status": "postmigrate"}} +{"return": {"running": true, "status": "running"}} +{"return": {"running": false, "status": "postmigrate"}} diff --git a/tests/qemu-iotests/261 b/tests/qemu-iotests/261 index b73da565da4..22b969d3108 100755 --- a/tests/qemu-iotests/261 +++ b/tests/qemu-iotests/261 @@ -393,7 +393,7 @@ _check_test_img -r all echo echo "$((sn_count - 1)) snapshots should remain:" -echo " qemu-img info reports $(_img_info | grep -c '^ \{32\}') snapshots" +echo " qemu-img info reports $(_img_info | grep -c '^ \{30\}') snapshots" echo " Image header reports $(peek_file_be "$TEST_IMG" 60 4) snapshots" echo @@ -520,7 +520,7 @@ _check_test_img -r all echo echo '65536 snapshots should remain:' -echo " qemu-img info reports $(_img_info | grep -c '^ \{32\}') snapshots" +echo " qemu-img info reports $(_img_info | grep -c '^ \{30\}') snapshots" echo " Image header reports $(peek_file_be "$TEST_IMG" 60 4) snapshots" # success, all done diff --git a/tests/qemu-iotests/262.out b/tests/qemu-iotests/262.out index 8e04c496c40..b8a2d3598d8 100644 --- a/tests/qemu-iotests/262.out +++ b/tests/qemu-iotests/262.out @@ -13,5 +13,5 @@ Starting migration to B... {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} completed completed -{"return": {"running": false, "singlestep": false, "status": "postmigrate"}} -{"return": {"running": true, "singlestep": false, "status": "running"}} +{"return": {"running": false, "status": "postmigrate"}} +{"return": {"running": true, "status": "running"}} diff --git a/tests/qemu-iotests/263 b/tests/qemu-iotests/263 index ec09b41405a..44fdada0d6f 100755 --- a/tests/qemu-iotests/263 +++ b/tests/qemu-iotests/263 @@ -34,6 +34,8 @@ _cleanup() } trap "_cleanup; exit \$status" 0 1 2 3 15 +IMGOPTSSYNTAX=true + # get standard environment, filters and checks . ./common.rc . 
./common.filter @@ -73,7 +75,7 @@ echo "testing LUKS qcow2 encryption" echo _make_test_img --object $SECRET -o "encrypt.format=luks,encrypt.key-secret=sec0,encrypt.iter-time=10,cluster_size=64K" $size -_run_test "driver=$IMGFMT,encrypt.key-secret=sec0,file.filename=$TEST_IMG" +_run_test "$TEST_IMG,encrypt.key-secret=sec0" _cleanup_test_img echo @@ -82,7 +84,7 @@ echo _make_test_img --object $SECRET -o "encrypt.format=aes,encrypt.key-secret=sec0,cluster_size=64K" $size -_run_test "driver=$IMGFMT,encrypt.key-secret=sec0,file.filename=$TEST_IMG" +_run_test "$TEST_IMG,encrypt.key-secret=sec0" _cleanup_test_img diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264 index c532ccd8094..c6ba2754e27 100755 --- a/tests/qemu-iotests/264 +++ b/tests/qemu-iotests/264 @@ -25,7 +25,8 @@ import os import iotests from iotests import qemu_img_create, file_path, qemu_nbd_popen -disk_a, disk_b, nbd_sock = file_path('disk_a', 'disk_b', 'nbd-sock') +disk_a, disk_b = file_path('disk_a', 'disk_b') +nbd_sock = file_path('nbd-sock', base_dir=iotests.sock_dir) nbd_uri = 'nbd+unix:///?socket=' + nbd_sock wait_limit = 3.0 wait_step = 0.2 diff --git a/tests/qemu-iotests/267.out b/tests/qemu-iotests/267.out index 7176e376e1f..f6f5d8715ad 100644 --- a/tests/qemu-iotests/267.out +++ b/tests/qemu-iotests/267.out @@ -33,8 +33,8 @@ QEMU X.Y.Z monitor - type 'help' for more information (qemu) savevm snap0 (qemu) info snapshots List of snapshots present on all disks: -ID TAG VM SIZE DATE VM CLOCK ICOUNT --- snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +-- snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- (qemu) loadvm snap0 (qemu) quit @@ -44,8 +44,8 @@ QEMU X.Y.Z monitor - type 'help' for more information (qemu) savevm snap0 (qemu) info snapshots List of snapshots present on all disks: -ID TAG VM SIZE DATE VM CLOCK ICOUNT --- snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +-- snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- (qemu) loadvm snap0 (qemu) quit @@ -69,8 +69,8 @@ QEMU X.Y.Z monitor - type 'help' for more information (qemu) savevm snap0 (qemu) info snapshots List of snapshots present on all disks: -ID TAG VM SIZE DATE VM CLOCK ICOUNT --- snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +-- snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- (qemu) loadvm snap0 (qemu) quit @@ -94,8 +94,8 @@ QEMU X.Y.Z monitor - type 'help' for more information (qemu) savevm snap0 (qemu) info snapshots List of snapshots present on all disks: -ID TAG VM SIZE DATE VM CLOCK ICOUNT --- snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +-- snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- (qemu) loadvm snap0 (qemu) quit @@ -105,8 +105,8 @@ QEMU X.Y.Z monitor - type 'help' for more information (qemu) savevm snap0 (qemu) info snapshots List of snapshots present on all disks: -ID TAG VM SIZE DATE VM CLOCK ICOUNT --- snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +-- snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- (qemu) loadvm snap0 (qemu) quit @@ -119,8 +119,8 @@ QEMU X.Y.Z monitor - type 'help' for more information (qemu) savevm snap0 (qemu) info snapshots List of snapshots present on all disks: -ID TAG VM SIZE DATE VM CLOCK ICOUNT --- snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +-- snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- (qemu) loadvm snap0 (qemu) quit @@ -134,8 +134,8 @@ QEMU X.Y.Z monitor - type 'help' for more information 
(qemu) savevm snap0 (qemu) info snapshots List of snapshots present on all disks: -ID TAG VM SIZE DATE VM CLOCK ICOUNT --- snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +-- snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- (qemu) loadvm snap0 (qemu) quit @@ -145,15 +145,15 @@ QEMU X.Y.Z monitor - type 'help' for more information (qemu) savevm snap0 (qemu) info snapshots List of snapshots present on all disks: -ID TAG VM SIZE DATE VM CLOCK ICOUNT --- snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +-- snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- (qemu) loadvm snap0 (qemu) quit Internal snapshots on overlay: Snapshot list: -ID TAG VM SIZE DATE VM CLOCK ICOUNT -1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +1 snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- Internal snapshots on backing file: === -blockdev with NBD server on the backing file === @@ -166,17 +166,17 @@ QEMU X.Y.Z monitor - type 'help' for more information (qemu) savevm snap0 (qemu) info snapshots List of snapshots present on all disks: -ID TAG VM SIZE DATE VM CLOCK ICOUNT --- snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +-- snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- (qemu) loadvm snap0 (qemu) quit Internal snapshots on overlay: Snapshot list: -ID TAG VM SIZE DATE VM CLOCK ICOUNT -1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +1 snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- Internal snapshots on backing file: Snapshot list: -ID TAG VM SIZE DATE VM CLOCK ICOUNT -1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +1 snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- *** done diff --git a/tests/qemu-iotests/277 b/tests/qemu-iotests/277 index 24833e7eb68..4224202ac2c 100755 --- a/tests/qemu-iotests/277 +++ b/tests/qemu-iotests/277 @@ -27,7 +27,8 @@ from iotests import file_path, log iotests.script_initialize() -nbd_sock, conf_file = file_path('nbd-sock', 'nbd-fault-injector.conf') +conf_file = file_path('nbd-fault-injector.conf') +nbd_sock = file_path('nbd-sock', base_dir=iotests.sock_dir) def make_conf_file(event): diff --git a/tests/qemu-iotests/280.out b/tests/qemu-iotests/280.out index c75f437c00e..546dbb4a68a 100644 --- a/tests/qemu-iotests/280.out +++ b/tests/qemu-iotests/280.out @@ -12,7 +12,7 @@ Enabling migration QMP events on VM... VM is now stopped: completed {"execute": "query-status", "arguments": {}} -{"return": {"running": false, "singlestep": false, "status": "postmigrate"}} +{"return": {"running": false, "status": "postmigrate"}} === Create a snapshot of the disk image === {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "file", "filename": "TEST_DIR/PID-top", "size": 0}}} diff --git a/tests/qemu-iotests/284 b/tests/qemu-iotests/284 index 5a82639e7f8..722267486d0 100755 --- a/tests/qemu-iotests/284 +++ b/tests/qemu-iotests/284 @@ -33,6 +33,8 @@ _cleanup() } trap "_cleanup; exit \$status" 0 1 2 3 15 +IMGOPTSSYNTAX=true + # get standard environment, filters and checks . ./common.rc . 
./common.filter @@ -47,14 +49,12 @@ size=1M SECRET="secret,id=sec0,data=astrochicken" -IMGSPEC="driver=$IMGFMT,file.filename=$TEST_IMG,encrypt.key-secret=sec0" QEMU_IO_OPTIONS=$QEMU_IO_OPTIONS_NO_FMT _run_test() { - IMGOPTSSYNTAX=true OLD_TEST_IMG="$TEST_IMG" - TEST_IMG="driver=$IMGFMT,file.filename=$TEST_IMG,encrypt.key-secret=sec0" + TEST_IMG="$TEST_IMG,encrypt.key-secret=sec0" QEMU_IMG_EXTRA_ARGS="--image-opts --object $SECRET" echo @@ -78,7 +78,6 @@ _run_test() TEST_IMG="$OLD_TEST_IMG" QEMU_IMG_EXTRA_ARGS= - IMGOPTSSYNTAX= } diff --git a/tests/qemu-iotests/286 b/tests/qemu-iotests/286 index 120a8375b76..38216c2a0e2 100755 --- a/tests/qemu-iotests/286 +++ b/tests/qemu-iotests/286 @@ -69,7 +69,8 @@ $QEMU_IMG snapshot -l "$TEST_IMG" | tail -n 1 | tr -s ' ' \ -e 's/\./(VM state size unit)/' \ -e 's/\./(snapshot date)/' \ -e 's/\./(snapshot time)/' \ - -e 's/\./(VM clock)/' + -e 's/\./(VM clock)/' \ + -e 's/\./(icount)/' # success, all done echo "*** done" diff --git a/tests/qemu-iotests/286.out b/tests/qemu-iotests/286.out index 39ff07e12cd..bb04748e080 100644 --- a/tests/qemu-iotests/286.out +++ b/tests/qemu-iotests/286.out @@ -4,5 +4,5 @@ QEMU X.Y.Z monitor - type 'help' for more information (qemu) savevm abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz (qemu) quit Output structure: -(snapshot ID) (snapshot name) (VM state size value) (VM state size unit) (snapshot date) (snapshot time) (VM clock) +(snapshot ID) (snapshot name) (VM state size value) (VM state size unit) (snapshot date) (snapshot time) (VM clock) (icount) *** done diff --git a/tests/qemu-iotests/meson.build b/tests/qemu-iotests/meson.build index 53847cb98fc..fad340ad595 100644 --- a/tests/qemu-iotests/meson.build +++ b/tests/qemu-iotests/meson.build @@ -1,4 +1,4 @@ -if not have_tools or targetos == 'windows' +if not have_tools or host_os == 'windows' subdir_done() endif diff --git a/tests/qemu-iotests/tests/detect-zeroes-registered-buf b/tests/qemu-iotests/tests/detect-zeroes-registered-buf index edb5f2cee52..5eaf34e5a6b 100755 --- a/tests/qemu-iotests/tests/detect-zeroes-registered-buf +++ b/tests/qemu-iotests/tests/detect-zeroes-registered-buf @@ -36,6 +36,8 @@ _cleanup() } trap "_cleanup; exit \$status" 0 1 2 3 15 +IMGOPTSSYNTAX=true + # get standard environment, filters and checks cd .. . ./common.rc @@ -46,7 +48,7 @@ _supported_proto generic size=128M _make_test_img $size -IMGSPEC="driver=$IMGFMT,file.filename=$TEST_IMG,discard=unmap,detect-zeroes=unmap" +IMGSPEC="$TEST_IMG,discard=unmap,detect-zeroes=unmap" echo echo "== writing zero buffer to image ==" diff --git a/tests/qemu-iotests/tests/iothreads-nbd-export b/tests/qemu-iotests/tests/iothreads-nbd-export new file mode 100755 index 00000000000..037260729c7 --- /dev/null +++ b/tests/qemu-iotests/tests/iothreads-nbd-export @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 +# group: rw quick +# +# Copyright (C) 2024 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +# Creator/Owner: Kevin Wolf + +import time +import qemu +import iotests + +iotests.script_initialize(supported_fmts=['qcow2'], + supported_platforms=['linux']) + +with iotests.FilePath('disk1.img') as path, \ + iotests.FilePath('nbd.sock', base_dir=iotests.sock_dir) as nbd_sock, \ + qemu.machine.QEMUMachine(iotests.qemu_prog) as vm: + + img_size = '10M' + + iotests.log('Preparing disk...') + iotests.qemu_img_create('-f', iotests.imgfmt, path, img_size) + vm.add_args('-blockdev', f'file,node-name=disk-file,filename={path}') + vm.add_args('-blockdev', 'qcow2,node-name=disk,file=disk-file') + vm.add_args('-object', 'iothread,id=iothread0') + vm.add_args('-device', + 'virtio-blk,drive=disk,iothread=iothread0,share-rw=on') + + iotests.log('Launching VM...') + vm.add_args('-accel', 'kvm', '-accel', 'tcg') + #vm.add_args('-accel', 'qtest') + vm.launch() + + iotests.log('Exporting to NBD...') + iotests.log(vm.qmp('nbd-server-start', + addr={'type': 'unix', 'data': {'path': nbd_sock}})) + iotests.log(vm.qmp('block-export-add', type='nbd', id='exp0', + node_name='disk', writable=True)) + + iotests.log('Connecting qemu-img...') + qemu_io = iotests.QemuIoInteractive('-f', 'raw', + f'nbd+unix:///disk?socket={nbd_sock}') + + iotests.log('Moving the NBD export to a different iothread...') + for i in range(0, 10): + iotests.log(vm.qmp('system_reset')) + time.sleep(0.1) + + iotests.log('Checking that it is still alive...') + iotests.log(vm.qmp('query-status')) + + qemu_io.close() + vm.shutdown() diff --git a/tests/qemu-iotests/tests/iothreads-nbd-export.out b/tests/qemu-iotests/tests/iothreads-nbd-export.out new file mode 100644 index 00000000000..bc514e35e59 --- /dev/null +++ b/tests/qemu-iotests/tests/iothreads-nbd-export.out @@ -0,0 +1,19 @@ +Preparing disk... +Launching VM... +Exporting to NBD... +{"return": {}} +{"return": {}} +Connecting qemu-img... +Moving the NBD export to a different iothread... +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} +{"return": {}} +Checking that it is still alive... +{"return": {"running": true, "status": "running"}} diff --git a/tests/qemu-iotests/tests/iothreads-stream b/tests/qemu-iotests/tests/iothreads-stream index 503f221f16d..231195b5e87 100755 --- a/tests/qemu-iotests/tests/iothreads-stream +++ b/tests/qemu-iotests/tests/iothreads-stream @@ -18,6 +18,7 @@ # # Creator/Owner: Kevin Wolf +import asyncio import iotests iotests.script_initialize(supported_fmts=['qcow2'], @@ -69,6 +70,6 @@ with iotests.FilePath('disk1.img') as base1_path, \ # The test is done once both jobs are gone if finished == 2: break - except TimeoutError: + except asyncio.TimeoutError: pass vm.cmd('query-jobs') diff --git a/tests/qemu-iotests/tests/luks-detached-header b/tests/qemu-iotests/tests/luks-detached-header new file mode 100755 index 00000000000..3455fd8de1e --- /dev/null +++ b/tests/qemu-iotests/tests/luks-detached-header @@ -0,0 +1,316 @@ +#!/usr/bin/env python3 +# group: rw auto +# +# Test LUKS volume with detached header +# +# Copyright (C) 2024 SmartX Inc. +# +# Authors: +# Hyman Huang +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import os +import json +import iotests +from iotests import ( + imgfmt, + qemu_img_create, + qemu_img_info, + QMPTestCase, +) + + +image_size = 128 * 1024 * 1024 + +luks_img = os.path.join(iotests.test_dir, "luks.img") +detached_header_img1 = os.path.join(iotests.test_dir, "detached_header.img1") +detached_header_img2 = os.path.join(iotests.test_dir, "detached_header.img2") +detached_payload_raw_img = os.path.join( + iotests.test_dir, "detached_payload_raw.img" +) +detached_payload_qcow2_img = os.path.join( + iotests.test_dir, "detached_payload_qcow2.img" +) +detached_header_raw_img = "json:" + json.dumps( + { + "driver": "luks", + "file": {"filename": detached_payload_raw_img}, + "header": { + "filename": detached_header_img1, + }, + } +) +detached_header_qcow2_img = "json:" + json.dumps( + { + "driver": "luks", + "file": {"filename": detached_payload_qcow2_img}, + "header": {"filename": detached_header_img2}, + } +) + +secret_obj = "secret,id=sec0,data=foo" +luks_opts = "key-secret=sec0" + + +class TestDetachedLUKSHeader(QMPTestCase): + def setUp(self) -> None: + self.vm = iotests.VM() + self.vm.add_object(secret_obj) + self.vm.launch() + + # 1. Create the normal LUKS disk with 128M size + self.vm.blockdev_create( + {"driver": "file", "filename": luks_img, "size": 0} + ) + self.vm.qmp_log( + "blockdev-add", + driver="file", + filename=luks_img, + node_name="luks-1-storage", + ) + result = self.vm.blockdev_create( + { + "driver": imgfmt, + "file": "luks-1-storage", + "key-secret": "sec0", + "size": image_size, + "iter-time": 10, + } + ) + # None is expected + self.assertEqual(result, None) + + # 2. Create the LUKS disk with detached header (raw) + + # Create detached LUKS header + self.vm.blockdev_create( + {"driver": "file", "filename": detached_header_img1, "size": 0} + ) + self.vm.qmp_log( + "blockdev-add", + driver="file", + filename=detached_header_img1, + node_name="luks-2-header-storage", + ) + + # Create detached LUKS raw payload + self.vm.blockdev_create( + {"driver": "file", "filename": detached_payload_raw_img, "size": 0} + ) + self.vm.qmp_log( + "blockdev-add", + driver="file", + filename=detached_payload_raw_img, + node_name="luks-2-payload-storage", + ) + + # Format LUKS disk with detached header + result = self.vm.blockdev_create( + { + "driver": imgfmt, + "header": "luks-2-header-storage", + "file": "luks-2-payload-storage", + "key-secret": "sec0", + "preallocation": "full", + "size": image_size, + "iter-time": 10, + } + ) + self.assertEqual(result, None) + + self.vm.shutdown() + + # 3. 
Create the LUKS disk with detached header (qcow2) + + # Create detached LUKS header using qemu-img + res = qemu_img_create( + "-f", + "luks", + "--object", + secret_obj, + "-o", + luks_opts, + "-o", + "detached-header=true", + detached_header_img2, + ) + assert res.returncode == 0 + + # Create detached LUKS qcow2 payload + res = qemu_img_create( + "-f", "qcow2", detached_payload_qcow2_img, str(image_size) + ) + assert res.returncode == 0 + + def tearDown(self) -> None: + os.remove(luks_img) + os.remove(detached_header_img1) + os.remove(detached_header_img2) + os.remove(detached_payload_raw_img) + os.remove(detached_payload_qcow2_img) + + # Check if there was any qemu-io run that failed + if "Pattern verification failed" in self.vm.get_log(): + print("ERROR: Pattern verification failed:") + print(self.vm.get_log()) + self.fail("qemu-io pattern verification failed") + + def test_img_creation(self) -> None: + # Check if the images created above are expected + + data = qemu_img_info(luks_img)["format-specific"] + self.assertEqual(data["type"], imgfmt) + self.assertEqual(data["data"]["detached-header"], False) + + data = qemu_img_info(detached_header_raw_img)["format-specific"] + self.assertEqual(data["type"], imgfmt) + self.assertEqual(data["data"]["detached-header"], True) + + data = qemu_img_info(detached_header_qcow2_img)["format-specific"] + self.assertEqual(data["type"], imgfmt) + self.assertEqual(data["data"]["detached-header"], True) + + # Check if preallocation works + size = qemu_img_info(detached_payload_raw_img)["actual-size"] + self.assertGreaterEqual(size, image_size) + + def test_detached_luks_header(self) -> None: + self.vm.launch() + + # 1. Add the disk created above + + # Add normal LUKS disk + self.vm.qmp_log( + "blockdev-add", + driver="file", + filename=luks_img, + node_name="luks-1-storage", + ) + result = self.vm.qmp_log( + "blockdev-add", + driver="luks", + file="luks-1-storage", + key_secret="sec0", + node_name="luks-1-format", + ) + + # Expected result{ "return": {} } + self.assert_qmp(result, "return", {}) + + # Add detached LUKS header with raw payload + self.vm.qmp_log( + "blockdev-add", + driver="file", + filename=detached_header_img1, + node_name="luks-header1-storage", + ) + + self.vm.qmp_log( + "blockdev-add", + driver="file", + filename=detached_payload_raw_img, + node_name="luks-2-payload-raw-storage", + ) + + result = self.vm.qmp_log( + "blockdev-add", + driver=imgfmt, + header="luks-header1-storage", + file="luks-2-payload-raw-storage", + key_secret="sec0", + node_name="luks-2-payload-raw-format", + ) + self.assert_qmp(result, "return", {}) + + # Add detached LUKS header with qcow2 payload + self.vm.qmp_log( + "blockdev-add", + driver="file", + filename=detached_header_img2, + node_name="luks-header2-storage", + ) + + self.vm.qmp_log( + "blockdev-add", + driver="file", + filename=detached_payload_qcow2_img, + node_name="luks-3-payload-qcow2-storage", + ) + + result = self.vm.qmp_log( + "blockdev-add", + driver=imgfmt, + header="luks-header2-storage", + file="luks-3-payload-qcow2-storage", + key_secret="sec0", + node_name="luks-3-payload-qcow2-format", + ) + self.assert_qmp(result, "return", {}) + + # 2. 
Do I/O test + + # Do some I/O to the image to see whether it still works + # (Pattern verification will be checked by tearDown()) + + # Normal LUKS disk + result = self.vm.qmp_log( + "human-monitor-command", + command_line='qemu-io luks-1-format "write -P 40 0 64k"', + ) + self.assert_qmp(result, "return", "") + + result = self.vm.qmp_log( + "human-monitor-command", + command_line='qemu-io luks-1-format "read -P 40 0 64k"', + ) + self.assert_qmp(result, "return", "") + + # Detached LUKS header with raw payload + cmd = 'qemu-io luks-2-payload-raw-format "write -P 41 0 64k"' + result = self.vm.qmp( + "human-monitor-command", + command_line=cmd + ) + self.assert_qmp(result, "return", "") + + cmd = 'qemu-io luks-2-payload-raw-format "read -P 41 0 64k"' + result = self.vm.qmp( + "human-monitor-command", + command_line=cmd + ) + self.assert_qmp(result, "return", "") + + # Detached LUKS header with qcow2 payload + cmd = 'qemu-io luks-3-payload-qcow2-format "write -P 42 0 64k"' + result = self.vm.qmp( + "human-monitor-command", + command_line=cmd + ) + self.assert_qmp(result, "return", "") + + cmd = 'qemu-io luks-3-payload-qcow2-format "read -P 42 0 64k"' + result = self.vm.qmp( + "human-monitor-command", + command_line=cmd + ) + self.assert_qmp(result, "return", "") + + self.vm.shutdown() + + +if __name__ == "__main__": + # Test image creation and I/O + iotests.main(supported_fmts=["luks"], supported_protocols=["file"]) diff --git a/tests/qemu-iotests/tests/luks-detached-header.out b/tests/qemu-iotests/tests/luks-detached-header.out new file mode 100644 index 00000000000..fbc63e62f88 --- /dev/null +++ b/tests/qemu-iotests/tests/luks-detached-header.out @@ -0,0 +1,5 @@ +.. +---------------------------------------------------------------------- +Ran 2 tests + +OK diff --git a/tests/qemu-iotests/tests/qcow2-internal-snapshots b/tests/qemu-iotests/tests/qcow2-internal-snapshots index 36523aba06e..9f83aa89039 100755 --- a/tests/qemu-iotests/tests/qcow2-internal-snapshots +++ b/tests/qemu-iotests/tests/qcow2-internal-snapshots @@ -39,7 +39,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 # This tests qcow2-specific low-level functionality _supported_fmt qcow2 -_supported_proto generic +_supported_proto file # Internal snapshots are (currently) impossible with refcount_bits=1, # and generally impossible with external data files _unsupported_imgopts 'compat=0.10' 'refcount_bits=1[^0-9]' data_file diff --git a/tests/qemu-iotests/tests/qcow2-internal-snapshots.out b/tests/qemu-iotests/tests/qcow2-internal-snapshots.out index 438f535e6ac..fedb09224ea 100644 --- a/tests/qemu-iotests/tests/qcow2-internal-snapshots.out +++ b/tests/qemu-iotests/tests/qcow2-internal-snapshots.out @@ -14,8 +14,8 @@ wrote 524288/524288 bytes at offset 0 (qemu) quit Snapshot list: -ID TAG VM SIZE DATE VM CLOCK ICOUNT -1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +1 snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- No errors were found on the image. === Verify that loading the snapshot reverts to the old content === @@ -47,9 +47,9 @@ read 64512/64512 bytes at offset 66560 (qemu) quit Snapshot list: -ID TAG VM SIZE DATE VM CLOCK ICOUNT -1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 -2 snap1 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +1 snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- +2 snap1 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- No errors were found on the image. 
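Illustrative sketch (not part of the patch): the luks-detached-header test above exercises the detached-header feature both through qemu-img create with "-o detached-header=true" and through a "json:" filename that layers a luks node over separate header and payload files. Below is a minimal Python sketch of that filename composition, using placeholder file names and only the standard json module:

    import json

    header = "detached_header.img"    # raw file holding only the LUKS header
    payload = "detached_payload.img"  # raw or qcow2 file holding the encrypted payload

    opts = {
        "driver": "luks",
        "file": {"filename": payload},   # payload node
        "header": {"filename": header},  # detached header node
    }
    filename = "json:" + json.dumps(opts)
    print(filename)

Running qemu-img info on the composed filename is how test_img_creation above confirms that the format-specific data reports "detached-header": true.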
=== qemu-img snapshot can revert to snapshots === @@ -79,8 +79,8 @@ read 64512/64512 bytes at offset 66560 (qemu) quit Snapshot list: -ID TAG VM SIZE DATE VM CLOCK ICOUNT -1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +ID TAG VM_SIZE DATE VM_CLOCK ICOUNT +1 snap0 SIZE yyyy-mm-dd hh:mm:ss 0000:00:00.000 -- No errors were found on the image. === Error cases === diff --git a/tests/qemu-iotests/tests/qsd-jobs b/tests/qemu-iotests/tests/qsd-jobs index 510bf0a9dcb..9b843af6312 100755 --- a/tests/qemu-iotests/tests/qsd-jobs +++ b/tests/qemu-iotests/tests/qsd-jobs @@ -40,7 +40,7 @@ cd .. . ./common.filter _supported_fmt qcow2 -_supported_proto generic +_supported_proto file size=128M diff --git a/tests/qemu-iotests/tests/stream-unaligned-prefetch b/tests/qemu-iotests/tests/stream-unaligned-prefetch new file mode 100755 index 00000000000..546db1d3698 --- /dev/null +++ b/tests/qemu-iotests/tests/stream-unaligned-prefetch @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# group: rw quick +# +# Test what happens when a stream job does an unaligned prefetch read +# which requires padding while having a NULL qiov. +# +# Copyright (C) Proxmox Server Solutions GmbH +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import os +import iotests +from iotests import imgfmt, qemu_img_create, qemu_io, QMPTestCase + +image_size = 1 * 1024 * 1024 +cluster_size = 64 * 1024 +base = os.path.join(iotests.test_dir, 'base.img') +top = os.path.join(iotests.test_dir, 'top.img') + +class TestStreamUnalignedPrefetch(QMPTestCase): + def setUp(self) -> None: + """ + Create two images: + - base image {base} with {cluster_size // 2} bytes allocated + - top image {top} without any data allocated and coarser + cluster size + + Attach a compress filter for the top image, because that + requires that the request alignment is the top image's cluster + size. 
+ """ + qemu_img_create('-f', imgfmt, + '-o', 'cluster_size={}'.format(cluster_size // 2), + base, str(image_size)) + qemu_io('-c', f'write 0 {cluster_size // 2}', base) + qemu_img_create('-f', imgfmt, + '-o', 'cluster_size={}'.format(cluster_size), + top, str(image_size)) + + self.vm = iotests.VM() + self.vm.add_blockdev(self.vm.qmp_to_opts({ + 'driver': imgfmt, + 'node-name': 'base', + 'file': { + 'driver': 'file', + 'filename': base + } + })) + self.vm.add_blockdev(self.vm.qmp_to_opts({ + 'driver': 'compress', + 'node-name': 'compress-top', + 'file': { + 'driver': imgfmt, + 'node-name': 'top', + 'file': { + 'driver': 'file', + 'filename': top + }, + 'backing': 'base' + } + })) + self.vm.launch() + + def tearDown(self) -> None: + self.vm.shutdown() + os.remove(top) + os.remove(base) + + def test_stream_unaligned_prefetch(self) -> None: + self.vm.cmd('block-stream', job_id='stream', device='compress-top') + + +if __name__ == '__main__': + iotests.main(supported_fmts=['qcow2'], supported_protocols=['file']) diff --git a/tests/qemu-iotests/tests/stream-unaligned-prefetch.out b/tests/qemu-iotests/tests/stream-unaligned-prefetch.out new file mode 100644 index 00000000000..ae1213e6f86 --- /dev/null +++ b/tests/qemu-iotests/tests/stream-unaligned-prefetch.out @@ -0,0 +1,5 @@ +. +---------------------------------------------------------------------- +Ran 1 tests + +OK diff --git a/tests/qtest/arm-cpu-features.c b/tests/qtest/arm-cpu-features.c index a8a4c668adb..9d6e6190d55 100644 --- a/tests/qtest/arm-cpu-features.c +++ b/tests/qtest/arm-cpu-features.c @@ -79,7 +79,7 @@ static const char *resp_get_error(QDict *resp) g_assert(_resp); \ _error = resp_get_error(_resp); \ g_assert(_error); \ - g_assert(g_str_equal(_error, expected_error)); \ + g_assert_cmpstr(_error, ==, expected_error); \ qobject_unref(_resp); \ }) @@ -194,8 +194,8 @@ static void assert_type_full(QTestState *qts) g_assert(resp); error = resp_get_error(resp); g_assert(error); - g_assert(g_str_equal(error, - "The requested expansion type is not supported")); + g_assert_cmpstr(error, ==, + "The requested expansion type is not supported"); qobject_unref(resp); } @@ -212,8 +212,9 @@ static void assert_bad_props(QTestState *qts, const char *cpu_type) g_assert(resp); error = resp_get_error(resp); g_assert(error); - g_assert(g_str_equal(error, - "Invalid parameter type for 'props', expected: dict")); + g_assert_cmpstr(error, ==, + "Invalid parameter type for 'model.props'," + " expected: object"); qobject_unref(resp); } @@ -446,7 +447,7 @@ static void test_query_cpu_model_expansion(const void *data) assert_bad_props(qts, "max"); assert_error(qts, "foo", "The CPU type 'foo' is not a recognized " "ARM CPU type", NULL); - assert_error(qts, "max", "Parameter 'not-a-prop' is unexpected", + assert_error(qts, "max", "Parameter 'model.props.not-a-prop' is unexpected", "{ 'not-a-prop': false }"); assert_error(qts, "host", "The CPU type 'host' requires KVM", NULL); diff --git a/tests/qtest/aspeed_fsi-test.c b/tests/qtest/aspeed_fsi-test.c new file mode 100644 index 00000000000..b3020dd8211 --- /dev/null +++ b/tests/qtest/aspeed_fsi-test.c @@ -0,0 +1,205 @@ +/* + * QTest testcases for IBM's Flexible Service Interface (FSI) + * + * Copyright (c) 2023 IBM Corporation + * + * Authors: + * Ninad Palsule + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include + +#include "qemu/module.h" +#include "libqtest-single.h" + +/* Registers from ast2600 specifications */ +#define ASPEED_FSI_ENGINER_TRIGGER 0x04 +#define ASPEED_FSI_OPB0_BUS_SELECT 0x10 +#define ASPEED_FSI_OPB1_BUS_SELECT 0x28 +#define ASPEED_FSI_OPB0_RW_DIRECTION 0x14 +#define ASPEED_FSI_OPB1_RW_DIRECTION 0x2c +#define ASPEED_FSI_OPB0_XFER_SIZE 0x18 +#define ASPEED_FSI_OPB1_XFER_SIZE 0x30 +#define ASPEED_FSI_OPB0_BUS_ADDR 0x1c +#define ASPEED_FSI_OPB1_BUS_ADDR 0x34 +#define ASPEED_FSI_INTRRUPT_CLEAR 0x40 +#define ASPEED_FSI_INTRRUPT_STATUS 0x48 +#define ASPEED_FSI_OPB0_BUS_STATUS 0x80 +#define ASPEED_FSI_OPB1_BUS_STATUS 0x8c +#define ASPEED_FSI_OPB0_READ_DATA 0x84 +#define ASPEED_FSI_OPB1_READ_DATA 0x90 + +/* + * FSI Base addresses from the ast2600 specifications. + */ +#define AST2600_OPB_FSI0_BASE_ADDR 0x1e79b000 +#define AST2600_OPB_FSI1_BASE_ADDR 0x1e79b100 + +static uint32_t aspeed_fsi_base_addr; + +static uint32_t aspeed_fsi_readl(QTestState *s, uint32_t reg) +{ + return qtest_readl(s, aspeed_fsi_base_addr + reg); +} + +static void aspeed_fsi_writel(QTestState *s, uint32_t reg, uint32_t val) +{ + qtest_writel(s, aspeed_fsi_base_addr + reg, val); +} + +/* Setup base address and select register */ +static void test_fsi_setup(QTestState *s, uint32_t base_addr) +{ + uint32_t curval; + + aspeed_fsi_base_addr = base_addr; + + /* Set the base select register */ + if (base_addr == AST2600_OPB_FSI0_BASE_ADDR) { + /* Unselect FSI1 */ + aspeed_fsi_writel(s, ASPEED_FSI_OPB1_BUS_SELECT, 0x0); + curval = aspeed_fsi_readl(s, ASPEED_FSI_OPB1_BUS_SELECT); + g_assert_cmpuint(curval, ==, 0x0); + + /* Select FSI0 */ + aspeed_fsi_writel(s, ASPEED_FSI_OPB0_BUS_SELECT, 0x1); + curval = aspeed_fsi_readl(s, ASPEED_FSI_OPB0_BUS_SELECT); + g_assert_cmpuint(curval, ==, 0x1); + } else if (base_addr == AST2600_OPB_FSI1_BASE_ADDR) { + /* Unselect FSI0 */ + aspeed_fsi_writel(s, ASPEED_FSI_OPB0_BUS_SELECT, 0x0); + curval = aspeed_fsi_readl(s, ASPEED_FSI_OPB0_BUS_SELECT); + g_assert_cmpuint(curval, ==, 0x0); + + /* Select FSI1 */ + aspeed_fsi_writel(s, ASPEED_FSI_OPB1_BUS_SELECT, 0x1); + curval = aspeed_fsi_readl(s, ASPEED_FSI_OPB1_BUS_SELECT); + g_assert_cmpuint(curval, ==, 0x1); + } else { + g_assert_not_reached(); + } +} + +static void test_fsi_reg_change(QTestState *s, uint32_t reg, uint32_t newval) +{ + uint32_t base; + uint32_t curval; + + base = aspeed_fsi_readl(s, reg); + aspeed_fsi_writel(s, reg, newval); + curval = aspeed_fsi_readl(s, reg); + g_assert_cmpuint(curval, ==, newval); + aspeed_fsi_writel(s, reg, base); + curval = aspeed_fsi_readl(s, reg); + g_assert_cmpuint(curval, ==, base); +} + +static void test_fsi0_master_regs(const void *data) +{ + QTestState *s = (QTestState *)data; + + test_fsi_setup(s, AST2600_OPB_FSI0_BASE_ADDR); + + test_fsi_reg_change(s, ASPEED_FSI_OPB0_RW_DIRECTION, 0xF3F4F514); + test_fsi_reg_change(s, ASPEED_FSI_OPB0_XFER_SIZE, 0xF3F4F518); + test_fsi_reg_change(s, ASPEED_FSI_OPB0_BUS_ADDR, 0xF3F4F51c); + test_fsi_reg_change(s, ASPEED_FSI_INTRRUPT_CLEAR, 0xF3F4F540); + test_fsi_reg_change(s, ASPEED_FSI_INTRRUPT_STATUS, 0xF3F4F548); + test_fsi_reg_change(s, ASPEED_FSI_OPB0_BUS_STATUS, 0xF3F4F580); + test_fsi_reg_change(s, ASPEED_FSI_OPB0_READ_DATA, 0xF3F4F584); +} + +static void test_fsi1_master_regs(const void *data) +{ + QTestState *s = (QTestState *)data; + + test_fsi_setup(s, AST2600_OPB_FSI1_BASE_ADDR); + + test_fsi_reg_change(s, ASPEED_FSI_OPB1_RW_DIRECTION, 0xF3F4F514); + test_fsi_reg_change(s, ASPEED_FSI_OPB1_XFER_SIZE, 
0xF3F4F518); + test_fsi_reg_change(s, ASPEED_FSI_OPB1_BUS_ADDR, 0xF3F4F51c); + test_fsi_reg_change(s, ASPEED_FSI_INTRRUPT_CLEAR, 0xF3F4F540); + test_fsi_reg_change(s, ASPEED_FSI_INTRRUPT_STATUS, 0xF3F4F548); + test_fsi_reg_change(s, ASPEED_FSI_OPB1_BUS_STATUS, 0xF3F4F580); + test_fsi_reg_change(s, ASPEED_FSI_OPB1_READ_DATA, 0xF3F4F584); +} + +static void test_fsi0_getcfam_addr0(const void *data) +{ + QTestState *s = (QTestState *)data; + uint32_t curval; + + test_fsi_setup(s, AST2600_OPB_FSI0_BASE_ADDR); + + /* Master access direction read */ + aspeed_fsi_writel(s, ASPEED_FSI_OPB0_RW_DIRECTION, 0x1); + /* word */ + aspeed_fsi_writel(s, ASPEED_FSI_OPB0_XFER_SIZE, 0x3); + /* Address */ + aspeed_fsi_writel(s, ASPEED_FSI_OPB0_BUS_ADDR, 0xa0000000); + aspeed_fsi_writel(s, ASPEED_FSI_INTRRUPT_CLEAR, 0x1); + aspeed_fsi_writel(s, ASPEED_FSI_ENGINER_TRIGGER, 0x1); + + curval = aspeed_fsi_readl(s, ASPEED_FSI_INTRRUPT_STATUS); + g_assert_cmpuint(curval, ==, 0x10000); + curval = aspeed_fsi_readl(s, ASPEED_FSI_OPB0_BUS_STATUS); + g_assert_cmpuint(curval, ==, 0x0); + curval = aspeed_fsi_readl(s, ASPEED_FSI_OPB0_READ_DATA); + g_assert_cmpuint(curval, ==, 0x152d02c0); +} + +static void test_fsi1_getcfam_addr0(const void *data) +{ + QTestState *s = (QTestState *)data; + uint32_t curval; + + test_fsi_setup(s, AST2600_OPB_FSI1_BASE_ADDR); + + /* Master access direction read */ + aspeed_fsi_writel(s, ASPEED_FSI_OPB1_RW_DIRECTION, 0x1); + + aspeed_fsi_writel(s, ASPEED_FSI_OPB1_XFER_SIZE, 0x3); + aspeed_fsi_writel(s, ASPEED_FSI_OPB1_BUS_ADDR, 0xa0000000); + aspeed_fsi_writel(s, ASPEED_FSI_INTRRUPT_CLEAR, 0x1); + aspeed_fsi_writel(s, ASPEED_FSI_ENGINER_TRIGGER, 0x1); + + curval = aspeed_fsi_readl(s, ASPEED_FSI_INTRRUPT_STATUS); + g_assert_cmpuint(curval, ==, 0x20000); + curval = aspeed_fsi_readl(s, ASPEED_FSI_OPB1_BUS_STATUS); + g_assert_cmpuint(curval, ==, 0x0); + curval = aspeed_fsi_readl(s, ASPEED_FSI_OPB1_READ_DATA); + g_assert_cmpuint(curval, ==, 0x152d02c0); +} + +int main(int argc, char **argv) +{ + int ret = -1; + QTestState *s; + + g_test_init(&argc, &argv, NULL); + + s = qtest_init("-machine ast2600-evb "); + + /* Tests for OPB/FSI0 */ + qtest_add_data_func("/aspeed-fsi-test/test_fsi0_master_regs", s, + test_fsi0_master_regs); + + qtest_add_data_func("/aspeed-fsi-test/test_fsi0_getcfam_addr0", s, + test_fsi0_getcfam_addr0); + + /* Tests for OPB/FSI1 */ + qtest_add_data_func("/aspeed-fsi-test/test_fsi1_master_regs", s, + test_fsi1_master_regs); + + qtest_add_data_func("/aspeed-fsi-test/test_fsi1_getcfam_addr0", s, + test_fsi1_getcfam_addr0); + + ret = g_test_run(); + qtest_quit(s); + + return ret; +} diff --git a/tests/qtest/bcm2835-i2c-test.c b/tests/qtest/bcm2835-i2c-test.c new file mode 100644 index 00000000000..513ecce61dc --- /dev/null +++ b/tests/qtest/bcm2835-i2c-test.c @@ -0,0 +1,115 @@ +/* + * QTest testcase for Broadcom Serial Controller (BSC) + * + * Copyright (c) 2024 Rayhan Faizel + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions 
of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "libqtest-single.h" + +#include "hw/i2c/bcm2835_i2c.h" +#include "hw/sensor/tmp105_regs.h" + +static const uint32_t bsc_base_addrs[] = { + 0x3f205000, /* I2C0 */ + 0x3f804000, /* I2C1 */ + 0x3f805000, /* I2C2 */ +}; + +static void bcm2835_i2c_init_transfer(uint32_t base_addr, bool read) +{ + /* read flag is bit 0 so we can write it directly */ + int interrupt = read ? BCM2835_I2C_C_INTR : BCM2835_I2C_C_INTT; + + writel(base_addr + BCM2835_I2C_C, + BCM2835_I2C_C_I2CEN | BCM2835_I2C_C_INTD | + BCM2835_I2C_C_ST | BCM2835_I2C_C_CLEAR | interrupt | read); +} + +static void test_i2c_read_write(gconstpointer data) +{ + uint32_t i2cdata; + intptr_t index = (intptr_t) data; + uint32_t base_addr = bsc_base_addrs[index]; + + /* Write to TMP105 register */ + writel(base_addr + BCM2835_I2C_A, 0x50); + writel(base_addr + BCM2835_I2C_DLEN, 3); + + bcm2835_i2c_init_transfer(base_addr, 0); + + writel(base_addr + BCM2835_I2C_FIFO, TMP105_REG_T_HIGH); + writel(base_addr + BCM2835_I2C_FIFO, 0xde); + writel(base_addr + BCM2835_I2C_FIFO, 0xad); + + /* Clear flags */ + writel(base_addr + BCM2835_I2C_S, BCM2835_I2C_S_DONE | BCM2835_I2C_S_ERR | + BCM2835_I2C_S_CLKT); + + /* Read from TMP105 register */ + writel(base_addr + BCM2835_I2C_A, 0x50); + writel(base_addr + BCM2835_I2C_DLEN, 1); + + bcm2835_i2c_init_transfer(base_addr, 0); + + writel(base_addr + BCM2835_I2C_FIFO, TMP105_REG_T_HIGH); + + writel(base_addr + BCM2835_I2C_DLEN, 2); + bcm2835_i2c_init_transfer(base_addr, 1); + + i2cdata = readl(base_addr + BCM2835_I2C_FIFO); + g_assert_cmpint(i2cdata, ==, 0xde); + + i2cdata = readl(base_addr + BCM2835_I2C_FIFO); + g_assert_cmpint(i2cdata, ==, 0xad); + + /* Clear flags */ + writel(base_addr + BCM2835_I2C_S, BCM2835_I2C_S_DONE | BCM2835_I2C_S_ERR | + BCM2835_I2C_S_CLKT); + +} + +int main(int argc, char **argv) +{ + int ret; + int i; + + g_test_init(&argc, &argv, NULL); + + for (i = 0; i < 3; i++) { + g_autofree char *test_name = + g_strdup_printf("/bcm2835/bcm2835-i2c%d/read_write", i); + qtest_add_data_func(test_name, (void *)(intptr_t) i, + test_i2c_read_write); + } + + /* Run I2C tests with TMP105 slaves on all three buses */ + qtest_start("-M raspi3b " + "-device tmp105,address=0x50,bus=i2c-bus.0 " + "-device tmp105,address=0x50,bus=i2c-bus.1 " + "-device tmp105,address=0x50,bus=i2c-bus.2"); + ret = g_test_run(); + qtest_end(); + + return ret; +} diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c index fe6a9a8563c..d1ff4db7a23 100644 --- a/tests/qtest/bios-tables-test.c +++ b/tests/qtest/bios-tables-test.c @@ -858,16 +858,8 @@ static void test_vm_prepare(const char *params, test_data *data) g_free(args); } -static void process_acpi_tables_noexit(test_data *data) +static void process_smbios_tables_noexit(test_data *data) { - test_acpi_load_tables(data); - - if (getenv(ACPI_REBUILD_EXPECTED_AML)) { - dump_aml_files(data, true); - } else { - test_acpi_asl(data); - } - /* * TODO: make SMBIOS tests work with UEFI firmware, * Bug on uefi-test-tools 
to provide entry point: @@ -879,6 +871,27 @@ static void process_acpi_tables_noexit(test_data *data) } } +static void test_smbios(const char *params, test_data *data) +{ + test_vm_prepare(params, data); + boot_sector_test(data->qts); + process_smbios_tables_noexit(data); + qtest_quit(data->qts); +} + +static void process_acpi_tables_noexit(test_data *data) +{ + test_acpi_load_tables(data); + + if (getenv(ACPI_REBUILD_EXPECTED_AML)) { + dump_aml_files(data, true); + } else { + test_acpi_asl(data); + } + + process_smbios_tables_noexit(data); +} + static void process_acpi_tables(test_data *data) { process_acpi_tables_noexit(data); @@ -1015,7 +1028,7 @@ static void test_acpi_q35_tcg(void) free_test_data(&data); } -static void test_acpi_q35_tcg_type4_count(void) +static void test_acpi_q35_kvm_type4_count(void) { test_data data = { .machine = MACHINE_Q35, @@ -1031,7 +1044,7 @@ static void test_acpi_q35_tcg_type4_count(void) free_test_data(&data); } -static void test_acpi_q35_tcg_core_count(void) +static void test_acpi_q35_kvm_core_count(void) { test_data data = { .machine = MACHINE_Q35, @@ -1048,7 +1061,7 @@ static void test_acpi_q35_tcg_core_count(void) free_test_data(&data); } -static void test_acpi_q35_tcg_core_count2(void) +static void test_acpi_q35_kvm_core_count2(void) { test_data data = { .machine = MACHINE_Q35, @@ -1065,7 +1078,7 @@ static void test_acpi_q35_tcg_core_count2(void) free_test_data(&data); } -static void test_acpi_q35_tcg_thread_count(void) +static void test_acpi_q35_kvm_thread_count(void) { test_data data = { .machine = MACHINE_Q35, @@ -1082,7 +1095,7 @@ static void test_acpi_q35_tcg_thread_count(void) free_test_data(&data); } -static void test_acpi_q35_tcg_thread_count2(void) +static void test_acpi_q35_kvm_thread_count2(void) { test_data data = { .machine = MACHINE_Q35, @@ -2064,6 +2077,50 @@ static void test_acpi_q35_pvpanic_isa(void) free_test_data(&data); } +static void test_acpi_pc_smbios_options(void) +{ + uint8_t req_type11[] = { 11 }; + test_data data = { + .machine = MACHINE_PC, + .variant = ".pc_smbios_options", + .required_struct_types = req_type11, + .required_struct_types_len = ARRAY_SIZE(req_type11), + }; + + test_smbios("-smbios type=11,value=TEST", &data); + free_test_data(&data); +} + +static void test_acpi_pc_smbios_blob(void) +{ + uint8_t req_type11[] = { 11 }; + test_data data = { + .machine = MACHINE_PC, + .variant = ".pc_smbios_blob", + .required_struct_types = req_type11, + .required_struct_types_len = ARRAY_SIZE(req_type11), + }; + + test_smbios("-machine smbios-entry-point-type=32 " + "-smbios file=tests/data/smbios/type11_blob", &data); + free_test_data(&data); +} + +static void test_acpi_isapc_smbios_legacy(void) +{ + uint8_t req_type11[] = { 1, 11 }; + test_data data = { + .machine = "isapc", + .variant = ".pc_smbios_legacy", + .required_struct_types = req_type11, + .required_struct_types_len = ARRAY_SIZE(req_type11), + }; + + test_smbios("-smbios file=tests/data/smbios/type11_blob.legacy " + "-smbios type=1,family=TEST", &data); + free_test_data(&data); +} + static void test_oem_fields(test_data *data) { int i; @@ -2215,6 +2272,12 @@ int main(int argc, char *argv[]) #ifdef CONFIG_POSIX qtest_add_func("acpi/piix4/acpierst", test_acpi_piix4_acpi_erst); #endif + qtest_add_func("acpi/piix4/smbios-options", + test_acpi_pc_smbios_options); + qtest_add_func("acpi/piix4/smbios-blob", + test_acpi_pc_smbios_blob); + qtest_add_func("acpi/piix4/smbios-legacy", + test_acpi_isapc_smbios_legacy); } if (qtest_has_machine(MACHINE_Q35)) { 
qtest_add_func("acpi/q35", test_acpi_q35_tcg); @@ -2262,15 +2325,15 @@ int main(int argc, char *argv[]) qtest_add_func("acpi/q35/kvm/xapic", test_acpi_q35_kvm_xapic); qtest_add_func("acpi/q35/kvm/dmar", test_acpi_q35_kvm_dmar); qtest_add_func("acpi/q35/type4-count", - test_acpi_q35_tcg_type4_count); + test_acpi_q35_kvm_type4_count); qtest_add_func("acpi/q35/core-count", - test_acpi_q35_tcg_core_count); + test_acpi_q35_kvm_core_count); qtest_add_func("acpi/q35/core-count2", - test_acpi_q35_tcg_core_count2); + test_acpi_q35_kvm_core_count2); qtest_add_func("acpi/q35/thread-count", - test_acpi_q35_tcg_thread_count); + test_acpi_q35_kvm_thread_count); qtest_add_func("acpi/q35/thread-count2", - test_acpi_q35_tcg_thread_count2); + test_acpi_q35_kvm_thread_count2); } if (qtest_has_device("virtio-iommu-pci")) { qtest_add_func("acpi/q35/viot", test_acpi_q35_viot); diff --git a/tests/qtest/boot-serial-test.c b/tests/qtest/boot-serial-test.c index 6dd06aeaf47..e3b7d65fe5c 100644 --- a/tests/qtest/boot-serial-test.c +++ b/tests/qtest/boot-serial-test.c @@ -156,7 +156,7 @@ static const testdef_t tests[] = { "Open Firmware" }, { "ppc64", "powernv8", "", "OPAL" }, { "ppc64", "powernv9", "", "OPAL" }, - { "ppc64", "sam460ex", "-device e1000", "8086 100e" }, + { "ppc64", "sam460ex", "-device pci-bridge,chassis_nr=2", "1b36 0001" }, { "i386", "isapc", "-cpu qemu32 -M graphics=off", "SeaBIOS" }, { "i386", "pc", "-M graphics=off", "SeaBIOS" }, { "i386", "q35", "-M graphics=off", "SeaBIOS" }, diff --git a/tests/qtest/cdrom-test.c b/tests/qtest/cdrom-test.c index 0945383789f..5d89e62515c 100644 --- a/tests/qtest/cdrom-test.c +++ b/tests/qtest/cdrom-test.c @@ -271,6 +271,11 @@ int main(int argc, char **argv) const char *virtmachine[] = { "virt", NULL }; add_cdrom_param_tests(virtmachine); } + } else if (g_str_equal(arch, "loongarch64")) { + if (qtest_has_device("virtio-blk-pci")) { + const char *virtmachine[] = { "virt", NULL }; + add_cdrom_param_tests(virtmachine); + } } else { const char *nonemachine[] = { "none", NULL }; add_cdrom_param_tests(nonemachine); diff --git a/tests/qtest/dbus-display-test.c b/tests/qtest/dbus-display-test.c index 21edaa1e321..0390bdcb41a 100644 --- a/tests/qtest/dbus-display-test.c +++ b/tests/qtest/dbus-display-test.c @@ -135,6 +135,13 @@ test_dbus_console_registered(GObject *source_object, NULL, #endif res, &err); + + if (g_error_matches(err, G_DBUS_ERROR, G_DBUS_ERROR_UNKNOWN_METHOD)) { + g_test_skip("The VM doesn't have a console!"); + g_main_loop_quit(test->loop); + return; + } + g_assert_no_error(err); test->listener_conn = g_thread_join(test->thread); @@ -156,7 +163,7 @@ test_dbus_display_console(void) g_autoptr(GMainLoop) loop = NULL; QTestState *qts = NULL; int pair[2]; - TestDBusConsoleRegister test; + TestDBusConsoleRegister test = { 0, }; #ifdef WIN32 WSAPROTOCOL_INFOW info; g_autoptr(GVariant) listener = NULL; @@ -245,7 +252,6 @@ test_dbus_display_keyboard(void) &err)); g_assert_no_error(err); - g_assert_cmpint(qtest_inb(qts, 0x64) & 0x1, ==, 0); g_assert_cmpint(qtest_inb(qts, 0x60), ==, 0); @@ -256,6 +262,12 @@ test_dbus_display_keyboard(void) -1, NULL, &err); + if (g_error_matches(err, G_DBUS_ERROR, G_DBUS_ERROR_UNKNOWN_METHOD)) { + g_test_skip("The VM doesn't have a console!"); + qtest_quit(qts); + return; + } + g_assert_no_error(err); /* may be should wait for interrupt? 
*/ diff --git a/tests/qtest/libqos/ahci.c b/tests/qtest/libqos/ahci.c index a2c94c6e060..6d59c7551ab 100644 --- a/tests/qtest/libqos/ahci.c +++ b/tests/qtest/libqos/ahci.c @@ -662,7 +662,7 @@ unsigned ahci_pick_cmd(AHCIQState *ahci, uint8_t port) g_assert_not_reached(); } -inline unsigned size_to_prdtl(unsigned bytes, unsigned bytes_per_prd) +static unsigned size_to_prdtl(unsigned bytes, unsigned bytes_per_prd) { /* Each PRD can describe up to 4MiB */ g_assert_cmphex(bytes_per_prd, <=, 4096 * 1024); diff --git a/tests/qtest/libqos/ahci.h b/tests/qtest/libqos/ahci.h index 48017864bfa..a0487a1557d 100644 --- a/tests/qtest/libqos/ahci.h +++ b/tests/qtest/libqos/ahci.h @@ -599,7 +599,6 @@ void ahci_port_check_cmd_sanity(AHCIQState *ahci, AHCICommand *cmd); /* Misc */ bool is_atapi(AHCIQState *ahci, uint8_t port); -unsigned size_to_prdtl(unsigned bytes, unsigned bytes_per_prd); /* Command: Macro level execution */ void ahci_guest_io(AHCIQState *ahci, uint8_t port, uint8_t ide_cmd, diff --git a/tests/qtest/libqos/meson.build b/tests/qtest/libqos/meson.build index 90aae42a225..3aed6efcb8d 100644 --- a/tests/qtest/libqos/meson.build +++ b/tests/qtest/libqos/meson.build @@ -60,6 +60,7 @@ libqos_srcs = files( 'arm-xilinx-zynq-a9-machine.c', 'ppc64_pseries-machine.c', 'x86_64_pc-machine.c', + 'riscv-virt-machine.c', ) if have_virtfs diff --git a/tests/qtest/libqos/qgraph.h b/tests/qtest/libqos/qgraph.h index 287022a67c1..1b5de02e7be 100644 --- a/tests/qtest/libqos/qgraph.h +++ b/tests/qtest/libqos/qgraph.h @@ -24,7 +24,7 @@ #include "libqos-malloc.h" /* maximum path length */ -#define QOS_PATH_MAX_ELEMENT_SIZE 64 +#define QOS_PATH_MAX_ELEMENT_SIZE 128 typedef struct QOSGraphObject QOSGraphObject; typedef struct QOSGraphNode QOSGraphNode; diff --git a/tests/qtest/libqos/riscv-virt-machine.c b/tests/qtest/libqos/riscv-virt-machine.c new file mode 100644 index 00000000000..c4364c9c5d0 --- /dev/null +++ b/tests/qtest/libqos/riscv-virt-machine.c @@ -0,0 +1,137 @@ +/* + * libqos driver framework for risc-v + * + * Initial version based on arm-virt-machine.c + * + * Copyright (c) 2024 Ventana Micro + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1 as published by the Free Software Foundation. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#include "qemu/osdep.h" +#include "../libqtest.h" +#include "qemu/module.h" +#include "libqos-malloc.h" +#include "qgraph.h" +#include "virtio-mmio.h" +#include "generic-pcihost.h" +#include "hw/pci/pci_regs.h" + +#define RISCV_PAGE_SIZE 4096 + +/* VIRT_DRAM */ +#define RISCV_VIRT_RAM_ADDR 0x80000000 +#define RISCV_VIRT_RAM_SIZE 0x20000000 + +/* + * VIRT_VIRTIO. BASE_ADDR points to the last + * virtio_mmio device. 
+ */ +#define VIRTIO_MMIO_BASE_ADDR 0x10008000 +#define VIRTIO_MMIO_SIZE 0x00001000 + +/* VIRT_PCIE_PIO */ +#define RISCV_GPEX_PIO_BASE 0x3000000 +#define RISCV_BUS_PIO_LIMIT 0x10000 + +/* VIRT_PCIE_MMIO */ +#define RISCV_BUS_MMIO_ALLOC_PTR 0x40000000 +#define RISCV_BUS_MMIO_LIMIT 0x80000000 + +/* VIRT_PCIE_ECAM */ +#define RISCV_ECAM_ALLOC_PTR 0x30000000 + +typedef struct QVirtMachine QVirtMachine; + +struct QVirtMachine { + QOSGraphObject obj; + QGuestAllocator alloc; + QVirtioMMIODevice virtio_mmio; + QGenericPCIHost bridge; +}; + +static void virt_destructor(QOSGraphObject *obj) +{ + QVirtMachine *machine = (QVirtMachine *) obj; + alloc_destroy(&machine->alloc); +} + +static void *virt_get_driver(void *object, const char *interface) +{ + QVirtMachine *machine = object; + if (!g_strcmp0(interface, "memory")) { + return &machine->alloc; + } + + fprintf(stderr, "%s not present in riscv/virtio\n", interface); + g_assert_not_reached(); +} + +static QOSGraphObject *virt_get_device(void *obj, const char *device) +{ + QVirtMachine *machine = obj; + if (!g_strcmp0(device, "generic-pcihost")) { + return &machine->bridge.obj; + } else if (!g_strcmp0(device, "virtio-mmio")) { + return &machine->virtio_mmio.obj; + } + + fprintf(stderr, "%s not present in riscv/virt\n", device); + g_assert_not_reached(); +} + +static void riscv_config_qpci_bus(QGenericPCIBus *qpci) +{ + qpci->gpex_pio_base = RISCV_GPEX_PIO_BASE; + qpci->bus.pio_limit = RISCV_BUS_PIO_LIMIT; + + qpci->bus.mmio_alloc_ptr = RISCV_BUS_MMIO_ALLOC_PTR; + qpci->bus.mmio_limit = RISCV_BUS_MMIO_LIMIT; + + qpci->ecam_alloc_ptr = RISCV_ECAM_ALLOC_PTR; +} + +static void *qos_create_machine_riscv_virt(QTestState *qts) +{ + QVirtMachine *machine = g_new0(QVirtMachine, 1); + + alloc_init(&machine->alloc, 0, + RISCV_VIRT_RAM_ADDR, + RISCV_VIRT_RAM_ADDR + RISCV_VIRT_RAM_SIZE, + RISCV_PAGE_SIZE); + qvirtio_mmio_init_device(&machine->virtio_mmio, qts, VIRTIO_MMIO_BASE_ADDR, + VIRTIO_MMIO_SIZE); + + qos_create_generic_pcihost(&machine->bridge, qts, &machine->alloc); + riscv_config_qpci_bus(&machine->bridge.pci); + + machine->obj.get_device = virt_get_device; + machine->obj.get_driver = virt_get_driver; + machine->obj.destructor = virt_destructor; + return machine; +} + +static void virt_machine_register_nodes(void) +{ + qos_node_create_machine_args("riscv32/virt", qos_create_machine_riscv_virt, + "aclint=on,aia=aplic-imsic"); + qos_node_contains("riscv32/virt", "virtio-mmio", NULL); + qos_node_contains("riscv32/virt", "generic-pcihost", NULL); + + qos_node_create_machine_args("riscv64/virt", qos_create_machine_riscv_virt, + "aclint=on,aia=aplic-imsic"); + qos_node_contains("riscv64/virt", "virtio-mmio", NULL); + qos_node_contains("riscv64/virt", "generic-pcihost", NULL); +} + +libqos_init(virt_machine_register_nodes); diff --git a/tests/qtest/libqos/virtio.c b/tests/qtest/libqos/virtio.c index 410513225f4..a21b6eee9cd 100644 --- a/tests/qtest/libqos/virtio.c +++ b/tests/qtest/libqos/virtio.c @@ -265,7 +265,7 @@ void qvring_init(QTestState *qts, const QGuestAllocator *alloc, QVirtQueue *vq, /* vq->used->idx */ qvirtio_writew(vq->vdev, qts, vq->used + 2, 0); /* vq->used->avail_event */ - qvirtio_writew(vq->vdev, qts, vq->used + 2 + + qvirtio_writew(vq->vdev, qts, vq->used + 4 + sizeof(struct vring_used_elem) * vq->size, 0); } @@ -280,14 +280,27 @@ QVRingIndirectDesc *qvring_indirect_desc_setup(QTestState *qs, QVirtioDevice *d, indirect->elem = elem; indirect->desc = guest_alloc(alloc, sizeof(struct vring_desc) * elem); - for (i = 0; i < elem - 1; ++i) { + 
for (i = 0; i < elem; ++i) { /* indirect->desc[i].addr */ qvirtio_writeq(d, qs, indirect->desc + (16 * i), 0); - /* indirect->desc[i].flags */ - qvirtio_writew(d, qs, indirect->desc + (16 * i) + 12, - VRING_DESC_F_NEXT); - /* indirect->desc[i].next */ - qvirtio_writew(d, qs, indirect->desc + (16 * i) + 14, i + 1); + + /* + * If it's not the last element of the ring, set + * the chain (VRING_DESC_F_NEXT) flag and + * desc->next. Clear the last element - there's + * no guarantee that guest_alloc() will do it. + */ + if (i != elem - 1) { + /* indirect->desc[i].flags */ + qvirtio_writew(d, qs, indirect->desc + (16 * i) + 12, + VRING_DESC_F_NEXT); + + /* indirect->desc[i].next */ + qvirtio_writew(d, qs, indirect->desc + (16 * i) + 14, i + 1); + } else { + qvirtio_writew(d, qs, indirect->desc + (16 * i) + 12, 0); + qvirtio_writew(d, qs, indirect->desc + (16 * i) + 14, 0); + } } return indirect; @@ -381,7 +394,7 @@ void qvirtqueue_kick(QTestState *qts, QVirtioDevice *d, QVirtQueue *vq, qvirtio_writew(d, qts, vq->avail + 2, idx + 1); /* Must read after idx is updated */ - flags = qvirtio_readw(d, qts, vq->avail); + flags = qvirtio_readw(d, qts, vq->used); avail_event = qvirtio_readw(d, qts, vq->used + 4 + sizeof(struct vring_used_elem) * vq->size); diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c index f33a2108610..d8f80d335e7 100644 --- a/tests/qtest/libqtest.c +++ b/tests/qtest/libqtest.c @@ -1814,7 +1814,11 @@ QTestState *qtest_inproc_init(QTestState **s, bool log, const char* arch, * way, qtest_get_arch works for inproc qtest. */ gchar *bin_path = g_strconcat("/qemu-system-", arch, NULL); - g_setenv("QTEST_QEMU_BINARY", bin_path, 0); + if (!g_setenv("QTEST_QEMU_BINARY", bin_path, 0)) { + fprintf(stderr, + "Could not set environment variable QTEST_QEMU_BINARY\n"); + exit(1); + } g_free(bin_path); return qts; diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build index f096cf3ecd0..36c5c13a7bb 100644 --- a/tests/qtest/meson.build +++ b/tests/qtest/meson.build @@ -1,15 +1,18 @@ slow_qtests = { - 'ahci-test' : 60, 'aspeed_smc-test': 360, - 'bios-tables-test' : 120, - 'boot-serial-test' : 60, - 'migration-test' : 150, - 'npcm7xx_pwm-test': 150, - 'prom-env-test' : 60, - 'pxe-test' : 60, - 'qos-test' : 60, - 'qom-test' : 300, - 'test-hmp' : 120, + 'bios-tables-test' : 610, + 'cdrom-test' : 610, + 'device-introspect-test' : 720, + 'migration-test' : 480, + 'npcm7xx_pwm-test': 300, + 'npcm7xx_watchdog_timer-test': 120, + 'qom-test' : 900, + 'test-hmp' : 240, + 'pxe-test': 610, + 'prom-env-test': 360, + 'boot-serial-test': 360, + 'qos-test': 120, + 'vmgenid-test': 610, } qtests_generic = [ @@ -39,8 +42,8 @@ qtests_cxl = \ # for the availability of the default NICs in the tests qtests_filter = \ (get_option('default_devices') and slirp.found() ? ['test-netfilter'] : []) + \ - (get_option('default_devices') and targetos != 'windows' ? ['test-filter-mirror'] : []) + \ - (get_option('default_devices') and targetos != 'windows' ? ['test-filter-redirector'] : []) + (get_option('default_devices') and host_os != 'windows' ? ['test-filter-mirror'] : []) + \ + (get_option('default_devices') and host_os != 'windows' ? ['test-filter-redirector'] : []) qtests_i386 = \ (slirp.found() ? ['pxe-test'] : []) + \ @@ -49,7 +52,7 @@ qtests_i386 = \ (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ (config_all_devices.has_key('CONFIG_SGA') ? ['boot-serial-test'] : []) + \ (config_all_devices.has_key('CONFIG_ISA_IPMI_KCS') ? 
['ipmi-kcs-test'] : []) + \ - (targetos == 'linux' and \ + (host_os == 'linux' and \ config_all_devices.has_key('CONFIG_ISA_IPMI_BT') and config_all_devices.has_key('CONFIG_IPMI_EXTERN') ? ['ipmi-bt-test'] : []) + \ (config_all_devices.has_key('CONFIG_WDT_IB700') ? ['wdt_ib700-test'] : []) + \ @@ -75,7 +78,7 @@ qtests_i386 = \ (config_all_devices.has_key('CONFIG_SB16') ? ['fuzz-sb16-test'] : []) + \ (config_all_devices.has_key('CONFIG_SDHCI_PCI') ? ['fuzz-sdcard-test'] : []) + \ (config_all_devices.has_key('CONFIG_ESP_PCI') ? ['am53c974-test'] : []) + \ - (targetos != 'windows' and \ + (host_os != 'windows' and \ config_all_devices.has_key('CONFIG_ACPI_ERST') ? ['erst-test'] : []) + \ (config_all_devices.has_key('CONFIG_PCIE_PORT') and \ config_all_devices.has_key('CONFIG_VIRTIO_NET') and \ @@ -156,14 +159,15 @@ qtests_ppc = \ qtests_filter + \ (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ (config_all_devices.has_key('CONFIG_M48T59') ? ['m48t59-test'] : []) + \ - (config_all.has_key('CONFIG_TCG') ? ['prom-env-test'] : []) + \ - (config_all.has_key('CONFIG_TCG') ? ['boot-serial-test'] : []) + \ + (config_all_accel.has_key('CONFIG_TCG') ? ['prom-env-test'] : []) + \ + (config_all_accel.has_key('CONFIG_TCG') ? ['boot-serial-test'] : []) + \ ['boot-order-test'] qtests_ppc64 = \ qtests_ppc + \ (config_all_devices.has_key('CONFIG_PSERIES') ? ['device-plug-test'] : []) + \ (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-xscom-test'] : []) + \ + (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-host-i2c-test'] : []) + \ (config_all_devices.has_key('CONFIG_PSERIES') ? ['rtas-test'] : []) + \ (slirp.found() ? ['pxe-test'] : []) + \ (config_all_devices.has_key('CONFIG_USB_UHCI') ? ['usb-hcd-uhci-test'] : []) + \ @@ -189,12 +193,20 @@ qtests_npcm7xx = \ 'npcm7xx_sdhci-test', 'npcm7xx_smbus-test', 'npcm7xx_timer-test', - 'npcm7xx_watchdog_timer-test'] + \ + 'npcm7xx_watchdog_timer-test', + 'npcm_gmac-test'] + \ (slirp.found() ? ['npcm7xx_emc-test'] : []) qtests_aspeed = \ ['aspeed_hace-test', 'aspeed_smc-test', 'aspeed_gpio-test'] + +qtests_stm32l4x5 = \ + ['stm32l4x5_exti-test', + 'stm32l4x5_syscfg-test', + 'stm32l4x5_rcc-test', + 'stm32l4x5_gpio-test'] + qtests_arm = \ (config_all_devices.has_key('CONFIG_MPS2') ? ['sse-timer-test'] : []) + \ (config_all_devices.has_key('CONFIG_CMSDK_APB_DUALTIMER') ? ['cmsdk-apb-dualtimer-test'] : []) + \ @@ -208,18 +220,20 @@ qtests_arm = \ (config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? ['tpm-tis-i2c-test'] : []) + \ (config_all_devices.has_key('CONFIG_VEXPRESS') ? ['test-arm-mptimer'] : []) + \ (config_all_devices.has_key('CONFIG_MICROBIT') ? ['microbit-test'] : []) + \ + (config_all_devices.has_key('CONFIG_STM32L4X5_SOC') ? qtests_stm32l4x5 : []) + \ + (config_all_devices.has_key('CONFIG_FSI_APB2OPB_ASPEED') ? ['aspeed_fsi-test'] : []) + \ ['arm-cpu-features', 'boot-serial-test'] # TODO: once aarch64 TCG is fixed on ARM 32 bit host, make bios-tables-test unconditional qtests_aarch64 = \ (cpu != 'arm' and unpack_edk2_blobs ? ['bios-tables-test'] : []) + \ - (config_all.has_key('CONFIG_TCG') and config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? \ + (config_all_accel.has_key('CONFIG_TCG') and config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? \ ['tpm-tis-device-test', 'tpm-tis-device-swtpm-test'] : []) + \ (config_all_devices.has_key('CONFIG_XLNX_ZYNQMP_ARM') ? ['xlnx-can-test', 'fuzz-xlnx-dp-test'] : []) + \ (config_all_devices.has_key('CONFIG_XLNX_VERSAL') ? 
['xlnx-canfd-test', 'xlnx-versal-trng-test'] : []) + \ - (config_all_devices.has_key('CONFIG_RASPI') ? ['bcm2835-dma-test'] : []) + \ - (config_all.has_key('CONFIG_TCG') and \ + (config_all_devices.has_key('CONFIG_RASPI') ? ['bcm2835-dma-test', 'bcm2835-i2c-test'] : []) + \ + (config_all_accel.has_key('CONFIG_TCG') and \ config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? ['tpm-tis-i2c-test'] : []) + \ ['arm-cpu-features', 'numa-test', @@ -278,7 +292,7 @@ if config_all_devices.has_key('CONFIG_VIRTIO_SERIAL') qos_test_ss.add(files('virtio-serial-test.c')) endif -if targetos != 'windows' +if host_os != 'windows' qos_test_ss.add(files('e1000e-test.c')) endif if have_virtfs @@ -311,7 +325,7 @@ qtests = { 'ivshmem-test': [rt, '../../contrib/ivshmem-server/ivshmem-server.c'], 'migration-test': migration_files, 'pxe-test': files('boot-sector.c'), - 'qos-test': [chardev, io, qos_test_ss.apply(config_targetos, strict: false).sources()], + 'qos-test': [chardev, io, qos_test_ss.apply({}).sources()], 'tpm-crb-swtpm-test': [io, tpmemu_files], 'tpm-crb-test': [io, tpmemu_files], 'tpm-tis-swtpm-test': [io, tpmemu_files, 'tpm-tis-util.c'], @@ -384,8 +398,8 @@ foreach dir : target_dirs env: qtest_env, args: ['--tap', '-k'], protocol: 'tap', - timeout: slow_qtests.get(test, 30), - priority: slow_qtests.get(test, 30), + timeout: slow_qtests.get(test, 60), + priority: slow_qtests.get(test, 60), suite: ['qtest', 'qtest-' + target_base]) endforeach endforeach diff --git a/tests/qtest/migration-helpers.c b/tests/qtest/migration-helpers.c index 24fb7b3525c..e451dbdbed1 100644 --- a/tests/qtest/migration-helpers.c +++ b/tests/qtest/migration-helpers.c @@ -24,26 +24,19 @@ */ #define MIGRATION_STATUS_WAIT_TIMEOUT 120 -bool migrate_watch_for_stop(QTestState *who, const char *name, - QDict *event, void *opaque) +bool migrate_watch_for_events(QTestState *who, const char *name, + QDict *event, void *opaque) { - bool *seen = opaque; + QTestMigrationState *state = opaque; if (g_str_equal(name, "STOP")) { - *seen = true; + state->stop_seen = true; return true; - } - - return false; -} - -bool migrate_watch_for_resume(QTestState *who, const char *name, - QDict *event, void *opaque) -{ - bool *seen = opaque; - - if (g_str_equal(name, "RESUME")) { - *seen = true; + } else if (g_str_equal(name, "SUSPEND")) { + state->suspend_seen = true; + return true; + } else if (g_str_equal(name, "RESUME")) { + state->resume_seen = true; return true; } @@ -118,6 +111,12 @@ void migrate_incoming_qmp(QTestState *to, const char *uri, const char *fmt, ...) 
rsp = qtest_qmp(to, "{ 'execute': 'migrate-incoming', 'arguments': %p}", args); + + if (!qdict_haskey(rsp, "return")) { + g_autoptr(GString) s = qobject_to_json_pretty(QOBJECT(rsp), true); + g_test_message("%s", s->str); + } + g_assert(qdict_haskey(rsp, "return")); qobject_unref(rsp); @@ -292,3 +291,35 @@ char *resolve_machine_version(const char *alias, const char *var1, return find_common_machine_version(machine_name, var1, var2); } + +typedef struct { + char *name; + void (*func)(void); +} MigrationTest; + +static void migration_test_destroy(gpointer data) +{ + MigrationTest *test = (MigrationTest *)data; + + g_free(test->name); + g_free(test); +} + +static void migration_test_wrapper(const void *data) +{ + MigrationTest *test = (MigrationTest *)data; + + g_test_message("Running /%s%s", qtest_get_arch(), test->name); + test->func(); +} + +void migration_test_add(const char *path, void (*fn)(void)) +{ + MigrationTest *test = g_new0(MigrationTest, 1); + + test->func = fn; + test->name = g_strdup(path); + + qtest_add_data_func_full(path, test, migration_test_wrapper, + migration_test_destroy); +} diff --git a/tests/qtest/migration-helpers.h b/tests/qtest/migration-helpers.h index e31dc85cc75..3bf7ded1b97 100644 --- a/tests/qtest/migration-helpers.h +++ b/tests/qtest/migration-helpers.h @@ -15,9 +15,14 @@ #include "libqtest.h" -bool migrate_watch_for_stop(QTestState *who, const char *name, - QDict *event, void *opaque); -bool migrate_watch_for_resume(QTestState *who, const char *name, +typedef struct QTestMigrationState { + bool stop_seen; + bool resume_seen; + bool suspend_seen; + bool suspend_me; +} QTestMigrationState; + +bool migrate_watch_for_events(QTestState *who, const char *name, QDict *event, void *opaque); G_GNUC_PRINTF(3, 4) @@ -47,4 +52,5 @@ char *find_common_machine_version(const char *mtype, const char *var1, const char *var2); char *resolve_machine_version(const char *alias, const char *var1, const char *var2); +void migration_test_add(const char *path, void (*fn)(void)); #endif /* MIGRATION_HELPERS_H */ diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c index 0fbaa6a90fd..1d2cee87ea3 100644 --- a/tests/qtest/migration-test.c +++ b/tests/qtest/migration-test.c @@ -43,8 +43,8 @@ unsigned start_address; unsigned end_address; static bool uffd_feature_thread_id; -static bool got_src_stop; -static bool got_dst_resume; +static QTestMigrationState src_state; +static QTestMigrationState dst_state; /* * An initial 3 MB offset is used as that corresponds @@ -104,8 +104,8 @@ static bool ufd_version_check(void) } uffd_feature_thread_id = api_struct.features & UFFD_FEATURE_THREAD_ID; - ioctl_mask = (__u64)1 << _UFFDIO_REGISTER | - (__u64)1 << _UFFDIO_UNREGISTER; + ioctl_mask = 1ULL << _UFFDIO_REGISTER | + 1ULL << _UFFDIO_UNREGISTER; if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) { g_test_message("Skipping test: Missing userfault feature"); return false; @@ -133,7 +133,7 @@ static char *bootpath; #include "tests/migration/aarch64/a-b-kernel.h" #include "tests/migration/s390x/a-b-bios.h" -static void bootfile_create(char *dir) +static void bootfile_create(char *dir, bool suspend_me) { const char *arch = qtest_get_arch(); unsigned char *content; @@ -143,6 +143,7 @@ static void bootfile_create(char *dir) if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { /* the assembled x86 boot sector should be exactly one sector large */ g_assert(sizeof(x86_bootsect) == 512); + x86_bootsect[SYM_suspend_me - SYM_start] = suspend_me; content = x86_bootsect; len = 
sizeof(x86_bootsect); } else if (g_str_equal(arch, "s390x")) { @@ -177,7 +178,7 @@ static void bootfile_delete(void) /* * Wait for some output in the serial output file, * we get an 'A' followed by an endless string of 'B's - * but on the destination we won't have the A. + * but on the destination we won't have the A (unless we enabled suspend/resume) */ static void wait_for_serial(const char *side) { @@ -230,6 +231,27 @@ static void wait_for_serial(const char *side) } while (true); } +static void wait_for_stop(QTestState *who, QTestMigrationState *state) +{ + if (!state->stop_seen) { + qtest_qmp_eventwait(who, "STOP"); + } +} + +static void wait_for_resume(QTestState *who, QTestMigrationState *state) +{ + if (!state->resume_seen) { + qtest_qmp_eventwait(who, "RESUME"); + } +} + +static void wait_for_suspend(QTestState *who, QTestMigrationState *state) +{ + if (state->suspend_me && !state->suspend_seen) { + qtest_qmp_eventwait(who, "SUSPEND"); + } +} + /* * It's tricky to use qemu's migration event capability with qtest, * events suddenly appearing confuse the qmp()/hmp() responses. @@ -277,21 +299,19 @@ static void read_blocktime(QTestState *who) qobject_unref(rsp_return); } +/* + * Wait for two changes in the migration pass count, but bail if we stop. + */ static void wait_for_migration_pass(QTestState *who) { - uint64_t initial_pass = get_migration_pass(who); - uint64_t pass; + uint64_t pass, prev_pass = 0, changes = 0; - /* Wait for the 1st sync */ - while (!got_src_stop && !initial_pass) { - usleep(1000); - initial_pass = get_migration_pass(who); - } - - do { + while (changes < 2 && !src_state.stop_seen && !src_state.suspend_seen) { usleep(1000); pass = get_migration_pass(who); - } while (pass == initial_pass && !got_src_stop); + changes += (pass != prev_pass); + prev_pass = pass; + } } static void check_guests_ram(QTestState *who) @@ -571,6 +591,12 @@ static void migrate_wait_for_dirty_mem(QTestState *from, usleep(1000 * 10); } while (qtest_readq(to, marker_address) != MAGIC_MARKER); + + /* If suspended, src only iterates once, and watch_byte may never change */ + if (src_state.suspend_me) { + return; + } + /* * Now ensure that already transferred bytes are * dirty again from the guest workload. Note the @@ -617,10 +643,7 @@ static void migrate_postcopy_start(QTestState *from, QTestState *to) { qtest_qmp_assert_success(from, "{ 'execute': 'migrate-start-postcopy' }"); - if (!got_src_stop) { - qtest_qmp_eventwait(from, "STOP"); - } - + wait_for_stop(from, &src_state); qtest_qmp_eventwait(to, "RESUME"); } @@ -637,6 +660,8 @@ typedef struct { bool use_dirty_ring; const char *opts_source; const char *opts_target; + /* suspend the src before migrating to dest. 
*/ + bool suspend_me; } MigrateStart; /* @@ -756,8 +781,11 @@ static int test_migrate_start(QTestState **from, QTestState **to, } } - got_src_stop = false; - got_dst_resume = false; + dst_state = (QTestMigrationState) { }; + src_state = (QTestMigrationState) { }; + bootfile_create(tmpfs, args->suspend_me); + src_state.suspend_me = args->suspend_me; + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { memory_size = "150M"; @@ -791,7 +819,7 @@ static int test_migrate_start(QTestState **from, QTestState **to, } else if (strcmp(arch, "aarch64") == 0) { memory_size = "150M"; machine_alias = "virt"; - machine_opts = "gic-version=max"; + machine_opts = "gic-version=3"; arch_opts = g_strdup_printf("-cpu max -kernel %s", bootpath); start_address = ARM_TEST_MEM_START; end_address = ARM_TEST_MEM_END; @@ -848,8 +876,8 @@ static int test_migrate_start(QTestState **from, QTestState **to, if (!args->only_target) { *from = qtest_init_with_env(QEMU_ENV_SRC, cmd_source); qtest_qmp_set_event_callback(*from, - migrate_watch_for_stop, - &got_src_stop); + migrate_watch_for_events, + &src_state); } cmd_target = g_strdup_printf("-accel kvm%s -accel tcg " @@ -869,8 +897,8 @@ static int test_migrate_start(QTestState **from, QTestState **to, ignore_stderr); *to = qtest_init_with_env(QEMU_ENV_DST, cmd_target); qtest_qmp_set_event_callback(*to, - migrate_watch_for_resume, - &got_dst_resume); + migrate_watch_for_events, + &dst_state); /* * Remove shmem file immediately to avoid memory leak in test failed case. @@ -1319,6 +1347,7 @@ static int migrate_postcopy_prepare(QTestState **from_ptr, /* Wait for the first serial output from the source */ wait_for_serial("src_serial"); + wait_for_suspend(from, &src_state); g_autofree char *uri = migrate_get_socket_address(to, "socket-address"); migrate_qmp(from, uri, "{}"); @@ -1336,6 +1365,11 @@ static void migrate_postcopy_complete(QTestState *from, QTestState *to, { wait_for_migration_complete(from); + if (args->start.suspend_me) { + /* wakeup succeeds only if guest is suspended */ + qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}"); + } + /* Make sure we get at least one "B" on destination */ wait_for_serial("dest_serial"); @@ -1369,6 +1403,15 @@ static void test_postcopy(void) test_postcopy_common(&args); } +static void test_postcopy_suspend(void) +{ + MigrateCommon args = { + .start.suspend_me = true, + }; + + test_postcopy_common(&args); +} + static void test_postcopy_compress(void) { MigrateCommon args = { @@ -1703,6 +1746,7 @@ static void test_precopy_common(MigrateCommon *args) /* Wait for the first serial output from the source */ if (args->result == MIG_TEST_SUCCEED) { wait_for_serial("src_serial"); + wait_for_suspend(from, &src_state); } if (args->live) { @@ -1717,9 +1761,7 @@ static void test_precopy_common(MigrateCommon *args) */ if (args->result == MIG_TEST_SUCCEED) { qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}"); - if (!got_src_stop) { - qtest_qmp_eventwait(from, "STOP"); - } + wait_for_stop(from, &src_state); migrate_ensure_converge(from); } } @@ -1765,9 +1807,8 @@ static void test_precopy_common(MigrateCommon *args) */ wait_for_migration_complete(from); - if (!got_src_stop) { - qtest_qmp_eventwait(from, "STOP"); - } + wait_for_stop(from, &src_state); + } else { wait_for_migration_complete(from); /* @@ -1780,8 +1821,11 @@ static void test_precopy_common(MigrateCommon *args) qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}"); } - if (!got_dst_resume) { - qtest_qmp_eventwait(to, "RESUME"); + wait_for_resume(to, &dst_state); + + 
if (args->start.suspend_me) { + /* wakeup succeeds only if guest is suspended */ + qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}"); } wait_for_serial("dest_serial"); @@ -1821,9 +1865,7 @@ static void test_file_common(MigrateCommon *args, bool stop_src) if (stop_src) { qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}"); - if (!got_src_stop) { - qtest_qmp_eventwait(from, "STOP"); - } + wait_for_stop(from, &src_state); } if (args->result == MIG_TEST_QMP_ERROR) { @@ -1844,10 +1886,7 @@ static void test_file_common(MigrateCommon *args, bool stop_src) if (stop_src) { qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}"); } - - if (!got_dst_resume) { - qtest_qmp_eventwait(to, "RESUME"); - } + wait_for_resume(to, &dst_state); wait_for_serial("dest_serial"); @@ -1875,6 +1914,34 @@ static void test_precopy_unix_plain(void) test_precopy_common(&args); } +static void test_precopy_unix_suspend_live(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .listen_uri = uri, + .connect_uri = uri, + /* + * despite being live, the test is fast because the src + * suspends immediately. + */ + .live = true, + .start.suspend_me = true, + }; + + test_precopy_common(&args); +} + +static void test_precopy_unix_suspend_notlive(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .listen_uri = uri, + .connect_uri = uri, + .start.suspend_me = true, + }; + + test_precopy_common(&args); +} static void test_precopy_unix_dirty_ring(void) { @@ -1966,9 +2033,7 @@ static void test_ignore_shared(void) migrate_wait_for_dirty_mem(from, to); - if (!got_src_stop) { - qtest_qmp_eventwait(from, "STOP"); - } + wait_for_stop(from, &src_state); qtest_qmp_eventwait(to, "RESUME"); @@ -2135,6 +2200,14 @@ static void *test_mode_reboot_start(QTestState *from, QTestState *to) return NULL; } +static void *migrate_mapped_ram_start(QTestState *from, QTestState *to) +{ + migrate_set_capability(from, "mapped-ram", true); + migrate_set_capability(to, "mapped-ram", true); + + return NULL; +} + static void test_mode_reboot(void) { g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, @@ -2149,6 +2222,72 @@ static void test_mode_reboot(void) test_file_common(&args, true); } +static void test_precopy_file_mapped_ram_live(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .start_hook = migrate_mapped_ram_start, + }; + + test_file_common(&args, false); +} + +static void test_precopy_file_mapped_ram(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .start_hook = migrate_mapped_ram_start, + }; + + test_file_common(&args, true); +} + +static void *migrate_multifd_mapped_ram_start(QTestState *from, QTestState *to) +{ + migrate_mapped_ram_start(from, to); + + migrate_set_parameter_int(from, "multifd-channels", 4); + migrate_set_parameter_int(to, "multifd-channels", 4); + + migrate_set_capability(from, "multifd", true); + migrate_set_capability(to, "multifd", true); + + return NULL; +} + +static void test_multifd_file_mapped_ram_live(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .start_hook = migrate_multifd_mapped_ram_start, + }; + + test_file_common(&args, false); +} + +static 
void test_multifd_file_mapped_ram(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .start_hook = migrate_multifd_mapped_ram_start, + }; + + test_file_common(&args, true); +} + + static void test_precopy_tcp_plain(void) { MigrateCommon args = { @@ -2358,7 +2497,7 @@ static void test_migrate_fd_finish_hook(QTestState *from, qobject_unref(rsp); } -static void test_migrate_fd_proto(void) +static void test_migrate_precopy_fd_socket(void) { MigrateCommon args = { .listen_uri = "defer", @@ -2368,6 +2507,45 @@ static void test_migrate_fd_proto(void) }; test_precopy_common(&args); } + +static void *migrate_precopy_fd_file_start(QTestState *from, QTestState *to) +{ + g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); + int src_flags = O_CREAT | O_RDWR; + int dst_flags = O_CREAT | O_RDWR; + int fds[2]; + + fds[0] = open(file, src_flags, 0660); + assert(fds[0] != -1); + + fds[1] = open(file, dst_flags, 0660); + assert(fds[1] != -1); + + + qtest_qmp_fds_assert_success(to, &fds[0], 1, + "{ 'execute': 'getfd'," + " 'arguments': { 'fdname': 'fd-mig' }}"); + + qtest_qmp_fds_assert_success(from, &fds[1], 1, + "{ 'execute': 'getfd'," + " 'arguments': { 'fdname': 'fd-mig' }}"); + + close(fds[0]); + close(fds[1]); + + return NULL; +} + +static void test_migrate_precopy_fd_file(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .connect_uri = "fd:fd-mig", + .start_hook = migrate_precopy_fd_file_start, + .finish_hook = test_migrate_fd_finish_hook + }; + test_file_common(&args, true); +} #endif /* _WIN32 */ static void do_test_validate_uuid(MigrateStart *args, bool should_fail) @@ -2503,7 +2681,7 @@ static void test_migrate_auto_converge(void) break; } usleep(20); - g_assert_false(got_src_stop); + g_assert_false(src_state.stop_seen); } while (true); /* The first percentage of throttling should be at least init_pct */ g_assert_cmpint(percentage, >=, init_pct); @@ -2556,10 +2734,35 @@ test_migrate_precopy_tcp_multifd_start(QTestState *from, return test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); } +static void * +test_migrate_precopy_tcp_multifd_start_zero_page_legacy(QTestState *from, + QTestState *to) +{ + test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); + migrate_set_parameter_str(from, "zero-page-detection", "legacy"); + return NULL; +} + +static void * +test_migration_precopy_tcp_multifd_start_no_zero_page(QTestState *from, + QTestState *to) +{ + test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); + migrate_set_parameter_str(from, "zero-page-detection", "none"); + return NULL; +} + static void * test_migrate_precopy_tcp_multifd_zlib_start(QTestState *from, QTestState *to) { + /* + * Overloading this test to also check that set_parameter does not error. + * This is also done in the tests for the other compression methods. 
+ */ + migrate_set_parameter_int(from, "multifd-zlib-level", 2); + migrate_set_parameter_int(to, "multifd-zlib-level", 2); + return test_migrate_precopy_tcp_multifd_start_common(from, to, "zlib"); } @@ -2568,6 +2771,9 @@ static void * test_migrate_precopy_tcp_multifd_zstd_start(QTestState *from, QTestState *to) { + migrate_set_parameter_int(from, "multifd-zstd-level", 2); + migrate_set_parameter_int(to, "multifd-zstd-level", 2); + return test_migrate_precopy_tcp_multifd_start_common(from, to, "zstd"); } #endif /* CONFIG_ZSTD */ @@ -2587,6 +2793,36 @@ static void test_multifd_tcp_none(void) test_precopy_common(&args); } +static void test_multifd_tcp_zero_page_legacy(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = test_migrate_precopy_tcp_multifd_start_zero_page_legacy, + /* + * Multifd is more complicated than most of the features, it + * directly takes guest page buffers when sending, make sure + * everything will work alright even if guest page is changing. + */ + .live = true, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_no_zero_page(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = test_migration_precopy_tcp_multifd_start_no_zero_page, + /* + * Multifd is more complicated than most of the features, it + * directly takes guest page buffers when sending, make sure + * everything will work alright even if guest page is changing. + */ + .live = true, + }; + test_precopy_common(&args); +} + static void test_multifd_tcp_zlib(void) { MigrateCommon args = { @@ -2842,9 +3078,7 @@ static void test_multifd_tcp_cancel(void) migrate_ensure_converge(from); - if (!got_src_stop) { - qtest_qmp_eventwait(from, "STOP"); - } + wait_for_stop(from, &src_state); qtest_qmp_eventwait(to2, "RESUME"); wait_for_serial("dest_serial"); @@ -2985,7 +3219,9 @@ static int64_t get_limit_rate(QTestState *who) static QTestState *dirtylimit_start_vm(void) { QTestState *vm = NULL; - g_autofree gchar * + g_autofree gchar *cmd = NULL; + + bootfile_create(tmpfs, false); cmd = g_strdup_printf("-accel kvm,dirty-ring-size=4096 " "-name dirtylimit-test,debug-threads=on " "-m 150M -smp 1 " @@ -3177,7 +3413,7 @@ static void test_migrate_dirty_limit(void) throttle_us_per_full = read_migrate_property_int(from, "dirty-limit-throttle-time-per-round"); usleep(100); - g_assert_false(got_src_stop); + g_assert_false(src_state.stop_seen); } /* Now cancel migrate and wait for dirty limit throttle switch off */ @@ -3189,7 +3425,7 @@ static void test_migrate_dirty_limit(void) throttle_us_per_full = read_migrate_property_int(from, "dirty-limit-throttle-time-per-round"); usleep(100); - g_assert_false(got_src_stop); + g_assert_false(src_state.stop_seen); } while (throttle_us_per_full != 0 && --max_try_count); /* Assert dirty limit is not in service */ @@ -3218,7 +3454,7 @@ static void test_migrate_dirty_limit(void) throttle_us_per_full = read_migrate_property_int(from, "dirty-limit-throttle-time-per-round"); usleep(100); - g_assert_false(got_src_stop); + g_assert_false(src_state.stop_seen); } /* @@ -3277,7 +3513,7 @@ static bool kvm_dirty_ring_supported(void) int main(int argc, char **argv) { bool has_kvm, has_tcg; - bool has_uffd; + bool has_uffd, is_x86; const char *arch; g_autoptr(GError) err = NULL; const char *qemu_src = getenv(QEMU_ENV_SRC); @@ -3307,6 +3543,7 @@ int main(int argc, char **argv) has_uffd = ufd_version_check(); arch = qtest_get_arch(); + is_x86 = !strcmp(arch, "i386") || !strcmp(arch, "x86_64"); /* * On ppc64, the test only works with kvm-hv, but not 
with kvm-pr and TCG @@ -3334,67 +3571,89 @@ int main(int argc, char **argv) g_get_tmp_dir(), err->message); } g_assert(tmpfs); - bootfile_create(tmpfs); module_call_init(MODULE_INIT_QOM); + if (is_x86) { + migration_test_add("/migration/precopy/unix/suspend/live", + test_precopy_unix_suspend_live); + migration_test_add("/migration/precopy/unix/suspend/notlive", + test_precopy_unix_suspend_notlive); + } + if (has_uffd) { - qtest_add_func("/migration/postcopy/plain", test_postcopy); - qtest_add_func("/migration/postcopy/recovery/plain", - test_postcopy_recovery); - qtest_add_func("/migration/postcopy/preempt/plain", test_postcopy_preempt); - qtest_add_func("/migration/postcopy/preempt/recovery/plain", - test_postcopy_preempt_recovery); + migration_test_add("/migration/postcopy/plain", test_postcopy); + migration_test_add("/migration/postcopy/recovery/plain", + test_postcopy_recovery); + migration_test_add("/migration/postcopy/preempt/plain", + test_postcopy_preempt); + migration_test_add("/migration/postcopy/preempt/recovery/plain", + test_postcopy_preempt_recovery); if (getenv("QEMU_TEST_FLAKY_TESTS")) { - qtest_add_func("/migration/postcopy/compress/plain", - test_postcopy_compress); - qtest_add_func("/migration/postcopy/recovery/compress/plain", - test_postcopy_recovery_compress); + migration_test_add("/migration/postcopy/compress/plain", + test_postcopy_compress); + migration_test_add("/migration/postcopy/recovery/compress/plain", + test_postcopy_recovery_compress); } #ifndef _WIN32 - qtest_add_func("/migration/postcopy/recovery/double-failures", - test_postcopy_recovery_double_fail); + migration_test_add("/migration/postcopy/recovery/double-failures", + test_postcopy_recovery_double_fail); #endif /* _WIN32 */ - + if (is_x86) { + migration_test_add("/migration/postcopy/suspend", + test_postcopy_suspend); + } } - qtest_add_func("/migration/bad_dest", test_baddest); + migration_test_add("/migration/bad_dest", test_baddest); #ifndef _WIN32 if (!g_str_equal(arch, "s390x")) { - qtest_add_func("/migration/analyze-script", test_analyze_script); + migration_test_add("/migration/analyze-script", test_analyze_script); } #endif - qtest_add_func("/migration/precopy/unix/plain", test_precopy_unix_plain); - qtest_add_func("/migration/precopy/unix/xbzrle", test_precopy_unix_xbzrle); + migration_test_add("/migration/precopy/unix/plain", + test_precopy_unix_plain); + migration_test_add("/migration/precopy/unix/xbzrle", + test_precopy_unix_xbzrle); /* * Compression fails from time to time. * Put test here but don't enable it until everything is fixed. */ if (getenv("QEMU_TEST_FLAKY_TESTS")) { - qtest_add_func("/migration/precopy/unix/compress/wait", - test_precopy_unix_compress); - qtest_add_func("/migration/precopy/unix/compress/nowait", - test_precopy_unix_compress_nowait); + migration_test_add("/migration/precopy/unix/compress/wait", + test_precopy_unix_compress); + migration_test_add("/migration/precopy/unix/compress/nowait", + test_precopy_unix_compress_nowait); } - qtest_add_func("/migration/precopy/file", - test_precopy_file); - qtest_add_func("/migration/precopy/file/offset", - test_precopy_file_offset); - qtest_add_func("/migration/precopy/file/offset/bad", - test_precopy_file_offset_bad); + migration_test_add("/migration/precopy/file", + test_precopy_file); + migration_test_add("/migration/precopy/file/offset", + test_precopy_file_offset); + migration_test_add("/migration/precopy/file/offset/bad", + test_precopy_file_offset_bad); /* * Our CI system has problems with shared memory. 
* Don't run this test until we find a workaround. */ if (getenv("QEMU_TEST_FLAKY_TESTS")) { - qtest_add_func("/migration/mode/reboot", test_mode_reboot); + migration_test_add("/migration/mode/reboot", test_mode_reboot); } + migration_test_add("/migration/precopy/file/mapped-ram", + test_precopy_file_mapped_ram); + migration_test_add("/migration/precopy/file/mapped-ram/live", + test_precopy_file_mapped_ram_live); + + migration_test_add("/migration/multifd/file/mapped-ram", + test_multifd_file_mapped_ram); + migration_test_add("/migration/multifd/file/mapped-ram/live", + test_multifd_file_mapped_ram_live); + #ifdef CONFIG_GNUTLS - qtest_add_func("/migration/precopy/unix/tls/psk", - test_precopy_unix_tls_psk); + migration_test_add("/migration/precopy/unix/tls/psk", + test_precopy_unix_tls_psk); if (has_uffd) { /* @@ -3402,110 +3661,115 @@ int main(int argc, char **argv) * channels are tested under precopy. Here what we want to test is the * general postcopy path that has TLS channel enabled. */ - qtest_add_func("/migration/postcopy/tls/psk", test_postcopy_tls_psk); - qtest_add_func("/migration/postcopy/recovery/tls/psk", - test_postcopy_recovery_tls_psk); - qtest_add_func("/migration/postcopy/preempt/tls/psk", - test_postcopy_preempt_tls_psk); - qtest_add_func("/migration/postcopy/preempt/recovery/tls/psk", - test_postcopy_preempt_all); + migration_test_add("/migration/postcopy/tls/psk", + test_postcopy_tls_psk); + migration_test_add("/migration/postcopy/recovery/tls/psk", + test_postcopy_recovery_tls_psk); + migration_test_add("/migration/postcopy/preempt/tls/psk", + test_postcopy_preempt_tls_psk); + migration_test_add("/migration/postcopy/preempt/recovery/tls/psk", + test_postcopy_preempt_all); } #ifdef CONFIG_TASN1 - qtest_add_func("/migration/precopy/unix/tls/x509/default-host", - test_precopy_unix_tls_x509_default_host); - qtest_add_func("/migration/precopy/unix/tls/x509/override-host", - test_precopy_unix_tls_x509_override_host); + migration_test_add("/migration/precopy/unix/tls/x509/default-host", + test_precopy_unix_tls_x509_default_host); + migration_test_add("/migration/precopy/unix/tls/x509/override-host", + test_precopy_unix_tls_x509_override_host); #endif /* CONFIG_TASN1 */ #endif /* CONFIG_GNUTLS */ - qtest_add_func("/migration/precopy/tcp/plain", test_precopy_tcp_plain); + migration_test_add("/migration/precopy/tcp/plain", test_precopy_tcp_plain); - qtest_add_func("/migration/precopy/tcp/plain/switchover-ack", - test_precopy_tcp_switchover_ack); + migration_test_add("/migration/precopy/tcp/plain/switchover-ack", + test_precopy_tcp_switchover_ack); #ifdef CONFIG_GNUTLS - qtest_add_func("/migration/precopy/tcp/tls/psk/match", - test_precopy_tcp_tls_psk_match); - qtest_add_func("/migration/precopy/tcp/tls/psk/mismatch", - test_precopy_tcp_tls_psk_mismatch); + migration_test_add("/migration/precopy/tcp/tls/psk/match", + test_precopy_tcp_tls_psk_match); + migration_test_add("/migration/precopy/tcp/tls/psk/mismatch", + test_precopy_tcp_tls_psk_mismatch); #ifdef CONFIG_TASN1 - qtest_add_func("/migration/precopy/tcp/tls/x509/default-host", - test_precopy_tcp_tls_x509_default_host); - qtest_add_func("/migration/precopy/tcp/tls/x509/override-host", - test_precopy_tcp_tls_x509_override_host); - qtest_add_func("/migration/precopy/tcp/tls/x509/mismatch-host", - test_precopy_tcp_tls_x509_mismatch_host); - qtest_add_func("/migration/precopy/tcp/tls/x509/friendly-client", - test_precopy_tcp_tls_x509_friendly_client); - qtest_add_func("/migration/precopy/tcp/tls/x509/hostile-client", - 
test_precopy_tcp_tls_x509_hostile_client); - qtest_add_func("/migration/precopy/tcp/tls/x509/allow-anon-client", - test_precopy_tcp_tls_x509_allow_anon_client); - qtest_add_func("/migration/precopy/tcp/tls/x509/reject-anon-client", - test_precopy_tcp_tls_x509_reject_anon_client); + migration_test_add("/migration/precopy/tcp/tls/x509/default-host", + test_precopy_tcp_tls_x509_default_host); + migration_test_add("/migration/precopy/tcp/tls/x509/override-host", + test_precopy_tcp_tls_x509_override_host); + migration_test_add("/migration/precopy/tcp/tls/x509/mismatch-host", + test_precopy_tcp_tls_x509_mismatch_host); + migration_test_add("/migration/precopy/tcp/tls/x509/friendly-client", + test_precopy_tcp_tls_x509_friendly_client); + migration_test_add("/migration/precopy/tcp/tls/x509/hostile-client", + test_precopy_tcp_tls_x509_hostile_client); + migration_test_add("/migration/precopy/tcp/tls/x509/allow-anon-client", + test_precopy_tcp_tls_x509_allow_anon_client); + migration_test_add("/migration/precopy/tcp/tls/x509/reject-anon-client", + test_precopy_tcp_tls_x509_reject_anon_client); #endif /* CONFIG_TASN1 */ #endif /* CONFIG_GNUTLS */ - /* qtest_add_func("/migration/ignore_shared", test_ignore_shared); */ + /* migration_test_add("/migration/ignore_shared", test_ignore_shared); */ #ifndef _WIN32 - qtest_add_func("/migration/fd_proto", test_migrate_fd_proto); + migration_test_add("/migration/precopy/fd/tcp", + test_migrate_precopy_fd_socket); + migration_test_add("/migration/precopy/fd/file", + test_migrate_precopy_fd_file); #endif - qtest_add_func("/migration/validate_uuid", test_validate_uuid); - qtest_add_func("/migration/validate_uuid_error", test_validate_uuid_error); - qtest_add_func("/migration/validate_uuid_src_not_set", - test_validate_uuid_src_not_set); - qtest_add_func("/migration/validate_uuid_dst_not_set", - test_validate_uuid_dst_not_set); + migration_test_add("/migration/validate_uuid", test_validate_uuid); + migration_test_add("/migration/validate_uuid_error", + test_validate_uuid_error); + migration_test_add("/migration/validate_uuid_src_not_set", + test_validate_uuid_src_not_set); + migration_test_add("/migration/validate_uuid_dst_not_set", + test_validate_uuid_dst_not_set); /* * See explanation why this test is slow on function definition */ if (g_test_slow()) { - qtest_add_func("/migration/auto_converge", test_migrate_auto_converge); + migration_test_add("/migration/auto_converge", + test_migrate_auto_converge); if (g_str_equal(arch, "x86_64") && has_kvm && kvm_dirty_ring_supported()) { - qtest_add_func("/migration/dirty_limit", test_migrate_dirty_limit); + migration_test_add("/migration/dirty_limit", + test_migrate_dirty_limit); } } - qtest_add_func("/migration/multifd/tcp/plain/none", - test_multifd_tcp_none); - /* - * This test is flaky and sometimes fails in CI and otherwise: - * don't run unless user opts in via environment variable. 
- */ - if (getenv("QEMU_TEST_FLAKY_TESTS")) { - qtest_add_func("/migration/multifd/tcp/plain/cancel", + migration_test_add("/migration/multifd/tcp/plain/none", + test_multifd_tcp_none); + migration_test_add("/migration/multifd/tcp/plain/zero-page/legacy", + test_multifd_tcp_zero_page_legacy); + migration_test_add("/migration/multifd/tcp/plain/zero-page/none", + test_multifd_tcp_no_zero_page); + migration_test_add("/migration/multifd/tcp/plain/cancel", test_multifd_tcp_cancel); - } - qtest_add_func("/migration/multifd/tcp/plain/zlib", - test_multifd_tcp_zlib); + migration_test_add("/migration/multifd/tcp/plain/zlib", + test_multifd_tcp_zlib); #ifdef CONFIG_ZSTD - qtest_add_func("/migration/multifd/tcp/plain/zstd", - test_multifd_tcp_zstd); + migration_test_add("/migration/multifd/tcp/plain/zstd", + test_multifd_tcp_zstd); #endif #ifdef CONFIG_GNUTLS - qtest_add_func("/migration/multifd/tcp/tls/psk/match", - test_multifd_tcp_tls_psk_match); - qtest_add_func("/migration/multifd/tcp/tls/psk/mismatch", - test_multifd_tcp_tls_psk_mismatch); + migration_test_add("/migration/multifd/tcp/tls/psk/match", + test_multifd_tcp_tls_psk_match); + migration_test_add("/migration/multifd/tcp/tls/psk/mismatch", + test_multifd_tcp_tls_psk_mismatch); #ifdef CONFIG_TASN1 - qtest_add_func("/migration/multifd/tcp/tls/x509/default-host", - test_multifd_tcp_tls_x509_default_host); - qtest_add_func("/migration/multifd/tcp/tls/x509/override-host", - test_multifd_tcp_tls_x509_override_host); - qtest_add_func("/migration/multifd/tcp/tls/x509/mismatch-host", - test_multifd_tcp_tls_x509_mismatch_host); - qtest_add_func("/migration/multifd/tcp/tls/x509/allow-anon-client", - test_multifd_tcp_tls_x509_allow_anon_client); - qtest_add_func("/migration/multifd/tcp/tls/x509/reject-anon-client", - test_multifd_tcp_tls_x509_reject_anon_client); + migration_test_add("/migration/multifd/tcp/tls/x509/default-host", + test_multifd_tcp_tls_x509_default_host); + migration_test_add("/migration/multifd/tcp/tls/x509/override-host", + test_multifd_tcp_tls_x509_override_host); + migration_test_add("/migration/multifd/tcp/tls/x509/mismatch-host", + test_multifd_tcp_tls_x509_mismatch_host); + migration_test_add("/migration/multifd/tcp/tls/x509/allow-anon-client", + test_multifd_tcp_tls_x509_allow_anon_client); + migration_test_add("/migration/multifd/tcp/tls/x509/reject-anon-client", + test_multifd_tcp_tls_x509_reject_anon_client); #endif /* CONFIG_TASN1 */ #endif /* CONFIG_GNUTLS */ if (g_str_equal(arch, "x86_64") && has_kvm && kvm_dirty_ring_supported()) { - qtest_add_func("/migration/dirty_ring", - test_precopy_unix_dirty_ring); - qtest_add_func("/migration/vcpu_dirty_limit", - test_vcpu_dirty_limit); + migration_test_add("/migration/dirty_ring", + test_precopy_unix_dirty_ring); + migration_test_add("/migration/vcpu_dirty_limit", + test_vcpu_dirty_limit); } ret = g_test_run(); diff --git a/tests/qtest/netdev-socket.c b/tests/qtest/netdev-socket.c index bb99d08b5e7..fc7d11961ea 100644 --- a/tests/qtest/netdev-socket.c +++ b/tests/qtest/netdev-socket.c @@ -16,33 +16,7 @@ #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-sockets.h" -#define CONNECTION_TIMEOUT 120 - -static double connection_timeout(void) -{ - double load; - int ret = getloadavg(&load, 1); - - /* - * If we can't get load data, or load is low because we just started - * running, assume load of 1 (we are alone in this system). - */ - if (ret < 1 || load < 1.0) { - load = 1.0; - } - /* - * No one wants to wait more than 10 minutes for this test. Higher load? 
- * Too bad. - */ - if (load > 10.0) { - fprintf(stderr, "Warning: load %f higher than 10 - test might timeout\n", - load); - load = 10.0; - } - - /* if load is high increase timeout as we might not get a chance to run */ - return load * CONNECTION_TIMEOUT; -} +#define CONNECTION_TIMEOUT 60 #define EXPECT_STATE(q, e, t) \ do { \ @@ -57,7 +31,7 @@ do { \ if (g_str_equal(resp, e)) { \ break; \ } \ - } while (g_test_timer_elapsed() < connection_timeout()); \ + } while (g_test_timer_elapsed() < CONNECTION_TIMEOUT); \ g_assert_cmpstr(resp, ==, e); \ g_free(resp); \ } while (0) @@ -153,7 +127,7 @@ static void test_stream_inet_ipv4(void) "addr.ipv4=on,addr.ipv6=off," "addr.host=127.0.0.1,addr.port=%d", port); - EXPECT_STATE(qts0, "st0: index=0,type=stream,\r\n", 0); + EXPECT_STATE(qts0, "st0: index=0,type=stream,listening\r\n", 0); qts1 = qtest_initf("-nodefaults -M none " "-netdev stream,server=false,id=st0,addr.type=inet," @@ -226,7 +200,7 @@ static void test_stream_unix_reconnect(void) "-netdev stream,id=st0,server=true,addr.type=unix," "addr.path=%s", path); - EXPECT_STATE(qts0, "st0: index=0,type=stream,\r\n", 0); + EXPECT_STATE(qts0, "st0: index=0,type=stream,listening\r\n", 0); qts1 = qtest_initf("-nodefaults -M none " "-netdev stream,server=false,id=st0,addr.type=unix," @@ -276,7 +250,7 @@ static void test_stream_inet_ipv6(void) "addr.ipv4=off,addr.ipv6=on," "addr.host=::1,addr.port=%d", port); - EXPECT_STATE(qts0, "st0: index=0,type=stream,\r\n", 0); + EXPECT_STATE(qts0, "st0: index=0,type=stream,listening\r\n", 0); qts1 = qtest_initf("-nodefaults -M none " "-netdev stream,server=false,id=st0,addr.type=inet," @@ -308,7 +282,7 @@ static void test_stream_unix(void) "addr.type=unix,addr.path=%s,", path); - EXPECT_STATE(qts0, "st0: index=0,type=stream,\r\n", 0); + EXPECT_STATE(qts0, "st0: index=0,type=stream,listening\r\n", 0); qts1 = qtest_initf("-nodefaults -M none " "-netdev stream,id=st0,server=false," @@ -340,7 +314,7 @@ static void test_stream_unix_abstract(void) "addr.abstract=on", path); - EXPECT_STATE(qts0, "st0: index=0,type=stream,\r\n", 0); + EXPECT_STATE(qts0, "st0: index=0,type=stream,listening\r\n", 0); qts1 = qtest_initf("-nodefaults -M none " "-netdev stream,id=st0,server=false," @@ -552,7 +526,7 @@ int main(int argc, char **argv) #ifndef _WIN32 qtest_add_func("/netdev/dgram/unix", test_dgram_unix); #endif - qtest_add_func("/netdev/stream/unix", test_stream_unix); + qtest_add_func("/netdev/stream/unix/oneshot", test_stream_unix); qtest_add_func("/netdev/stream/unix/reconnect", test_stream_unix_reconnect); #ifdef CONFIG_LINUX diff --git a/tests/qtest/npcm7xx_emc-test.c b/tests/qtest/npcm7xx_emc-test.c index b046f1d76af..2e1a1a6d702 100644 --- a/tests/qtest/npcm7xx_emc-test.c +++ b/tests/qtest/npcm7xx_emc-test.c @@ -225,21 +225,14 @@ static int *packet_test_init(int module_num, GString *cmd_line) g_assert_cmpint(ret, != , -1); /* - * KISS and use -nic. We specify two nics (both emc{0,1}) because there's - * currently no way to specify only emc1: The driver implicitly relies on - * emc[i] == nd_table[i]. + * KISS and use -nic. The driver accepts 'emc0' and 'emc1' as aliases + * in the 'model' field to specify the device to match. 
*/ - if (module_num == 0) { - g_string_append_printf(cmd_line, - " -nic socket,fd=%d,model=" TYPE_NPCM7XX_EMC " " - " -nic user,model=" TYPE_NPCM7XX_EMC " ", - test_sockets[1]); - } else { - g_string_append_printf(cmd_line, - " -nic user,model=" TYPE_NPCM7XX_EMC " " - " -nic socket,fd=%d,model=" TYPE_NPCM7XX_EMC " ", - test_sockets[1]); - } + g_string_append_printf(cmd_line, " -nic socket,fd=%d,model=emc%d " + "-nic user,model=npcm7xx-emc " + "-nic user,model=npcm-gmac " + "-nic user,model=npcm-gmac", + test_sockets[1], module_num); g_test_queue_destroy(packet_test_clear, test_sockets); return test_sockets; @@ -796,7 +789,7 @@ static void emc_test_ptle(QTestState *qts, const EMCModule *mod, int fd) static void test_tx(gconstpointer test_data) { const TestData *td = test_data; - GString *cmd_line = g_string_new("-machine quanta-gsj"); + g_autoptr(GString) cmd_line = g_string_new("-machine quanta-gsj"); int *test_sockets = packet_test_init(emc_module_index(td->module), cmd_line); QTestState *qts = qtest_init(cmd_line->str); @@ -821,7 +814,7 @@ static void test_tx(gconstpointer test_data) static void test_rx(gconstpointer test_data) { const TestData *td = test_data; - GString *cmd_line = g_string_new("-machine quanta-gsj"); + g_autoptr(GString) cmd_line = g_string_new("-machine quanta-gsj"); int *test_sockets = packet_test_init(emc_module_index(td->module), cmd_line); QTestState *qts = qtest_init(cmd_line->str); diff --git a/tests/qtest/npcm7xx_pwm-test.c b/tests/qtest/npcm7xx_pwm-test.c index ea4ca1d106e..b53a43c4171 100644 --- a/tests/qtest/npcm7xx_pwm-test.c +++ b/tests/qtest/npcm7xx_pwm-test.c @@ -606,6 +606,7 @@ static void test_toggle(gconstpointer test_data) uint32_t ppr, csr, pcr, cnr, cmr; int i, j, k, l; uint64_t expected_freq, expected_duty; + int cnr_step = g_test_quick() ? 2 : 1; mft_init(qts, td); @@ -618,7 +619,7 @@ static void test_toggle(gconstpointer test_data) csr = csr_list[j]; pwm_write_csr(qts, td, csr); - for (k = 0; k < ARRAY_SIZE(cnr_list); ++k) { + for (k = 0; k < ARRAY_SIZE(cnr_list); k += cnr_step) { cnr = cnr_list[k]; pwm_write_cnr(qts, td, cnr); @@ -678,6 +679,7 @@ static void pwm_add_test(const char *name, const TestData* td, int main(int argc, char **argv) { TestData test_data_list[ARRAY_SIZE(pwm_module_list) * ARRAY_SIZE(pwm_list)]; + int pwm_module_list_cnt = 1, pwm_list_cnt = 1; char *v_env = getenv("V"); @@ -687,8 +689,13 @@ int main(int argc, char **argv) g_test_init(&argc, &argv, NULL); - for (int i = 0; i < ARRAY_SIZE(pwm_module_list); ++i) { - for (int j = 0; j < ARRAY_SIZE(pwm_list); ++j) { + if (!g_test_quick()) { + pwm_module_list_cnt = ARRAY_SIZE(pwm_module_list); + pwm_list_cnt = ARRAY_SIZE(pwm_list); + } + + for (int i = 0; i < pwm_module_list_cnt; ++i) { + for (int j = 0; j < pwm_list_cnt; ++j) { TestData *td = &test_data_list[i * ARRAY_SIZE(pwm_list) + j]; td->module = &pwm_module_list[i]; diff --git a/tests/qtest/npcm7xx_watchdog_timer-test.c b/tests/qtest/npcm7xx_watchdog_timer-test.c index 4773a673b20..981b853c99d 100644 --- a/tests/qtest/npcm7xx_watchdog_timer-test.c +++ b/tests/qtest/npcm7xx_watchdog_timer-test.c @@ -172,9 +172,10 @@ static void test_reset_action(gconstpointer watchdog) static void test_prescaler(gconstpointer watchdog) { const Watchdog *wd = watchdog; + int inc = g_test_quick() ? 
3 : 1; - for (int wtclk = 0; wtclk < 4; ++wtclk) { - for (int wtis = 0; wtis < 4; ++wtis) { + for (int wtclk = 0; wtclk < 4; wtclk += inc) { + for (int wtis = 0; wtis < 4; wtis += inc) { QTestState *qts = qtest_init("-machine quanta-gsj"); qtest_irq_intercept_in(qts, "/machine/soc/a9mpcore/gic"); diff --git a/tests/qtest/npcm_gmac-test.c b/tests/qtest/npcm_gmac-test.c new file mode 100644 index 00000000000..c28b471ab20 --- /dev/null +++ b/tests/qtest/npcm_gmac-test.c @@ -0,0 +1,264 @@ +/* + * QTests for Nuvoton NPCM7xx/8xx GMAC Modules. + * + * Copyright 2024 Google LLC + * Authors: + * Hao Wu + * Nabih Estefan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" +#include "libqos/libqos.h" + +/* Name of the GMAC Device */ +#define TYPE_NPCM_GMAC "npcm-gmac" + +/* Address of the PCS Module */ +#define PCS_BASE_ADDRESS 0xf0780000 +#define NPCM_PCS_IND_AC_BA 0x1fe + +typedef struct GMACModule { + int irq; + uint64_t base_addr; +} GMACModule; + +typedef struct TestData { + const GMACModule *module; +} TestData; + +/* Values extracted from hw/arm/npcm7xx.c */ +static const GMACModule gmac_module_list[] = { + { + .irq = 14, + .base_addr = 0xf0802000 + }, + { + .irq = 15, + .base_addr = 0xf0804000 + }, +}; + +/* Returns the index of the GMAC module. */ +static int gmac_module_index(const GMACModule *mod) +{ + ptrdiff_t diff = mod - gmac_module_list; + + g_assert_true(diff >= 0 && diff < ARRAY_SIZE(gmac_module_list)); + + return diff; +} + +/* 32-bit register indices. 
Taken from npcm_gmac.c */ +typedef enum NPCMRegister { + /* DMA Registers */ + NPCM_DMA_BUS_MODE = 0x1000, + NPCM_DMA_XMT_POLL_DEMAND = 0x1004, + NPCM_DMA_RCV_POLL_DEMAND = 0x1008, + NPCM_DMA_RCV_BASE_ADDR = 0x100c, + NPCM_DMA_TX_BASE_ADDR = 0x1010, + NPCM_DMA_STATUS = 0x1014, + NPCM_DMA_CONTROL = 0x1018, + NPCM_DMA_INTR_ENA = 0x101c, + NPCM_DMA_MISSED_FRAME_CTR = 0x1020, + NPCM_DMA_HOST_TX_DESC = 0x1048, + NPCM_DMA_HOST_RX_DESC = 0x104c, + NPCM_DMA_CUR_TX_BUF_ADDR = 0x1050, + NPCM_DMA_CUR_RX_BUF_ADDR = 0x1054, + NPCM_DMA_HW_FEATURE = 0x1058, + + /* GMAC Registers */ + NPCM_GMAC_MAC_CONFIG = 0x0, + NPCM_GMAC_FRAME_FILTER = 0x4, + NPCM_GMAC_HASH_HIGH = 0x8, + NPCM_GMAC_HASH_LOW = 0xc, + NPCM_GMAC_MII_ADDR = 0x10, + NPCM_GMAC_MII_DATA = 0x14, + NPCM_GMAC_FLOW_CTRL = 0x18, + NPCM_GMAC_VLAN_FLAG = 0x1c, + NPCM_GMAC_VERSION = 0x20, + NPCM_GMAC_WAKEUP_FILTER = 0x28, + NPCM_GMAC_PMT = 0x2c, + NPCM_GMAC_LPI_CTRL = 0x30, + NPCM_GMAC_TIMER_CTRL = 0x34, + NPCM_GMAC_INT_STATUS = 0x38, + NPCM_GMAC_INT_MASK = 0x3c, + NPCM_GMAC_MAC0_ADDR_HI = 0x40, + NPCM_GMAC_MAC0_ADDR_LO = 0x44, + NPCM_GMAC_MAC1_ADDR_HI = 0x48, + NPCM_GMAC_MAC1_ADDR_LO = 0x4c, + NPCM_GMAC_MAC2_ADDR_HI = 0x50, + NPCM_GMAC_MAC2_ADDR_LO = 0x54, + NPCM_GMAC_MAC3_ADDR_HI = 0x58, + NPCM_GMAC_MAC3_ADDR_LO = 0x5c, + NPCM_GMAC_RGMII_STATUS = 0xd8, + NPCM_GMAC_WATCHDOG = 0xdc, + NPCM_GMAC_PTP_TCR = 0x700, + NPCM_GMAC_PTP_SSIR = 0x704, + NPCM_GMAC_PTP_STSR = 0x708, + NPCM_GMAC_PTP_STNSR = 0x70c, + NPCM_GMAC_PTP_STSUR = 0x710, + NPCM_GMAC_PTP_STNSUR = 0x714, + NPCM_GMAC_PTP_TAR = 0x718, + NPCM_GMAC_PTP_TTSR = 0x71c, + + /* PCS Registers */ + NPCM_PCS_SR_CTL_ID1 = 0x3c0008, + NPCM_PCS_SR_CTL_ID2 = 0x3c000a, + NPCM_PCS_SR_CTL_STS = 0x3c0010, + + NPCM_PCS_SR_MII_CTRL = 0x3e0000, + NPCM_PCS_SR_MII_STS = 0x3e0002, + NPCM_PCS_SR_MII_DEV_ID1 = 0x3e0004, + NPCM_PCS_SR_MII_DEV_ID2 = 0x3e0006, + NPCM_PCS_SR_MII_AN_ADV = 0x3e0008, + NPCM_PCS_SR_MII_LP_BABL = 0x3e000a, + NPCM_PCS_SR_MII_AN_EXPN = 0x3e000c, + NPCM_PCS_SR_MII_EXT_STS = 0x3e001e, + + NPCM_PCS_SR_TIM_SYNC_ABL = 0x3e0e10, + NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_LWR = 0x3e0e12, + NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_UPR = 0x3e0e14, + NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_LWR = 0x3e0e16, + NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_UPR = 0x3e0e18, + NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_LWR = 0x3e0e1a, + NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_UPR = 0x3e0e1c, + NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_LWR = 0x3e0e1e, + NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_UPR = 0x3e0e20, + + NPCM_PCS_VR_MII_MMD_DIG_CTRL1 = 0x3f0000, + NPCM_PCS_VR_MII_AN_CTRL = 0x3f0002, + NPCM_PCS_VR_MII_AN_INTR_STS = 0x3f0004, + NPCM_PCS_VR_MII_TC = 0x3f0006, + NPCM_PCS_VR_MII_DBG_CTRL = 0x3f000a, + NPCM_PCS_VR_MII_EEE_MCTRL0 = 0x3f000c, + NPCM_PCS_VR_MII_EEE_TXTIMER = 0x3f0010, + NPCM_PCS_VR_MII_EEE_RXTIMER = 0x3f0012, + NPCM_PCS_VR_MII_LINK_TIMER_CTRL = 0x3f0014, + NPCM_PCS_VR_MII_EEE_MCTRL1 = 0x3f0016, + NPCM_PCS_VR_MII_DIG_STS = 0x3f0020, + NPCM_PCS_VR_MII_ICG_ERRCNT1 = 0x3f0022, + NPCM_PCS_VR_MII_MISC_STS = 0x3f0030, + NPCM_PCS_VR_MII_RX_LSTS = 0x3f0040, + NPCM_PCS_VR_MII_MP_TX_BSTCTRL0 = 0x3f0070, + NPCM_PCS_VR_MII_MP_TX_LVLCTRL0 = 0x3f0074, + NPCM_PCS_VR_MII_MP_TX_GENCTRL0 = 0x3f007a, + NPCM_PCS_VR_MII_MP_TX_GENCTRL1 = 0x3f007c, + NPCM_PCS_VR_MII_MP_TX_STS = 0x3f0090, + NPCM_PCS_VR_MII_MP_RX_GENCTRL0 = 0x3f00b0, + NPCM_PCS_VR_MII_MP_RX_GENCTRL1 = 0x3f00b2, + NPCM_PCS_VR_MII_MP_RX_LOS_CTRL0 = 0x3f00ba, + NPCM_PCS_VR_MII_MP_MPLL_CTRL0 = 0x3f00f0, + NPCM_PCS_VR_MII_MP_MPLL_CTRL1 = 0x3f00f2, + NPCM_PCS_VR_MII_MP_MPLL_STS = 0x3f0110, + NPCM_PCS_VR_MII_MP_MISC_CTRL2 = 0x3f0126, + 
NPCM_PCS_VR_MII_MP_LVL_CTRL = 0x3f0130, + NPCM_PCS_VR_MII_MP_MISC_CTRL0 = 0x3f0132, + NPCM_PCS_VR_MII_MP_MISC_CTRL1 = 0x3f0134, + NPCM_PCS_VR_MII_DIG_CTRL2 = 0x3f01c2, + NPCM_PCS_VR_MII_DIG_ERRCNT_SEL = 0x3f01c4, +} NPCMRegister; + +static uint32_t gmac_read(QTestState *qts, const GMACModule *mod, + NPCMRegister regno) +{ + return qtest_readl(qts, mod->base_addr + regno); +} + +/* Check that GMAC registers are reset to default value */ +static void test_init(gconstpointer test_data) +{ + const TestData *td = test_data; + const GMACModule *mod = td->module; + QTestState *qts = qtest_init("-machine npcm750-evb"); + +#define CHECK_REG32(regno, value) \ + do { \ + g_assert_cmphex(gmac_read(qts, mod, (regno)), ==, (value)); \ + } while (0) + + CHECK_REG32(NPCM_DMA_BUS_MODE, 0x00020100); + CHECK_REG32(NPCM_DMA_XMT_POLL_DEMAND, 0); + CHECK_REG32(NPCM_DMA_RCV_POLL_DEMAND, 0); + CHECK_REG32(NPCM_DMA_RCV_BASE_ADDR, 0); + CHECK_REG32(NPCM_DMA_TX_BASE_ADDR, 0); + CHECK_REG32(NPCM_DMA_STATUS, 0); + CHECK_REG32(NPCM_DMA_CONTROL, 0); + CHECK_REG32(NPCM_DMA_INTR_ENA, 0); + CHECK_REG32(NPCM_DMA_MISSED_FRAME_CTR, 0); + CHECK_REG32(NPCM_DMA_HOST_TX_DESC, 0); + CHECK_REG32(NPCM_DMA_HOST_RX_DESC, 0); + CHECK_REG32(NPCM_DMA_CUR_TX_BUF_ADDR, 0); + CHECK_REG32(NPCM_DMA_CUR_RX_BUF_ADDR, 0); + CHECK_REG32(NPCM_DMA_HW_FEATURE, 0x100d4f37); + + CHECK_REG32(NPCM_GMAC_MAC_CONFIG, 0); + CHECK_REG32(NPCM_GMAC_FRAME_FILTER, 0); + CHECK_REG32(NPCM_GMAC_HASH_HIGH, 0); + CHECK_REG32(NPCM_GMAC_HASH_LOW, 0); + CHECK_REG32(NPCM_GMAC_MII_ADDR, 0); + CHECK_REG32(NPCM_GMAC_MII_DATA, 0); + CHECK_REG32(NPCM_GMAC_FLOW_CTRL, 0); + CHECK_REG32(NPCM_GMAC_VLAN_FLAG, 0); + CHECK_REG32(NPCM_GMAC_VERSION, 0x00001032); + CHECK_REG32(NPCM_GMAC_WAKEUP_FILTER, 0); + CHECK_REG32(NPCM_GMAC_PMT, 0); + CHECK_REG32(NPCM_GMAC_LPI_CTRL, 0); + CHECK_REG32(NPCM_GMAC_TIMER_CTRL, 0x03e80000); + CHECK_REG32(NPCM_GMAC_INT_STATUS, 0); + CHECK_REG32(NPCM_GMAC_INT_MASK, 0); + CHECK_REG32(NPCM_GMAC_MAC0_ADDR_HI, 0x8000ffff); + CHECK_REG32(NPCM_GMAC_MAC0_ADDR_LO, 0xffffffff); + CHECK_REG32(NPCM_GMAC_MAC1_ADDR_HI, 0x0000ffff); + CHECK_REG32(NPCM_GMAC_MAC1_ADDR_LO, 0xffffffff); + CHECK_REG32(NPCM_GMAC_MAC2_ADDR_HI, 0x0000ffff); + CHECK_REG32(NPCM_GMAC_MAC2_ADDR_LO, 0xffffffff); + CHECK_REG32(NPCM_GMAC_MAC3_ADDR_HI, 0x0000ffff); + CHECK_REG32(NPCM_GMAC_MAC3_ADDR_LO, 0xffffffff); + CHECK_REG32(NPCM_GMAC_RGMII_STATUS, 0); + CHECK_REG32(NPCM_GMAC_WATCHDOG, 0); + CHECK_REG32(NPCM_GMAC_PTP_TCR, 0x00002000); + CHECK_REG32(NPCM_GMAC_PTP_SSIR, 0); + CHECK_REG32(NPCM_GMAC_PTP_STSR, 0); + CHECK_REG32(NPCM_GMAC_PTP_STNSR, 0); + CHECK_REG32(NPCM_GMAC_PTP_STSUR, 0); + CHECK_REG32(NPCM_GMAC_PTP_STNSUR, 0); + CHECK_REG32(NPCM_GMAC_PTP_TAR, 0); + CHECK_REG32(NPCM_GMAC_PTP_TTSR, 0); + + qtest_quit(qts); +} + +static void gmac_add_test(const char *name, const TestData* td, + GTestDataFunc fn) +{ + g_autofree char *full_name = g_strdup_printf( + "npcm7xx_gmac/gmac[%d]/%s", gmac_module_index(td->module), name); + qtest_add_data_func(full_name, td, fn); +} + +int main(int argc, char **argv) +{ + TestData test_data_list[ARRAY_SIZE(gmac_module_list)]; + + g_test_init(&argc, &argv, NULL); + + for (int i = 0; i < ARRAY_SIZE(gmac_module_list); ++i) { + TestData *td = &test_data_list[i]; + + td->module = &gmac_module_list[i]; + + gmac_add_test("init", td, test_init); + } + + return g_test_run(); +} diff --git a/tests/qtest/pca9552-test.c b/tests/qtest/pca9552-test.c index d80ed93cd3a..74749576923 100644 --- a/tests/qtest/pca9552-test.c +++ b/tests/qtest/pca9552-test.c @@ -12,7 +12,7 @@ 
#include "libqtest.h" #include "libqos/qgraph.h" #include "libqos/i2c.h" -#include "hw/misc/pca9552_regs.h" +#include "hw/gpio/pca9552_regs.h" #define PCA9552_TEST_ID "pca9552-test" #define PCA9552_TEST_ADDR 0x60 @@ -60,7 +60,7 @@ static void send_and_receive(void *obj, void *data, QGuestAllocator *alloc) g_assert_cmphex(value, ==, 0x55); value = i2c_get8(i2cdev, PCA9552_INPUT0); - g_assert_cmphex(value, ==, 0x0); + g_assert_cmphex(value, ==, 0xFF); pca9552_init(i2cdev); @@ -68,13 +68,13 @@ static void send_and_receive(void *obj, void *data, QGuestAllocator *alloc) g_assert_cmphex(value, ==, 0x54); value = i2c_get8(i2cdev, PCA9552_INPUT0); - g_assert_cmphex(value, ==, 0x01); + g_assert_cmphex(value, ==, 0xFE); value = i2c_get8(i2cdev, PCA9552_LS3); g_assert_cmphex(value, ==, 0x54); value = i2c_get8(i2cdev, PCA9552_INPUT1); - g_assert_cmphex(value, ==, 0x10); + g_assert_cmphex(value, ==, 0xEF); } static void pca9552_register_nodes(void) diff --git a/tests/qtest/pnv-host-i2c-test.c b/tests/qtest/pnv-host-i2c-test.c new file mode 100644 index 00000000000..7f64d597ac1 --- /dev/null +++ b/tests/qtest/pnv-host-i2c-test.c @@ -0,0 +1,491 @@ +/* + * QTest testcase for PowerNV 10 Host I2C Communications + * + * Copyright (c) 2023, IBM Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "libqtest.h" +#include "hw/gpio/pca9554_regs.h" +#include "hw/gpio/pca9552_regs.h" +#include "pnv-xscom.h" + +#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit)) +#define PPC_BIT32(bit) (0x80000000 >> (bit)) +#define PPC_BIT8(bit) (0x80 >> (bit)) +#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) +#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be)) | \ + PPC_BIT32(bs)) + +#define MASK_TO_LSH(m) (__builtin_ffsll(m) - 1) +#define GETFIELD(m, v) (((v) & (m)) >> MASK_TO_LSH(m)) +#define SETFIELD(m, v, val) \ + (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m))) + +#define PNV10_XSCOM_I2CM_BASE 0xa0000 +#define PNV10_XSCOM_I2CM_SIZE 0x1000 + +#include "hw/i2c/pnv_i2c_regs.h" + +typedef struct { + QTestState *qts; + const PnvChip *chip; + int engine; +} PnvI2cCtlr; + +typedef struct { + PnvI2cCtlr *ctlr; + int port; + uint8_t addr; +} PnvI2cDev; + + +static uint64_t pnv_i2c_xscom_addr(PnvI2cCtlr *ctlr, uint32_t reg) +{ + return pnv_xscom_addr(ctlr->chip, PNV10_XSCOM_I2CM_BASE + + (PNV10_XSCOM_I2CM_SIZE * ctlr->engine) + reg); +} + +static uint64_t pnv_i2c_xscom_read(PnvI2cCtlr *ctlr, uint32_t reg) +{ + return qtest_readq(ctlr->qts, pnv_i2c_xscom_addr(ctlr, reg)); +} + +static void pnv_i2c_xscom_write(PnvI2cCtlr *ctlr, uint32_t reg, uint64_t val) +{ + qtest_writeq(ctlr->qts, pnv_i2c_xscom_addr(ctlr, reg), val); +} + +/* Write len bytes from buf to i2c device with given addr and port */ +static void pnv_i2c_send(PnvI2cDev *dev, const uint8_t *buf, uint16_t len) +{ + int byte_num; + uint64_t reg64; + + /* select requested port */ + reg64 = SETFIELD(I2C_MODE_BIT_RATE_DIV, 0ull, 0x2be); + reg64 = SETFIELD(I2C_MODE_PORT_NUM, reg64, dev->port); + pnv_i2c_xscom_write(dev->ctlr, I2C_MODE_REG, reg64); + + /* check status for cmd complete and bus idle */ + reg64 = pnv_i2c_xscom_read(dev->ctlr, I2C_EXTD_STAT_REG); + g_assert_cmphex(reg64 & I2C_EXTD_STAT_I2C_BUSY, ==, 0); + reg64 = pnv_i2c_xscom_read(dev->ctlr, I2C_STAT_REG); + g_assert_cmphex(reg64 & (I2C_STAT_ANY_ERR | I2C_STAT_CMD_COMP), ==, + I2C_STAT_CMD_COMP); + + /* Send start, with stop, with 
+    /* Send start, with stop, with address and len bytes of data */
+    reg64 = I2C_CMD_WITH_START | I2C_CMD_WITH_ADDR | I2C_CMD_WITH_STOP;
+    reg64 = SETFIELD(I2C_CMD_DEV_ADDR, reg64, dev->addr);
+    reg64 = SETFIELD(I2C_CMD_LEN_BYTES, reg64, len);
+    pnv_i2c_xscom_write(dev->ctlr, I2C_CMD_REG, reg64);
+
+    /* check status for errors */
+    reg64 = pnv_i2c_xscom_read(dev->ctlr, I2C_STAT_REG);
+    g_assert_cmphex(reg64 & I2C_STAT_ANY_ERR, ==, 0);
+
+    /* write data bytes to fifo register */
+    for (byte_num = 0; byte_num < len; byte_num++) {
+        reg64 = SETFIELD(I2C_FIFO, 0ull, buf[byte_num]);
+        pnv_i2c_xscom_write(dev->ctlr, I2C_FIFO_REG, reg64);
+    }
+
+    /* check status for cmd complete and bus idle */
+    reg64 = pnv_i2c_xscom_read(dev->ctlr, I2C_EXTD_STAT_REG);
+    g_assert_cmphex(reg64 & I2C_EXTD_STAT_I2C_BUSY, ==, 0);
+    reg64 = pnv_i2c_xscom_read(dev->ctlr, I2C_STAT_REG);
+    g_assert_cmphex(reg64 & (I2C_STAT_ANY_ERR | I2C_STAT_CMD_COMP), ==,
+                    I2C_STAT_CMD_COMP);
+}
+
+/* Receive len bytes into buf from i2c device with given addr and port */
+static void pnv_i2c_recv(PnvI2cDev *dev, uint8_t *buf, uint16_t len)
+{
+    int byte_num;
+    uint64_t reg64;
+
+    /* select requested port */
+    reg64 = SETFIELD(I2C_MODE_BIT_RATE_DIV, 0ull, 0x2be);
+    reg64 = SETFIELD(I2C_MODE_PORT_NUM, reg64, dev->port);
+    pnv_i2c_xscom_write(dev->ctlr, I2C_MODE_REG, reg64);
+
+    /* check status for cmd complete and bus idle */
+    reg64 = pnv_i2c_xscom_read(dev->ctlr, I2C_EXTD_STAT_REG);
+    g_assert_cmphex(reg64 & I2C_EXTD_STAT_I2C_BUSY, ==, 0);
+    reg64 = pnv_i2c_xscom_read(dev->ctlr, I2C_STAT_REG);
+    g_assert_cmphex(reg64 & (I2C_STAT_ANY_ERR | I2C_STAT_CMD_COMP), ==,
+                    I2C_STAT_CMD_COMP);
+
+    /* Send start, with stop, with address and len bytes of data */
+    reg64 = I2C_CMD_WITH_START | I2C_CMD_WITH_ADDR +
+            I2C_CMD_WITH_STOP | I2C_CMD_READ_NOT_WRITE;
+    reg64 = SETFIELD(I2C_CMD_DEV_ADDR, reg64, dev->addr);
+    reg64 = SETFIELD(I2C_CMD_LEN_BYTES, reg64, len);
+    pnv_i2c_xscom_write(dev->ctlr, I2C_CMD_REG, reg64);
+
+    /* check status for errors */
+    reg64 = pnv_i2c_xscom_read(dev->ctlr, I2C_STAT_REG);
+    g_assert_cmphex(reg64 & I2C_STAT_ANY_ERR, ==, 0);
+
+    /* Read data bytes from fifo register */
+    for (byte_num = 0; byte_num < len; byte_num++) {
+        reg64 = pnv_i2c_xscom_read(dev->ctlr, I2C_FIFO_REG);
+        buf[byte_num] = GETFIELD(I2C_FIFO, reg64);
+    }
+
+    /* check status for cmd complete and bus idle */
+    reg64 = pnv_i2c_xscom_read(dev->ctlr, I2C_EXTD_STAT_REG);
+    g_assert_cmphex(reg64 & I2C_EXTD_STAT_I2C_BUSY, ==, 0);
+    reg64 = pnv_i2c_xscom_read(dev->ctlr, I2C_STAT_REG);
+    g_assert_cmphex(reg64 & (I2C_STAT_ANY_ERR | I2C_STAT_CMD_COMP), ==,
+                    I2C_STAT_CMD_COMP);
+}
+
+static void pnv_i2c_pca9554_default_cfg(PnvI2cDev *dev)
+{
+    uint8_t buf[2];
+
+    /* input register bits are not inverted */
+    buf[0] = PCA9554_POLARITY;
+    buf[1] = 0;
+    pnv_i2c_send(dev, buf, 2);
+
+    /* All pins are inputs */
+    buf[0] = PCA9554_CONFIG;
+    buf[1] = 0xff;
+    pnv_i2c_send(dev, buf, 2);
+
+    /* Output value for when pins are outputs */
+    buf[0] = PCA9554_OUTPUT;
+    buf[1] = 0xff;
+    pnv_i2c_send(dev, buf, 2);
+}
+
+static void pnv_i2c_pca9554_set_pin(PnvI2cDev *dev, int pin, bool high)
+{
+    uint8_t send_buf[2];
+    uint8_t recv_buf[2];
+    uint8_t mask = 0x1 << pin;
+    uint8_t new_value = ((high) ?
1 : 0) << pin; + + /* read current OUTPUT value */ + send_buf[0] = PCA9554_OUTPUT; + pnv_i2c_send(dev, send_buf, 1); + pnv_i2c_recv(dev, recv_buf, 1); + + /* write new OUTPUT value */ + send_buf[1] = (recv_buf[0] & ~mask) | new_value; + pnv_i2c_send(dev, send_buf, 2); + + /* Update config bit for output */ + send_buf[0] = PCA9554_CONFIG; + pnv_i2c_send(dev, send_buf, 1); + pnv_i2c_recv(dev, recv_buf, 1); + send_buf[1] = recv_buf[0] & ~mask; + pnv_i2c_send(dev, send_buf, 2); +} + +static uint8_t pnv_i2c_pca9554_read_pins(PnvI2cDev *dev) +{ + uint8_t send_buf[1]; + uint8_t recv_buf[1]; + uint8_t inputs; + send_buf[0] = PCA9554_INPUT; + pnv_i2c_send(dev, send_buf, 1); + pnv_i2c_recv(dev, recv_buf, 1); + inputs = recv_buf[0]; + return inputs; +} + +static void pnv_i2c_pca9554_flip_polarity(PnvI2cDev *dev) +{ + uint8_t recv_buf[1]; + uint8_t send_buf[2]; + + send_buf[0] = PCA9554_POLARITY; + pnv_i2c_send(dev, send_buf, 1); + pnv_i2c_recv(dev, recv_buf, 1); + send_buf[1] = recv_buf[0] ^ 0xff; + pnv_i2c_send(dev, send_buf, 2); +} + +static void pnv_i2c_pca9554_default_inputs(PnvI2cDev *dev) +{ + uint8_t pin_values = pnv_i2c_pca9554_read_pins(dev); + g_assert_cmphex(pin_values, ==, 0xff); +} + +/* Check that setting pin values and polarity changes inputs as expected */ +static void pnv_i2c_pca554_set_pins(PnvI2cDev *dev) +{ + uint8_t pin_values; + pnv_i2c_pca9554_set_pin(dev, 0, 0); + pin_values = pnv_i2c_pca9554_read_pins(dev); + g_assert_cmphex(pin_values, ==, 0xfe); + pnv_i2c_pca9554_flip_polarity(dev); + pin_values = pnv_i2c_pca9554_read_pins(dev); + g_assert_cmphex(pin_values, ==, 0x01); + pnv_i2c_pca9554_set_pin(dev, 2, 0); + pin_values = pnv_i2c_pca9554_read_pins(dev); + g_assert_cmphex(pin_values, ==, 0x05); + pnv_i2c_pca9554_flip_polarity(dev); + pin_values = pnv_i2c_pca9554_read_pins(dev); + g_assert_cmphex(pin_values, ==, 0xfa); + pnv_i2c_pca9554_default_cfg(dev); + pin_values = pnv_i2c_pca9554_read_pins(dev); + g_assert_cmphex(pin_values, ==, 0xff); +} + +static void pnv_i2c_pca9552_default_cfg(PnvI2cDev *dev) +{ + uint8_t buf[2]; + /* configure pwm/psc regs */ + buf[0] = PCA9552_PSC0; + buf[1] = 0xff; + pnv_i2c_send(dev, buf, 2); + buf[0] = PCA9552_PWM0; + buf[1] = 0x80; + pnv_i2c_send(dev, buf, 2); + buf[0] = PCA9552_PSC1; + buf[1] = 0xff; + pnv_i2c_send(dev, buf, 2); + buf[0] = PCA9552_PWM1; + buf[1] = 0x80; + pnv_i2c_send(dev, buf, 2); + + /* configure all pins as inputs */ + buf[0] = PCA9552_LS0; + buf[1] = 0x55; + pnv_i2c_send(dev, buf, 2); + buf[0] = PCA9552_LS1; + buf[1] = 0x55; + pnv_i2c_send(dev, buf, 2); + buf[0] = PCA9552_LS2; + buf[1] = 0x55; + pnv_i2c_send(dev, buf, 2); + buf[0] = PCA9552_LS3; + buf[1] = 0x55; + pnv_i2c_send(dev, buf, 2); +} + +static void pnv_i2c_pca9552_set_pin(PnvI2cDev *dev, int pin, bool high) +{ + uint8_t send_buf[2]; + uint8_t recv_buf[2]; + uint8_t reg = PCA9552_LS0 + (pin / 4); + uint8_t shift = (pin % 4) * 2; + uint8_t mask = ~(0x3 << shift); + uint8_t new_value = ((high) ? 
1 : 0) << shift; + + /* read current LSx value */ + send_buf[0] = reg; + pnv_i2c_send(dev, send_buf, 1); + pnv_i2c_recv(dev, recv_buf, 1); + + /* write new value to LSx */ + send_buf[1] = (recv_buf[0] & mask) | new_value; + pnv_i2c_send(dev, send_buf, 2); +} + +static uint16_t pnv_i2c_pca9552_read_pins(PnvI2cDev *dev) +{ + uint8_t send_buf[2]; + uint8_t recv_buf[2]; + uint16_t inputs; + send_buf[0] = PCA9552_INPUT0; + pnv_i2c_send(dev, send_buf, 1); + pnv_i2c_recv(dev, recv_buf, 1); + inputs = recv_buf[0]; + send_buf[0] = PCA9552_INPUT1; + pnv_i2c_send(dev, send_buf, 1); + pnv_i2c_recv(dev, recv_buf, 1); + inputs |= recv_buf[0] << 8; + return inputs; +} + +static void pnv_i2c_pca9552_default_inputs(PnvI2cDev *dev) +{ + uint16_t pin_values = pnv_i2c_pca9552_read_pins(dev); + g_assert_cmphex(pin_values, ==, 0xffff); +} + +/* + * Set pins 0-4 one at a time and verify that pins 5-9 are + * set to the same value + */ +static void pnv_i2c_pca552_set_pins(PnvI2cDev *dev) +{ + uint16_t pin_values; + + /* set pin 0 low */ + pnv_i2c_pca9552_set_pin(dev, 0, 0); + pin_values = pnv_i2c_pca9552_read_pins(dev); + + /* pins 0 and 5 should be low */ + g_assert_cmphex(pin_values, ==, 0xffde); + + /* set pin 1 low */ + pnv_i2c_pca9552_set_pin(dev, 1, 0); + pin_values = pnv_i2c_pca9552_read_pins(dev); + + /* pins 0, 1, 5 and 6 should be low */ + g_assert_cmphex(pin_values, ==, 0xff9c); + + /* set pin 2 low */ + pnv_i2c_pca9552_set_pin(dev, 2, 0); + pin_values = pnv_i2c_pca9552_read_pins(dev); + + /* pins 0, 1, 2, 5, 6 and 7 should be low */ + g_assert_cmphex(pin_values, ==, 0xff18); + + /* set pin 3 low */ + pnv_i2c_pca9552_set_pin(dev, 3, 0); + pin_values = pnv_i2c_pca9552_read_pins(dev); + + /* pins 0, 1, 2, 3, 5, 6, 7 and 8 should be low */ + g_assert_cmphex(pin_values, ==, 0xfe10); + + /* set pin 4 low */ + pnv_i2c_pca9552_set_pin(dev, 4, 0); + pin_values = pnv_i2c_pca9552_read_pins(dev); + + /* pins 0, 1, 2, 3, 5, 6, 7, 8 and 9 should be low */ + g_assert_cmphex(pin_values, ==, 0xfc00); + + /* reset all pins to the high state */ + pnv_i2c_pca9552_default_cfg(dev); + pin_values = pnv_i2c_pca9552_read_pins(dev); + + /* verify all pins went back to the high state */ + g_assert_cmphex(pin_values, ==, 0xffff); +} + +static void reset_engine(PnvI2cCtlr *ctlr) +{ + pnv_i2c_xscom_write(ctlr, I2C_RESET_I2C_REG, 0); +} + +static void check_i2cm_por_regs(QTestState *qts, const PnvChip *chip) +{ + int engine; + for (engine = 0; engine < chip->num_i2c; engine++) { + PnvI2cCtlr ctlr; + ctlr.qts = qts; + ctlr.chip = chip; + ctlr.engine = engine; + + /* Check version in Extended Status Register */ + uint64_t value = pnv_i2c_xscom_read(&ctlr, I2C_EXTD_STAT_REG); + g_assert_cmphex(value & I2C_EXTD_STAT_I2C_VERSION, ==, 0x1700000000); + + /* Check for command complete and bus idle in Status Register */ + value = pnv_i2c_xscom_read(&ctlr, I2C_STAT_REG); + g_assert_cmphex(value & (I2C_STAT_ANY_ERR | I2C_STAT_CMD_COMP), + ==, + I2C_STAT_CMD_COMP); + } +} + +static void reset_all(QTestState *qts, const PnvChip *chip) +{ + int engine; + for (engine = 0; engine < chip->num_i2c; engine++) { + PnvI2cCtlr ctlr; + ctlr.qts = qts; + ctlr.chip = chip; + ctlr.engine = engine; + reset_engine(&ctlr); + pnv_i2c_xscom_write(&ctlr, I2C_MODE_REG, 0x02be040000000000); + } +} + +static void test_host_i2c(const void *data) +{ + const PnvChip *chip = data; + QTestState *qts; + const char *machine = "powernv8"; + PnvI2cCtlr ctlr; + PnvI2cDev pca9552; + PnvI2cDev pca9554; + + if (chip->chip_type == PNV_CHIP_POWER9) { + machine = "powernv9"; + } 
else if (chip->chip_type == PNV_CHIP_POWER10) { + machine = "powernv10-rainier"; + } + + qts = qtest_initf("-M %s -smp %d,cores=1,threads=%d -nographic " + "-nodefaults -serial mon:stdio -S " + "-d guest_errors", + machine, SMT, SMT); + + /* Check the I2C master status registers after POR */ + check_i2cm_por_regs(qts, chip); + + /* Now do a forced "immediate" reset on all engines */ + reset_all(qts, chip); + + /* Check that the status values are still good */ + check_i2cm_por_regs(qts, chip); + + /* P9 doesn't have any i2c devices attached at this time */ + if (chip->chip_type != PNV_CHIP_POWER10) { + qtest_quit(qts); + return; + } + + /* Initialize for a P10 pca9552 hotplug device */ + ctlr.qts = qts; + ctlr.chip = chip; + ctlr.engine = 2; + pca9552.ctlr = &ctlr; + pca9552.port = 1; + pca9552.addr = 0x63; + + /* Set all pca9552 pins as inputs */ + pnv_i2c_pca9552_default_cfg(&pca9552); + + /* Check that all pins of the pca9552 are high */ + pnv_i2c_pca9552_default_inputs(&pca9552); + + /* perform individual pin tests */ + pnv_i2c_pca552_set_pins(&pca9552); + + /* Initialize for a P10 pca9554 CableCard Presence detection device */ + pca9554.ctlr = &ctlr; + pca9554.port = 1; + pca9554.addr = 0x25; + + /* Set all pca9554 pins as inputs */ + pnv_i2c_pca9554_default_cfg(&pca9554); + + /* Check that all pins of the pca9554 are high */ + pnv_i2c_pca9554_default_inputs(&pca9554); + + /* perform individual pin tests */ + pnv_i2c_pca554_set_pins(&pca9554); + + qtest_quit(qts); +} + +static void add_test(const char *name, void (*test)(const void *data)) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pnv_chips); i++) { + char *tname = g_strdup_printf("pnv-xscom/%s/%s", name, + pnv_chips[i].cpu_model); + qtest_add_data_func(tname, &pnv_chips[i], test); + g_free(tname); + } +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + + add_test("host-i2c", test_host_i2c); + return g_test_run(); +} diff --git a/tests/qtest/pnv-xscom-test.c b/tests/qtest/pnv-xscom-test.c index 8a5ac110377..c814c0f4f5b 100644 --- a/tests/qtest/pnv-xscom-test.c +++ b/tests/qtest/pnv-xscom-test.c @@ -10,66 +10,7 @@ #include "libqtest.h" -typedef enum PnvChipType { - PNV_CHIP_POWER8E, /* AKA Murano (default) */ - PNV_CHIP_POWER8, /* AKA Venice */ - PNV_CHIP_POWER8NVL, /* AKA Naples */ - PNV_CHIP_POWER9, /* AKA Nimbus */ - PNV_CHIP_POWER10, -} PnvChipType; - -typedef struct PnvChip { - PnvChipType chip_type; - const char *cpu_model; - uint64_t xscom_base; - uint64_t cfam_id; - uint32_t first_core; -} PnvChip; - -static const PnvChip pnv_chips[] = { - { - .chip_type = PNV_CHIP_POWER8, - .cpu_model = "POWER8", - .xscom_base = 0x0003fc0000000000ull, - .cfam_id = 0x220ea04980000000ull, - .first_core = 0x1, - }, { - .chip_type = PNV_CHIP_POWER8NVL, - .cpu_model = "POWER8NVL", - .xscom_base = 0x0003fc0000000000ull, - .cfam_id = 0x120d304980000000ull, - .first_core = 0x1, - }, - { - .chip_type = PNV_CHIP_POWER9, - .cpu_model = "POWER9", - .xscom_base = 0x000603fc00000000ull, - .cfam_id = 0x220d104900008000ull, - .first_core = 0x0, - }, - { - .chip_type = PNV_CHIP_POWER10, - .cpu_model = "POWER10", - .xscom_base = 0x000603fc00000000ull, - .cfam_id = 0x120da04900008000ull, - .first_core = 0x0, - }, -}; - -static uint64_t pnv_xscom_addr(const PnvChip *chip, uint32_t pcba) -{ - uint64_t addr = chip->xscom_base; - - if (chip->chip_type == PNV_CHIP_POWER10) { - addr |= ((uint64_t) pcba << 3); - } else if (chip->chip_type == PNV_CHIP_POWER9) { - addr |= ((uint64_t) pcba << 3); - } else { - addr |= (((uint64_t) pcba << 4) & 
~0xffull) | - (((uint64_t) pcba << 3) & 0x78); - } - return addr; -} +#include "pnv-xscom.h" static uint64_t pnv_xscom_read(QTestState *qts, const PnvChip *chip, uint32_t pcba) diff --git a/tests/qtest/pnv-xscom.h b/tests/qtest/pnv-xscom.h new file mode 100644 index 00000000000..6f62941744a --- /dev/null +++ b/tests/qtest/pnv-xscom.h @@ -0,0 +1,80 @@ +/* + * PowerNV XSCOM Bus + * + * Copyright (c) 2024, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef PNV_XSCOM_H +#define PNV_XSCOM_H + +#define SMT 4 /* some tests will break if less than 4 */ + +typedef enum PnvChipType { + PNV_CHIP_POWER8E, /* AKA Murano (default) */ + PNV_CHIP_POWER8, /* AKA Venice */ + PNV_CHIP_POWER8NVL, /* AKA Naples */ + PNV_CHIP_POWER9, /* AKA Nimbus */ + PNV_CHIP_POWER10, +} PnvChipType; + +typedef struct PnvChip { + PnvChipType chip_type; + const char *cpu_model; + uint64_t xscom_base; + uint64_t cfam_id; + uint32_t first_core; + uint32_t num_i2c; +} PnvChip; + +static const PnvChip pnv_chips[] = { + { + .chip_type = PNV_CHIP_POWER8, + .cpu_model = "POWER8", + .xscom_base = 0x0003fc0000000000ull, + .cfam_id = 0x220ea04980000000ull, + .first_core = 0x1, + .num_i2c = 0, + }, { + .chip_type = PNV_CHIP_POWER8NVL, + .cpu_model = "POWER8NVL", + .xscom_base = 0x0003fc0000000000ull, + .cfam_id = 0x120d304980000000ull, + .first_core = 0x1, + .num_i2c = 0, + }, + { + .chip_type = PNV_CHIP_POWER9, + .cpu_model = "POWER9", + .xscom_base = 0x000603fc00000000ull, + .cfam_id = 0x220d104900008000ull, + .first_core = 0x0, + .num_i2c = 4, + }, + { + .chip_type = PNV_CHIP_POWER10, + .cpu_model = "POWER10", + .xscom_base = 0x000603fc00000000ull, + .cfam_id = 0x120da04900008000ull, + .first_core = 0x0, + .num_i2c = 4, + }, +}; + +static inline uint64_t pnv_xscom_addr(const PnvChip *chip, uint32_t pcba) +{ + uint64_t addr = chip->xscom_base; + + if (chip->chip_type == PNV_CHIP_POWER10) { + addr |= ((uint64_t) pcba << 3); + } else if (chip->chip_type == PNV_CHIP_POWER9) { + addr |= ((uint64_t) pcba << 3); + } else { + addr |= (((uint64_t) pcba << 4) & ~0xffull) | + (((uint64_t) pcba << 3) & 0x78); + } + return addr; +} + +#endif /* PNV_XSCOM_H */ diff --git a/tests/qtest/qtest_aspeed.h b/tests/qtest/qtest_aspeed.h index 235dfaa186a..d35b0c7cba5 100644 --- a/tests/qtest/qtest_aspeed.h +++ b/tests/qtest/qtest_aspeed.h @@ -12,8 +12,6 @@ #ifndef QTEST_ASPEED_H #define QTEST_ASPEED_H -#include - #include "libqtest.h" #define AST2600_ASPEED_I2C_BASE_ADDR 0x1e78a000 diff --git a/tests/qtest/stm32l4x5_exti-test.c b/tests/qtest/stm32l4x5_exti-test.c new file mode 100644 index 00000000000..81830be8aea --- /dev/null +++ b/tests/qtest/stm32l4x5_exti-test.c @@ -0,0 +1,561 @@ +/* + * QTest testcase for STM32L4x5_EXTI + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "libqtest-single.h" + +#define EXTI_BASE_ADDR 0x40010400 +#define EXTI_IMR1 0x00 +#define EXTI_EMR1 0x04 +#define EXTI_RTSR1 0x08 +#define EXTI_FTSR1 0x0C +#define EXTI_SWIER1 0x10 +#define EXTI_PR1 0x14 +#define EXTI_IMR2 0x20 +#define EXTI_EMR2 0x24 +#define EXTI_RTSR2 0x28 +#define EXTI_FTSR2 0x2C +#define EXTI_SWIER2 0x30 +#define EXTI_PR2 0x34 + +#define NVIC_ISER 0xE000E100 +#define NVIC_ISPR 0xE000E200 +#define NVIC_ICPR 0xE000E280 + +#define EXTI0_IRQ 6 +#define EXTI1_IRQ 7 +#define EXTI5_9_IRQ 23 +#define EXTI35_IRQ 1 + +static void enable_nvic_irq(unsigned int n) +{ + writel(NVIC_ISER, 1 << n); +} + +static void unpend_nvic_irq(unsigned int n) +{ + writel(NVIC_ICPR, 1 << n); +} + +static bool check_nvic_pending(unsigned int n) +{ + return readl(NVIC_ISPR) & (1 << n); +} + +static void exti_writel(unsigned int offset, uint32_t value) +{ + writel(EXTI_BASE_ADDR + offset, value); +} + +static uint32_t exti_readl(unsigned int offset) +{ + return readl(EXTI_BASE_ADDR + offset); +} + +static void exti_set_irq(int num, int level) +{ + qtest_set_irq_in(global_qtest, "/machine/soc/exti", NULL, + num, level); +} + +static void test_reg_write_read(void) +{ + /* Test that non-reserved bits in xMR and xTSR can be set and cleared */ + + exti_writel(EXTI_IMR1, 0xFFFFFFFF); + g_assert_cmpuint(exti_readl(EXTI_IMR1), ==, 0xFFFFFFFF); + exti_writel(EXTI_IMR1, 0x00000000); + g_assert_cmpuint(exti_readl(EXTI_IMR1), ==, 0x00000000); + + exti_writel(EXTI_EMR1, 0xFFFFFFFF); + g_assert_cmpuint(exti_readl(EXTI_EMR1), ==, 0xFFFFFFFF); + exti_writel(EXTI_EMR1, 0x00000000); + g_assert_cmpuint(exti_readl(EXTI_EMR1), ==, 0x00000000); + + exti_writel(EXTI_RTSR1, 0xFFFFFFFF); + g_assert_cmpuint(exti_readl(EXTI_RTSR1), ==, 0x007DFFFF); + exti_writel(EXTI_RTSR1, 0x00000000); + g_assert_cmpuint(exti_readl(EXTI_RTSR1), ==, 0x00000000); + + exti_writel(EXTI_FTSR1, 0xFFFFFFFF); + g_assert_cmpuint(exti_readl(EXTI_FTSR1), ==, 0x007DFFFF); + exti_writel(EXTI_FTSR1, 0x00000000); + g_assert_cmpuint(exti_readl(EXTI_FTSR1), ==, 0x00000000); + + exti_writel(EXTI_IMR2, 0xFFFFFFFF); + g_assert_cmpuint(exti_readl(EXTI_IMR2), ==, 0x000000FF); + exti_writel(EXTI_IMR2, 0x00000000); + g_assert_cmpuint(exti_readl(EXTI_IMR2), ==, 0x00000000); + + exti_writel(EXTI_EMR2, 0xFFFFFFFF); + g_assert_cmpuint(exti_readl(EXTI_EMR2), ==, 0x000000FF); + exti_writel(EXTI_EMR2, 0x00000000); + g_assert_cmpuint(exti_readl(EXTI_EMR2), ==, 0x00000000); + + exti_writel(EXTI_RTSR2, 0xFFFFFFFF); + g_assert_cmpuint(exti_readl(EXTI_RTSR2), ==, 0x00000078); + exti_writel(EXTI_RTSR2, 0x00000000); + g_assert_cmpuint(exti_readl(EXTI_RTSR2), ==, 0x00000000); + + exti_writel(EXTI_FTSR2, 0xFFFFFFFF); + g_assert_cmpuint(exti_readl(EXTI_FTSR2), ==, 0x00000078); + exti_writel(EXTI_FTSR2, 0x00000000); + g_assert_cmpuint(exti_readl(EXTI_FTSR2), ==, 0x00000000); +} + +static void test_direct_lines_write(void) +{ + /* Test that direct lines reserved bits are not written to */ + + exti_writel(EXTI_RTSR1, 0xFF820000); + g_assert_cmpuint(exti_readl(EXTI_RTSR1), ==, 0x00000000); + + exti_writel(EXTI_FTSR1, 0xFF820000); + g_assert_cmpuint(exti_readl(EXTI_FTSR1), ==, 0x00000000); + + exti_writel(EXTI_SWIER1, 0xFF820000); + g_assert_cmpuint(exti_readl(EXTI_SWIER1), ==, 0x00000000); + + exti_writel(EXTI_PR1, 0xFF820000); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + + exti_writel(EXTI_RTSR2, 0x00000087); + g_assert_cmpuint(exti_readl(EXTI_RTSR2), ==, 0x00000000); + + exti_writel(EXTI_FTSR2, 0x00000087); + 
g_assert_cmpuint(exti_readl(EXTI_FTSR2), ==, 0x00000000);
+
+    exti_writel(EXTI_SWIER2, 0x00000087);
+    g_assert_cmpuint(exti_readl(EXTI_SWIER2), ==, 0x00000000);
+
+    exti_writel(EXTI_PR2, 0x00000087);
+    g_assert_cmpuint(exti_readl(EXTI_PR2), ==, 0x00000000);
+}
+
+static void test_reserved_bits_write(void)
+{
+    /*
+     * Test that reserved bits are not written to
+     */
+
+    exti_writel(EXTI_IMR2, 0xFFFFFF00);
+    g_assert_cmpuint(exti_readl(EXTI_IMR2), ==, 0x00000000);
+
+    exti_writel(EXTI_EMR2, 0xFFFFFF00);
+    g_assert_cmpuint(exti_readl(EXTI_EMR2), ==, 0x00000000);
+
+    exti_writel(EXTI_RTSR2, 0xFFFFFF00);
+    g_assert_cmpuint(exti_readl(EXTI_RTSR2), ==, 0x00000000);
+
+    exti_writel(EXTI_FTSR2, 0xFFFFFF00);
+    g_assert_cmpuint(exti_readl(EXTI_FTSR2), ==, 0x00000000);
+
+    exti_writel(EXTI_SWIER2, 0xFFFFFF00);
+    g_assert_cmpuint(exti_readl(EXTI_SWIER2), ==, 0x00000000);
+
+    exti_writel(EXTI_PR2, 0xFFFFFF00);
+    g_assert_cmpuint(exti_readl(EXTI_PR2), ==, 0x00000000);
+}
+
+static void test_software_interrupt(void)
+{
+    /*
+     * Test that we can launch a software irq by:
+     * - enabling its line in IMR
+     * - and then setting a bit from '0' to '1' in SWIER
+     *
+     * And that the interrupt stays pending in the NVIC
+     * even after clearing the pending bit in PR.
+     */
+
+    /*
+     * Testing interrupt line EXTI0
+     * Bit 0 in EXTI_*1 registers (EXTI0) corresponds to GPIO Px_0
+     */
+
+    enable_nvic_irq(EXTI0_IRQ);
+    /* Check that there are no interrupts already pending in PR */
+    g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000);
+    /* Check that this specific interrupt isn't pending in NVIC */
+    g_assert_false(check_nvic_pending(EXTI0_IRQ));
+
+    /* Enable interrupt line EXTI0 */
+    exti_writel(EXTI_IMR1, 0x00000001);
+    /* Set the right SWIER bit from '0' to '1' */
+    exti_writel(EXTI_SWIER1, 0x00000000);
+    exti_writel(EXTI_SWIER1, 0x00000001);
+
+    /* Check that the write in SWIER was effective */
+    g_assert_cmpuint(exti_readl(EXTI_SWIER1), ==, 0x00000001);
+    /* Check that the corresponding pending bit in PR is set */
+    g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000001);
+    /* Check that the corresponding interrupt is pending in the NVIC */
+    g_assert_true(check_nvic_pending(EXTI0_IRQ));
+
+    /* Clear the pending bit in PR */
+    exti_writel(EXTI_PR1, 0x00000001);
+
+    /* Check that the write in PR was effective */
+    g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000);
+    /* Check that the corresponding bit in SWIER was cleared */
+    g_assert_cmpuint(exti_readl(EXTI_SWIER1), ==, 0x00000000);
+    /* Check that the interrupt is still pending in the NVIC */
+    g_assert_true(check_nvic_pending(EXTI0_IRQ));
+
+    /*
+     * Testing interrupt line EXTI35
+     * Bit 3 in EXTI_*2 registers (EXTI35) corresponds to PVM 1 Wakeup
+     */
+
+    enable_nvic_irq(EXTI35_IRQ);
+    /* Check that there are no interrupts already pending */
+    g_assert_cmpuint(exti_readl(EXTI_PR2), ==, 0x00000000);
+    g_assert_false(check_nvic_pending(EXTI35_IRQ));
+
+    /* Enable interrupt line EXTI35 */
+    exti_writel(EXTI_IMR2, 0x00000008);
+    /* Set the right SWIER bit from '0' to '1' */
+    exti_writel(EXTI_SWIER2, 0x00000000);
+    exti_writel(EXTI_SWIER2, 0x00000008);
+
+    /* Check that the write in SWIER was effective */
+    g_assert_cmpuint(exti_readl(EXTI_SWIER2), ==, 0x00000008);
+    /* Check that the corresponding pending bit in PR is set */
+    g_assert_cmpuint(exti_readl(EXTI_PR2), ==, 0x00000008);
+    /* Check that the corresponding interrupt is pending in the NVIC */
+    g_assert_true(check_nvic_pending(EXTI35_IRQ));
+
+    /* Clear the pending bit in PR */
+    exti_writel(EXTI_PR2,
0x00000008); + + /* Check that the write in PR was effective */ + g_assert_cmpuint(exti_readl(EXTI_PR2), ==, 0x00000000); + /* Check that the corresponding bit in SWIER was cleared */ + g_assert_cmpuint(exti_readl(EXTI_SWIER2), ==, 0x00000000); + /* Check that the interrupt is still pending in the NVIC */ + g_assert_true(check_nvic_pending(EXTI35_IRQ)); + + /* Clean NVIC */ + unpend_nvic_irq(EXTI0_IRQ); + g_assert_false(check_nvic_pending(EXTI0_IRQ)); + unpend_nvic_irq(EXTI35_IRQ); + g_assert_false(check_nvic_pending(EXTI35_IRQ)); +} + +static void test_edge_selector(void) +{ + enable_nvic_irq(EXTI0_IRQ); + + /* Configure EXTI line 0 irq on rising edge */ + exti_set_irq(0, 1); + exti_writel(EXTI_IMR1, 0x00000001); + exti_writel(EXTI_RTSR1, 0x00000001); + exti_writel(EXTI_FTSR1, 0x00000000); + + /* Test that an irq is raised on rising edge only */ + exti_set_irq(0, 0); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + g_assert_false(check_nvic_pending(EXTI0_IRQ)); + + exti_set_irq(0, 1); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000001); + g_assert_true(check_nvic_pending(EXTI0_IRQ)); + + /* Clean the test */ + exti_writel(EXTI_PR1, 0x00000001); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + unpend_nvic_irq(EXTI0_IRQ); + g_assert_false(check_nvic_pending(EXTI0_IRQ)); + + /* Configure EXTI line 0 irq on falling edge */ + exti_set_irq(0, 0); + exti_writel(EXTI_IMR1, 0x00000001); + exti_writel(EXTI_RTSR1, 0x00000000); + exti_writel(EXTI_FTSR1, 0x00000001); + + /* Test that an irq is raised on falling edge only */ + exti_set_irq(0, 1); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + g_assert_false(check_nvic_pending(EXTI0_IRQ)); + + exti_set_irq(0, 0); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000001); + g_assert_true(check_nvic_pending(EXTI0_IRQ)); + + /* Clean the test */ + exti_writel(EXTI_PR1, 0x00000001); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + unpend_nvic_irq(EXTI0_IRQ); + g_assert_false(check_nvic_pending(EXTI0_IRQ)); + + /* Configure EXTI line 0 irq on falling and rising edge */ + exti_writel(EXTI_IMR1, 0x00000001); + exti_writel(EXTI_RTSR1, 0x00000001); + exti_writel(EXTI_FTSR1, 0x00000001); + + /* Test that an irq is raised on rising edge */ + exti_set_irq(0, 1); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000001); + g_assert_true(check_nvic_pending(EXTI0_IRQ)); + + /* Clean the test */ + exti_writel(EXTI_PR1, 0x00000001); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + unpend_nvic_irq(EXTI0_IRQ); + g_assert_false(check_nvic_pending(EXTI0_IRQ)); + + /* Test that an irq is raised on falling edge */ + exti_set_irq(0, 0); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000001); + g_assert_true(check_nvic_pending(EXTI0_IRQ)); + + /* Clean the test */ + exti_writel(EXTI_PR1, 0x00000001); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + unpend_nvic_irq(EXTI0_IRQ); + g_assert_false(check_nvic_pending(EXTI0_IRQ)); + + /* Configure EXTI line 0 irq without selecting an edge trigger */ + exti_writel(EXTI_IMR1, 0x00000001); + exti_writel(EXTI_RTSR1, 0x00000000); + exti_writel(EXTI_FTSR1, 0x00000000); + + /* Test that no irq is raised */ + exti_set_irq(0, 1); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + g_assert_false(check_nvic_pending(EXTI0_IRQ)); + + exti_set_irq(0, 0); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + g_assert_false(check_nvic_pending(EXTI0_IRQ)); +} + +static void test_no_software_interrupt(void) +{ + /* + * Test that software irq doesn't happen when : + 
* - corresponding bit in IMR isn't set + * - SWIER is set to 1 before IMR is set to 1 + */ + + /* + * Testing interrupt line EXTI0 + * Bit 0 in EXTI_*1 registers (EXTI0) corresponds to GPIO Px_0 + */ + + enable_nvic_irq(EXTI0_IRQ); + /* Check that there are no interrupts already pending in PR */ + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + /* Check that this specific interrupt isn't pending in NVIC */ + g_assert_false(check_nvic_pending(EXTI0_IRQ)); + + /* Mask interrupt line EXTI0 */ + exti_writel(EXTI_IMR1, 0x00000000); + /* Set the corresponding SWIER bit from '0' to '1' */ + exti_writel(EXTI_SWIER1, 0x00000000); + exti_writel(EXTI_SWIER1, 0x00000001); + + /* Check that the write in SWIER was effective */ + g_assert_cmpuint(exti_readl(EXTI_SWIER1), ==, 0x00000001); + /* Check that the pending bit in PR wasn't set */ + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + /* Check that the interrupt isn't pending in NVIC */ + g_assert_false(check_nvic_pending(EXTI0_IRQ)); + + /* Enable interrupt line EXTI0 */ + exti_writel(EXTI_IMR1, 0x00000001); + + /* Check that the pending bit in PR wasn't set */ + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + /* Check that the interrupt isn't pending in NVIC */ + g_assert_false(check_nvic_pending(EXTI0_IRQ)); + + /* + * Testing interrupt line EXTI35 + * Bit 3 in EXTI_*2 registers (EXTI35) corresponds to PVM 1 Wakeup + */ + + enable_nvic_irq(EXTI35_IRQ); + /* Check that there are no interrupts already pending in PR */ + g_assert_cmpuint(exti_readl(EXTI_PR2), ==, 0x00000000); + /* Check that this specific interrupt isn't pending in NVIC */ + g_assert_false(check_nvic_pending(EXTI35_IRQ)); + + /* Mask interrupt line EXTI35 */ + exti_writel(EXTI_IMR2, 0x00000000); + /* Set the corresponding SWIER bit from '0' to '1' */ + exti_writel(EXTI_SWIER2, 0x00000000); + exti_writel(EXTI_SWIER2, 0x00000008); + + /* Check that the write in SWIER was effective */ + g_assert_cmpuint(exti_readl(EXTI_SWIER2), ==, 0x00000008); + /* Check that the pending bit in PR wasn't set */ + g_assert_cmpuint(exti_readl(EXTI_PR2), ==, 0x00000000); + /* Check that the interrupt isn't pending in NVIC */ + g_assert_false(check_nvic_pending(EXTI35_IRQ)); + + /* Enable interrupt line EXTI35 */ + exti_writel(EXTI_IMR2, 0x00000008); + + /* Check that the pending bit in PR wasn't set */ + g_assert_cmpuint(exti_readl(EXTI_PR2), ==, 0x00000000); + /* Check that the interrupt isn't pending in NVIC */ + g_assert_false(check_nvic_pending(EXTI35_IRQ)); +} + +static void test_masked_interrupt(void) +{ + /* + * Test that irq doesn't happen when : + * - corresponding bit in IMR isn't set + * - SWIER is set to 1 before IMR is set to 1 + */ + + /* + * Testing interrupt line EXTI1 + * with rising edge from GPIOx pin 1 + */ + + enable_nvic_irq(EXTI1_IRQ); + /* Check that there are no interrupts already pending in PR */ + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + /* Check that this specific interrupt isn't pending in NVIC */ + g_assert_false(check_nvic_pending(EXTI1_IRQ)); + + /* Mask interrupt line EXTI1 */ + exti_writel(EXTI_IMR1, 0x00000000); + + /* Configure interrupt on rising edge */ + exti_writel(EXTI_RTSR1, 0x00000002); + + /* Simulate rising edge from GPIO line 1 */ + exti_set_irq(1, 1); + + /* Check that the pending bit in PR wasn't set */ + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + /* Check that the interrupt isn't pending in NVIC */ + g_assert_false(check_nvic_pending(EXTI1_IRQ)); + + /* Enable interrupt line EXTI1 */ + 
exti_writel(EXTI_IMR1, 0x00000002); + + /* Check that the pending bit in PR wasn't set */ + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + /* Check that the interrupt isn't pending in NVIC */ + g_assert_false(check_nvic_pending(EXTI1_IRQ)); +} + +static void test_interrupt(void) +{ + /* + * Test that we can launch an irq by : + * - enabling its line in IMR + * - configuring interrupt on rising edge + * - and then setting the input line from '0' to '1' + * + * And that the interruption stays pending in NVIC + * even after clearing the pending bit in PR. + */ + + /* + * Testing interrupt line EXTI1 + * with rising edge from GPIOx pin 1 + */ + + enable_nvic_irq(EXTI1_IRQ); + /* Check that there are no interrupts already pending in PR */ + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + /* Check that this specific interrupt isn't pending in NVIC */ + g_assert_false(check_nvic_pending(EXTI1_IRQ)); + + /* Enable interrupt line EXTI1 */ + exti_writel(EXTI_IMR1, 0x00000002); + + /* Configure interrupt on rising edge */ + exti_writel(EXTI_RTSR1, 0x00000002); + + /* Simulate rising edge from GPIO line 1 */ + exti_set_irq(1, 1); + + /* Check that the pending bit in PR was set */ + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000002); + /* Check that the interrupt is pending in NVIC */ + g_assert_true(check_nvic_pending(EXTI1_IRQ)); + + /* Clear the pending bit in PR */ + exti_writel(EXTI_PR1, 0x00000002); + + /* Check that the write in PR was effective */ + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + /* Check that the interrupt is still pending in the NVIC */ + g_assert_true(check_nvic_pending(EXTI1_IRQ)); + + /* Clean NVIC */ + unpend_nvic_irq(EXTI1_IRQ); + g_assert_false(check_nvic_pending(EXTI1_IRQ)); +} + +static void test_orred_interrupts(void) +{ + /* + * For lines EXTI5..9 (fanned-in to NVIC irq 23), + * test that raising the line pends interrupt + * 23 in NVIC. 
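+     * Each of the five lines is raised in turn: PR1 should report only
+     * that line's bit while the shared NVIC interrupt goes pending, and
+     * the NVIC is unpended separately after clearing PR1.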
+ */ + enable_nvic_irq(EXTI5_9_IRQ); + /* Check that there are no interrupts already pending in PR */ + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + /* Check that this specific interrupt isn't pending in NVIC */ + g_assert_false(check_nvic_pending(EXTI5_9_IRQ)); + + /* Enable interrupt lines EXTI[5..9] */ + exti_writel(EXTI_IMR1, (0x1F << 5)); + + /* Configure interrupt on rising edge */ + exti_writel(EXTI_RTSR1, (0x1F << 5)); + + /* Raise GPIO line i, check that the interrupt is pending */ + for (unsigned i = 5; i < 10; i++) { + exti_set_irq(i, 1); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 1 << i); + g_assert_true(check_nvic_pending(EXTI5_9_IRQ)); + + exti_writel(EXTI_PR1, 1 << i); + g_assert_cmpuint(exti_readl(EXTI_PR1), ==, 0x00000000); + g_assert_true(check_nvic_pending(EXTI5_9_IRQ)); + + unpend_nvic_irq(EXTI5_9_IRQ); + g_assert_false(check_nvic_pending(EXTI5_9_IRQ)); + } +} + +int main(int argc, char **argv) +{ + int ret; + + g_test_init(&argc, &argv, NULL); + g_test_set_nonfatal_assertions(); + qtest_add_func("stm32l4x5/exti/direct_lines", test_direct_lines_write); + qtest_add_func("stm32l4x5/exti/reserved_bits", test_reserved_bits_write); + qtest_add_func("stm32l4x5/exti/reg_write_read", test_reg_write_read); + qtest_add_func("stm32l4x5/exti/no_software_interrupt", + test_no_software_interrupt); + qtest_add_func("stm32l4x5/exti/software_interrupt", + test_software_interrupt); + qtest_add_func("stm32l4x5/exti/masked_interrupt", test_masked_interrupt); + qtest_add_func("stm32l4x5/exti/interrupt", test_interrupt); + qtest_add_func("stm32l4x5/exti/test_edge_selector", test_edge_selector); + qtest_add_func("stm32l4x5/exti/test_orred_interrupts", + test_orred_interrupts); + + qtest_start("-machine b-l475e-iot01a"); + ret = g_test_run(); + qtest_end(); + + return ret; +} diff --git a/tests/qtest/stm32l4x5_gpio-test.c b/tests/qtest/stm32l4x5_gpio-test.c new file mode 100644 index 00000000000..0f6bda54d3c --- /dev/null +++ b/tests/qtest/stm32l4x5_gpio-test.c @@ -0,0 +1,562 @@ +/* + * QTest testcase for STM32L4x5_GPIO + * + * Copyright (c) 2024 Arnaud Minier + * Copyright (c) 2024 Inès Varhol + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "libqtest-single.h" + +#define GPIO_BASE_ADDR 0x48000000 +#define GPIO_SIZE 0x400 +#define NUM_GPIOS 8 +#define NUM_GPIO_PINS 16 + +#define GPIO_A 0x48000000 +#define GPIO_B 0x48000400 +#define GPIO_C 0x48000800 +#define GPIO_D 0x48000C00 +#define GPIO_E 0x48001000 +#define GPIO_F 0x48001400 +#define GPIO_G 0x48001800 +#define GPIO_H 0x48001C00 + +#define MODER 0x00 +#define OTYPER 0x04 +#define PUPDR 0x0C +#define IDR 0x10 +#define ODR 0x14 +#define BSRR 0x18 +#define BRR 0x28 + +#define MODER_INPUT 0 +#define MODER_OUTPUT 1 + +#define PUPDR_NONE 0 +#define PUPDR_PULLUP 1 +#define PUPDR_PULLDOWN 2 + +#define OTYPER_PUSH_PULL 0 +#define OTYPER_OPEN_DRAIN 1 + +const uint32_t moder_reset[NUM_GPIOS] = { + 0xABFFFFFF, + 0xFFFFFEBF, + 0xFFFFFFFF, + 0xFFFFFFFF, + 0xFFFFFFFF, + 0xFFFFFFFF, + 0xFFFFFFFF, + 0x0000000F +}; + +const uint32_t pupdr_reset[NUM_GPIOS] = { + 0x64000000, + 0x00000100, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000 +}; + +const uint32_t idr_reset[NUM_GPIOS] = { + 0x0000A000, + 0x00000010, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000 +}; + +#define PIN_MASK 0xF +#define GPIO_ADDR_MASK (~(GPIO_SIZE - 1)) + +static inline void *test_data(uint32_t gpio_addr, uint8_t pin) +{ + return (void *)(uintptr_t)((gpio_addr & GPIO_ADDR_MASK) | (pin & PIN_MASK)); +} + +#define test_gpio_addr(data) ((uintptr_t)(data) & GPIO_ADDR_MASK) +#define test_pin(data) ((uintptr_t)(data) & PIN_MASK) + +static uint32_t gpio_readl(unsigned int gpio, unsigned int offset) +{ + return readl(gpio + offset); +} + +static void gpio_writel(unsigned int gpio, unsigned int offset, uint32_t value) +{ + writel(gpio + offset, value); +} + +static void gpio_set_bit(unsigned int gpio, unsigned int reg, + unsigned int pin, uint32_t value) +{ + uint32_t mask = 0xFFFFFFFF & ~(0x1 << pin); + gpio_writel(gpio, reg, (gpio_readl(gpio, reg) & mask) | value << pin); +} + +static void gpio_set_2bits(unsigned int gpio, unsigned int reg, + unsigned int pin, uint32_t value) +{ + uint32_t offset = 2 * pin; + uint32_t mask = 0xFFFFFFFF & ~(0x3 << offset); + gpio_writel(gpio, reg, (gpio_readl(gpio, reg) & mask) | value << offset); +} + +static unsigned int get_gpio_id(uint32_t gpio_addr) +{ + return (gpio_addr - GPIO_BASE_ADDR) / GPIO_SIZE; +} + +static void gpio_set_irq(unsigned int gpio, int num, int level) +{ + g_autofree char *name = g_strdup_printf("/machine/soc/gpio%c", + get_gpio_id(gpio) + 'a'); + qtest_set_irq_in(global_qtest, name, NULL, num, level); +} + +static void disconnect_all_pins(unsigned int gpio) +{ + g_autofree char *path = g_strdup_printf("/machine/soc/gpio%c", + get_gpio_id(gpio) + 'a'); + QDict *r; + + r = qtest_qmp(global_qtest, "{ 'execute': 'qom-set', 'arguments': " + "{ 'path': %s, 'property': 'disconnected-pins', 'value': %d } }", + path, 0xFFFF); + g_assert_false(qdict_haskey(r, "error")); + qobject_unref(r); +} + +static uint32_t get_disconnected_pins(unsigned int gpio) +{ + g_autofree char *path = g_strdup_printf("/machine/soc/gpio%c", + get_gpio_id(gpio) + 'a'); + uint32_t disconnected_pins = 0; + QDict *r; + + r = qtest_qmp(global_qtest, "{ 'execute': 'qom-get', 'arguments':" + " { 'path': %s, 'property': 'disconnected-pins'} }", path); + g_assert_false(qdict_haskey(r, "error")); + disconnected_pins = qdict_get_int(r, "return"); + qobject_unref(r); + return disconnected_pins; +} + +static uint32_t reset(uint32_t gpio, unsigned int offset) +{ + switch (offset) { + case MODER: + return 
moder_reset[get_gpio_id(gpio)]; + case PUPDR: + return pupdr_reset[get_gpio_id(gpio)]; + case IDR: + return idr_reset[get_gpio_id(gpio)]; + } + return 0x0; +} + +static void system_reset(void) +{ + QDict *r; + r = qtest_qmp(global_qtest, "{'execute': 'system_reset'}"); + g_assert_false(qdict_haskey(r, "error")); + qobject_unref(r); +} + +static void test_idr_reset_value(void) +{ + /* + * Checks that the values in MODER, OTYPER, PUPDR and ODR + * after reset are correct, and that the value in IDR is + * coherent. + * Since AF and analog modes aren't implemented, IDR reset + * values aren't the same as with a real board. + * + * Register IDR contains the actual values of all GPIO pins. + * Its value depends on the pins' configuration + * (intput/output/analog : register MODER, push-pull/open-drain : + * register OTYPER, pull-up/pull-down/none : register PUPDR) + * and on the values stored in register ODR + * (in case the pin is in output mode). + */ + + gpio_writel(GPIO_A, MODER, 0xDEADBEEF); + gpio_writel(GPIO_A, ODR, 0xDEADBEEF); + gpio_writel(GPIO_A, OTYPER, 0xDEADBEEF); + gpio_writel(GPIO_A, PUPDR, 0xDEADBEEF); + + gpio_writel(GPIO_B, MODER, 0xDEADBEEF); + gpio_writel(GPIO_B, ODR, 0xDEADBEEF); + gpio_writel(GPIO_B, OTYPER, 0xDEADBEEF); + gpio_writel(GPIO_B, PUPDR, 0xDEADBEEF); + + gpio_writel(GPIO_C, MODER, 0xDEADBEEF); + gpio_writel(GPIO_C, ODR, 0xDEADBEEF); + gpio_writel(GPIO_C, OTYPER, 0xDEADBEEF); + gpio_writel(GPIO_C, PUPDR, 0xDEADBEEF); + + gpio_writel(GPIO_H, MODER, 0xDEADBEEF); + gpio_writel(GPIO_H, ODR, 0xDEADBEEF); + gpio_writel(GPIO_H, OTYPER, 0xDEADBEEF); + gpio_writel(GPIO_H, PUPDR, 0xDEADBEEF); + + system_reset(); + + uint32_t moder = gpio_readl(GPIO_A, MODER); + uint32_t odr = gpio_readl(GPIO_A, ODR); + uint32_t otyper = gpio_readl(GPIO_A, OTYPER); + uint32_t pupdr = gpio_readl(GPIO_A, PUPDR); + uint32_t idr = gpio_readl(GPIO_A, IDR); + /* 15: AF, 14: AF, 13: AF, 12: Analog ... */ + /* here AF is the same as Analog and Input mode */ + g_assert_cmphex(moder, ==, reset(GPIO_A, MODER)); + g_assert_cmphex(odr, ==, reset(GPIO_A, ODR)); + g_assert_cmphex(otyper, ==, reset(GPIO_A, OTYPER)); + /* 15: pull-up, 14: pull-down, 13: pull-up, 12: neither ... */ + g_assert_cmphex(pupdr, ==, reset(GPIO_A, PUPDR)); + /* 15 : 1, 14: 0, 13: 1, 12 : reset value ... */ + g_assert_cmphex(idr, ==, reset(GPIO_A, IDR)); + + moder = gpio_readl(GPIO_B, MODER); + odr = gpio_readl(GPIO_B, ODR); + otyper = gpio_readl(GPIO_B, OTYPER); + pupdr = gpio_readl(GPIO_B, PUPDR); + idr = gpio_readl(GPIO_B, IDR); + /* ... 5: Analog, 4: AF, 3: AF, 2: Analog ... */ + /* here AF is the same as Analog and Input mode */ + g_assert_cmphex(moder, ==, reset(GPIO_B, MODER)); + g_assert_cmphex(odr, ==, reset(GPIO_B, ODR)); + g_assert_cmphex(otyper, ==, reset(GPIO_B, OTYPER)); + /* ... 5: neither, 4: pull-up, 3: neither ... */ + g_assert_cmphex(pupdr, ==, reset(GPIO_B, PUPDR)); + /* ... 5 : reset value, 4 : 1, 3 : reset value ... 
*/ + g_assert_cmphex(idr, ==, reset(GPIO_B, IDR)); + + moder = gpio_readl(GPIO_C, MODER); + odr = gpio_readl(GPIO_C, ODR); + otyper = gpio_readl(GPIO_C, OTYPER); + pupdr = gpio_readl(GPIO_C, PUPDR); + idr = gpio_readl(GPIO_C, IDR); + /* Analog, same as Input mode*/ + g_assert_cmphex(moder, ==, reset(GPIO_C, MODER)); + g_assert_cmphex(odr, ==, reset(GPIO_C, ODR)); + g_assert_cmphex(otyper, ==, reset(GPIO_C, OTYPER)); + /* no pull-up or pull-down */ + g_assert_cmphex(pupdr, ==, reset(GPIO_C, PUPDR)); + /* reset value */ + g_assert_cmphex(idr, ==, reset(GPIO_C, IDR)); + + moder = gpio_readl(GPIO_H, MODER); + odr = gpio_readl(GPIO_H, ODR); + otyper = gpio_readl(GPIO_H, OTYPER); + pupdr = gpio_readl(GPIO_H, PUPDR); + idr = gpio_readl(GPIO_H, IDR); + /* Analog, same as Input mode */ + g_assert_cmphex(moder, ==, reset(GPIO_H, MODER)); + g_assert_cmphex(odr, ==, reset(GPIO_H, ODR)); + g_assert_cmphex(otyper, ==, reset(GPIO_H, OTYPER)); + /* no pull-up or pull-down */ + g_assert_cmphex(pupdr, ==, reset(GPIO_H, PUPDR)); + /* reset value */ + g_assert_cmphex(idr, ==, reset(GPIO_H, IDR)); +} + +static void test_gpio_output_mode(const void *data) +{ + /* + * Checks that setting a bit in ODR sets the corresponding + * GPIO line high : it should set the right bit in IDR + * and send an irq to syscfg. + * Additionally, it checks that values written to ODR + * when not in output mode are stored and not discarded. + */ + unsigned int pin = test_pin(data); + uint32_t gpio = test_gpio_addr(data); + unsigned int gpio_id = get_gpio_id(gpio); + + qtest_irq_intercept_in(global_qtest, "/machine/soc/syscfg"); + + /* Set a bit in ODR and check nothing happens */ + gpio_set_bit(gpio, ODR, pin, 1); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR)); + g_assert_false(get_irq(gpio_id * NUM_GPIO_PINS + pin)); + + /* Configure the relevant line as output and check the pin is high */ + gpio_set_2bits(gpio, MODER, pin, MODER_OUTPUT); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR) | (1 << pin)); + g_assert_true(get_irq(gpio_id * NUM_GPIO_PINS + pin)); + + /* Reset the bit in ODR and check the pin is low */ + gpio_set_bit(gpio, ODR, pin, 0); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR) & ~(1 << pin)); + g_assert_false(get_irq(gpio_id * NUM_GPIO_PINS + pin)); + + /* Clean the test */ + gpio_writel(gpio, ODR, reset(gpio, ODR)); + gpio_writel(gpio, MODER, reset(gpio, MODER)); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR)); + g_assert_false(get_irq(gpio_id * NUM_GPIO_PINS + pin)); +} + +static void test_gpio_input_mode(const void *data) +{ + /* + * Test that setting a line high/low externally sets the + * corresponding GPIO line high/low : it should set the + * right bit in IDR and send an irq to syscfg. 
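+     * (get_irq() below indexes the intercepted syscfg inputs as
+     * gpio_id * NUM_GPIO_PINS + pin, i.e. 16 lines per GPIO port.)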
+ */ + unsigned int pin = test_pin(data); + uint32_t gpio = test_gpio_addr(data); + unsigned int gpio_id = get_gpio_id(gpio); + + qtest_irq_intercept_in(global_qtest, "/machine/soc/syscfg"); + + /* Configure a line as input, raise it, and check that the pin is high */ + gpio_set_2bits(gpio, MODER, pin, MODER_INPUT); + gpio_set_irq(gpio, pin, 1); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR) | (1 << pin)); + g_assert_true(get_irq(gpio_id * NUM_GPIO_PINS + pin)); + + /* Lower the line and check that the pin is low */ + gpio_set_irq(gpio, pin, 0); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR) & ~(1 << pin)); + g_assert_false(get_irq(gpio_id * NUM_GPIO_PINS + pin)); + + /* Clean the test */ + gpio_writel(gpio, MODER, reset(gpio, MODER)); + disconnect_all_pins(gpio); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR)); +} + +static void test_pull_up_pull_down(const void *data) +{ + /* + * Test that a floating pin with pull-up sets the pin + * high and vice-versa. + */ + unsigned int pin = test_pin(data); + uint32_t gpio = test_gpio_addr(data); + unsigned int gpio_id = get_gpio_id(gpio); + + qtest_irq_intercept_in(global_qtest, "/machine/soc/syscfg"); + + /* Configure a line as input with pull-up, check the line is set high */ + gpio_set_2bits(gpio, MODER, pin, MODER_INPUT); + gpio_set_2bits(gpio, PUPDR, pin, PUPDR_PULLUP); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR) | (1 << pin)); + g_assert_true(get_irq(gpio_id * NUM_GPIO_PINS + pin)); + + /* Configure the line with pull-down, check the line is low */ + gpio_set_2bits(gpio, PUPDR, pin, PUPDR_PULLDOWN); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR) & ~(1 << pin)); + g_assert_false(get_irq(gpio_id * NUM_GPIO_PINS + pin)); + + /* Clean the test */ + gpio_writel(gpio, MODER, reset(gpio, MODER)); + gpio_writel(gpio, PUPDR, reset(gpio, PUPDR)); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR)); +} + +static void test_push_pull(const void *data) +{ + /* + * Test that configuring a line in push-pull output mode + * disconnects the pin, that the pin can't be set or reset + * externally afterwards. 
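+     * (The test also exercises a second port, gpio2, picked at the
+     * mirrored index (A<->H, B<->G, ...), so the externally-high and
+     * externally-low cases run on two different GPIO instances.)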
+ */ + unsigned int pin = test_pin(data); + uint32_t gpio = test_gpio_addr(data); + uint32_t gpio2 = GPIO_BASE_ADDR + (GPIO_H - gpio); + + qtest_irq_intercept_in(global_qtest, "/machine/soc/syscfg"); + + /* Setting a line high externally, configuring it in push-pull output */ + /* And checking the pin was disconnected */ + gpio_set_irq(gpio, pin, 1); + gpio_set_2bits(gpio, MODER, pin, MODER_OUTPUT); + g_assert_cmphex(get_disconnected_pins(gpio), ==, 0xFFFF); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR) & ~(1 << pin)); + + /* Setting a line low externally, configuring it in push-pull output */ + /* And checking the pin was disconnected */ + gpio_set_irq(gpio2, pin, 0); + gpio_set_bit(gpio2, ODR, pin, 1); + gpio_set_2bits(gpio2, MODER, pin, MODER_OUTPUT); + g_assert_cmphex(get_disconnected_pins(gpio2), ==, 0xFFFF); + g_assert_cmphex(gpio_readl(gpio2, IDR), ==, reset(gpio2, IDR) | (1 << pin)); + + /* Trying to set a push-pull output pin, checking it doesn't work */ + gpio_set_irq(gpio, pin, 1); + g_assert_cmphex(get_disconnected_pins(gpio), ==, 0xFFFF); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR) & ~(1 << pin)); + + /* Trying to reset a push-pull output pin, checking it doesn't work */ + gpio_set_irq(gpio2, pin, 0); + g_assert_cmphex(get_disconnected_pins(gpio2), ==, 0xFFFF); + g_assert_cmphex(gpio_readl(gpio2, IDR), ==, reset(gpio2, IDR) | (1 << pin)); + + /* Clean the test */ + gpio_writel(gpio, MODER, reset(gpio, MODER)); + gpio_writel(gpio2, ODR, reset(gpio2, ODR)); + gpio_writel(gpio2, MODER, reset(gpio2, MODER)); +} + +static void test_open_drain(const void *data) +{ + /* + * Test that configuring a line in open-drain output mode + * disconnects a pin set high externally and that the pin + * can't be set high externally while configured in open-drain. + * + * However a pin set low externally shouldn't be disconnected, + * and it can be set low externally when in open-drain mode. 
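+     * (Disconnection is observed through the "disconnected-pins" QOM
+     * property queried by get_disconnected_pins().)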
+ */ + unsigned int pin = test_pin(data); + uint32_t gpio = test_gpio_addr(data); + uint32_t gpio2 = GPIO_BASE_ADDR + (GPIO_H - gpio); + + qtest_irq_intercept_in(global_qtest, "/machine/soc/syscfg"); + + /* Setting a line high externally, configuring it in open-drain output */ + /* And checking the pin was disconnected */ + gpio_set_irq(gpio, pin, 1); + gpio_set_bit(gpio, OTYPER, pin, OTYPER_OPEN_DRAIN); + gpio_set_2bits(gpio, MODER, pin, MODER_OUTPUT); + g_assert_cmphex(get_disconnected_pins(gpio), ==, 0xFFFF); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR) & ~(1 << pin)); + + /* Setting a line low externally, configuring it in open-drain output */ + /* And checking the pin wasn't disconnected */ + gpio_set_irq(gpio2, pin, 0); + gpio_set_bit(gpio2, ODR, pin, 1); + gpio_set_bit(gpio2, OTYPER, pin, OTYPER_OPEN_DRAIN); + gpio_set_2bits(gpio2, MODER, pin, MODER_OUTPUT); + g_assert_cmphex(get_disconnected_pins(gpio2), ==, 0xFFFF & ~(1 << pin)); + g_assert_cmphex(gpio_readl(gpio2, IDR), ==, + reset(gpio2, IDR) & ~(1 << pin)); + + /* Trying to set a open-drain output pin, checking it doesn't work */ + gpio_set_irq(gpio, pin, 1); + g_assert_cmphex(get_disconnected_pins(gpio), ==, 0xFFFF); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR) & ~(1 << pin)); + + /* Trying to reset a open-drain output pin, checking it works */ + gpio_set_bit(gpio, ODR, pin, 1); + gpio_set_irq(gpio, pin, 0); + g_assert_cmphex(get_disconnected_pins(gpio2), ==, 0xFFFF & ~(1 << pin)); + g_assert_cmphex(gpio_readl(gpio2, IDR), ==, + reset(gpio2, IDR) & ~(1 << pin)); + + /* Clean the test */ + disconnect_all_pins(gpio2); + gpio_writel(gpio2, OTYPER, reset(gpio2, OTYPER)); + gpio_writel(gpio2, ODR, reset(gpio2, ODR)); + gpio_writel(gpio2, MODER, reset(gpio2, MODER)); + g_assert_cmphex(gpio_readl(gpio2, IDR), ==, reset(gpio2, IDR)); + disconnect_all_pins(gpio); + gpio_writel(gpio, OTYPER, reset(gpio, OTYPER)); + gpio_writel(gpio, ODR, reset(gpio, ODR)); + gpio_writel(gpio, MODER, reset(gpio, MODER)); + g_assert_cmphex(gpio_readl(gpio, IDR), ==, reset(gpio, IDR)); +} + +static void test_bsrr_brr(const void *data) +{ + /* + * Test that writing a '1' in BSS and BSRR + * has the desired effect on ODR. + * In BSRR, BSx has priority over BRx. + */ + unsigned int pin = test_pin(data); + uint32_t gpio = test_gpio_addr(data); + + gpio_writel(gpio, BSRR, (1 << pin)); + g_assert_cmphex(gpio_readl(gpio, ODR), ==, reset(gpio, ODR) | (1 << pin)); + + gpio_writel(gpio, BSRR, (1 << (pin + NUM_GPIO_PINS))); + g_assert_cmphex(gpio_readl(gpio, ODR), ==, reset(gpio, ODR)); + + gpio_writel(gpio, BSRR, (1 << pin)); + g_assert_cmphex(gpio_readl(gpio, ODR), ==, reset(gpio, ODR) | (1 << pin)); + + gpio_writel(gpio, BRR, (1 << pin)); + g_assert_cmphex(gpio_readl(gpio, ODR), ==, reset(gpio, ODR)); + + /* BSx should have priority over BRx */ + gpio_writel(gpio, BSRR, (1 << pin) | (1 << (pin + NUM_GPIO_PINS))); + g_assert_cmphex(gpio_readl(gpio, ODR), ==, reset(gpio, ODR) | (1 << pin)); + + gpio_writel(gpio, BRR, (1 << pin)); + g_assert_cmphex(gpio_readl(gpio, ODR), ==, reset(gpio, ODR)); + + gpio_writel(gpio, ODR, reset(gpio, ODR)); +} + +int main(int argc, char **argv) +{ + int ret; + + g_test_init(&argc, &argv, NULL); + g_test_set_nonfatal_assertions(); + qtest_add_func("stm32l4x5/gpio/test_idr_reset_value", + test_idr_reset_value); + /* + * The inputs for the tests (gpio and pin) can be changed, + * but the tests don't work for pins that are high at reset + * (GPIOA15, GPIO13 and GPIOB5). 
+ * Specifically, rising the pin then checking `get_irq()` + * is problematic since the pin was already high. + */ + qtest_add_data_func("stm32l4x5/gpio/test_gpioc5_output_mode", + test_data(GPIO_C, 5), + test_gpio_output_mode); + qtest_add_data_func("stm32l4x5/gpio/test_gpioh3_output_mode", + test_data(GPIO_H, 3), + test_gpio_output_mode); + qtest_add_data_func("stm32l4x5/gpio/test_gpio_input_mode1", + test_data(GPIO_D, 6), + test_gpio_input_mode); + qtest_add_data_func("stm32l4x5/gpio/test_gpio_input_mode2", + test_data(GPIO_C, 10), + test_gpio_input_mode); + qtest_add_data_func("stm32l4x5/gpio/test_gpio_pull_up_pull_down1", + test_data(GPIO_B, 5), + test_pull_up_pull_down); + qtest_add_data_func("stm32l4x5/gpio/test_gpio_pull_up_pull_down2", + test_data(GPIO_F, 1), + test_pull_up_pull_down); + qtest_add_data_func("stm32l4x5/gpio/test_gpio_push_pull1", + test_data(GPIO_G, 6), + test_push_pull); + qtest_add_data_func("stm32l4x5/gpio/test_gpio_push_pull2", + test_data(GPIO_H, 3), + test_push_pull); + qtest_add_data_func("stm32l4x5/gpio/test_gpio_open_drain1", + test_data(GPIO_C, 4), + test_open_drain); + qtest_add_data_func("stm32l4x5/gpio/test_gpio_open_drain2", + test_data(GPIO_E, 11), + test_open_drain); + qtest_add_data_func("stm32l4x5/gpio/test_bsrr_brr1", + test_data(GPIO_A, 12), + test_bsrr_brr); + qtest_add_data_func("stm32l4x5/gpio/test_bsrr_brr2", + test_data(GPIO_D, 0), + test_bsrr_brr); + + qtest_start("-machine b-l475e-iot01a"); + ret = g_test_run(); + qtest_end(); + + return ret; +} diff --git a/tests/qtest/stm32l4x5_rcc-test.c b/tests/qtest/stm32l4x5_rcc-test.c new file mode 100644 index 00000000000..d927c655d13 --- /dev/null +++ b/tests/qtest/stm32l4x5_rcc-test.c @@ -0,0 +1,189 @@ +/* + * QTest testcase for STM32L4x5_RCC + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/registerfields.h" +#include "libqtest-single.h" +#include "hw/misc/stm32l4x5_rcc_internals.h" + +#define RCC_BASE_ADDR 0x40021000 +#define NVIC_ISER 0xE000E100 +#define NVIC_ISPR 0xE000E200 +#define NVIC_ICPR 0xE000E280 +#define RCC_IRQ 5 + +static void enable_nvic_irq(unsigned int n) +{ + writel(NVIC_ISER, 1 << n); +} + +static void unpend_nvic_irq(unsigned int n) +{ + writel(NVIC_ICPR, 1 << n); +} + +static bool check_nvic_pending(unsigned int n) +{ + return readl(NVIC_ISPR) & (1 << n); +} + +static void rcc_writel(unsigned int offset, uint32_t value) +{ + writel(RCC_BASE_ADDR + offset, value); +} + +static uint32_t rcc_readl(unsigned int offset) +{ + return readl(RCC_BASE_ADDR + offset); +} + +static void test_init_msi(void) +{ + /* MSIRANGE can be set only when MSI is OFF or READY */ + rcc_writel(A_CR, R_CR_MSION_MASK); + /* Wait until MSI is stable */ + g_assert_true((rcc_readl(A_CR) & R_CR_MSIRDY_MASK) == R_CR_MSIRDY_MASK); + /* TODO find a way to test MSI value */ +} + +static void test_set_msi_as_sysclk(void) +{ + /* Clocking from MSI, in case MSI was not the default source */ + rcc_writel(A_CFGR, 0); + /* Wait until MSI is selected and stable */ + g_assert_true((rcc_readl(A_CFGR) & R_CFGR_SWS_MASK) == 0); +} + +static void test_init_pll(void) +{ + uint32_t value; + + /* + * Update PLL and set MSI as the source clock. 
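+     * Assuming the MSI is at its 4 MHz reset default, the usual STM32L4
+     * PLL formula gives f(PLLR) = 4 MHz * PLLN / (PLLM * PLLR)
+     * = 4 * 40 / (1 * 2) = 80 MHz; the test only checks the ready and
+     * switch status bits, not the resulting frequency.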
+     * PLLM = 1 --> 000
+     * PLLN = 40 --> 40
+     * PLLR = 2 --> 00
+     * PLLDIV = unused, PLLP = unused (SAI3), PLLQ = unused (48M1)
+     * SRC = MSI --> 01
+     */
+    rcc_writel(A_PLLCFGR, R_PLLCFGR_PLLREN_MASK |
+               (40 << R_PLLCFGR_PLLN_SHIFT) |
+               (0b01 << R_PLLCFGR_PLLSRC_SHIFT));
+
+    /* PLL activation */
+    value = rcc_readl(A_CR);
+    rcc_writel(A_CR, value | R_CR_PLLON_MASK);
+
+    /* Waiting for PLL lock. */
+    g_assert_true((rcc_readl(A_CR) & R_CR_PLLRDY_MASK) == R_CR_PLLRDY_MASK);
+
+    /* Switch the system clock source to the PLL */
+    value = rcc_readl(A_CFGR);
+    rcc_writel(A_CFGR, (value & ~R_CFGR_SW_MASK) |
+               (0b11 << R_CFGR_SW_SHIFT));
+
+    /* Wait until SYSCLK is stable. */
+    g_assert_true((rcc_readl(A_CFGR) & R_CFGR_SWS_MASK) ==
+                  (0b11 << R_CFGR_SWS_SHIFT));
+}
+
+static void test_activate_lse(void)
+{
+    /* LSE activation, no LSE Bypass */
+    rcc_writel(A_BDCR, R_BDCR_LSEDRV_MASK | R_BDCR_LSEON_MASK);
+    g_assert_true((rcc_readl(A_BDCR) & R_BDCR_LSERDY_MASK) == R_BDCR_LSERDY_MASK);
+}
+
+static void test_irq(void)
+{
+    enable_nvic_irq(RCC_IRQ);
+
+    rcc_writel(A_CIER, R_CIER_LSIRDYIE_MASK);
+    rcc_writel(A_CSR, R_CSR_LSION_MASK);
+    g_assert_true(check_nvic_pending(RCC_IRQ));
+    rcc_writel(A_CICR, R_CICR_LSIRDYC_MASK);
+    unpend_nvic_irq(RCC_IRQ);
+
+    rcc_writel(A_CIER, R_CIER_LSERDYIE_MASK);
+    rcc_writel(A_BDCR, R_BDCR_LSEON_MASK);
+    g_assert_true(check_nvic_pending(RCC_IRQ));
+    rcc_writel(A_CICR, R_CICR_LSERDYC_MASK);
+    unpend_nvic_irq(RCC_IRQ);
+
+    /*
+     * MSI has been enabled by previous tests,
+     * so it shouldn't generate an interrupt.
+     */
+    rcc_writel(A_CIER, R_CIER_MSIRDYIE_MASK);
+    rcc_writel(A_CR, R_CR_MSION_MASK);
+    g_assert_false(check_nvic_pending(RCC_IRQ));
+
+    rcc_writel(A_CIER, R_CIER_HSIRDYIE_MASK);
+    rcc_writel(A_CR, R_CR_HSION_MASK);
+    g_assert_true(check_nvic_pending(RCC_IRQ));
+    rcc_writel(A_CICR, R_CICR_HSIRDYC_MASK);
+    unpend_nvic_irq(RCC_IRQ);
+
+    rcc_writel(A_CIER, R_CIER_HSERDYIE_MASK);
+    rcc_writel(A_CR, R_CR_HSEON_MASK);
+    g_assert_true(check_nvic_pending(RCC_IRQ));
+    rcc_writel(A_CICR, R_CICR_HSERDYC_MASK);
+    unpend_nvic_irq(RCC_IRQ);
+
+    /*
+     * PLL has been enabled by previous tests,
+     * so it shouldn't generate an interrupt.
+     */
+    rcc_writel(A_CIER, R_CIER_PLLRDYIE_MASK);
+    rcc_writel(A_CR, R_CR_PLLON_MASK);
+    g_assert_false(check_nvic_pending(RCC_IRQ));
+
+    rcc_writel(A_CIER, R_CIER_PLLSAI1RDYIE_MASK);
+    rcc_writel(A_CR, R_CR_PLLSAI1ON_MASK);
+    g_assert_true(check_nvic_pending(RCC_IRQ));
+    rcc_writel(A_CICR, R_CICR_PLLSAI1RDYC_MASK);
+    unpend_nvic_irq(RCC_IRQ);
+
+    rcc_writel(A_CIER, R_CIER_PLLSAI2RDYIE_MASK);
+    rcc_writel(A_CR, R_CR_PLLSAI2ON_MASK);
+    g_assert_true(check_nvic_pending(RCC_IRQ));
+    rcc_writel(A_CICR, R_CICR_PLLSAI2RDYC_MASK);
+    unpend_nvic_irq(RCC_IRQ);
+}
+
+int main(int argc, char **argv)
+{
+    int ret;
+
+    g_test_init(&argc, &argv, NULL);
+    g_test_set_nonfatal_assertions();
+    /*
+     * These tests check separately that we can enable the PLLs, change the
+     * sysclk, and enable different devices.
+     * They are dependent on one another.
+     * We assume that all operations that would take some time to have an effect
+     * (e.g. changing the PLL frequency) are done instantaneously.
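+     * (Hence each test can assert the RDY/SWS status bits right after
+     * programming the corresponding ON/SW bits.)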
+ */ + qtest_add_func("stm32l4x5/rcc/init_msi", test_init_msi); + qtest_add_func("stm32l4x5/rcc/set_msi_as_sysclk", + test_set_msi_as_sysclk); + qtest_add_func("stm32l4x5/rcc/activate_lse", test_activate_lse); + qtest_add_func("stm32l4x5/rcc/init_pll", test_init_pll); + + qtest_add_func("stm32l4x5/rcc/irq", test_irq); + + qtest_start("-machine b-l475e-iot01a"); + ret = g_test_run(); + qtest_end(); + + return ret; +} diff --git a/tests/qtest/stm32l4x5_syscfg-test.c b/tests/qtest/stm32l4x5_syscfg-test.c new file mode 100644 index 00000000000..ed4801798d4 --- /dev/null +++ b/tests/qtest/stm32l4x5_syscfg-test.c @@ -0,0 +1,331 @@ +/* + * QTest testcase for STM32L4x5_SYSCFG + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "libqtest-single.h" + +#define SYSCFG_BASE_ADDR 0x40010000 +#define SYSCFG_MEMRMP 0x00 +#define SYSCFG_CFGR1 0x04 +#define SYSCFG_EXTICR1 0x08 +#define SYSCFG_EXTICR2 0x0C +#define SYSCFG_EXTICR3 0x10 +#define SYSCFG_EXTICR4 0x14 +#define SYSCFG_SCSR 0x18 +#define SYSCFG_CFGR2 0x1C +#define SYSCFG_SWPR 0x20 +#define SYSCFG_SKR 0x24 +#define SYSCFG_SWPR2 0x28 +#define INVALID_ADDR 0x2C + +static void syscfg_writel(unsigned int offset, uint32_t value) +{ + writel(SYSCFG_BASE_ADDR + offset, value); +} + +static uint32_t syscfg_readl(unsigned int offset) +{ + return readl(SYSCFG_BASE_ADDR + offset); +} + +static void syscfg_set_irq(int num, int level) +{ + qtest_set_irq_in(global_qtest, "/machine/soc/syscfg", + NULL, num, level); +} + +static void system_reset(void) +{ + QDict *response; + response = qtest_qmp(global_qtest, "{'execute': 'system_reset'}"); + g_assert(qdict_haskey(response, "return")); + qobject_unref(response); +} + +static void test_reset(void) +{ + /* + * Test that registers are initialized at the correct values + */ + g_assert_cmpuint(syscfg_readl(SYSCFG_MEMRMP), ==, 0x00000000); + + g_assert_cmpuint(syscfg_readl(SYSCFG_CFGR1), ==, 0x7C000001); + + g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR1), ==, 0x00000000); + + g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR2), ==, 0x00000000); + + g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR3), ==, 0x00000000); + + g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR4), ==, 0x00000000); + + g_assert_cmpuint(syscfg_readl(SYSCFG_SCSR), ==, 0x00000000); + + g_assert_cmpuint(syscfg_readl(SYSCFG_CFGR2), ==, 0x00000000); + + g_assert_cmpuint(syscfg_readl(SYSCFG_SWPR), ==, 0x00000000); + + g_assert_cmpuint(syscfg_readl(SYSCFG_SKR), ==, 0x00000000); + + g_assert_cmpuint(syscfg_readl(SYSCFG_SWPR2), ==, 0x00000000); +} + +static void test_reserved_bits(void) +{ + /* + * Test that reserved bits stay at reset value + * (which is 0 for all of them) by writing '1' + * in all reserved bits (keeping reset value for + * other bits) and checking that the + * register is still at reset value + */ + syscfg_writel(SYSCFG_MEMRMP, 0xFFFFFEF8); + g_assert_cmpuint(syscfg_readl(SYSCFG_MEMRMP), ==, 0x00000000); + + syscfg_writel(SYSCFG_CFGR1, 0x7F00FEFF); + g_assert_cmpuint(syscfg_readl(SYSCFG_CFGR1), ==, 0x7C000001); + + syscfg_writel(SYSCFG_EXTICR1, 0xFFFF0000); + g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR1), ==, 0x00000000); + + syscfg_writel(SYSCFG_EXTICR2, 0xFFFF0000); + g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR2), ==, 0x00000000); + + syscfg_writel(SYSCFG_EXTICR3, 0xFFFF0000); + g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR3), ==, 0x00000000); + + 
syscfg_writel(SYSCFG_EXTICR4, 0xFFFF0000);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR4), ==, 0x00000000);
+
+    syscfg_writel(SYSCFG_SKR, 0xFFFFFF00);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_SKR), ==, 0x00000000);
+}
+
+static void test_set_and_clear(void)
+{
+    /*
+     * Test that regular bits can be set and cleared
+     */
+    syscfg_writel(SYSCFG_MEMRMP, 0x00000107);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_MEMRMP), ==, 0x00000107);
+    syscfg_writel(SYSCFG_MEMRMP, 0x00000000);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_MEMRMP), ==, 0x00000000);
+
+    /* CFGR1 bit 0 is clear-only so we keep it set */
+    syscfg_writel(SYSCFG_CFGR1, 0xFCFF0101);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_CFGR1), ==, 0xFCFF0101);
+    syscfg_writel(SYSCFG_CFGR1, 0x00000001);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_CFGR1), ==, 0x00000001);
+
+    syscfg_writel(SYSCFG_EXTICR1, 0x0000FFFF);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR1), ==, 0x0000FFFF);
+    syscfg_writel(SYSCFG_EXTICR1, 0x00000000);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR1), ==, 0x00000000);
+
+    syscfg_writel(SYSCFG_EXTICR2, 0x0000FFFF);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR2), ==, 0x0000FFFF);
+    syscfg_writel(SYSCFG_EXTICR2, 0x00000000);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR2), ==, 0x00000000);
+
+    syscfg_writel(SYSCFG_EXTICR3, 0x0000FFFF);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR3), ==, 0x0000FFFF);
+    syscfg_writel(SYSCFG_EXTICR3, 0x00000000);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR3), ==, 0x00000000);
+
+    syscfg_writel(SYSCFG_EXTICR4, 0x0000FFFF);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR4), ==, 0x0000FFFF);
+    syscfg_writel(SYSCFG_EXTICR4, 0x00000000);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_EXTICR4), ==, 0x00000000);
+
+    syscfg_writel(SYSCFG_SKR, 0x000000FF);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_SKR), ==, 0x000000FF);
+    syscfg_writel(SYSCFG_SKR, 0x00000000);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_SKR), ==, 0x00000000);
+}
+
+static void test_clear_by_writing_1(void)
+{
+    /*
+     * Test that writing '1' doesn't set the bit
+     */
+    syscfg_writel(SYSCFG_CFGR2, 0x00000100);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_CFGR2), ==, 0x00000000);
+}
+
+static void test_set_only_bits(void)
+{
+    /*
+     * Test that set-only bits can't be cleared
+     */
+    syscfg_writel(SYSCFG_CFGR2, 0x0000000F);
+    syscfg_writel(SYSCFG_CFGR2, 0x00000000);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_CFGR2), ==, 0x0000000F);
+
+    syscfg_writel(SYSCFG_SWPR, 0xFFFFFFFF);
+    syscfg_writel(SYSCFG_SWPR, 0x00000000);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_SWPR), ==, 0xFFFFFFFF);
+
+    syscfg_writel(SYSCFG_SWPR2, 0xFFFFFFFF);
+    syscfg_writel(SYSCFG_SWPR2, 0x00000000);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_SWPR2), ==, 0xFFFFFFFF);
+
+    system_reset();
+}
+
+static void test_clear_only_bits(void)
+{
+    /*
+     * Test that clear-only bits can't be set
+     */
+    syscfg_writel(SYSCFG_CFGR1, 0x00000000);
+    syscfg_writel(SYSCFG_CFGR1, 0x00000001);
+    g_assert_cmpuint(syscfg_readl(SYSCFG_CFGR1), ==, 0x00000000);
+
+    system_reset();
+}
+
+static void test_interrupt(void)
+{
+    /*
+     * Test that rising GPIO lines result in an irq
+     * with the right configuration
+     */
+    qtest_irq_intercept_in(global_qtest, "/machine/soc/exti");
+
+    /* GPIOA is the default source for EXTI lines 0 to 15 */
+
+    syscfg_set_irq(0, 1);
+
+    g_assert_true(get_irq(0));
+
+
+    syscfg_set_irq(15, 1);
+
+    g_assert_true(get_irq(15));
+
+    /* Configure GPIOB[1] as the source input for EXTI1 */
+    syscfg_writel(SYSCFG_EXTICR1, 0x00000010);
+
+    syscfg_set_irq(17, 1);
+
+    g_assert_true(get_irq(1));
+
+    /* Clean the test */
+
syscfg_writel(SYSCFG_EXTICR1, 0x00000000); + syscfg_set_irq(0, 0); + syscfg_set_irq(15, 0); + syscfg_set_irq(17, 0); +} + +static void test_irq_pin_multiplexer(void) +{ + /* + * Test that syscfg irq sets the right exti irq + */ + + qtest_irq_intercept_in(global_qtest, "/machine/soc/exti"); + + syscfg_set_irq(0, 1); + + /* Check that irq 0 was set and irq 15 wasn't */ + g_assert_true(get_irq(0)); + g_assert_false(get_irq(15)); + + /* Clean the test */ + syscfg_set_irq(0, 0); + + syscfg_set_irq(15, 1); + + /* Check that irq 15 was set and irq 0 wasn't */ + g_assert_true(get_irq(15)); + g_assert_false(get_irq(0)); + + /* Clean the test */ + syscfg_set_irq(15, 0); +} + +static void test_irq_gpio_multiplexer(void) +{ + /* + * Test that an irq is generated only by the right GPIO + */ + + qtest_irq_intercept_in(global_qtest, "/machine/soc/exti"); + + /* GPIOA is the default source for EXTI lines 0 to 15 */ + + /* Check that setting rising pin GPIOA[0] generates an irq */ + syscfg_set_irq(0, 1); + + g_assert_true(get_irq(0)); + + /* Clean the test */ + syscfg_set_irq(0, 0); + + /* Check that setting rising pin GPIOB[0] doesn't generate an irq */ + syscfg_set_irq(16, 1); + + g_assert_false(get_irq(0)); + + /* Clean the test */ + syscfg_set_irq(16, 0); + + /* Configure GPIOB[0] as the source input for EXTI0 */ + syscfg_writel(SYSCFG_EXTICR1, 0x00000001); + + /* Check that setting rising pin GPIOA[0] doesn't generate an irq */ + syscfg_set_irq(0, 1); + + g_assert_false(get_irq(0)); + + /* Clean the test */ + syscfg_set_irq(0, 0); + + /* Check that setting rising pin GPIOB[0] generates an irq */ + syscfg_set_irq(16, 1); + + g_assert_true(get_irq(0)); + + /* Clean the test */ + syscfg_set_irq(16, 0); + syscfg_writel(SYSCFG_EXTICR1, 0x00000000); +} + +int main(int argc, char **argv) +{ + int ret; + + g_test_init(&argc, &argv, NULL); + g_test_set_nonfatal_assertions(); + + qtest_add_func("stm32l4x5/syscfg/test_reset", test_reset); + qtest_add_func("stm32l4x5/syscfg/test_reserved_bits", + test_reserved_bits); + qtest_add_func("stm32l4x5/syscfg/test_set_and_clear", + test_set_and_clear); + qtest_add_func("stm32l4x5/syscfg/test_clear_by_writing_1", + test_clear_by_writing_1); + qtest_add_func("stm32l4x5/syscfg/test_set_only_bits", + test_set_only_bits); + qtest_add_func("stm32l4x5/syscfg/test_clear_only_bits", + test_clear_only_bits); + qtest_add_func("stm32l4x5/syscfg/test_interrupt", + test_interrupt); + qtest_add_func("stm32l4x5/syscfg/test_irq_pin_multiplexer", + test_irq_pin_multiplexer); + qtest_add_func("stm32l4x5/syscfg/test_irq_gpio_multiplexer", + test_irq_gpio_multiplexer); + + qtest_start("-machine b-l475e-iot01a"); + ret = g_test_run(); + qtest_end(); + + return ret; +} diff --git a/tests/qtest/test-hmp.c b/tests/qtest/test-hmp.c index fc9125f8bba..1b2e07522fe 100644 --- a/tests/qtest/test-hmp.c +++ b/tests/qtest/test-hmp.c @@ -64,7 +64,6 @@ static const char *hmp_cmds[] = { "qom-get /machine initrd", "screendump /dev/null", "sendkey x", - "singlestep on", "wavcapture /dev/null", "stopcapture 0", "sum 0 512", diff --git a/tests/qtest/virtio-9p-test.c b/tests/qtest/virtio-9p-test.c index 65e69491e5a..3c8cd235cf0 100644 --- a/tests/qtest/virtio-9p-test.c +++ b/tests/qtest/virtio-9p-test.c @@ -693,9 +693,20 @@ static void fs_unlinkat_hardlink(void *obj, void *data, g_assert(stat(real_file, &st_real) == 0); } +static void cleanup_9p_local_driver(void *data) +{ + /* remove previously created test dir when test is completed */ + virtio_9p_remove_local_test_dir(); +} + static void 
*assign_9p_local_driver(GString *cmd_line, void *arg) { + /* make sure test dir for the 'local' tests exists */ + virtio_9p_create_local_test_dir(); + virtio_9p_assign_local_driver(cmd_line, "security_model=mapped-xattr"); + + g_test_queue_destroy(cleanup_9p_local_driver, NULL); return arg; } @@ -735,15 +746,6 @@ static void register_virtio_9p_test(void) /* 9pfs test cases using the 'local' filesystem driver */ - - /* - * XXX: Until we are sure that these tests can run everywhere, - * keep them as "slow" so that they aren't run with "make check". - */ - if (!g_test_slow()) { - return; - } - opts.before = assign_9p_local_driver; qos_add_test("local/config", "virtio-9p", pci_config, &opts); qos_add_test("local/create_dir", "virtio-9p", fs_create_dir, &opts); @@ -759,15 +761,3 @@ static void register_virtio_9p_test(void) } libqos_init(register_virtio_9p_test); - -static void __attribute__((constructor)) construct_9p_test(void) -{ - /* make sure test dir for the 'local' tests exists */ - virtio_9p_create_local_test_dir(); -} - -static void __attribute__((destructor)) destruct_9p_test(void) -{ - /* remove previously created test dir when test suite completed */ - virtio_9p_remove_local_test_dir(); -} diff --git a/tests/qtest/virtio-iommu-test.c b/tests/qtest/virtio-iommu-test.c index 068e7a9e6c1..afb225971d2 100644 --- a/tests/qtest/virtio-iommu-test.c +++ b/tests/qtest/virtio-iommu-test.c @@ -34,7 +34,7 @@ static void pci_config(void *obj, void *data, QGuestAllocator *t_alloc) uint8_t bypass = qvirtio_config_readb(dev, 36); g_assert_cmpint(input_range_start, ==, 0); - g_assert_cmphex(input_range_end, ==, UINT64_MAX); + g_assert_cmphex(input_range_end, >=, UINT32_MAX); g_assert_cmpint(domain_range_start, ==, 0); g_assert_cmpint(domain_range_end, ==, UINT32_MAX); g_assert_cmpint(bypass, ==, 1); diff --git a/tests/qtest/virtio-net-failover.c b/tests/qtest/virtio-net-failover.c index 0d40bc1f2dd..73dfabc2728 100644 --- a/tests/qtest/virtio-net-failover.c +++ b/tests/qtest/virtio-net-failover.c @@ -486,7 +486,7 @@ static void test_hotplug_1_reverse(void) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -517,7 +517,7 @@ static void test_hotplug_2(void) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -566,7 +566,7 @@ static void test_hotplug_2_reverse(void) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'rombar': 0," "'romfile': ''," @@ -639,7 +639,7 @@ static void test_migrate_out(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -754,7 +754,7 @@ static void test_migrate_in(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -808,7 +808,7 @@ static void test_off_migrate_out(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'off'," + "'failover': false," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -876,7 +876,7 @@ static void test_off_migrate_in(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 
'off'," + "'failover': false," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -927,7 +927,7 @@ static void test_guest_off_migrate_out(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1003,7 +1003,7 @@ static void test_guest_off_migrate_in(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1054,7 +1054,7 @@ static void test_migrate_guest_off_abort(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1154,7 +1154,7 @@ static void test_migrate_abort_wait_unplug(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1243,7 +1243,7 @@ static void test_migrate_abort_active(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1342,7 +1342,7 @@ static void test_migrate_off_abort(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'off'," + "'failover': false," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1434,7 +1434,7 @@ static void test_migrate_abort_timeout(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1530,7 +1530,7 @@ static void test_multi_out(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1561,7 +1561,7 @@ static void test_multi_out(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby1", "{'bus': 'root2'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs2'," "'mac': '"MAC_STANDBY1"'}"); @@ -1700,7 +1700,7 @@ static void test_multi_in(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1724,7 +1724,7 @@ static void test_multi_in(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby1", "{'bus': 'root2'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs2'," "'mac': '"MAC_STANDBY1"'}"); diff --git a/tests/qtest/xlnx-versal-trng-test.c b/tests/qtest/xlnx-versal-trng-test.c index cef4e575bba..ba86f39d13c 100644 --- a/tests/qtest/xlnx-versal-trng-test.c +++ b/tests/qtest/xlnx-versal-trng-test.c @@ -298,10 +298,13 @@ static size_t trng_collect(uint32_t *rnd, size_t cnt) return i; } +/* These tests all generate 512 bits of random data with the device */ +#define TEST_DATA_WORDS (512 / 32) + static void trng_test_autogen(void) { - const size_t cnt = 512 / 32; - uint32_t rng[cnt], prng[cnt]; + const size_t cnt = TEST_DATA_WORDS; + uint32_t rng[TEST_DATA_WORDS], prng[TEST_DATA_WORDS]; size_t n; trng_reset(); @@ -343,8 +346,8 @@ static void trng_test_autogen(void) static void trng_test_oneshot(void) { - const size_t cnt = 512 / 32; - uint32_t rng[cnt]; + const size_t cnt = TEST_DATA_WORDS; + 
uint32_t rng[TEST_DATA_WORDS]; size_t n; trng_reset(); @@ -370,8 +373,8 @@ static void trng_test_oneshot(void) static void trng_test_per_str(void) { - const size_t cnt = 512 / 32; - uint32_t rng[cnt], prng[cnt]; + const size_t cnt = TEST_DATA_WORDS; + uint32_t rng[TEST_DATA_WORDS], prng[TEST_DATA_WORDS]; size_t n; trng_reset(); @@ -415,8 +418,8 @@ static void trng_test_forced_prng(void) const char *prop = "forced-prng"; const uint64_t seed = 0xdeadbeefbad1bad0ULL; - const size_t cnt = 512 / 32; - uint32_t rng[cnt], prng[cnt]; + const size_t cnt = TEST_DATA_WORDS; + uint32_t rng[TEST_DATA_WORDS], prng[TEST_DATA_WORDS]; size_t n; trng_reset(); diff --git a/tests/symqemu/binaries/printf/expected_outputs/000025 b/tests/symqemu/binaries/printf/expected_outputs/000024 similarity index 100% rename from tests/symqemu/binaries/printf/expected_outputs/000025 rename to tests/symqemu/binaries/printf/expected_outputs/000024 diff --git a/tests/symqemu/binaries/printf/expected_outputs/000024-optimistic b/tests/symqemu/binaries/printf/expected_outputs/000024-optimistic deleted file mode 100644 index afa944593a9..00000000000 --- a/tests/symqemu/binaries/printf/expected_outputs/000024-optimistic +++ /dev/null @@ -1 +0,0 @@ -aaaaa %X %d %d %d aaaaa diff --git a/tests/symqemu/binaries/printf/expected_outputs/000039 b/tests/symqemu/binaries/printf/expected_outputs/000025-optimistic similarity index 100% rename from tests/symqemu/binaries/printf/expected_outputs/000039 rename to tests/symqemu/binaries/printf/expected_outputs/000025-optimistic diff --git a/tests/symqemu/binaries/printf/expected_outputs/000035 b/tests/symqemu/binaries/printf/expected_outputs/000035 index 844182abefc..1a897f93993 100644 --- a/tests/symqemu/binaries/printf/expected_outputs/000035 +++ b/tests/symqemu/binaries/printf/expected_outputs/000035 @@ -1 +1 @@ -aaaaa %d % %d %d aaaaa +aaaaa %d %ç %d %d aaaaa diff --git a/tests/symqemu/binaries/printf/expected_outputs/000036 b/tests/symqemu/binaries/printf/expected_outputs/000036 index 046ba49afcf..c6b5f9dd6f1 100644 --- a/tests/symqemu/binaries/printf/expected_outputs/000036 +++ b/tests/symqemu/binaries/printf/expected_outputs/000036 @@ -1 +1 @@ -aaaaa %d %p %d %d aaaaa +aaaaa %d %A %d %d aaaaa diff --git a/tests/symqemu/binaries/printf/expected_outputs/000049 b/tests/symqemu/binaries/printf/expected_outputs/000038 similarity index 100% rename from tests/symqemu/binaries/printf/expected_outputs/000049 rename to tests/symqemu/binaries/printf/expected_outputs/000038 diff --git a/tests/symqemu/binaries/printf/expected_outputs/000038-optimistic b/tests/symqemu/binaries/printf/expected_outputs/000038-optimistic deleted file mode 100644 index e45d8101ed2..00000000000 --- a/tests/symqemu/binaries/printf/expected_outputs/000038-optimistic +++ /dev/null @@ -1 +0,0 @@ -aaaaa %d %X %d %d aaaaa diff --git a/tests/symqemu/binaries/printf/expected_outputs/000039-optimistic b/tests/symqemu/binaries/printf/expected_outputs/000039-optimistic new file mode 100644 index 00000000000..af76a9a1bc1 --- /dev/null +++ b/tests/symqemu/binaries/printf/expected_outputs/000039-optimistic @@ -0,0 +1 @@ +aaaaa %d %d %d %d aaaaa diff --git a/tests/symqemu/binaries/printf/expected_outputs/000045 b/tests/symqemu/binaries/printf/expected_outputs/000045 index aca2b1be80f..127acd4339b 100644 --- a/tests/symqemu/binaries/printf/expected_outputs/000045 +++ b/tests/symqemu/binaries/printf/expected_outputs/000045 @@ -1 +1 @@ -aaaaa %d %d %ç %d aaaaa +aaaaa %d %d %à %d aaaaa diff --git 
a/tests/symqemu/binaries/printf/expected_outputs/000046 b/tests/symqemu/binaries/printf/expected_outputs/000046 index 95bd00febf4..557e61a8156 100644 --- a/tests/symqemu/binaries/printf/expected_outputs/000046 +++ b/tests/symqemu/binaries/printf/expected_outputs/000046 @@ -1 +1 @@ -aaaaa %d %d %o %d aaaaa +aaaaa %d %d %` %d aaaaa diff --git a/tests/symqemu/binaries/printf/expected_outputs/000048 b/tests/symqemu/binaries/printf/expected_outputs/000048 new file mode 100644 index 00000000000..af76a9a1bc1 --- /dev/null +++ b/tests/symqemu/binaries/printf/expected_outputs/000048 @@ -0,0 +1 @@ +aaaaa %d %d %d %d aaaaa diff --git a/tests/symqemu/binaries/printf/expected_outputs/000048-optimistic b/tests/symqemu/binaries/printf/expected_outputs/000048-optimistic deleted file mode 100644 index 44c2254d142..00000000000 --- a/tests/symqemu/binaries/printf/expected_outputs/000048-optimistic +++ /dev/null @@ -1 +0,0 @@ -aaaaa %d %d %X %d aaaaa diff --git a/tests/symqemu/binaries/printf/expected_outputs/000049-optimistic b/tests/symqemu/binaries/printf/expected_outputs/000049-optimistic new file mode 100644 index 00000000000..af76a9a1bc1 --- /dev/null +++ b/tests/symqemu/binaries/printf/expected_outputs/000049-optimistic @@ -0,0 +1 @@ +aaaaa %d %d %d %d aaaaa diff --git a/tests/symqemu/binaries/printf/expected_outputs/000052 b/tests/symqemu/binaries/printf/expected_outputs/000052 index 27d1ebe5f8f..76afa7bd87e 100644 --- a/tests/symqemu/binaries/printf/expected_outputs/000052 +++ b/tests/symqemu/binaries/printf/expected_outputs/000052 @@ -1 +1 @@ -aaaaa %d %d %d %ç aaaaa +aaaaa %d %d %d % aaaaa diff --git a/tests/symqemu/binaries/printf/expected_outputs/000053 b/tests/symqemu/binaries/printf/expected_outputs/000053 index 62a5c8c1104..a1da40f5698 100644 --- a/tests/symqemu/binaries/printf/expected_outputs/000053 +++ b/tests/symqemu/binaries/printf/expected_outputs/000053 @@ -1 +1 @@ -aaaaa %d %d %d %b aaaaa +aaaaa %d %d %d %` aaaaa diff --git a/tests/symqemu/util.py b/tests/symqemu/util.py index 8c479839836..4a20d8e1944 100644 --- a/tests/symqemu/util.py +++ b/tests/symqemu/util.py @@ -1,7 +1,7 @@ import pathlib import subprocess -SYMQEMU_EXECUTABLE = pathlib.Path(__file__).parent.parent.parent / "build" / "x86_64-linux-user" / "qemu-x86_64" +SYMQEMU_EXECUTABLE = pathlib.Path(__file__).parent.parent.parent / "build" / "qemu-x86_64" BINARIES_DIR = pathlib.Path(__file__).parent / "binaries" diff --git a/tests/tcg/Makefile.target b/tests/tcg/Makefile.target index 8cf65f68dd8..f21be50d3b2 100644 --- a/tests/tcg/Makefile.target +++ b/tests/tcg/Makefile.target @@ -93,12 +93,9 @@ QEMU_OPTS= # If TCG debugging, or TCI is enabled things are a lot slower -# ??? Makefile no longer has any indication that TCI is enabled, -# but for the record: -# 15s original default -# 60s with --enable-debug -# 90s with --enable-tcg-interpreter -TIMEOUT=90 +# so we have to set our timeout for that. The current worst case +# offender is the system memory test running under TCI. +TIMEOUT=120 ifeq ($(filter %-softmmu, $(TARGET)),) # The order we include is important. We include multiarch first and @@ -171,7 +168,7 @@ RUN_TESTS+=$(EXTRA_RUNS) # Some plugins need additional arguments above the default to fully # exercise things. We can define them on a per-test basis here. 
-run-plugin-%-with-libmem.so: PLUGIN_ARGS=$(COMMA)inline=true$(COMMA)callback=true +run-plugin-%-with-libmem.so: PLUGIN_ARGS=$(COMMA)inline=true ifeq ($(filter %-softmmu, $(TARGET)),) run-%: % diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target index cded1d01fcd..70d728ae9af 100644 --- a/tests/tcg/aarch64/Makefile.target +++ b/tests/tcg/aarch64/Makefile.target @@ -10,6 +10,7 @@ VPATH += $(AARCH64_SRC) # Base architecture tests AARCH64_TESTS=fcvt pcalign-a64 lse2-fault +AARCH64_TESTS += test-2248 test-2150 fcvt: LDFLAGS+=-lm @@ -67,7 +68,7 @@ endif # SME Tests ifneq ($(CROSS_AS_HAS_ARMV9_SME),) -AARCH64_TESTS += sme-outprod1 +AARCH64_TESTS += sme-outprod1 sme-smopa-1 sme-smopa-2 endif # System Registers Tests diff --git a/tests/tcg/aarch64/gdbstub/test-sve-ioctl.py b/tests/tcg/aarch64/gdbstub/test-sve-ioctl.py index ee8d467e59d..a78a3a2514d 100644 --- a/tests/tcg/aarch64/gdbstub/test-sve-ioctl.py +++ b/tests/tcg/aarch64/gdbstub/test-sve-ioctl.py @@ -8,19 +8,10 @@ # import gdb -import sys +from test_gdbstub import main, report initial_vlen = 0 -failcount = 0 -def report(cond, msg): - "Report success/fail of test" - if cond: - print ("PASS: %s" % (msg)) - else: - print ("FAIL: %s" % (msg)) - global failcount - failcount += 1 class TestBreakpoint(gdb.Breakpoint): def __init__(self, sym_name="__sve_ld_done"): @@ -64,26 +55,5 @@ def run_test(): gdb.execute("c") -# -# This runs as the script it sourced (via -x, via run-test.py) -# -try: - inferior = gdb.selected_inferior() - arch = inferior.architecture() - report(arch.name() == "aarch64", "connected to aarch64") -except (gdb.error, AttributeError): - print("SKIPPING (not connected)", file=sys.stderr) - exit(0) - -try: - # Run the actual tests - run_test() -except: - print ("GDB Exception: %s" % (sys.exc_info()[0])) - failcount += 1 - import code - code.InteractiveConsole(locals=globals()).interact() - raise -print("All tests complete: %d failures" % failcount) -exit(failcount) +main(run_test, expected_arch="aarch64") diff --git a/tests/tcg/aarch64/gdbstub/test-sve.py b/tests/tcg/aarch64/gdbstub/test-sve.py index afd8ece98dd..84cdcd4a32e 100644 --- a/tests/tcg/aarch64/gdbstub/test-sve.py +++ b/tests/tcg/aarch64/gdbstub/test-sve.py @@ -6,20 +6,10 @@ # import gdb -import sys +from test_gdbstub import main, report MAGIC = 0xDEADBEEF -failcount = 0 - -def report(cond, msg): - "Report success/fail of test" - if cond: - print ("PASS: %s" % (msg)) - else: - print ("FAIL: %s" % (msg)) - global failcount - failcount += 1 def run_test(): "Run through the tests one by one" @@ -54,24 +44,5 @@ def run_test(): report(str(v.type) == "uint64_t", "size of %s" % (reg)) report(int(v) == MAGIC, "%s is 0x%x" % (reg, MAGIC)) -# -# This runs as the script it sourced (via -x, via run-test.py) -# -try: - inferior = gdb.selected_inferior() - arch = inferior.architecture() - report(arch.name() == "aarch64", "connected to aarch64") -except (gdb.error, AttributeError): - print("SKIPPING (not connected)", file=sys.stderr) - exit(0) - -try: - # Run the actual tests - run_test() -except: - print ("GDB Exception: %s" % (sys.exc_info()[0])) - failcount += 1 - -print("All tests complete: %d failures" % failcount) -exit(failcount) +main(run_test, expected_arch="aarch64") diff --git a/tests/tcg/aarch64/semicall.h b/tests/tcg/aarch64/semicall.h index 8a3fce35c5f..30d4de9a549 100644 --- a/tests/tcg/aarch64/semicall.h +++ b/tests/tcg/aarch64/semicall.h @@ -1,10 +1,10 @@ /* * Semihosting Tests - AArch64 helper * - * Copyright (c) 2019 + * Copyright (c) 
2019, 2024 * Written by Alex Bennée * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ uintptr_t __semi_call(uintptr_t type, uintptr_t arg0) diff --git a/tests/tcg/aarch64/sme-smopa-1.c b/tests/tcg/aarch64/sme-smopa-1.c new file mode 100644 index 00000000000..c62d5e00073 --- /dev/null +++ b/tests/tcg/aarch64/sme-smopa-1.c @@ -0,0 +1,47 @@ +#include +#include + +int main() +{ + static const int cmp[4][4] = { + { 110, 134, 158, 182 }, + { 390, 478, 566, 654 }, + { 670, 822, 974, 1126 }, + { 950, 1166, 1382, 1598 } + }; + int dst[4][4]; + int *tmp = &dst[0][0]; + + asm volatile( + ".arch armv8-r+sme\n\t" + "smstart\n\t" + "index z0.b, #0, #1\n\t" + "movprfx z1, z0\n\t" + "add z1.b, z1.b, #16\n\t" + "ptrue p0.b\n\t" + "smopa za0.s, p0/m, p0/m, z0.b, z1.b\n\t" + "ptrue p0.s, vl4\n\t" + "mov w12, #0\n\t" + "st1w { za0h.s[w12, #0] }, p0, [%0]\n\t" + "add %0, %0, #16\n\t" + "st1w { za0h.s[w12, #1] }, p0, [%0]\n\t" + "add %0, %0, #16\n\t" + "st1w { za0h.s[w12, #2] }, p0, [%0]\n\t" + "add %0, %0, #16\n\t" + "st1w { za0h.s[w12, #3] }, p0, [%0]\n\t" + "smstop" + : "+r"(tmp) : : "memory"); + + if (memcmp(cmp, dst, sizeof(dst)) == 0) { + return 0; + } + + /* See above for correct results. */ + for (int i = 0; i < 4; ++i) { + for (int j = 0; j < 4; ++j) { + printf("%6d", dst[i][j]); + } + printf("\n"); + } + return 1; +} diff --git a/tests/tcg/aarch64/sme-smopa-2.c b/tests/tcg/aarch64/sme-smopa-2.c new file mode 100644 index 00000000000..c9f48c3bfca --- /dev/null +++ b/tests/tcg/aarch64/sme-smopa-2.c @@ -0,0 +1,54 @@ +#include +#include + +int main() +{ + static const long cmp[4][4] = { + { 110, 134, 158, 182 }, + { 390, 478, 566, 654 }, + { 670, 822, 974, 1126 }, + { 950, 1166, 1382, 1598 } + }; + long dst[4][4]; + long *tmp = &dst[0][0]; + long svl; + + /* Validate that we have a wide enough vector for 4 elements. */ + asm(".arch armv8-r+sme-i64\n\trdsvl %0, #1" : "=r"(svl)); + if (svl < 32) { + return 0; + } + + asm volatile( + "smstart\n\t" + "index z0.h, #0, #1\n\t" + "movprfx z1, z0\n\t" + "add z1.h, z1.h, #16\n\t" + "ptrue p0.b\n\t" + "smopa za0.d, p0/m, p0/m, z0.h, z1.h\n\t" + "ptrue p0.d, vl4\n\t" + "mov w12, #0\n\t" + "st1d { za0h.d[w12, #0] }, p0, [%0]\n\t" + "add %0, %0, #32\n\t" + "st1d { za0h.d[w12, #1] }, p0, [%0]\n\t" + "mov w12, #2\n\t" + "add %0, %0, #32\n\t" + "st1d { za0h.d[w12, #0] }, p0, [%0]\n\t" + "add %0, %0, #32\n\t" + "st1d { za0h.d[w12, #1] }, p0, [%0]\n\t" + "smstop" + : "+r"(tmp) : : "memory"); + + if (memcmp(cmp, dst, sizeof(dst)) == 0) { + return 0; + } + + /* See above for correct results. 
*/ + for (int i = 0; i < 4; ++i) { + for (int j = 0; j < 4; ++j) { + printf("%6ld", dst[i][j]); + } + printf("\n"); + } + return 1; +} diff --git a/tests/tcg/aarch64/sysregs.c b/tests/tcg/aarch64/sysregs.c index f7a055f1d5f..301e61d0dd4 100644 --- a/tests/tcg/aarch64/sysregs.c +++ b/tests/tcg/aarch64/sysregs.c @@ -137,7 +137,7 @@ int main(void) /* all hidden, DebugVer fixed to 0x6 (ARMv8 debug architecture) */ get_cpu_reg_check_mask(id_aa64dfr0_el1, _m(0000,0000,0000,0006)); get_cpu_reg_check_zero(id_aa64dfr1_el1); - get_cpu_reg_check_mask(SYS_ID_AA64ZFR0_EL1, _m(0ff0,ff0f,00ff,00ff)); + get_cpu_reg_check_mask(SYS_ID_AA64ZFR0_EL1, _m(0ff0,ff0f,0fff,00ff)); get_cpu_reg_check_mask(SYS_ID_AA64SMFR0_EL1, _m(8ff1,fcff,0000,0000)); get_cpu_reg_check_zero(id_aa64afr0_el1); diff --git a/tests/tcg/aarch64/system/vtimer.c b/tests/tcg/aarch64/system/vtimer.c index 42f2f7796c7..7d725eced34 100644 --- a/tests/tcg/aarch64/system/vtimer.c +++ b/tests/tcg/aarch64/system/vtimer.c @@ -6,7 +6,7 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#include +#include #include /* grabbed from Linux */ diff --git a/tests/tcg/aarch64/test-2150.c b/tests/tcg/aarch64/test-2150.c new file mode 100644 index 00000000000..fb86c119586 --- /dev/null +++ b/tests/tcg/aarch64/test-2150.c @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* See https://gitlab.com/qemu-project/qemu/-/issues/2150 */ + +int main() +{ + asm volatile( + "movi v6.4s, #1\n" + "movi v7.4s, #0\n" + "sub v6.2d, v7.2d, v6.2d\n" + : : : "v6", "v7"); + return 0; +} diff --git a/tests/tcg/aarch64/test-2248.c b/tests/tcg/aarch64/test-2248.c new file mode 100644 index 00000000000..aac2e178368 --- /dev/null +++ b/tests/tcg/aarch64/test-2248.c @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* See https://gitlab.com/qemu-project/qemu/-/issues/2248 */ + +#include + +__attribute__((noinline)) +long test(long x, long y, long sh) +{ + long r; + asm("cmp %1, %2\n\t" + "cset x12, lt\n\t" + "and w11, w12, #0xff\n\t" + "cmp w11, #0\n\t" + "csetm x14, ne\n\t" + "lsr x13, x14, %3\n\t" + "sxtb %0, w13" + : "=r"(r) + : "r"(x), "r"(y), "r"(sh) + : "x11", "x12", "x13", "x14"); + return r; +} + +int main() +{ + long r = test(0, 1, 2); + assert(r == -1); + return 0; +} diff --git a/tests/tcg/alpha/Makefile.target b/tests/tcg/alpha/Makefile.target index b94500a7d9c..fdd7ddf64ec 100644 --- a/tests/tcg/alpha/Makefile.target +++ b/tests/tcg/alpha/Makefile.target @@ -13,6 +13,3 @@ test-cmov: test-cond.c $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) run-test-cmov: test-cmov - -# On Alpha Linux only supports 8k pages -EXTRA_RUNS+=run-test-mmap-8192 diff --git a/tests/tcg/arm/Makefile.target b/tests/tcg/arm/Makefile.target index 3473f4619eb..0a1965fce79 100644 --- a/tests/tcg/arm/Makefile.target +++ b/tests/tcg/arm/Makefile.target @@ -79,6 +79,3 @@ sha512-vector: sha512.c ARM_TESTS += sha512-vector TESTS += $(ARM_TESTS) - -# On ARM Linux only supports 4k pages -EXTRA_RUNS+=run-test-mmap-4096 diff --git a/tests/tcg/arm/semicall.h b/tests/tcg/arm/semicall.h index ad8ac51310b..624937c5577 100644 --- a/tests/tcg/arm/semicall.h +++ b/tests/tcg/arm/semicall.h @@ -1,10 +1,10 @@ /* * Semihosting Tests - ARM Helper * - * Copyright (c) 2019 + * Copyright (c) 2019, 2024 * Written by Alex Bennée * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ uintptr_t __semi_call(uintptr_t type, uintptr_t arg0) diff --git a/tests/tcg/hppa/Makefile.target b/tests/tcg/hppa/Makefile.target index cdd0d572a78..ea5ae2186df 
100644 --- a/tests/tcg/hppa/Makefile.target +++ b/tests/tcg/hppa/Makefile.target @@ -2,9 +2,6 @@ # # HPPA specific tweaks - specifically masking out broken tests -# On parisc Linux supports 4K/16K/64K (but currently only 4k works) -EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-16384 run-test-mmap-65536 - # This triggers failures for hppa-linux about 1% of the time # HPPA is the odd target that can't use the sigtramp page; # it requires the full vdso with dwarf2 unwind info. diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target index 3dec7c6c423..bbe2c44b2a7 100644 --- a/tests/tcg/i386/Makefile.target +++ b/tests/tcg/i386/Makefile.target @@ -13,7 +13,7 @@ config-cc.mak: Makefile I386_SRCS=$(notdir $(wildcard $(I386_SRC)/*.c)) ALL_X86_TESTS=$(I386_SRCS:.c=) -SKIP_I386_TESTS=test-i386-ssse3 test-avx test-3dnow test-mmx +SKIP_I386_TESTS=test-i386-ssse3 test-avx test-3dnow test-mmx test-flags X86_64_TESTS:=$(filter test-i386-adcox test-i386-bmi2 $(SKIP_I386_TESTS), $(ALL_X86_TESTS)) test-i386-sse-exceptions: CFLAGS += -msse4.1 -mfpmath=sse @@ -71,9 +71,6 @@ endif I386_TESTS:=$(filter-out $(SKIP_I386_TESTS), $(ALL_X86_TESTS)) TESTS=$(MULTIARCH_TESTS) $(I386_TESTS) -# On i386 and x86_64 Linux only supports 4k pages (large pages are a different hack) -EXTRA_RUNS+=run-test-mmap-4096 - sha512-sse: CFLAGS=-msse4.1 -O3 sha512-sse: sha512.c $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) diff --git a/tests/tcg/i386/system/boot.S b/tests/tcg/i386/system/boot.S index 9e8920cbfe0..28902c400d8 100644 --- a/tests/tcg/i386/system/boot.S +++ b/tests/tcg/i386/system/boot.S @@ -2,12 +2,12 @@ * i386 boot code, based on qemu-bmibug. * * Copyright 2019 Doug Gale - * Copyright 2019 Linaro + * Copyright 2019, 2024 Linaro * - * This work is licensed under the terms of the GNU GPL, version 3 or later. + * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ .section .head diff --git a/tests/tcg/i386/test-flags.c b/tests/tcg/i386/test-flags.c new file mode 100644 index 00000000000..c379e296275 --- /dev/null +++ b/tests/tcg/i386/test-flags.c @@ -0,0 +1,37 @@ +#define _GNU_SOURCE +#include +#include +#include +#include + +volatile unsigned long flags; +volatile unsigned long flags_after; +int *addr; + +void sigsegv(int sig, siginfo_t *info, ucontext_t *uc) +{ + flags = uc->uc_mcontext.gregs[REG_EFL]; + mprotect(addr, 4096, PROT_READ|PROT_WRITE); +} + +int main() +{ + struct sigaction sa = { .sa_handler = (void *)sigsegv, .sa_flags = SA_SIGINFO }; + sigaction(SIGSEGV, &sa, NULL); + + /* fault in the page then protect it */ + addr = mmap (NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0); + *addr = 0x1234; + mprotect(addr, 4096, PROT_READ); + + asm("# set flags to all ones \n" + "mov $-1, %%eax \n" + "movq addr, %%rdi \n" + "sahf \n" + "sub %%eax, (%%rdi) \n" + "pushf \n" + "pop flags_after(%%rip) \n" : : : "eax", "edi", "memory"); + + /* OF can have any value before the SUB instruction. 
*/ + assert((flags & 0xff) == 0xd7 && (flags_after & 0x8ff) == 0x17); +} diff --git a/tests/tcg/m68k/Makefile.target b/tests/tcg/m68k/Makefile.target index 6ff214e60a5..33f7b1b1275 100644 --- a/tests/tcg/m68k/Makefile.target +++ b/tests/tcg/m68k/Makefile.target @@ -5,6 +5,3 @@ VPATH += $(SRC_PATH)/tests/tcg/m68k TESTS += trap denormal - -# On m68k Linux supports 4k and 8k pages (but 8k is currently broken) -EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-8192 diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target index d31ba8d6ae4..5e3391ec9d2 100644 --- a/tests/tcg/multiarch/Makefile.target +++ b/tests/tcg/multiarch/Makefile.target @@ -51,18 +51,9 @@ run-plugin-vma-pthread-with-%: vma-pthread $(call skip-test, $<, "flaky on CI?") endif -# We define the runner for test-mmap after the individual -# architectures have defined their supported pages sizes. If no -# additional page sizes are defined we only run the default test. - -# default case (host page size) run-test-mmap: test-mmap $(call run-test, test-mmap, $(QEMU) $<, $< (default)) -# additional page sizes (defined by each architecture adding to EXTRA_RUNS) -run-test-mmap-%: test-mmap - $(call run-test, test-mmap-$*, $(QEMU) -p $* $<, $< ($* byte pages)) - ifneq ($(GDB),) GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py @@ -80,6 +71,13 @@ run-gdbstub-qxfer-auxv-read: sha1 --bin $< --test $(MULTIARCH_SRC)/gdbstub/test-qxfer-auxv-read.py, \ basic gdbstub qXfer:auxv:read support) +run-gdbstub-qxfer-siginfo-read: segfault + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(GDB) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin "$< -s" --test $(MULTIARCH_SRC)/gdbstub/test-qxfer-siginfo-read.py, \ + basic gdbstub qXfer:siginfo:read support) + run-gdbstub-proc-mappings: sha1 $(call run-test, $@, $(GDB_SCRIPT) \ --gdb $(GDB) \ @@ -101,13 +99,44 @@ run-gdbstub-registers: sha512 --bin $< --test $(MULTIARCH_SRC)/gdbstub/registers.py, \ checking register enumeration) +run-gdbstub-prot-none: prot-none + $(call run-test, $@, env PROT_NONE_PY=1 $(GDB_SCRIPT) \ + --gdb $(GDB) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin $< --test $(MULTIARCH_SRC)/gdbstub/prot-none.py, \ + accessing PROT_NONE memory) + +run-gdbstub-catch-syscalls: catch-syscalls + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(GDB) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin $< --test $(MULTIARCH_SRC)/gdbstub/catch-syscalls.py, \ + hitting a syscall catchpoint) + +run-gdbstub-follow-fork-mode-child: follow-fork-mode + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(GDB) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin $< --test $(MULTIARCH_SRC)/gdbstub/follow-fork-mode-child.py, \ + following children on fork) + +run-gdbstub-follow-fork-mode-parent: follow-fork-mode + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(GDB) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin $< --test $(MULTIARCH_SRC)/gdbstub/follow-fork-mode-parent.py, \ + following parents on fork) + else run-gdbstub-%: $(call skip-test, "gdbstub test $*", "need working gdb with $(patsubst -%,,$(TARGET_NAME)) support") endif EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read \ run-gdbstub-proc-mappings run-gdbstub-thread-breakpoint \ - run-gdbstub-registers + run-gdbstub-registers run-gdbstub-prot-none \ + run-gdbstub-catch-syscalls run-gdbstub-follow-fork-mode-child \ + run-gdbstub-follow-fork-mode-parent \ + run-gdbstub-qxfer-siginfo-read # ARM Compatible Semi Hosting Tests # diff --git a/tests/tcg/multiarch/arm-compat-semi/semiconsole.c 
b/tests/tcg/multiarch/arm-compat-semi/semiconsole.c index 1d82efc589d..1e2268f4b75 100644 --- a/tests/tcg/multiarch/arm-compat-semi/semiconsole.c +++ b/tests/tcg/multiarch/arm-compat-semi/semiconsole.c @@ -1,10 +1,10 @@ /* * linux-user semihosting console * - * Copyright (c) 2019 + * Copyright (c) 2024 * Written by Alex Bennée * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ #define SYS_READC 0x07 diff --git a/tests/tcg/multiarch/arm-compat-semi/semihosting.c b/tests/tcg/multiarch/arm-compat-semi/semihosting.c index 8627eee3cf7..f609c01341a 100644 --- a/tests/tcg/multiarch/arm-compat-semi/semihosting.c +++ b/tests/tcg/multiarch/arm-compat-semi/semihosting.c @@ -1,10 +1,10 @@ /* * linux-user semihosting checks * - * Copyright (c) 2019 + * Copyright (c) 2019, 2024 * Written by Alex Bennée * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ #define SYS_WRITE0 0x04 diff --git a/tests/tcg/multiarch/catch-syscalls.c b/tests/tcg/multiarch/catch-syscalls.c new file mode 100644 index 00000000000..d1ff1936a7a --- /dev/null +++ b/tests/tcg/multiarch/catch-syscalls.c @@ -0,0 +1,51 @@ +/* + * Test GDB syscall catchpoints. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#define _GNU_SOURCE +#include +#include + +const char *catch_syscalls_state = "start"; + +void end_of_main(void) +{ +} + +int main(void) +{ + int ret = EXIT_FAILURE; + char c0 = 'A', c1; + int fd[2]; + + catch_syscalls_state = "pipe2"; + if (pipe2(fd, 0)) { + goto out; + } + + catch_syscalls_state = "write"; + if (write(fd[1], &c0, sizeof(c0)) != sizeof(c0)) { + goto out_close; + } + + catch_syscalls_state = "read"; + if (read(fd[0], &c1, sizeof(c1)) != sizeof(c1)) { + goto out_close; + } + + catch_syscalls_state = "check"; + if (c0 == c1) { + ret = EXIT_SUCCESS; + } + +out_close: + catch_syscalls_state = "close"; + close(fd[0]); + close(fd[1]); + +out: + catch_syscalls_state = "end"; + end_of_main(); + return ret; +} diff --git a/tests/tcg/multiarch/float_convd.c b/tests/tcg/multiarch/float_convd.c index 0a1f0f93dc5..58d7f8b4c58 100644 --- a/tests/tcg/multiarch/float_convd.c +++ b/tests/tcg/multiarch/float_convd.c @@ -1,9 +1,9 @@ /* * Floating Point Convert Doubles to Various * - * Copyright (c) 2019 Linaro + * Copyright (c) 2019, 2024 Linaro * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ #include diff --git a/tests/tcg/multiarch/float_convs.c b/tests/tcg/multiarch/float_convs.c index 2e4fa55324d..cb1fdd439e3 100644 --- a/tests/tcg/multiarch/float_convs.c +++ b/tests/tcg/multiarch/float_convs.c @@ -1,9 +1,9 @@ /* * Floating Point Convert Single to Various * - * Copyright (c) 2019 Linaro + * Copyright (c) 2019, 2024 Linaro * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ #include diff --git a/tests/tcg/multiarch/float_helpers.h b/tests/tcg/multiarch/float_helpers.h index 309f3f4bf10..c42ebe64b9e 100644 --- a/tests/tcg/multiarch/float_helpers.h +++ b/tests/tcg/multiarch/float_helpers.h @@ -1,9 +1,9 @@ /* * Common Float Helpers * - * Copyright (c) 2019 Linaro + * Copyright (c) 2019, 2024 Linaro * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ #include diff --git a/tests/tcg/multiarch/float_madds.c b/tests/tcg/multiarch/float_madds.c index 4888f8641f4..a692e052d5b 100644 --- a/tests/tcg/multiarch/float_madds.c +++ b/tests/tcg/multiarch/float_madds.c @@ -1,9 +1,9 @@ /* * Fused Multiply Add (Single) 
* - * Copyright (c) 2019 Linaro + * Copyright (c) 2019, 2024 Linaro * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ #include diff --git a/tests/tcg/multiarch/follow-fork-mode.c b/tests/tcg/multiarch/follow-fork-mode.c new file mode 100644 index 00000000000..cb6b032b388 --- /dev/null +++ b/tests/tcg/multiarch/follow-fork-mode.c @@ -0,0 +1,56 @@ +/* + * Test GDB's follow-fork-mode. + * + * fork() a chain of processes. + * Parents sends one byte to their children, and children return their + * position in the chain, in order to prove that they survived GDB's fork() + * handling. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include +#include +#include + +void break_after_fork(void) +{ +} + +int main(void) +{ + int depth = 42, err, i, fd[2], status; + pid_t child, pid; + ssize_t n; + char b; + + for (i = 0; i < depth; i++) { + err = pipe(fd); + assert(err == 0); + child = fork(); + break_after_fork(); + assert(child != -1); + if (child == 0) { + close(fd[1]); + + n = read(fd[0], &b, 1); + close(fd[0]); + assert(n == 1); + assert(b == (char)i); + } else { + close(fd[0]); + + b = (char)i; + n = write(fd[1], &b, 1); + close(fd[1]); + assert(n == 1); + + pid = waitpid(child, &status, 0); + assert(pid == child); + assert(WIFEXITED(status)); + return WEXITSTATUS(status) - 1; + } + } + + return depth; +} diff --git a/tests/tcg/multiarch/gdbstub/catch-syscalls.py b/tests/tcg/multiarch/gdbstub/catch-syscalls.py new file mode 100644 index 00000000000..ccce35902fb --- /dev/null +++ b/tests/tcg/multiarch/gdbstub/catch-syscalls.py @@ -0,0 +1,53 @@ +"""Test GDB syscall catchpoints. + +SPDX-License-Identifier: GPL-2.0-or-later +""" +from test_gdbstub import main, report + + +def check_state(expected): + """Check the catch_syscalls_state value""" + actual = gdb.parse_and_eval("catch_syscalls_state").string() + report(actual == expected, "{} == {}".format(actual, expected)) + + +def run_test(): + """Run through the tests one by one""" + gdb.Breakpoint("main") + gdb.execute("continue") + + # Check that GDB stops for pipe2/read calls/returns, but not for write. + gdb.execute("delete") + try: + gdb.execute("catch syscall pipe2 read") + except gdb.error as exc: + exc_str = str(exc) + if "not supported on this architecture" in exc_str: + print("SKIP: {}".format(exc_str)) + return + raise + for _ in range(2): + gdb.execute("continue") + check_state("pipe2") + for _ in range(2): + gdb.execute("continue") + check_state("read") + + # Check that deletion works. + gdb.execute("delete") + gdb.Breakpoint("end_of_main") + gdb.execute("continue") + check_state("end") + + # Check that catch-all works (libc should at least call exit). + gdb.execute("delete") + gdb.execute("catch syscall") + gdb.execute("continue") + gdb.execute("delete") + gdb.execute("continue") + + exitcode = int(gdb.parse_and_eval("$_exitcode")) + report(exitcode == 0, "{} == 0".format(exitcode)) + + +main(run_test) diff --git a/tests/tcg/multiarch/gdbstub/follow-fork-mode-child.py b/tests/tcg/multiarch/gdbstub/follow-fork-mode-child.py new file mode 100644 index 00000000000..72a6e440c08 --- /dev/null +++ b/tests/tcg/multiarch/gdbstub/follow-fork-mode-child.py @@ -0,0 +1,40 @@ +"""Test GDB's follow-fork-mode child. + +SPDX-License-Identifier: GPL-2.0-or-later +""" +from test_gdbstub import main, report + + +def run_test(): + """Run through the tests one by one""" + gdb.execute("set follow-fork-mode child") + # Check that the parent breakpoints are unset. 
+ gdb.execute("break break_after_fork") + # Check that the parent syscall catchpoints are unset. + # Skip this check on the architectures that don't have them. + have_fork_syscall = False + for fork_syscall in ("fork", "clone", "clone2", "clone3"): + try: + gdb.execute("catch syscall {}".format(fork_syscall)) + except gdb.error: + pass + else: + have_fork_syscall = True + gdb.execute("continue") + for i in range(42): + if have_fork_syscall: + # syscall entry. + if i % 2 == 0: + # Check that the parent single-stepping is turned off. + gdb.execute("si") + else: + gdb.execute("continue") + # syscall exit. + gdb.execute("continue") + # break_after_fork() + gdb.execute("continue") + exitcode = int(gdb.parse_and_eval("$_exitcode")) + report(exitcode == 42, "{} == 42".format(exitcode)) + + +main(run_test) diff --git a/tests/tcg/multiarch/gdbstub/follow-fork-mode-parent.py b/tests/tcg/multiarch/gdbstub/follow-fork-mode-parent.py new file mode 100644 index 00000000000..5c2fe722088 --- /dev/null +++ b/tests/tcg/multiarch/gdbstub/follow-fork-mode-parent.py @@ -0,0 +1,16 @@ +"""Test GDB's follow-fork-mode parent. + +SPDX-License-Identifier: GPL-2.0-or-later +""" +from test_gdbstub import main, report + + +def run_test(): + """Run through the tests one by one""" + gdb.execute("set follow-fork-mode parent") + gdb.execute("continue") + exitcode = int(gdb.parse_and_eval("$_exitcode")) + report(exitcode == 0, "{} == 0".format(exitcode)) + + +main(run_test) diff --git a/tests/tcg/multiarch/gdbstub/interrupt.py b/tests/tcg/multiarch/gdbstub/interrupt.py index c016e7afbbf..90a45b5140a 100644 --- a/tests/tcg/multiarch/gdbstub/interrupt.py +++ b/tests/tcg/multiarch/gdbstub/interrupt.py @@ -8,19 +8,7 @@ # import gdb -import sys - -failcount = 0 - - -def report(cond, msg): - "Report success/fail of test" - if cond: - print("PASS: %s" % (msg)) - else: - print("FAIL: %s" % (msg)) - global failcount - failcount += 1 +from test_gdbstub import main, report def check_interrupt(thread): @@ -59,6 +47,9 @@ def run_test(): Test if interrupting the code always lands us on the same thread when running with scheduler-lock enabled. 
""" + if len(gdb.selected_inferior().threads()) == 1: + print("SKIP: set to run on a single thread") + exit(0) gdb.execute("set scheduler-locking on") for thread in gdb.selected_inferior().threads(): @@ -66,32 +57,4 @@ def run_test(): "thread %d resumes correctly on interrupt" % thread.num) -# -# This runs as the script it sourced (via -x, via run-test.py) -# -try: - inferior = gdb.selected_inferior() - arch = inferior.architecture() - print("ATTACHED: %s" % arch.name()) -except (gdb.error, AttributeError): - print("SKIPPING (not connected)", file=sys.stderr) - exit(0) - -if gdb.parse_and_eval('$pc') == 0: - print("SKIP: PC not set") - exit(0) -if len(gdb.selected_inferior().threads()) == 1: - print("SKIP: set to run on a single thread") - exit(0) - -try: - # Run the actual tests - run_test() -except (gdb.error): - print("GDB Exception: %s" % (sys.exc_info()[0])) - failcount += 1 - pass - -# Finally kill the inferior and exit gdb with a count of failures -gdb.execute("kill") -exit(failcount) +main(run_test) diff --git a/tests/tcg/multiarch/gdbstub/memory.py b/tests/tcg/multiarch/gdbstub/memory.py index fb1d06b7bb7..532b92e7fb3 100644 --- a/tests/tcg/multiarch/gdbstub/memory.py +++ b/tests/tcg/multiarch/gdbstub/memory.py @@ -9,18 +9,7 @@ import gdb import sys - -failcount = 0 - - -def report(cond, msg): - "Report success/fail of test" - if cond: - print("PASS: %s" % (msg)) - else: - print("FAIL: %s" % (msg)) - global failcount - failcount += 1 +from test_gdbstub import main, report def check_step(): @@ -99,29 +88,5 @@ def run_test(): report(cbp.hit_count == 0, "didn't reach backstop") -# -# This runs as the script it sourced (via -x, via run-test.py) -# -try: - inferior = gdb.selected_inferior() - arch = inferior.architecture() - print("ATTACHED: %s" % arch.name()) -except (gdb.error, AttributeError): - print("SKIPPING (not connected)", file=sys.stderr) - exit(0) - -if gdb.parse_and_eval('$pc') == 0: - print("SKIP: PC not set") - exit(0) - -try: - # Run the actual tests - run_test() -except (gdb.error): - print("GDB Exception: %s" % (sys.exc_info()[0])) - failcount += 1 - pass - -# Finally kill the inferior and exit gdb with a count of failures -gdb.execute("kill") -exit(failcount) + +main(run_test) diff --git a/tests/tcg/multiarch/gdbstub/prot-none.py b/tests/tcg/multiarch/gdbstub/prot-none.py new file mode 100644 index 00000000000..7e264589cb8 --- /dev/null +++ b/tests/tcg/multiarch/gdbstub/prot-none.py @@ -0,0 +1,36 @@ +"""Test that GDB can access PROT_NONE pages. + +This runs as a sourced script (via -x, via run-test.py). 
+ +SPDX-License-Identifier: GPL-2.0-or-later +""" +import ctypes +from test_gdbstub import main, report + + +def probe_proc_self_mem(): + buf = ctypes.create_string_buffer(b'aaa') + try: + with open("/proc/self/mem", "rb") as fp: + fp.seek(ctypes.addressof(buf)) + return fp.read(3) == b'aaa' + except OSError: + return False + + +def run_test(): + """Run through the tests one by one""" + if not probe_proc_self_mem(): + print("SKIP: /proc/self/mem is not usable") + exit(0) + gdb.Breakpoint("break_here") + gdb.execute("continue") + val = gdb.parse_and_eval("*(char[2] *)q").string() + report(val == "42", "{} == 42".format(val)) + gdb.execute("set *(char[3] *)q = \"24\"") + gdb.execute("continue") + exitcode = int(gdb.parse_and_eval("$_exitcode")) + report(exitcode == 0, "{} == 0".format(exitcode)) + + +main(run_test) diff --git a/tests/tcg/multiarch/gdbstub/registers.py b/tests/tcg/multiarch/gdbstub/registers.py index 688c0611072..b3d13cb077f 100644 --- a/tests/tcg/multiarch/gdbstub/registers.py +++ b/tests/tcg/multiarch/gdbstub/registers.py @@ -7,20 +7,11 @@ # SPDX-License-Identifier: GPL-2.0-or-later import gdb -import sys import xml.etree.ElementTree as ET +from test_gdbstub import main, report -initial_vlen = 0 -failcount = 0 -def report(cond, msg): - "Report success/fail of test." - if cond: - print("PASS: %s" % (msg)) - else: - print("FAIL: %s" % (msg)) - global failcount - failcount += 1 +initial_vlen = 0 def fetch_xml_regmap(): @@ -75,6 +66,7 @@ def fetch_xml_regmap(): return reg_map + def get_register_by_regnum(reg_map, regnum): """ Helper to find a register from the map via its XML regnum @@ -84,6 +76,7 @@ def get_register_by_regnum(reg_map, regnum): return entry return None + def crosscheck_remote_xml(reg_map): """ Cross-check the list of remote-registers with the XML info. @@ -144,6 +137,7 @@ def crosscheck_remote_xml(reg_map): elif "seen" not in x_reg: print(f"{x_reg} wasn't seen in remote-registers") + def initial_register_read(reg_map): """ Do an initial read of all registers that we know gdb cares about @@ -214,27 +208,4 @@ def run_test(): complete_and_diff(reg_map) -# -# This runs as the script it sourced (via -x, via run-test.py) -# -try: - inferior = gdb.selected_inferior() - arch = inferior.architecture() - print("ATTACHED: %s" % arch.name()) -except (gdb.error, AttributeError): - print("SKIPPING (not connected)", file=sys.stderr) - exit(0) - -if gdb.parse_and_eval('$pc') == 0: - print("SKIP: PC not set") - exit(0) - -try: - run_test() -except (gdb.error): - print ("GDB Exception: %s" % (sys.exc_info()[0])) - failcount += 1 - pass - -print("All tests complete: %d failures" % failcount) -exit(failcount) +main(run_test) diff --git a/tests/tcg/multiarch/gdbstub/sha1.py b/tests/tcg/multiarch/gdbstub/sha1.py index 416728415f9..1ce711a402c 100644 --- a/tests/tcg/multiarch/gdbstub/sha1.py +++ b/tests/tcg/multiarch/gdbstub/sha1.py @@ -7,19 +7,11 @@ # import gdb -import sys +from test_gdbstub import main, report + initial_vlen = 0 -failcount = 0 -def report(cond, msg): - "Report success/fail of test" - if cond: - print("PASS: %s" % (msg)) - else: - print("FAIL: %s" % (msg)) - global failcount - failcount += 1 def check_break(sym_name): "Setup breakpoint, continue and check we stopped." 
@@ -35,6 +27,7 @@ def check_break(sym_name): bp.delete() + def run_test(): "Run through the tests one by one" @@ -57,28 +50,5 @@ def run_test(): # finally check we don't barf inspecting registers gdb.execute("info registers") -# -# This runs as the script it sourced (via -x, via run-test.py) -# -try: - inferior = gdb.selected_inferior() - arch = inferior.architecture() - print("ATTACHED: %s" % arch.name()) -except (gdb.error, AttributeError): - print("SKIPPING (not connected)", file=sys.stderr) - exit(0) - -if gdb.parse_and_eval('$pc') == 0: - print("SKIP: PC not set") - exit(0) - -try: - # Run the actual tests - run_test() -except (gdb.error): - print ("GDB Exception: %s" % (sys.exc_info()[0])) - failcount += 1 - pass - -print("All tests complete: %d failures" % failcount) -exit(failcount) + +main(run_test) diff --git a/tests/tcg/multiarch/gdbstub/test-proc-mappings.py b/tests/tcg/multiarch/gdbstub/test-proc-mappings.py index 04ec61d2197..564613fabf0 100644 --- a/tests/tcg/multiarch/gdbstub/test-proc-mappings.py +++ b/tests/tcg/multiarch/gdbstub/test-proc-mappings.py @@ -3,20 +3,7 @@ This runs as a sourced script (via -x, via run-test.py).""" from __future__ import print_function import gdb -import sys - - -n_failures = 0 - - -def report(cond, msg): - """Report success/fail of a test""" - if cond: - print("PASS: {}".format(msg)) - else: - print("FAIL: {}".format(msg)) - global n_failures - n_failures += 1 +from test_gdbstub import main, report def run_test(): @@ -37,26 +24,4 @@ def run_test(): # report("/sha1" in mappings, "Found the test binary name in the mappings") -def main(): - """Prepare the environment and run through the tests""" - try: - inferior = gdb.selected_inferior() - print("ATTACHED: {}".format(inferior.architecture().name())) - except (gdb.error, AttributeError): - print("SKIPPING (not connected)") - exit(0) - - if gdb.parse_and_eval('$pc') == 0: - print("SKIP: PC not set") - exit(0) - - try: - # Run the actual tests - run_test() - except gdb.error: - report(False, "GDB Exception: {}".format(sys.exc_info()[0])) - print("All tests complete: %d failures" % n_failures) - exit(n_failures) - - -main() +main(run_test) diff --git a/tests/tcg/multiarch/gdbstub/test-qxfer-auxv-read.py b/tests/tcg/multiarch/gdbstub/test-qxfer-auxv-read.py index 926fa962b77..00c26ab4a95 100644 --- a/tests/tcg/multiarch/gdbstub/test-qxfer-auxv-read.py +++ b/tests/tcg/multiarch/gdbstub/test-qxfer-auxv-read.py @@ -6,18 +6,8 @@ # import gdb -import sys +from test_gdbstub import main, report -failcount = 0 - -def report(cond, msg): - "Report success/fail of test" - if cond: - print ("PASS: %s" % (msg)) - else: - print ("FAIL: %s" % (msg)) - global failcount - failcount += 1 def run_test(): "Run through the tests one by one" @@ -26,28 +16,5 @@ def run_test(): report(isinstance(auxv, str), "Fetched auxv from inferior") report(auxv.find("sha1"), "Found test binary name in auxv") -# -# This runs as the script it sourced (via -x, via run-test.py) -# -try: - inferior = gdb.selected_inferior() - arch = inferior.architecture() - print("ATTACHED: %s" % arch.name()) -except (gdb.error, AttributeError): - print("SKIPPING (not connected)", file=sys.stderr) - exit(0) - -if gdb.parse_and_eval('$pc') == 0: - print("SKIP: PC not set") - exit(0) - -try: - # Run the actual tests - run_test() -except (gdb.error): - print ("GDB Exception: %s" % (sys.exc_info()[0])) - failcount += 1 - pass -print("All tests complete: %d failures" % failcount) -exit(failcount) +main(run_test) diff --git 
a/tests/tcg/multiarch/gdbstub/test-qxfer-siginfo-read.py b/tests/tcg/multiarch/gdbstub/test-qxfer-siginfo-read.py new file mode 100644 index 00000000000..862596b07a7 --- /dev/null +++ b/tests/tcg/multiarch/gdbstub/test-qxfer-siginfo-read.py @@ -0,0 +1,26 @@ +from __future__ import print_function +# +# Test gdbstub Xfer:siginfo:read stub. +# +# The test runs a binary that causes a SIGSEGV and then looks for additional +# info about the signal through printing GDB's '$_siginfo' special variable, +# which sends a Xfer:siginfo:read query to the gdbstub. +# +# The binary causes a SIGSEGV at dereferencing a pointer with value 0xdeadbeef, +# so the test looks for and checks if this address is correctly reported by the +# gdbstub. +# +# This is launched via tests/guest-debug/run-test.py +# + +import gdb +from test_gdbstub import main, report + +def run_test(): + "Run through the test" + + gdb.execute("continue", False, True) + resp = gdb.execute("print/x $_siginfo", False, True) + report(resp.find("si_addr = 0xdeadbeef"), "Found fault address.") + +main(run_test) diff --git a/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py b/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py index e57d2a8db8b..4d6b6b9fbe7 100644 --- a/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py +++ b/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py @@ -6,18 +6,8 @@ # import gdb -import sys +from test_gdbstub import main, report -failcount = 0 - -def report(cond, msg): - "Report success/fail of test" - if cond: - print ("PASS: %s" % (msg)) - else: - print ("FAIL: %s" % (msg)) - global failcount - failcount += 1 def run_test(): "Run through the tests one by one" @@ -29,28 +19,5 @@ def run_test(): frame = gdb.selected_frame() report(str(frame.function()) == "thread1_func", "break @ %s"%frame) -# -# This runs as the script it sourced (via -x, via run-test.py) -# -try: - inferior = gdb.selected_inferior() - arch = inferior.architecture() - print("ATTACHED: %s" % arch.name()) -except (gdb.error, AttributeError): - print("SKIPPING (not connected)", file=sys.stderr) - exit(0) - -if gdb.parse_and_eval('$pc') == 0: - print("SKIP: PC not set") - exit(0) - -try: - # Run the actual tests - run_test() -except (gdb.error): - print ("GDB Exception: %s" % (sys.exc_info()[0])) - failcount += 1 - pass -print("All tests complete: %d failures" % failcount) -exit(failcount) +main(run_test) diff --git a/tests/tcg/multiarch/libs/float_helpers.c b/tests/tcg/multiarch/libs/float_helpers.c index 4e68d2b6598..fad5fc98933 100644 --- a/tests/tcg/multiarch/libs/float_helpers.c +++ b/tests/tcg/multiarch/libs/float_helpers.c @@ -5,9 +5,9 @@ * floating point constants useful for exercising the edge cases in * floating point tests. 
* - * Copyright (c) 2019 Linaro + * Copyright (c) 2019, 2024 Linaro * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ /* we want additional float type definitions */ diff --git a/tests/tcg/multiarch/linux/linux-madvise.c b/tests/tcg/multiarch/linux/linux-madvise.c index 29d0997e687..539fb3b7726 100644 --- a/tests/tcg/multiarch/linux/linux-madvise.c +++ b/tests/tcg/multiarch/linux/linux-madvise.c @@ -42,6 +42,8 @@ static void test_file(void) assert(ret == 0); written = write(fd, &c, sizeof(c)); assert(written == sizeof(c)); + ret = ftruncate(fd, pagesize); + assert(ret == 0); page = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE, fd, 0); assert(page != MAP_FAILED); diff --git a/tests/tcg/multiarch/linux/linux-shmat-maps.c b/tests/tcg/multiarch/linux/linux-shmat-maps.c new file mode 100644 index 00000000000..0ccf7a973a8 --- /dev/null +++ b/tests/tcg/multiarch/linux/linux-shmat-maps.c @@ -0,0 +1,55 @@ +/* + * Test that shmat() does not break /proc/self/maps. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include +#include +#include +#include +#include + +int main(void) +{ + char buf[128]; + int err, fd; + int shmid; + ssize_t n; + void *p; + + shmid = shmget(IPC_PRIVATE, 1, IPC_CREAT | 0600); + assert(shmid != -1); + + /* + * The original bug required a non-NULL address, which skipped the + * mmap_find_vma step, which could result in a host mapping smaller + * than the target mapping. Choose an address at random. + */ + p = shmat(shmid, (void *)0x800000, SHM_RND); + if (p == (void *)-1) { + /* + * Because we are now running the testcase for all guests for which + * we have a cross-compiler, the above random address might conflict + * with the guest executable in some way. Rather than stopping, + * continue with a system supplied address, which should never fail. + */ + p = shmat(shmid, NULL, 0); + assert(p != (void *)-1); + } + + fd = open("/proc/self/maps", O_RDONLY); + assert(fd != -1); + do { + n = read(fd, buf, sizeof(buf)); + assert(n >= 0); + } while (n != 0); + close(fd); + + err = shmdt(p); + assert(err == 0); + err = shmctl(shmid, IPC_RMID, NULL); + assert(err == 0); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/multiarch/linux/linux-shmat-null.c b/tests/tcg/multiarch/linux/linux-shmat-null.c new file mode 100644 index 00000000000..94eaaec371a --- /dev/null +++ b/tests/tcg/multiarch/linux/linux-shmat-null.c @@ -0,0 +1,38 @@ +/* + * Test shmat(NULL). + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include +#include +#include + +int main(void) +{ + int shmid; + char *p; + int err; + + /* Create, attach and intialize shared memory. */ + shmid = shmget(IPC_PRIVATE, 1, IPC_CREAT | 0600); + assert(shmid != -1); + p = shmat(shmid, NULL, 0); + assert(p != (void *)-1); + *p = 42; + + /* Reattach, check that the value is still there. */ + err = shmdt(p); + assert(err == 0); + p = shmat(shmid, NULL, 0); + assert(p != (void *)-1); + assert(*p == 42); + + /* Detach. */ + err = shmdt(p); + assert(err == 0); + err = shmctl(shmid, IPC_RMID, NULL); + assert(err == 0); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/multiarch/prot-none.c b/tests/tcg/multiarch/prot-none.c new file mode 100644 index 00000000000..dc56aadb3c5 --- /dev/null +++ b/tests/tcg/multiarch/prot-none.c @@ -0,0 +1,40 @@ +/* + * Test that GDB can access PROT_NONE pages. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include +#include +#include +#include + +void break_here(void *q) +{ +} + +int main(void) +{ + long pagesize = sysconf(_SC_PAGESIZE); + void *p, *q; + int err; + + p = mmap(NULL, pagesize * 2, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + assert(p != MAP_FAILED); + q = p + pagesize - 1; + strcpy(q, "42"); + + err = mprotect(p, pagesize * 2, PROT_NONE); + assert(err == 0); + + break_here(q); + + err = mprotect(p, pagesize * 2, PROT_READ); + assert(err == 0); + if (getenv("PROT_NONE_PY")) { + assert(strcmp(q, "24") == 0); + } + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/multiarch/segfault.c b/tests/tcg/multiarch/segfault.c new file mode 100644 index 00000000000..e6c8ff31ca3 --- /dev/null +++ b/tests/tcg/multiarch/segfault.c @@ -0,0 +1,14 @@ +#include +#include + +/* Cause a segfault for testing purposes. */ + +int main(int argc, char *argv[]) +{ + int *ptr = (void *)0xdeadbeef; + + if (argc == 2 && strcmp(argv[1], "-s") == 0) { + /* Cause segfault. */ + printf("%d\n", *ptr); + } +} diff --git a/tests/tcg/ppc/Makefile.target b/tests/tcg/ppc/Makefile.target deleted file mode 100644 index f5e08c7376c..00000000000 --- a/tests/tcg/ppc/Makefile.target +++ /dev/null @@ -1,12 +0,0 @@ -# -*- Mode: makefile -*- -# -# PPC - included from tests/tcg/Makefile -# - -ifneq (,$(findstring 64,$(TARGET_NAME))) -# On PPC64 Linux can be configured with 4k (default) or 64k pages (currently broken) -EXTRA_RUNS+=run-test-mmap-4096 #run-test-mmap-65536 -else -# On PPC32 Linux supports 4K/16K/64K/256K (but currently only 4k works) -EXTRA_RUNS+=run-test-mmap-4096 #run-test-mmap-16384 run-test-mmap-65536 run-test-mmap-262144 -endif diff --git a/tests/tcg/riscv64/Makefile.target b/tests/tcg/riscv64/Makefile.target index a7e390c384d..4da5b9a3b32 100644 --- a/tests/tcg/riscv64/Makefile.target +++ b/tests/tcg/riscv64/Makefile.target @@ -17,4 +17,4 @@ run-test-aes: QEMU_OPTS += -cpu rv64,zk=on TESTS += test-fcvtmod test-fcvtmod: CFLAGS += -march=rv64imafdc test-fcvtmod: LDFLAGS += -static -run-test-fcvtmod: QEMU_OPTS += -cpu rv64,d=true,Zfa=true +run-test-fcvtmod: QEMU_OPTS += -cpu rv64,d=true,zfa=true diff --git a/tests/tcg/riscv64/semicall.h b/tests/tcg/riscv64/semicall.h index f8c88f32dc5..11d0650cb06 100644 --- a/tests/tcg/riscv64/semicall.h +++ b/tests/tcg/riscv64/semicall.h @@ -1,10 +1,10 @@ /* * Semihosting Tests - RiscV64 Helper * - * Copyright (c) 2021 + * Copyright (c) 2021, 2024 * Written by Alex Bennée * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ uintptr_t __semi_call(uintptr_t type, uintptr_t arg0) diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target index 0e670f3f8b9..a8f86c94498 100644 --- a/tests/tcg/s390x/Makefile.target +++ b/tests/tcg/s390x/Makefile.target @@ -44,6 +44,10 @@ TESTS+=clgebr TESTS+=clc TESTS+=laalg TESTS+=add-logical-with-carry +TESTS+=lae +TESTS+=cvd +TESTS+=cvb +TESTS+=ts cdsg: CFLAGS+=-pthread cdsg: LDFLAGS+=-pthread diff --git a/tests/tcg/s390x/cvb.c b/tests/tcg/s390x/cvb.c new file mode 100644 index 00000000000..e1735f6b81c --- /dev/null +++ b/tests/tcg/s390x/cvb.c @@ -0,0 +1,102 @@ +/* + * Test the CONVERT TO BINARY instruction. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include +#include +#include +#include + +static int signum; + +static void signal_handler(int n) +{ + signum = n; +} + +#define FAIL 0x1234567887654321 +#define OK32(x) (0x1234567800000000 | (uint32_t)(x)) + +static int64_t cvb(uint64_t x) +{ + int64_t ret = FAIL; + + signum = -1; + asm("cvb %[ret],%[x]" : [ret] "+r" (ret) : [x] "R" (x)); + + return ret; +} + +static int64_t cvby(uint64_t x) +{ + int64_t ret = FAIL; + + signum = -1; + asm("cvby %[ret],%[x]" : [ret] "+r" (ret) : [x] "T" (x)); + + return ret; +} + +static int64_t cvbg(__uint128_t x) +{ + int64_t ret = FAIL; + + signum = -1; + asm("cvbg %[ret],%[x]" : [ret] "+r" (ret) : [x] "T" (x)); + + return ret; +} + +int main(void) +{ + __uint128_t m = (((__uint128_t)0x9223372036854775) << 16) | 0x8070; + struct sigaction act; + int err; + + memset(&act, 0, sizeof(act)); + act.sa_handler = signal_handler; + err = sigaction(SIGFPE, &act, NULL); + assert(err == 0); + err = sigaction(SIGILL, &act, NULL); + assert(err == 0); + + assert(cvb(0xc) == OK32(0) && signum == -1); + assert(cvb(0x1c) == OK32(1) && signum == -1); + assert(cvb(0x25594c) == OK32(25594) && signum == -1); + assert(cvb(0x1d) == OK32(-1) && signum == -1); + assert(cvb(0x2147483647c) == OK32(0x7fffffff) && signum == -1); + assert(cvb(0x2147483648d) == OK32(-0x80000000) && signum == -1); + assert(cvb(0x7) == FAIL && signum == SIGILL); + assert(cvb(0x2147483648c) == OK32(0x80000000) && signum == SIGFPE); + assert(cvb(0x3000000000c) == OK32(0xb2d05e00) && signum == SIGFPE); + assert(cvb(0x2147483649d) == OK32(0x7fffffff) && signum == SIGFPE); + assert(cvb(0x3000000000d) == OK32(0x4d2fa200) && signum == SIGFPE); + + assert(cvby(0xc) == OK32(0)); + assert(cvby(0x1c) == OK32(1)); + assert(cvby(0x25594c) == OK32(25594)); + assert(cvby(0x1d) == OK32(-1)); + assert(cvby(0x2147483647c) == OK32(0x7fffffff)); + assert(cvby(0x2147483648d) == OK32(-0x80000000)); + assert(cvby(0x7) == FAIL && signum == SIGILL); + assert(cvby(0x2147483648c) == OK32(0x80000000) && signum == SIGFPE); + assert(cvby(0x3000000000c) == OK32(0xb2d05e00) && signum == SIGFPE); + assert(cvby(0x2147483649d) == OK32(0x7fffffff) && signum == SIGFPE); + assert(cvby(0x3000000000d) == OK32(0x4d2fa200) && signum == SIGFPE); + + assert(cvbg(0xc) == 0); + assert(cvbg(0x1c) == 1); + assert(cvbg(0x25594c) == 25594); + assert(cvbg(0x1d) == -1); + assert(cvbg(m + 0xc) == 0x7fffffffffffffff); + assert(cvbg(m + 0x1d) == -0x8000000000000000); + assert(cvbg(0x7) == FAIL && signum == SIGILL); + assert(cvbg(m + 0x1c) == FAIL && signum == SIGFPE); + assert(cvbg(m + 0x2d) == FAIL && signum == SIGFPE); + assert(cvbg(((__uint128_t)1 << 80) + 0xc) == FAIL && signum == SIGFPE); + assert(cvbg(((__uint128_t)1 << 80) + 0xd) == FAIL && signum == SIGFPE); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/s390x/cvd.c b/tests/tcg/s390x/cvd.c new file mode 100644 index 00000000000..d776688985e --- /dev/null +++ b/tests/tcg/s390x/cvd.c @@ -0,0 +1,63 @@ +/* + * Test the CONVERT TO DECIMAL instruction. 
diff --git a/tests/tcg/s390x/gdbstub/test-signals-s390x.py b/tests/tcg/s390x/gdbstub/test-signals-s390x.py
index ca2bbc0b03e..b6b7b39fc46 100644
--- a/tests/tcg/s390x/gdbstub/test-signals-s390x.py
+++ b/tests/tcg/s390x/gdbstub/test-signals-s390x.py
@@ -7,19 +7,7 @@
 #
 
 import gdb
-import sys
-
-failcount = 0
-
-
-def report(cond, msg):
-    """Report success/fail of test"""
-    if cond:
-        print("PASS: %s" % (msg))
-    else:
-        print("FAIL: %s" % (msg))
-        global failcount
-        failcount += 1
+from test_gdbstub import main, report
 
 
 def run_test():
@@ -42,31 +30,7 @@ def run_test():
     gdb.Breakpoint("_exit")
     gdb.execute("c")
     status = int(gdb.parse_and_eval("$r2"))
-    report(status == 0, "status == 0");
-
-
-#
-# This runs as the script it sourced (via -x, via run-test.py)
-#
-try:
-    inferior = gdb.selected_inferior()
-    arch = inferior.architecture()
-    print("ATTACHED: %s" % arch.name())
-except (gdb.error, AttributeError):
-    print("SKIPPING (not connected)", file=sys.stderr)
-    exit(0)
-
-if gdb.parse_and_eval("$pc") == 0:
-    print("SKIP: PC not set")
-    exit(0)
+    report(status == 0, "status == 0")
 
 
-try:
-    # Run the actual tests
-    run_test()
-except (gdb.error):
-    print("GDB Exception: %s" % (sys.exc_info()[0]))
-    failcount += 1
-    pass
-print("All tests complete: %d failures" % failcount)
-exit(failcount)
+main(run_test)
diff --git a/tests/tcg/s390x/gdbstub/test-svc.py b/tests/tcg/s390x/gdbstub/test-svc.py
index 804705fede9..17210b4e020 100644
--- a/tests/tcg/s390x/gdbstub/test-svc.py
+++ b/tests/tcg/s390x/gdbstub/test-svc.py
@@ -3,20 +3,7 @@
 This runs as a sourced script (via -x, via run-test.py)."""
 from __future__ import print_function
 import gdb
-import sys
-
-
-n_failures = 0
-
-
-def report(cond, msg):
-    """Report success/fail of a test"""
-    if cond:
-        print("PASS: {}".format(msg))
-    else:
-        print("FAIL: {}".format(msg))
-        global n_failures
-        n_failures += 1
+from test_gdbstub import main, report
 
 
 def run_test():
@@ -35,26 +22,4 @@ def run_test():
     gdb.execute("si")
 
 
-def main():
-    """Prepare the environment and run through the tests"""
-    try:
-        inferior = gdb.selected_inferior()
-        print("ATTACHED: {}".format(inferior.architecture().name()))
-    except (gdb.error, AttributeError):
-        print("SKIPPING (not connected)")
-        exit(0)
-
-    if gdb.parse_and_eval('$pc') == 0:
-        print("SKIP: PC not set")
- exit(0) - - try: - # Run the actual tests - run_test() - except gdb.error: - report(False, "GDB Exception: {}".format(sys.exc_info()[0])) - print("All tests complete: %d failures" % n_failures) - exit(n_failures) - - -main() +main(run_test) diff --git a/tests/tcg/s390x/lae.c b/tests/tcg/s390x/lae.c new file mode 100644 index 00000000000..59712b5e371 --- /dev/null +++ b/tests/tcg/s390x/lae.c @@ -0,0 +1,31 @@ +/* + * Test the LOAD ADDRESS EXTENDED instruction. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include + +int main(void) +{ + unsigned long long ar = -1, b2 = 100000, r, x2 = 500; + /* + * Hardcode the register number, since clang does not allow using %rN in + * place of %aN. + */ + register unsigned long long r2 __asm__("2"); + int tmp; + + asm("ear %[tmp],%%a2\n" + "lae %%r2,42(%[x2],%[b2])\n" + "ear %[ar],%%a2\n" + "sar %%a2,%[tmp]" + : [tmp] "=&r" (tmp), "=&r" (r2), [ar] "+r" (ar) + : [b2] "r" (b2), [x2] "r" (x2) + : "memory"); + r = r2; + assert(ar == 0xffffffff00000000ULL); + assert(r == 100542); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/s390x/ts.c b/tests/tcg/s390x/ts.c new file mode 100644 index 00000000000..441faf30d98 --- /dev/null +++ b/tests/tcg/s390x/ts.c @@ -0,0 +1,35 @@ +/* + * Test the TEST AND SET instruction. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include + +static int ts(char *p) +{ + int cc; + + asm("ts %[p]\n" + "ipm %[cc]" + : [cc] "=r" (cc) + , [p] "+Q" (*p) + : : "cc"); + + return (cc >> 28) & 3; +} + +int main(void) +{ + char c; + + c = 0x80; + assert(ts(&c) == 1); + assert(c == 0xff); + + c = 0x7f; + assert(ts(&c) == 0); + assert(c == 0xff); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/sh4/Makefile.target b/tests/tcg/sh4/Makefile.target index 47c39a44b69..4d09291c0c2 100644 --- a/tests/tcg/sh4/Makefile.target +++ b/tests/tcg/sh4/Makefile.target @@ -3,12 +3,17 @@ # SuperH specific tweaks # -# On sh Linux supports 4k, 8k, 16k and 64k pages (but only 4k currently works) -EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-8192 run-test-mmap-16384 run-test-mmap-65536 - # This triggers failures for sh4-linux about 10% of the time. # Random SIGSEGV at unpredictable guest address, cause unknown. 
run-signals: signals $(call skip-test, $<, "BROKEN") run-plugin-signals-with-%: $(call skip-test, $<, "BROKEN") + +VPATH += $(SRC_PATH)/tests/tcg/sh4 + +test-macl: CFLAGS += -O -g +TESTS += test-macl + +test-macw: CFLAGS += -O -g +TESTS += test-macw diff --git a/tests/tcg/sh4/test-macl.c b/tests/tcg/sh4/test-macl.c new file mode 100644 index 00000000000..b66c854365a --- /dev/null +++ b/tests/tcg/sh4/test-macl.c @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include +#include +#include + +#define MACL_S_MIN (-(1ll << 47)) +#define MACL_S_MAX ((1ll << 47) - 1) + +int64_t mac_l(int64_t mac, const int32_t *a, const int32_t *b) +{ + register uint32_t macl __asm__("macl") = mac; + register uint32_t mach __asm__("mach") = mac >> 32; + + asm volatile("mac.l @%0+,@%1+" + : "+r"(a), "+r"(b), "+x"(macl), "+x"(mach)); + + return ((uint64_t)mach << 32) | macl; +} + +typedef struct { + int64_t mac; + int32_t a, b; + int64_t res[2]; +} Test; + +__attribute__((noinline)) +void test(const Test *t, int sat) +{ + int64_t res; + + if (sat) { + asm volatile("sets"); + } else { + asm volatile("clrs"); + } + res = mac_l(t->mac, &t->a, &t->b); + + if (res != t->res[sat]) { + fprintf(stderr, "%#llx + (%#x * %#x) = %#llx -- got %#llx\n", + t->mac, t->a, t->b, t->res[sat], res); + abort(); + } +} + +int main() +{ + static const Test tests[] = { + { 0x00007fff12345678ll, INT32_MAX, INT32_MAX, + { 0x40007ffe12345679ll, MACL_S_MAX } }, + { MACL_S_MIN, -1, 1, + { 0xffff7fffffffffffll, MACL_S_MIN } }, + { INT64_MIN, -1, 1, + { INT64_MAX, MACL_S_MIN } }, + { 0x00007fff00000000ll, INT32_MAX, INT32_MAX, + { 0x40007ffe00000001ll, MACL_S_MAX } }, + { 4, 1, 2, { 6, 6 } }, + { -4, -1, -2, { -2, -2 } }, + }; + + for (int i = 0; i < sizeof(tests) / sizeof(tests[0]); ++i) { + for (int j = 0; j < 2; ++j) { + test(&tests[i], j); + } + } + return 0; +} diff --git a/tests/tcg/sh4/test-macw.c b/tests/tcg/sh4/test-macw.c new file mode 100644 index 00000000000..4eceec8634b --- /dev/null +++ b/tests/tcg/sh4/test-macw.c @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include +#include +#include + +int64_t mac_w(int64_t mac, const int16_t *a, const int16_t *b) +{ + register uint32_t macl __asm__("macl") = mac; + register uint32_t mach __asm__("mach") = mac >> 32; + + asm volatile("mac.w @%0+,@%1+" + : "+r"(a), "+r"(b), "+x"(macl), "+x"(mach)); + + return ((uint64_t)mach << 32) | macl; +} + +typedef struct { + int64_t mac; + int16_t a, b; + int64_t res[2]; +} Test; + +__attribute__((noinline)) +void test(const Test *t, int sat) +{ + int64_t res; + + if (sat) { + asm volatile("sets"); + } else { + asm volatile("clrs"); + } + res = mac_w(t->mac, &t->a, &t->b); + + if (res != t->res[sat]) { + fprintf(stderr, "%#llx + (%#x * %#x) = %#llx -- got %#llx\n", + t->mac, t->a, t->b, t->res[sat], res); + abort(); + } +} + +int main() +{ + static const Test tests[] = { + { 0, 2, 3, { 6, 6 } }, + { 0x123456787ffffffell, 2, -3, + { 0x123456787ffffff8ll, 0x123456787ffffff8ll } }, + { 0xabcdef127ffffffall, 2, 3, + { 0xabcdef1280000000ll, 0x000000017fffffffll } }, + { 0xfffffffffll, INT16_MAX, INT16_MAX, + { 0x103fff0000ll, 0xf3fff0000ll } }, + }; + + for (int i = 0; i < sizeof(tests) / sizeof(tests[0]); ++i) { + for (int j = 0; j < 2; ++j) { + test(&tests[i], j); + } + } + return 0; +} diff --git a/tests/tcg/sparc64/Makefile.target b/tests/tcg/sparc64/Makefile.target deleted file mode 100644 index 408dace7839..00000000000 --- a/tests/tcg/sparc64/Makefile.target +++ /dev/null @@ -1,6 +0,0 @@ -# -*- Mode: makefile 
-*- -# -# sparc specific tweaks - -# On Sparc64 Linux support 8k pages -EXTRA_RUNS+=run-test-mmap-8192 diff --git a/tests/tcg/x86_64/system/boot.S b/tests/tcg/x86_64/system/boot.S index dac9bd534d7..7213aec63b2 100644 --- a/tests/tcg/x86_64/system/boot.S +++ b/tests/tcg/x86_64/system/boot.S @@ -1,16 +1,16 @@ /* * x86_64 boot and support code * - * Copyright 2019 Linaro + * Copyright 2019, 2024 Linaro * - * This work is licensed under the terms of the GNU GPL, version 3 or later. + * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * * Unlike the i386 version we instead use Xen's PVHVM booting header * which should drop us automatically into 32 bit mode ready to go. I've * nabbed bits of the Linux kernel setup to achieve this. * - * SPDX-License-Identifier: GPL-3.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later */ .section .head diff --git a/tests/tcg/xtensa/test_break.S b/tests/tcg/xtensa/test_break.S index 3aa18b5cec3..4c618feb5b1 100644 --- a/tests/tcg/xtensa/test_break.S +++ b/tests/tcg/xtensa/test_break.S @@ -129,7 +129,7 @@ test ibreak_remove 4: test_end -test ibreak_priority +test ibreak_break_priority set_vector debug_vector, 2f rsil a2, debug_level - 1 movi a2, 1f @@ -145,6 +145,29 @@ test ibreak_priority movi a3, 0x2 assert eq, a2, a3 test_end + +test ibreak_icount_priority + set_vector debug_vector, 2f + rsil a2, debug_level - 1 + movi a2, 1f + wsr a2, ibreaka0 + movi a2, 1 + wsr a2, ibreakenable + movi a2, -2 + wsr a2, icount + movi a2, 1 + wsr a2, icountlevel + isync + rsil a2, 0 + nop +1: + break 0, 0 + test_fail +2: + rsr a2, debugcause + movi a3, 0x1 + assert eq, a2, a3 +test_end #endif test icount diff --git a/tests/tsan/suppressions.tsan b/tests/tsan/suppressions.tsan index d9a002a2ef1..b3ef59c27c0 100644 --- a/tests/tsan/suppressions.tsan +++ b/tests/tsan/suppressions.tsan @@ -4,7 +4,6 @@ # TSan reports a double lock on RECURSIVE mutexes. # Since the recursive lock is intentional, we choose to ignore it. -mutex:aio_context_acquire mutex:pthread_mutex_lock # TSan reports a race between pthread_mutex_init() and diff --git a/tests/unit/meson.build b/tests/unit/meson.build index 45837648fa7..46619ae9ef6 100644 --- a/tests/unit/meson.build +++ b/tests/unit/meson.build @@ -101,7 +101,7 @@ if have_block } if gnutls.found() and \ tasn1.found() and \ - targetos != 'windows' + host_os != 'windows' tests += { 'test-crypto-tlscredsx509': ['crypto-tls-x509-helpers.c', 'pkix_asn1_tab.c', tasn1, crypto, gnutls], @@ -116,7 +116,7 @@ if have_block if xts == 'private' tests += {'test-crypto-xts': [crypto, io]} endif - if targetos != 'windows' + if host_os != 'windows' tests += { 'test-image-locking': [testblock], 'test-nested-aio-poll': [testblock], @@ -151,7 +151,7 @@ if have_system # are not runnable under TSan due to a known issue. 
# https://github.com/google/sanitizers/issues/1116 if not get_option('tsan') - if targetos != 'windows' + if host_os != 'windows' tests += { 'test-char': ['socket-helpers.c', qom, io, chardev] } @@ -163,7 +163,7 @@ if have_system endif endif -if have_ga and targetos == 'linux' +if have_ga and host_os == 'linux' tests += {'test-qga': ['../qtest/libqmp.c']} test_deps += {'test-qga': qga} endif @@ -173,8 +173,12 @@ test_env.set('G_TEST_SRCDIR', meson.current_source_dir()) test_env.set('G_TEST_BUILDDIR', meson.current_build_dir()) slow_tests = { - 'test-crypto-tlscredsx509': 45, - 'test-crypto-tlssession': 45 + 'test-aio-multithread' : 120, + 'test-bufferiszero': 60, + 'test-crypto-block' : 300, + 'test-crypto-tlscredsx509': 90, + 'test-crypto-tlssession': 90, + 'test-replication': 60, } foreach test_name, extra: tests diff --git a/tests/unit/socket-helpers.c b/tests/unit/socket-helpers.c index 6de27baee2e..f3439cc4e52 100644 --- a/tests/unit/socket-helpers.c +++ b/tests/unit/socket-helpers.c @@ -160,7 +160,6 @@ void socket_check_afunix_support(bool *has_afunix) int fd; fd = socket(PF_UNIX, SOCK_STREAM, 0); - close(fd); #ifdef _WIN32 *has_afunix = (fd != (int)INVALID_SOCKET); @@ -168,5 +167,8 @@ void socket_check_afunix_support(bool *has_afunix) *has_afunix = (fd >= 0); #endif + if (*has_afunix) { + close(fd); + } return; } diff --git a/tests/unit/test-aio.c b/tests/unit/test-aio.c index 337b6e4ea75..e77d86be875 100644 --- a/tests/unit/test-aio.c +++ b/tests/unit/test-aio.c @@ -100,76 +100,12 @@ static void event_ready_cb(EventNotifier *e) /* Tests using aio_*. */ -typedef struct { - QemuMutex start_lock; - EventNotifier notifier; - bool thread_acquired; -} AcquireTestData; - -static void *test_acquire_thread(void *opaque) -{ - AcquireTestData *data = opaque; - - /* Wait for other thread to let us start */ - qemu_mutex_lock(&data->start_lock); - qemu_mutex_unlock(&data->start_lock); - - /* event_notifier_set might be called either before or after - * the main thread's call to poll(). The test case's outcome - * should be the same in either case. 
- */ - event_notifier_set(&data->notifier); - aio_context_acquire(ctx); - aio_context_release(ctx); - - data->thread_acquired = true; /* success, we got here */ - - return NULL; -} - static void set_event_notifier(AioContext *nctx, EventNotifier *notifier, EventNotifierHandler *handler) { aio_set_event_notifier(nctx, notifier, handler, NULL, NULL); } -static void dummy_notifier_read(EventNotifier *n) -{ - event_notifier_test_and_clear(n); -} - -static void test_acquire(void) -{ - QemuThread thread; - AcquireTestData data; - - /* Dummy event notifier ensures aio_poll() will block */ - event_notifier_init(&data.notifier, false); - set_event_notifier(ctx, &data.notifier, dummy_notifier_read); - g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */ - - qemu_mutex_init(&data.start_lock); - qemu_mutex_lock(&data.start_lock); - data.thread_acquired = false; - - qemu_thread_create(&thread, "test_acquire_thread", - test_acquire_thread, - &data, QEMU_THREAD_JOINABLE); - - /* Block in aio_poll(), let other thread kick us and acquire context */ - aio_context_acquire(ctx); - qemu_mutex_unlock(&data.start_lock); /* let the thread run */ - g_assert(aio_poll(ctx, true)); - g_assert(!data.thread_acquired); - aio_context_release(ctx); - - qemu_thread_join(&thread); - set_event_notifier(ctx, &data.notifier, NULL); - event_notifier_cleanup(&data.notifier); - - g_assert(data.thread_acquired); -} - static void test_bh_schedule(void) { BHTestData data = { .n = 0 }; @@ -879,7 +815,7 @@ static void test_worker_thread_co_enter(void) qemu_thread_get_self(&this_thread); co = qemu_coroutine_create(co_check_current_thread, &this_thread); - qemu_thread_create(&worker_thread, "test_acquire_thread", + qemu_thread_create(&worker_thread, "test_aio_co_enter", test_aio_co_enter, co, QEMU_THREAD_JOINABLE); @@ -899,7 +835,6 @@ int main(int argc, char **argv) while (g_main_context_iteration(NULL, false)); g_test_init(&argc, &argv, NULL); - g_test_add_func("/aio/acquire", test_acquire); g_test_add_func("/aio/bh/schedule", test_bh_schedule); g_test_add_func("/aio/bh/schedule10", test_bh_schedule10); g_test_add_func("/aio/bh/cancel", test_bh_cancel); diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c index 704d1a3f361..666880472b8 100644 --- a/tests/unit/test-bdrv-drain.c +++ b/tests/unit/test-bdrv-drain.c @@ -179,13 +179,7 @@ static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs) static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs) { - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(bdrv_get_aio_context(bs)); - } do_drain_begin(drain_type, bs); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(bdrv_get_aio_context(bs)); - } } static BlockBackend * no_coroutine_fn test_setup(void) @@ -209,13 +203,7 @@ static BlockBackend * no_coroutine_fn test_setup(void) static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs) { - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(bdrv_get_aio_context(bs)); - } do_drain_end(drain_type, bs); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(bdrv_get_aio_context(bs)); - } } /* @@ -520,12 +508,8 @@ static void test_iothread_main_thread_bh(void *opaque) { struct test_iothread_data *data = opaque; - /* Test that the AioContext is not yet locked in a random BH that is - * executed during drain, otherwise this would deadlock. 
*/ - aio_context_acquire(bdrv_get_aio_context(data->bs)); bdrv_flush(data->bs); bdrv_dec_in_flight(data->bs); /* incremented by test_iothread_common() */ - aio_context_release(bdrv_get_aio_context(data->bs)); } /* @@ -567,7 +551,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) blk_set_disable_request_queuing(blk, true); blk_set_aio_context(blk, ctx_a, &error_abort); - aio_context_acquire(ctx_a); s->bh_indirection_ctx = ctx_b; @@ -582,8 +565,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) g_assert(acb != NULL); g_assert_cmpint(aio_ret, ==, -EINPROGRESS); - aio_context_release(ctx_a); - data = (struct test_iothread_data) { .bs = bs, .drain_type = drain_type, @@ -592,10 +573,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) switch (drain_thread) { case 0: - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(ctx_a); - } - /* * Increment in_flight so that do_drain_begin() waits for * test_iothread_main_thread_bh(). This prevents the race between @@ -613,20 +590,10 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) do_drain_begin(drain_type, bs); g_assert_cmpint(bs->in_flight, ==, 0); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(ctx_a); - } qemu_event_wait(&done_event); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(ctx_a); - } g_assert_cmpint(aio_ret, ==, 0); do_drain_end(drain_type, bs); - - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(ctx_a); - } break; case 1: co = qemu_coroutine_create(test_iothread_drain_co_entry, &data); @@ -637,9 +604,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) g_assert_not_reached(); } - aio_context_acquire(ctx_a); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx_a); bdrv_unref(bs); blk_unref(blk); @@ -757,7 +722,6 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, BlockJob *job; TestBlockJob *tjob; IOThread *iothread = NULL; - AioContext *ctx; int ret; src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR, @@ -787,11 +751,11 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, } if (use_iothread) { + AioContext *ctx; + iothread = iothread_new(); ctx = iothread_get_aio_context(iothread); blk_set_aio_context(blk_src, ctx, &error_abort); - } else { - ctx = qemu_get_aio_context(); } target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR, @@ -800,16 +764,15 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, blk_insert_bs(blk_target, target, &error_abort); blk_set_allow_aio_context_change(blk_target, true); - aio_context_acquire(ctx); tjob = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); tjob->bs = src; job = &tjob->common; - bdrv_graph_wrlock(target); + bdrv_graph_wrlock(); block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort); - bdrv_graph_wrunlock(target); + bdrv_graph_wrunlock(); switch (result) { case TEST_JOB_SUCCESS: @@ -821,7 +784,6 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, tjob->prepare_ret = -EIO; break; } - aio_context_release(ctx); job_start(&job->job); @@ -912,12 +874,10 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, } g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 
0 : -EIO)); - aio_context_acquire(ctx); if (use_iothread) { blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort); assert(blk_get_aio_context(blk_target) == qemu_get_aio_context()); } - aio_context_release(ctx); blk_unref(blk_src); blk_unref(blk_target); @@ -991,11 +951,11 @@ static void bdrv_test_top_close(BlockDriverState *bs) { BdrvChild *c, *next_c; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) { bdrv_unref_child(bs, c); } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); } static int coroutine_fn GRAPH_RDLOCK @@ -1085,10 +1045,10 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete, null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); /* This child will be the one to pass to requests through to, and * it will stall until a drain occurs */ @@ -1096,21 +1056,21 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete, &error_abort); child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS; /* Takes our reference to child_bs */ - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child", &child_of_bds, BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); /* This child is just there to be deleted * (for detach_instead_of_delete == true) */ null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); blk_insert_bs(blk, bs, &error_abort); @@ -1193,14 +1153,14 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque) bdrv_dec_in_flight(data->child_b->bs); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(data->parent_b, data->child_b); bdrv_ref(data->c); data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); } static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret) @@ -1298,7 +1258,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb) /* Set child relationships */ bdrv_ref(b); bdrv_ref(a); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds, BDRV_CHILD_DATA, &error_abort); child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds, @@ -1308,7 +1268,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb) bdrv_attach_child(parent_a, a, "PA-A", by_parent_cb ? 
&child_of_bds : &detach_by_driver_cb_class, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); g_assert_cmpint(parent_a->refcnt, ==, 1); g_assert_cmpint(parent_b->refcnt, ==, 1); @@ -1401,9 +1361,7 @@ static void test_append_to_drained(void) g_assert_cmpint(base_s->drain_count, ==, 1); g_assert_cmpint(base->in_flight, ==, 0); - aio_context_acquire(qemu_get_aio_context()); bdrv_append(overlay, base, &error_abort); - aio_context_release(qemu_get_aio_context()); g_assert_cmpint(base->in_flight, ==, 0); g_assert_cmpint(overlay->in_flight, ==, 0); @@ -1438,16 +1396,11 @@ static void test_set_aio_context(void) bdrv_drained_begin(bs); bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort); - - aio_context_acquire(ctx_a); bdrv_drained_end(bs); bdrv_drained_begin(bs); bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort); - aio_context_release(ctx_a); - aio_context_acquire(ctx_b); bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort); - aio_context_release(ctx_b); bdrv_drained_end(bs); bdrv_unref(bs); @@ -1638,6 +1591,7 @@ static const BlockJobDriver test_simple_job_driver = { static int drop_intermediate_poll_update_filename(BdrvChild *child, BlockDriverState *new_base, const char *filename, + bool backing_mask_protocol, Error **errp) { /* @@ -1727,7 +1681,7 @@ static void test_drop_intermediate_poll(void) * Establish the chain last, so the chain links are the first * elements in the BDS.parents lists */ - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); for (i = 0; i < 3; i++) { if (i) { /* Takes the reference to chain[i - 1] */ @@ -1735,7 +1689,7 @@ static void test_drop_intermediate_poll(void) &chain_child_class, BDRV_CHILD_COW, &error_abort); } } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); job = block_job_create("job", &test_simple_job_driver, NULL, job_node, 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); @@ -1749,7 +1703,7 @@ static void test_drop_intermediate_poll(void) job->should_complete = true; g_assert(!job_has_completed); - ret = bdrv_drop_intermediate(chain[1], chain[0], NULL); + ret = bdrv_drop_intermediate(chain[1], chain[0], NULL, false); aio_poll(qemu_get_aio_context(), false); g_assert(ret == 0); g_assert(job_has_completed); @@ -1982,10 +1936,10 @@ static void do_test_replace_child_mid_drain(int old_drain_count, new_child_bs->total_sectors = 1; bdrv_ref(old_child_bs); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds, BDRV_CHILD_COW, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); parent_s->setup_completed = true; for (i = 0; i < old_drain_count; i++) { @@ -2016,9 +1970,9 @@ static void do_test_replace_child_mid_drain(int old_drain_count, g_assert(parent_bs->quiesce_counter == old_drain_count); bdrv_drained_begin(old_child_bs); bdrv_drained_begin(new_child_bs); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_replace_node(old_child_bs, new_child_bs, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_drained_end(new_child_bs); bdrv_drained_end(old_child_bs); g_assert(parent_bs->quiesce_counter == new_drain_count); diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c index 074adcbb937..cafc023db42 100644 --- a/tests/unit/test-bdrv-graph-mod.c +++ b/tests/unit/test-bdrv-graph-mod.c @@ -137,15 +137,13 @@ static void test_update_perm_tree(void) blk_insert_bs(root, bs, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(filter, bs, 
"child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); - aio_context_acquire(qemu_get_aio_context()); ret = bdrv_append(filter, bs, NULL); g_assert_cmpint(ret, <, 0); - aio_context_release(qemu_get_aio_context()); bdrv_unref(filter); blk_unref(root); @@ -206,14 +204,12 @@ static void test_should_update_child(void) bdrv_set_backing_hd(target, bs, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); g_assert(target->backing->bs == bs); bdrv_attach_child(filter, target, "target", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); - aio_context_acquire(qemu_get_aio_context()); + bdrv_graph_wrunlock(); bdrv_append(filter, bs, &error_abort); - aio_context_release(qemu_get_aio_context()); bdrv_graph_rdlock_main_loop(); g_assert(target->backing->bs == bs); @@ -248,7 +244,7 @@ static void test_parallel_exclusive_write(void) bdrv_ref(base); bdrv_ref(fl1); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(top, fl1, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); @@ -260,7 +256,7 @@ static void test_parallel_exclusive_write(void) &error_abort); bdrv_replace_node(fl1, fl2, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_drained_end(fl2); bdrv_drained_end(fl1); @@ -367,7 +363,7 @@ static void test_parallel_perm_update(void) */ bdrv_ref(base); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(top, ws, "file", &child_of_bds, BDRV_CHILD_DATA, &error_abort); c_fl1 = bdrv_attach_child(ws, fl1, "first", &child_of_bds, @@ -380,7 +376,7 @@ static void test_parallel_perm_update(void) bdrv_attach_child(fl2, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); /* Select fl1 as first child to be active */ s->selected = c_fl1; @@ -434,15 +430,13 @@ static void test_append_greedy_filter(void) BlockDriverState *base = no_perm_node("base"); BlockDriverState *fl = exclusive_writer_node("fl1"); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(top, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); - aio_context_acquire(qemu_get_aio_context()); bdrv_append(fl, base, &error_abort); - aio_context_release(qemu_get_aio_context()); bdrv_unref(fl); bdrv_unref(top); } diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c index 9b15d2768cc..3766d5de6be 100644 --- a/tests/unit/test-block-iothread.c +++ b/tests/unit/test-block-iothread.c @@ -483,7 +483,6 @@ static void test_sync_op(const void *opaque) bdrv_graph_rdunlock_main_loop(); blk_set_aio_context(blk, ctx, &error_abort); - aio_context_acquire(ctx); if (t->fn) { t->fn(c); } @@ -491,7 +490,6 @@ static void test_sync_op(const void *opaque) t->blkfn(blk); } blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx); bdrv_unref(bs); blk_unref(blk); @@ -576,9 +574,7 @@ static void test_attach_blockjob(void) aio_poll(qemu_get_aio_context(), false); } - aio_context_acquire(ctx); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx); tjob->n = 0; while (tjob->n == 0) { @@ -595,9 +591,7 @@ static void test_attach_blockjob(void) WITH_JOB_LOCK_GUARD() { job_complete_sync_locked(&tjob->common.job, &error_abort); } - aio_context_acquire(ctx); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - 
aio_context_release(ctx); bdrv_unref(bs); blk_unref(blk); @@ -654,9 +648,7 @@ static void test_propagate_basic(void) /* Switch the AioContext back */ main_ctx = qemu_get_aio_context(); - aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == main_ctx); g_assert(bdrv_get_aio_context(bs_a) == main_ctx); g_assert(bdrv_get_aio_context(bs_verify) == main_ctx); @@ -732,9 +724,7 @@ static void test_propagate_diamond(void) /* Switch the AioContext back */ main_ctx = qemu_get_aio_context(); - aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == main_ctx); g_assert(bdrv_get_aio_context(bs_verify) == main_ctx); g_assert(bdrv_get_aio_context(bs_a) == main_ctx); @@ -764,13 +754,11 @@ static void test_propagate_mirror(void) &error_abort); /* Start a mirror job */ - aio_context_acquire(main_ctx); mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0, MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT, false, "filter_node", MIRROR_COPY_MODE_BACKGROUND, &error_abort); - aio_context_release(main_ctx); WITH_JOB_LOCK_GUARD() { job = job_get_locked("job0"); @@ -785,9 +773,7 @@ static void test_propagate_mirror(void) g_assert(job->aio_context == ctx); /* Change the AioContext of target */ - aio_context_acquire(ctx); bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort); - aio_context_release(ctx); g_assert(bdrv_get_aio_context(src) == main_ctx); g_assert(bdrv_get_aio_context(target) == main_ctx); g_assert(bdrv_get_aio_context(filter) == main_ctx); @@ -805,10 +791,8 @@ static void test_propagate_mirror(void) g_assert(bdrv_get_aio_context(filter) == main_ctx); /* ...unless we explicitly allow it */ - aio_context_acquire(ctx); blk_set_allow_aio_context_change(blk, true); bdrv_try_change_aio_context(target, ctx, NULL, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(src) == ctx); @@ -817,10 +801,8 @@ static void test_propagate_mirror(void) job_cancel_sync_all(); - aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort); - aio_context_release(ctx); blk_unref(blk); bdrv_unref(src); @@ -836,7 +818,6 @@ static void test_attach_second_node(void) BlockDriverState *bs, *filter; QDict *options; - aio_context_acquire(main_ctx); blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL); bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort); blk_insert_bs(blk, bs, &error_abort); @@ -846,15 +827,12 @@ static void test_attach_second_node(void) qdict_put_str(options, "file", "base"); filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort); - aio_context_release(main_ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == ctx); g_assert(bdrv_get_aio_context(filter) == ctx); - aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == main_ctx); g_assert(bdrv_get_aio_context(bs) == main_ctx); g_assert(bdrv_get_aio_context(filter) == main_ctx); @@ -868,11 +846,9 @@ static void test_attach_preserve_blk_ctx(void) { IOThread *iothread = iothread_new(); AioContext *ctx = iothread_get_aio_context(iothread); - AioContext *main_ctx = qemu_get_aio_context(); BlockBackend *blk; BlockDriverState *bs; - 
aio_context_acquire(main_ctx); blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL); bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort); bs->total_sectors = 65536 / BDRV_SECTOR_SIZE; @@ -881,25 +857,18 @@ static void test_attach_preserve_blk_ctx(void) blk_insert_bs(blk, bs, &error_abort); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == ctx); - aio_context_release(main_ctx); /* Remove the node again */ - aio_context_acquire(ctx); blk_remove_bs(blk); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context()); /* Re-attach the node */ - aio_context_acquire(main_ctx); blk_insert_bs(blk, bs, &error_abort); - aio_context_release(main_ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == ctx); - aio_context_acquire(ctx); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx); bdrv_unref(bs); blk_unref(blk); } diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c index a130f6fefba..fe3e0d2d38c 100644 --- a/tests/unit/test-blockjob.c +++ b/tests/unit/test-blockjob.c @@ -228,7 +228,6 @@ static void cancel_common(CancelJob *s) BlockJob *job = &s->common; BlockBackend *blk = s->blk; JobStatus sts = job->job.status; - AioContext *ctx = job->job.aio_context; job_cancel_sync(&job->job, true); WITH_JOB_LOCK_GUARD() { @@ -240,9 +239,7 @@ static void cancel_common(CancelJob *s) job_unref_locked(&job->job); } - aio_context_acquire(ctx); destroy_blk(blk); - aio_context_release(ctx); } @@ -391,132 +388,6 @@ static void test_cancel_concluded(void) cancel_common(s); } -/* (See test_yielding_driver for the job description) */ -typedef struct YieldingJob { - BlockJob common; - bool should_complete; -} YieldingJob; - -static void yielding_job_complete(Job *job, Error **errp) -{ - YieldingJob *s = container_of(job, YieldingJob, common.job); - s->should_complete = true; - job_enter(job); -} - -static int coroutine_fn yielding_job_run(Job *job, Error **errp) -{ - YieldingJob *s = container_of(job, YieldingJob, common.job); - - job_transition_to_ready(job); - - while (!s->should_complete) { - job_yield(job); - } - - return 0; -} - -/* - * This job transitions immediately to the READY state, and then - * yields until it is to complete. - */ -static const BlockJobDriver test_yielding_driver = { - .job_driver = { - .instance_size = sizeof(YieldingJob), - .free = block_job_free, - .user_resume = block_job_user_resume, - .run = yielding_job_run, - .complete = yielding_job_complete, - }, -}; - -/* - * Test that job_complete_locked() works even on jobs that are in a paused - * state (i.e., STANDBY). - * - * To do this, run YieldingJob in an IO thread, get it into the READY - * state, then have a drained section. Before ending the section, - * acquire the context so the job will not be entered and will thus - * remain on STANDBY. - * - * job_complete_locked() should still work without error. - * - * Note that on the QMP interface, it is impossible to lock an IO - * thread before a drained section ends. In practice, the - * bdrv_drain_all_end() and the aio_context_acquire() will be - * reversed. However, that makes for worse reproducibility here: - * Sometimes, the job would no longer be in STANDBY then but already - * be started. We cannot prevent that, because the IO thread runs - * concurrently. We can only prevent it by taking the lock before - * ending the drained section, so we do that. 
- * - * (You can reverse the order of operations and most of the time the - * test will pass, but sometimes the assert(status == STANDBY) will - * fail.) - */ -static void test_complete_in_standby(void) -{ - BlockBackend *blk; - IOThread *iothread; - AioContext *ctx; - Job *job; - BlockJob *bjob; - - /* Create a test drive, move it to an IO thread */ - blk = create_blk(NULL); - iothread = iothread_new(); - - ctx = iothread_get_aio_context(iothread); - blk_set_aio_context(blk, ctx, &error_abort); - - /* Create our test job */ - bjob = mk_job(blk, "job", &test_yielding_driver, true, - JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS); - job = &bjob->job; - assert_job_status_is(job, JOB_STATUS_CREATED); - - /* Wait for the job to become READY */ - job_start(job); - /* - * Here we are waiting for the status to change, so don't bother - * protecting the read every time. - */ - AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY); - - /* Begin the drained section, pausing the job */ - bdrv_drain_all_begin(); - assert_job_status_is(job, JOB_STATUS_STANDBY); - - /* Lock the IO thread to prevent the job from being run */ - aio_context_acquire(ctx); - /* This will schedule the job to resume it */ - bdrv_drain_all_end(); - aio_context_release(ctx); - - WITH_JOB_LOCK_GUARD() { - /* But the job cannot run, so it will remain on standby */ - assert(job->status == JOB_STATUS_STANDBY); - - /* Even though the job is on standby, this should work */ - job_complete_locked(job, &error_abort); - - /* The test is done now, clean up. */ - job_finish_sync_locked(job, NULL, &error_abort); - assert(job->status == JOB_STATUS_PENDING); - - job_finalize_locked(job, &error_abort); - assert(job->status == JOB_STATUS_CONCLUDED); - - job_dismiss_locked(&job, &error_abort); - } - - aio_context_acquire(ctx); - destroy_blk(blk); - aio_context_release(ctx); - iothread_join(iothread); -} - int main(int argc, char **argv) { qemu_init_main_loop(&error_abort); @@ -531,13 +402,5 @@ int main(int argc, char **argv) g_test_add_func("/blockjob/cancel/standby", test_cancel_standby); g_test_add_func("/blockjob/cancel/pending", test_cancel_pending); g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded); - - /* - * This test is flaky and sometimes fails in CI and otherwise: - * don't run unless user opts in via environment variable. 
- */ - if (getenv("QEMU_TEST_FLAKY_TESTS")) { - g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby); - } return g_test_run(); } diff --git a/tests/unit/test-char.c b/tests/unit/test-char.c index 649fdf64e19..f273ce52261 100644 --- a/tests/unit/test-char.c +++ b/tests/unit/test-char.c @@ -556,7 +556,7 @@ static int make_udp_socket(int *port) socklen_t alen = sizeof(addr); int ret, sock = qemu_socket(PF_INET, SOCK_DGRAM, 0); - g_assert_cmpint(sock, >, 0); + g_assert_cmpint(sock, >=, 0); addr.sin_family = AF_INET ; addr.sin_addr.s_addr = htonl(INADDR_ANY); addr.sin_port = 0; @@ -1203,6 +1203,30 @@ static void char_serial_test(void) } #endif +#if defined(HAVE_CHARDEV_PARALLEL) && !defined(WIN32) +static void char_parallel_test(void) +{ + QemuOpts *opts; + Chardev *chr; + + opts = qemu_opts_create(qemu_find_opts("chardev"), "parallel-id", + 1, &error_abort); + qemu_opt_set(opts, "backend", "parallel", &error_abort); + qemu_opt_set(opts, "path", "/dev/null", &error_abort); + + chr = qemu_chr_new_from_opts(opts, NULL, NULL); +#ifdef __linux__ + /* fails to PPCLAIM, see qemu_chr_open_pp_fd() */ + g_assert_null(chr); +#else + g_assert_nonnull(chr); + object_unparent(OBJECT(chr)); +#endif + + qemu_opts_del(opts); +} +#endif + #ifndef _WIN32 static void char_file_fifo_test(void) { @@ -1383,7 +1407,7 @@ static void char_hotswap_test(void) int port; int sock = make_udp_socket(&port); - g_assert_cmpint(sock, >, 0); + g_assert_cmpint(sock, >=, 0); chr_args = g_strdup_printf("udp:127.0.0.1:%d", port); @@ -1544,6 +1568,9 @@ int main(int argc, char **argv) g_test_add_func("/char/udp", char_udp_test); #if defined(HAVE_CHARDEV_SERIAL) && !defined(WIN32) g_test_add_func("/char/serial", char_serial_test); +#endif +#if defined(HAVE_CHARDEV_PARALLEL) && !defined(WIN32) + g_test_add_func("/char/parallel", char_parallel_test); #endif g_test_add_func("/char/hotswap", char_hotswap_test); g_test_add_func("/char/websocket", char_websock_test); diff --git a/tests/unit/test-crypto-block.c b/tests/unit/test-crypto-block.c index 347cd5f3d79..6cfc817a92e 100644 --- a/tests/unit/test-crypto-block.c +++ b/tests/unit/test-crypto-block.c @@ -283,6 +283,7 @@ static void test_block(gconstpointer opaque) test_block_init_func, test_block_write_func, &header, + 0, &error_abort); g_assert(blk); @@ -362,6 +363,7 @@ test_luks_bad_header(gconstpointer data) test_block_init_func, test_block_write_func, &buf, + 0, &error_abort); g_assert(blk); diff --git a/tests/unit/test-crypto-cipher.c b/tests/unit/test-crypto-cipher.c index d9d9d078ff1..f5152e569dd 100644 --- a/tests/unit/test-crypto-cipher.c +++ b/tests/unit/test-crypto-cipher.c @@ -382,6 +382,19 @@ static QCryptoCipherTestData test_data[] = { .plaintext = "90afe91bb288544f2c32dc239b2635e6", .ciphertext = "6cb4561c40bf0a9705931cb6d408e7fa", }, +#ifdef CONFIG_CRYPTO_SM4 + { + /* SM4, GB/T 32907-2016, Appendix A.1 */ + .path = "/crypto/cipher/sm4", + .alg = QCRYPTO_CIPHER_ALG_SM4, + .mode = QCRYPTO_CIPHER_MODE_ECB, + .key = "0123456789abcdeffedcba9876543210", + .plaintext = + "0123456789abcdeffedcba9876543210", + .ciphertext = + "681edf34d206965e86b3e94f536e4246", + }, +#endif { /* #1 32 byte key, 32 byte PTX */ .path = "/crypto/cipher/aes-xts-128-1", @@ -663,9 +676,8 @@ static void test_cipher(const void *opaque) cipher = qcrypto_cipher_new( data->alg, data->mode, key, nkey, - &err); + data->plaintext ? 
&error_abort : &err); if (data->plaintext) { - g_assert(err == NULL); g_assert(cipher != NULL); } else { error_free_or_abort(&err); @@ -809,6 +821,10 @@ int main(int argc, char **argv) for (i = 0; i < G_N_ELEMENTS(test_data); i++) { if (qcrypto_cipher_supports(test_data[i].alg, test_data[i].mode)) { g_test_add_data_func(test_data[i].path, &test_data[i], test_cipher); + } else { + g_printerr("# skip unsupported %s:%s\n", + QCryptoCipherAlgorithm_str(test_data[i].alg), + QCryptoCipherMode_str(test_data[i].mode)); } } diff --git a/tests/unit/test-io-task.c b/tests/unit/test-io-task.c index 953a50ae66e..115dba89702 100644 --- a/tests/unit/test-io-task.c +++ b/tests/unit/test-io-task.c @@ -25,7 +25,7 @@ #include "qapi/error.h" #include "qemu/module.h" -#define TYPE_DUMMY "qemu:dummy" +#define TYPE_DUMMY "qemu-dummy" typedef struct DummyObject DummyObject; typedef struct DummyObjectClass DummyObjectClass; diff --git a/tests/unit/test-iov.c b/tests/unit/test-iov.c index 6f7623d3107..75bc3be0057 100644 --- a/tests/unit/test-iov.c +++ b/tests/unit/test-iov.c @@ -197,15 +197,17 @@ static void test_io(void) s = g_test_rand_int_range(0, j - k + 1); r = iov_send(sv[1], iov, niov, k, s); g_assert(memcmp(iov, siov, sizeof(*iov)*niov) == 0); - if (r >= 0) { - k += r; - usleep(g_test_rand_int_range(0, 30)); - } else if (errno == EAGAIN) { - select(sv[1]+1, NULL, &fds, NULL, NULL); - continue; - } else { - perror("send"); - exit(1); + if (r < 0) { + if (errno == EAGAIN) { + r = 0; + } else { + perror("send"); + exit(1); + } + } + k += r; + if (k < j) { + select(sv[1] + 1, NULL, &fds, NULL, NULL); } } while(k < j); } diff --git a/tests/unit/test-qga.c b/tests/unit/test-qga.c index 671e83cb86c..8cddf5dc37b 100644 --- a/tests/unit/test-qga.c +++ b/tests/unit/test-qga.c @@ -822,7 +822,7 @@ static void test_qga_guest_exec(gconstpointer fix) /* exec 'echo foo bar' */ ret = qmp_fd(fixture->fd, "{'execute': 'guest-exec', 'arguments': {" - " 'path': '/bin/echo', 'arg': [ '-n', '\" test_str \"' ]," + " 'path': 'echo', 'arg': [ '-n', '\" test_str \"' ]," " 'capture-output': true } }"); g_assert_nonnull(ret); qmp_assert_no_error(ret); @@ -883,7 +883,7 @@ static void test_qga_guest_exec_separated(gconstpointer fix) /* exec 'echo foo bar' */ ret = qmp_fd(fixture->fd, "{'execute': 'guest-exec', 'arguments': {" - " 'path': '/bin/bash'," + " 'path': 'bash'," " 'arg': [ '-c', 'for i in $(seq 4); do if (( $i %% 2 )); then echo stdout; else echo stderr 1>&2; fi; done;' ]," " 'capture-output': 'separated' } }"); g_assert_nonnull(ret); @@ -924,7 +924,7 @@ static void test_qga_guest_exec_merged(gconstpointer fix) /* exec 'echo foo bar' */ ret = qmp_fd(fixture->fd, "{'execute': 'guest-exec', 'arguments': {" - " 'path': '/bin/bash'," + " 'path': 'bash'," " 'arg': [ '-c', 'for i in $(seq 4); do if (( $i %% 2 )); then echo stdout; else echo stderr 1>&2; fi; done;' ]," " 'capture-output': 'merged' } }"); g_assert_nonnull(ret); diff --git a/tests/unit/test-qmp-event.c b/tests/unit/test-qmp-event.c index 3626d2372f1..08e95a382bd 100644 --- a/tests/unit/test-qmp-event.c +++ b/tests/unit/test-qmp-event.c @@ -24,19 +24,15 @@ #include "test-qapi-events.h" #include "test-qapi-emit-events.h" -typedef struct TestEventData { - QDict *expect; - bool emitted; -} TestEventData; - -TestEventData *test_event_data; -static GMutex test_event_lock; +static QDict *expected_event; void test_qapi_event_emit(test_QAPIEvent event, QDict *d) { QDict *t; int64_t s, ms; + g_assert(expected_event); + /* Verify that we have timestamp, then remove it to compare 
other fields */ t = qdict_get_qdict(d, "timestamp"); g_assert(t); @@ -52,71 +48,38 @@ void test_qapi_event_emit(test_QAPIEvent event, QDict *d) qdict_del(d, "timestamp"); - g_assert(qobject_is_equal(QOBJECT(d), QOBJECT(test_event_data->expect))); - test_event_data->emitted = true; -} - -static void event_prepare(TestEventData *data, - const void *unused) -{ - /* Global variable test_event_data was used to pass the expectation, so - test cases can't be executed at same time. */ - g_mutex_lock(&test_event_lock); - test_event_data = data; -} - -static void event_teardown(TestEventData *data, - const void *unused) -{ - test_event_data = NULL; - g_mutex_unlock(&test_event_lock); + g_assert(qobject_is_equal(QOBJECT(d), QOBJECT(expected_event))); + qobject_unref(expected_event); + expected_event = NULL; } -static void event_test_add(const char *testpath, - void (*test_func)(TestEventData *data, - const void *user_data)) +static void test_event_a(void) { - g_test_add(testpath, TestEventData, NULL, event_prepare, test_func, - event_teardown); -} - - -/* Test cases */ - -static void test_event_a(TestEventData *data, - const void *unused) -{ - data->expect = qdict_from_jsonf_nofail("{ 'event': 'EVENT_A' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'EVENT_A' }"); qapi_event_send_event_a(); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } -static void test_event_b(TestEventData *data, - const void *unused) +static void test_event_b(void) { - data->expect = qdict_from_jsonf_nofail("{ 'event': 'EVENT_B' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'EVENT_B' }"); qapi_event_send_event_b(); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } -static void test_event_c(TestEventData *data, - const void *unused) +static void test_event_c(void) { UserDefOne b = { .integer = 2, .string = (char *)"test1" }; - data->expect = qdict_from_jsonf_nofail( + expected_event = qdict_from_jsonf_nofail( "{ 'event': 'EVENT_C', 'data': {" " 'a': 1, 'b': { 'integer': 2, 'string': 'test1' }, 'c': 'test2' } }"); qapi_event_send_event_c(true, 1, &b, "test2"); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } /* Complex type */ -static void test_event_d(TestEventData *data, - const void *unused) +static void test_event_d(void) { UserDefOne struct1 = { .integer = 2, .string = (char *)"test1", @@ -129,65 +92,56 @@ static void test_event_d(TestEventData *data, .enum2 = ENUM_ONE_VALUE2, }; - data->expect = qdict_from_jsonf_nofail( + expected_event = qdict_from_jsonf_nofail( "{ 'event': 'EVENT_D', 'data': {" " 'a': {" " 'struct1': { 'integer': 2, 'string': 'test1', 'enum1': 'value1' }," " 'string': 'test2', 'enum2': 'value2' }," " 'b': 'test3', 'enum3': 'value3' } }"); qapi_event_send_event_d(&a, "test3", NULL, true, ENUM_ONE_VALUE3); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } -static void test_event_deprecated(TestEventData *data, const void *unused) +static void test_event_deprecated(void) { - data->expect = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES1' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES1' }"); memset(&compat_policy, 0, sizeof(compat_policy)); qapi_event_send_test_event_features1(); - g_assert(data->emitted); + g_assert(!expected_event); compat_policy.has_deprecated_output = true; compat_policy.deprecated_output = COMPAT_POLICY_OUTPUT_HIDE; - data->emitted = false; 
qapi_event_send_test_event_features1(); - g_assert(!data->emitted); - - qobject_unref(data->expect); } -static void test_event_deprecated_data(TestEventData *data, const void *unused) +static void test_event_deprecated_data(void) { memset(&compat_policy, 0, sizeof(compat_policy)); - data->expect = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0'," + expected_event = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0'," " 'data': { 'foo': 42 } }"); qapi_event_send_test_event_features0(42); - g_assert(data->emitted); + g_assert(!expected_event); - qobject_unref(data->expect); compat_policy.has_deprecated_output = true; compat_policy.deprecated_output = COMPAT_POLICY_OUTPUT_HIDE; - data->expect = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0' }"); qapi_event_send_test_event_features0(42); - g_assert(data->emitted); - - qobject_unref(data->expect); } int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); - event_test_add("/event/event_a", test_event_a); - event_test_add("/event/event_b", test_event_b); - event_test_add("/event/event_c", test_event_c); - event_test_add("/event/event_d", test_event_d); - event_test_add("/event/deprecated", test_event_deprecated); - event_test_add("/event/deprecated_data", test_event_deprecated_data); + g_test_add_func("/event/event_a", test_event_a); + g_test_add_func("/event/event_b", test_event_b); + g_test_add_func("/event/event_c", test_event_c); + g_test_add_func("/event/event_d", test_event_d); + g_test_add_func("/event/deprecated", test_event_deprecated); + g_test_add_func("/event/deprecated_data", test_event_deprecated_data); g_test_run(); return 0; diff --git a/tests/unit/test-replication.c b/tests/unit/test-replication.c index afff908d77a..5d2003b8ced 100644 --- a/tests/unit/test-replication.c +++ b/tests/unit/test-replication.c @@ -199,17 +199,13 @@ static BlockBackend *start_primary(void) static void teardown_primary(void) { BlockBackend *blk; - AioContext *ctx; /* remove P_ID */ blk = blk_by_name(P_ID); assert(blk); - ctx = blk_get_aio_context(blk); - aio_context_acquire(ctx); monitor_remove_blk(blk); blk_unref(blk); - aio_context_release(ctx); } static void test_primary_read(void) @@ -345,27 +341,20 @@ static void teardown_secondary(void) { /* only need to destroy two BBs */ BlockBackend *blk; - AioContext *ctx; /* remove S_LOCAL_DISK_ID */ blk = blk_by_name(S_LOCAL_DISK_ID); assert(blk); - ctx = blk_get_aio_context(blk); - aio_context_acquire(ctx); monitor_remove_blk(blk); blk_unref(blk); - aio_context_release(ctx); /* remove S_ID */ blk = blk_by_name(S_ID); assert(blk); - ctx = blk_get_aio_context(blk); - aio_context_acquire(ctx); monitor_remove_blk(blk); blk_unref(blk); - aio_context_release(ctx); } static void test_secondary_read(void) diff --git a/tests/unit/test-smp-parse.c b/tests/unit/test-smp-parse.c index 24972666a74..8994337e12c 100644 --- a/tests/unit/test-smp-parse.c +++ b/tests/unit/test-smp-parse.c @@ -20,8 +20,8 @@ #define T true #define F false -#define MIN_CPUS 1 /* set the min CPUs supported by the machine as 1 */ -#define MAX_CPUS 512 /* set the max CPUs supported by the machine as 512 */ +#define MIN_CPUS 1 /* set the min CPUs supported by the machine as 1 */ +#define MAX_CPUS 4096 /* set the max CPUs supported by the machine as 4096 */ #define SMP_MACHINE_NAME "TEST-SMP" @@ -75,6 +75,40 @@ .has_maxcpus = hf, .maxcpus = f, \ } +/* + * Currently a 5-level topology hierarchy is supported on s390 ccw machines 
+ * -drawers/books/sockets/cores/threads + */ +#define SMP_CONFIG_WITH_BOOKS_DRAWERS(ha, a, hb, b, hc, c, hd, \ + d, he, e, hf, f, hg, g) \ + { \ + .has_cpus = ha, .cpus = a, \ + .has_drawers = hb, .drawers = b, \ + .has_books = hc, .books = c, \ + .has_sockets = hd, .sockets = d, \ + .has_cores = he, .cores = e, \ + .has_threads = hf, .threads = f, \ + .has_maxcpus = hg, .maxcpus = g, \ + } + +/* + * Currently QEMU supports up to a 7-level topology hierarchy, which is the + * QEMU's unified abstract representation of CPU topology. + * -drawers/books/sockets/dies/clusters/cores/threads + */ +#define SMP_CONFIG_WITH_FULL_TOPO(a, b, c, d, e, f, g, h, i) \ + { \ + .has_cpus = true, .cpus = a, \ + .has_drawers = true, .drawers = b, \ + .has_books = true, .books = c, \ + .has_sockets = true, .sockets = d, \ + .has_dies = true, .dies = e, \ + .has_clusters = true, .clusters = f, \ + .has_cores = true, .cores = g, \ + .has_threads = true, .threads = h, \ + .has_maxcpus = true, .maxcpus = i, \ + } + /** * @config - the given SMP configuration * @expect_prefer_sockets - the expected parsing result for the @@ -308,6 +342,16 @@ static const struct SMPTestData data_generic_invalid[] = { /* config: -smp 2,clusters=2 */ .config = SMP_CONFIG_WITH_CLUSTERS(T, 2, F, 0, T, 2, F, 0, F, 0, F, 0), .expect_error = "clusters not supported by this machine's CPU topology", + }, { + /* config: -smp 2,books=2 */ + .config = SMP_CONFIG_WITH_BOOKS_DRAWERS(T, 2, F, 0, T, 2, F, + 0, F, 0, F, 0, F, 0), + .expect_error = "books not supported by this machine's CPU topology", + }, { + /* config: -smp 2,drawers=2 */ + .config = SMP_CONFIG_WITH_BOOKS_DRAWERS(T, 2, T, 2, F, 0, F, + 0, F, 0, F, 0, F, 0), + .expect_error = "drawers not supported by this machine's CPU topology", }, { /* config: -smp 8,sockets=2,cores=4,threads=2,maxcpus=8 */ .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, T, 2, T, 8), @@ -323,17 +367,23 @@ static const struct SMPTestData data_generic_invalid[] = { "sockets (2) * cores (4) * threads (2) " "== maxcpus (16) < smp_cpus (18)", }, { - /* config: -smp 1 - * should tweak the supported min CPUs to 2 for testing */ - .config = SMP_CONFIG_GENERIC(T, 1, F, 0, F, 0, F, 0, F, 0), + /* + * config: -smp 1 + * The test machine should tweak the supported min CPUs to + * 2 (MIN_CPUS + 1) for testing. + */ + .config = SMP_CONFIG_GENERIC(T, MIN_CPUS, F, 0, F, 0, F, 0, F, 0), .expect_error = "Invalid SMP CPUs 1. The min CPUs supported " "by machine '" SMP_MACHINE_NAME "' is 2", }, { - /* config: -smp 512 - * should tweak the supported max CPUs to 511 for testing */ - .config = SMP_CONFIG_GENERIC(T, 512, F, 0, F, 0, F, 0, F, 0), - .expect_error = "Invalid SMP CPUs 512. The max CPUs supported " - "by machine '" SMP_MACHINE_NAME "' is 511", + /* + * config: -smp 4096 + * The test machine should tweak the supported max CPUs to + * 4095 (MAX_CPUS - 1) for testing. + */ + .config = SMP_CONFIG_GENERIC(T, 4096, F, 0, F, 0, F, 0, F, 0), + .expect_error = "Invalid SMP CPUs 4096. 
The max CPUs supported " + "by machine '" SMP_MACHINE_NAME "' is 4095", }, }; @@ -373,11 +423,199 @@ static const struct SMPTestData data_with_clusters_invalid[] = { }, }; +static const struct SMPTestData data_with_books_invalid[] = { + { + /* config: -smp 16,books=2,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_WITH_BOOKS_DRAWERS(T, 16, F, 1, T, 2, T, + 2, T, 4, T, 2, T, 16), + .expect_error = "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "books (2) * sockets (2) * cores (4) * threads (2) " + "!= maxcpus (16)", + }, { + /* config: -smp 34,books=2,sockets=2,cores=4,threads=2,maxcpus=32 */ + .config = SMP_CONFIG_WITH_BOOKS_DRAWERS(T, 34, F, 1, T, 2, T, + 2, T, 4, T, 2, T, 32), + .expect_error = "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "books (2) * sockets (2) * cores (4) * threads (2) " + "== maxcpus (32) < smp_cpus (34)", + }, +}; + +static const struct SMPTestData data_with_drawers_invalid[] = { + { + /* config: -smp 16,drawers=2,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_WITH_BOOKS_DRAWERS(T, 16, T, 2, F, 1, T, + 2, T, 4, T, 2, T, 16), + .expect_error = "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "drawers (2) * sockets (2) * cores (4) * threads (2) " + "!= maxcpus (16)", + }, { + /* config: -smp 34,drawers=2,sockets=2,cores=4,threads=2,maxcpus=32 */ + .config = SMP_CONFIG_WITH_BOOKS_DRAWERS(T, 34, T, 2, F, 1, T, + 2, T, 4, T, 2, T, 32), + .expect_error = "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "drawers (2) * sockets (2) * cores (4) * threads (2) " + "== maxcpus (32) < smp_cpus (34)", + }, +}; + +static const struct SMPTestData data_with_drawers_books_invalid[] = { + { + /* + * config: -smp 200,drawers=2,books=2,sockets=2,cores=4,\ + * threads=2,maxcpus=200 + */ + .config = SMP_CONFIG_WITH_BOOKS_DRAWERS(T, 200, T, 3, T, 5, T, + 2, T, 4, T, 2, T, 200), + .expect_error = "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "drawers (3) * books (5) * sockets (2) * " + "cores (4) * threads (2) != maxcpus (200)", + }, { + /* + * config: -smp 242,drawers=2,books=2,sockets=2,cores=4,\ + * threads=2,maxcpus=240 + */ + .config = SMP_CONFIG_WITH_BOOKS_DRAWERS(T, 242, T, 3, T, 5, T, + 2, T, 4, T, 2, T, 240), + .expect_error = "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "drawers (3) * books (5) * sockets (2) * " + "cores (4) * threads (2) " + "== maxcpus (240) < smp_cpus (242)", + }, +}; + +static const struct SMPTestData data_full_topo_invalid[] = { + { + /* + * config: -smp 200,drawers=3,books=5,sockets=2,dies=4,\ + * clusters=2,cores=7,threads=2,maxcpus=200 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(200, 3, 5, 2, 4, 2, 7, 2, 200), + .expect_error = "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "drawers (3) * books (5) * sockets (2) * dies (4) * " + "clusters (2) * cores (7) * threads (2) " + "!= maxcpus (200)", + }, { + /* + * config: -smp 3361,drawers=3,books=5,sockets=2,dies=4,\ + * clusters=2,cores=7,threads=2,maxcpus=3360 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(3361, 3, 5, 2, 4, 2, 7, 2, 3360), + .expect_error = "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "drawers (3) * books (5) * sockets (2) * dies (4) * " + "clusters (2) * cores (7) * threads (2) " + "== maxcpus (3360) < smp_cpus (3361)", + }, { + /* + * config: -smp 1,drawers=3,books=5,sockets=2,dies=4,\ + * 
clusters=2,cores=7,threads=3,maxcpus=5040 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(3361, 3, 5, 2, 4, 2, 7, 3, 5040), + .expect_error = "Invalid SMP CPUs 5040. The max CPUs supported " + "by machine '" SMP_MACHINE_NAME "' is 4096", + }, +}; + +static const struct SMPTestData data_zero_topo_invalid[] = { + { + /* + * Test "cpus=0". + * config: -smp 0,drawers=1,books=1,sockets=1,dies=1,\ + * clusters=1,cores=1,threads=1,maxcpus=1 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(0, 1, 1, 1, 1, 1, 1, 1, 1), + .expect_error = "Invalid CPU topology: CPU topology parameters must " + "be greater than zero", + }, { + /* + * Test "drawers=0". + * config: -smp 1,drawers=0,books=1,sockets=1,dies=1,\ + * clusters=1,cores=1,threads=1,maxcpus=1 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(1, 0, 1, 1, 1, 1, 1, 1, 1), + .expect_error = "Invalid CPU topology: CPU topology parameters must " + "be greater than zero", + }, { + /* + * Test "books=0". + * config: -smp 1,drawers=1,books=0,sockets=1,dies=1,\ + * clusters=1,cores=1,threads=1,maxcpus=1 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(1, 1, 0, 1, 1, 1, 1, 1, 1), + .expect_error = "Invalid CPU topology: CPU topology parameters must " + "be greater than zero", + }, { + /* + * Test "sockets=0". + * config: -smp 1,drawers=1,books=1,sockets=0,dies=1,\ + * clusters=1,cores=1,threads=1,maxcpus=1 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(1, 1, 1, 0, 1, 1, 1, 1, 1), + .expect_error = "Invalid CPU topology: CPU topology parameters must " + "be greater than zero", + }, { + /* + * Test "dies=0". + * config: -smp 1,drawers=1,books=1,sockets=1,dies=0,\ + * clusters=1,cores=1,threads=1,maxcpus=1 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(1, 1, 1, 1, 0, 1, 1, 1, 1), + .expect_error = "Invalid CPU topology: CPU topology parameters must " + "be greater than zero", + }, { + /* + * Test "clusters=0". + * config: -smp 1,drawers=1,books=1,sockets=1,dies=1,\ + * clusters=0,cores=1,threads=1,maxcpus=1 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(1, 1, 1, 1, 1, 0, 1, 1, 1), + .expect_error = "Invalid CPU topology: CPU topology parameters must " + "be greater than zero", + }, { + /* + * Test "cores=0". + * config: -smp 1,drawers=1,books=1,sockets=1,dies=1,\ + * clusters=1,cores=0,threads=1,maxcpus=1 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(1, 1, 1, 1, 1, 1, 0, 1, 1), + .expect_error = "Invalid CPU topology: CPU topology parameters must " + "be greater than zero", + }, { + /* + * Test "threads=0". + * config: -smp 1,drawers=1,books=1,sockets=1,dies=1,\ + * clusters=1,cores=1,threads=0,maxcpus=1 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(1, 1, 1, 1, 1, 1, 1, 0, 1), + .expect_error = "Invalid CPU topology: CPU topology parameters must " + "be greater than zero", + }, { + /* + * Test "maxcpus=0". 
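Taken together, the data_full_topo_invalid and data_zero_topo_invalid entries above exercise three properties: every level must be non-zero, the product of all seven levels must equal maxcpus (with maxcpus covering cpus), and maxcpus must not exceed the machine limit, which the expect_error strings put at 4096 for this test machine. A minimal standalone sketch of those checks, mirroring the error strings rather than the actual QEMU parser:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool full_topo_ok(unsigned cpus, unsigned drawers, unsigned books,
                             unsigned sockets, unsigned dies, unsigned clusters,
                             unsigned cores, unsigned threads, unsigned maxcpus,
                             unsigned machine_max)
    {
        if (!cpus || !drawers || !books || !sockets || !dies ||
            !clusters || !cores || !threads || !maxcpus) {
            return false;   /* "CPU topology parameters must be greater than zero" */
        }
        if (drawers * books * sockets * dies * clusters * cores * threads != maxcpus) {
            return false;   /* "product of the hierarchy must match maxcpus" */
        }
        if (maxcpus < cpus) {
            return false;   /* "maxcpus must be equal to or greater than smp" */
        }
        return maxcpus <= machine_max;   /* otherwise "Invalid SMP CPUs ..." */
    }

    int main(void)
    {
        /* 3 * 5 * 2 * 4 * 2 * 7 * 2 == 3360, so this one is consistent */
        assert(full_topo_ok(3360, 3, 5, 2, 4, 2, 7, 2, 3360, 4096));
        /* threads=3 pushes the product to 5040, past the 4096 machine limit */
        assert(!full_topo_ok(3361, 3, 5, 2, 4, 2, 7, 3, 5040, 4096));
        printf("checks line up with the test data\n");
        return 0;
    }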
+ * config: -smp 1,drawers=1,books=1,sockets=1,dies=1,\ + * clusters=1,cores=1,threads=1,maxcpus=0 + */ + .config = SMP_CONFIG_WITH_FULL_TOPO(1, 1, 1, 1, 1, 1, 1, 1, 0), + .expect_error = "Invalid CPU topology: CPU topology parameters must " + "be greater than zero", + }, +}; + static char *smp_config_to_string(const SMPConfiguration *config) { return g_strdup_printf( "(SMPConfiguration) {\n" " .has_cpus = %5s, cpus = %" PRId64 ",\n" + " .has_drawers = %5s, drawers = %" PRId64 ",\n" + " .has_books = %5s, books = %" PRId64 ",\n" " .has_sockets = %5s, sockets = %" PRId64 ",\n" " .has_dies = %5s, dies = %" PRId64 ",\n" " .has_clusters = %5s, clusters = %" PRId64 ",\n" @@ -386,6 +624,8 @@ static char *smp_config_to_string(const SMPConfiguration *config) " .has_maxcpus = %5s, maxcpus = %" PRId64 ",\n" "}", config->has_cpus ? "true" : "false", config->cpus, + config->has_drawers ? "true" : "false", config->drawers, + config->has_books ? "true" : "false", config->books, config->has_sockets ? "true" : "false", config->sockets, config->has_dies ? "true" : "false", config->dies, config->has_clusters ? "true" : "false", config->clusters, @@ -398,10 +638,10 @@ static char *smp_config_to_string(const SMPConfiguration *config) static unsigned int cpu_topology_get_threads_per_socket(const CpuTopology *topo) { /* Check the divisor to avoid invalid topology examples causing SIGFPE. */ - if (!topo->sockets) { + if (!topo->drawers || !topo->books || !topo->sockets) { return 0; } else { - return topo->max_cpus / topo->sockets; + return topo->max_cpus / topo->drawers / topo->books / topo->sockets; } } @@ -418,11 +658,14 @@ static unsigned int cpu_topology_get_cores_per_socket(const CpuTopology *topo) static char *cpu_topology_to_string(const CpuTopology *topo, unsigned int threads_per_socket, - unsigned int cores_per_socket) + unsigned int cores_per_socket, + bool has_clusters) { return g_strdup_printf( "(CpuTopology) {\n" " .cpus = %u,\n" + " .drawers = %u,\n" + " .books = %u,\n" " .sockets = %u,\n" " .dies = %u,\n" " .clusters = %u,\n" @@ -431,16 +674,20 @@ static char *cpu_topology_to_string(const CpuTopology *topo, " .max_cpus = %u,\n" " .threads_per_socket = %u,\n" " .cores_per_socket = %u,\n" + " .has_clusters = %s,\n" "}", - topo->cpus, topo->sockets, topo->dies, topo->clusters, + topo->cpus, topo->drawers, topo->books, + topo->sockets, topo->dies, topo->clusters, topo->cores, topo->threads, topo->max_cpus, - threads_per_socket, cores_per_socket); + threads_per_socket, cores_per_socket, + has_clusters ? 
"true" : "false"); } static void check_parse(MachineState *ms, const SMPConfiguration *config, const CpuTopology *expect_topo, const char *expect_err, bool is_valid) { + MachineClass *mc = MACHINE_GET_CLASS(ms); g_autofree char *config_str = smp_config_to_string(config); g_autofree char *expect_topo_str = NULL, *output_topo_str = NULL; unsigned int expect_threads_per_socket, expect_cores_per_socket; @@ -453,20 +700,25 @@ static void check_parse(MachineState *ms, const SMPConfiguration *config, cpu_topology_get_cores_per_socket(expect_topo); expect_topo_str = cpu_topology_to_string(expect_topo, expect_threads_per_socket, - expect_cores_per_socket); + expect_cores_per_socket, + config->has_clusters); /* call the generic parser */ machine_parse_smp_config(ms, config, &err); ms_threads_per_socket = machine_topo_get_threads_per_socket(ms); ms_cores_per_socket = machine_topo_get_cores_per_socket(ms); - output_topo_str = cpu_topology_to_string(&ms->smp, ms_threads_per_socket, - ms_cores_per_socket); + output_topo_str = cpu_topology_to_string(&ms->smp, + ms_threads_per_socket, + ms_cores_per_socket, + mc->smp_props.has_clusters); /* when the configuration is supposed to be valid */ if (is_valid) { if ((err == NULL) && (ms->smp.cpus == expect_topo->cpus) && + (ms->smp.drawers == expect_topo->drawers) && + (ms->smp.books == expect_topo->books) && (ms->smp.sockets == expect_topo->sockets) && (ms->smp.dies == expect_topo->dies) && (ms->smp.clusters == expect_topo->clusters) && @@ -474,7 +726,8 @@ static void check_parse(MachineState *ms, const SMPConfiguration *config, (ms->smp.threads == expect_topo->threads) && (ms->smp.max_cpus == expect_topo->max_cpus) && (ms_threads_per_socket == expect_threads_per_socket) && - (ms_cores_per_socket == expect_cores_per_socket)) { + (ms_cores_per_socket == expect_cores_per_socket) && + (mc->smp_props.has_clusters == config->has_clusters)) { return; } @@ -558,6 +811,16 @@ static void unsupported_params_init(const MachineClass *mc, SMPTestData *data) data->expect_prefer_sockets.clusters = 1; data->expect_prefer_cores.clusters = 1; } + + if (!mc->smp_props.books_supported) { + data->expect_prefer_sockets.books = 1; + data->expect_prefer_cores.books = 1; + } + + if (!mc->smp_props.drawers_supported) { + data->expect_prefer_sockets.drawers = 1; + data->expect_prefer_cores.drawers = 1; + } } static void machine_base_class_init(ObjectClass *oc, void *data) @@ -575,8 +838,8 @@ static void machine_generic_invalid_class_init(ObjectClass *oc, void *data) MachineClass *mc = MACHINE_CLASS(oc); /* Force invalid min CPUs and max CPUs */ - mc->min_cpus = 2; - mc->max_cpus = 511; + mc->min_cpus = MIN_CPUS + 1; + mc->max_cpus = MAX_CPUS - 1; } static void machine_with_dies_class_init(ObjectClass *oc, void *data) @@ -593,6 +856,38 @@ static void machine_with_clusters_class_init(ObjectClass *oc, void *data) mc->smp_props.clusters_supported = true; } +static void machine_with_books_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->smp_props.books_supported = true; +} + +static void machine_with_drawers_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->smp_props.drawers_supported = true; +} + +static void machine_with_drawers_books_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->smp_props.drawers_supported = true; + mc->smp_props.books_supported = true; +} + +static void machine_full_topo_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = 
MACHINE_CLASS(oc); + + mc->smp_props.drawers_supported = true; + mc->smp_props.books_supported = true; + mc->smp_props.dies_supported = true; + mc->smp_props.clusters_supported = true; +} + static void test_generic_valid(const void *opaque) { const char *machine_type = opaque; @@ -607,11 +902,6 @@ static void test_generic_valid(const void *opaque) unsupported_params_init(mc, &data); smp_parse_test(ms, &data, true); - - /* Unsupported parameters can be provided with their values as 1 */ - data.config.has_dies = true; - data.config.dies = 1; - smp_parse_test(ms, &data, true); } object_unref(obj); @@ -736,6 +1026,248 @@ static void test_with_clusters(const void *opaque) object_unref(obj); } +static void test_with_books(const void *opaque) +{ + const char *machine_type = opaque; + Object *obj = object_new(machine_type); + MachineState *ms = MACHINE(obj); + MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData data = {}; + unsigned int num_books = 2; + int i; + + for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { + data = data_generic_valid[i]; + unsupported_params_init(mc, &data); + + /* when books parameter is omitted, it will be set as 1 */ + data.expect_prefer_sockets.books = 1; + data.expect_prefer_cores.books = 1; + + smp_parse_test(ms, &data, true); + + /* when books parameter is specified */ + data.config.has_books = true; + data.config.books = num_books; + if (data.config.has_cpus) { + data.config.cpus *= num_books; + } + if (data.config.has_maxcpus) { + data.config.maxcpus *= num_books; + } + + data.expect_prefer_sockets.books = num_books; + data.expect_prefer_sockets.cpus *= num_books; + data.expect_prefer_sockets.max_cpus *= num_books; + data.expect_prefer_cores.books = num_books; + data.expect_prefer_cores.cpus *= num_books; + data.expect_prefer_cores.max_cpus *= num_books; + + smp_parse_test(ms, &data, true); + } + + for (i = 0; i < ARRAY_SIZE(data_with_books_invalid); i++) { + data = data_with_books_invalid[i]; + unsupported_params_init(mc, &data); + + smp_parse_test(ms, &data, false); + } + + object_unref(obj); +} + +static void test_with_drawers(const void *opaque) +{ + const char *machine_type = opaque; + Object *obj = object_new(machine_type); + MachineState *ms = MACHINE(obj); + MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData data = {}; + unsigned int num_drawers = 2; + int i; + + for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { + data = data_generic_valid[i]; + unsupported_params_init(mc, &data); + + /* when drawers parameter is omitted, it will be set as 1 */ + data.expect_prefer_sockets.drawers = 1; + data.expect_prefer_cores.drawers = 1; + + smp_parse_test(ms, &data, true); + + /* when drawers parameter is specified */ + data.config.has_drawers = true; + data.config.drawers = num_drawers; + if (data.config.has_cpus) { + data.config.cpus *= num_drawers; + } + if (data.config.has_maxcpus) { + data.config.maxcpus *= num_drawers; + } + + data.expect_prefer_sockets.drawers = num_drawers; + data.expect_prefer_sockets.cpus *= num_drawers; + data.expect_prefer_sockets.max_cpus *= num_drawers; + data.expect_prefer_cores.drawers = num_drawers; + data.expect_prefer_cores.cpus *= num_drawers; + data.expect_prefer_cores.max_cpus *= num_drawers; + + smp_parse_test(ms, &data, true); + } + + for (i = 0; i < ARRAY_SIZE(data_with_drawers_invalid); i++) { + data = data_with_drawers_invalid[i]; + unsupported_params_init(mc, &data); + + smp_parse_test(ms, &data, false); + } + + object_unref(obj); +} + +static void test_with_drawers_books(const void *opaque) +{ + const 
char *machine_type = opaque; + Object *obj = object_new(machine_type); + MachineState *ms = MACHINE(obj); + MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData data = {}; + unsigned int num_drawers = 5, num_books = 3; + int i; + + for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { + data = data_generic_valid[i]; + unsupported_params_init(mc, &data); + + /* + * when drawers and books parameters are omitted, they will + * be both set as 1. + */ + data.expect_prefer_sockets.drawers = 1; + data.expect_prefer_sockets.books = 1; + data.expect_prefer_cores.drawers = 1; + data.expect_prefer_cores.books = 1; + + smp_parse_test(ms, &data, true); + + /* when drawers and books parameters are both specified */ + data.config.has_drawers = true; + data.config.drawers = num_drawers; + data.config.has_books = true; + data.config.books = num_books; + + if (data.config.has_cpus) { + data.config.cpus *= num_drawers * num_books; + } + if (data.config.has_maxcpus) { + data.config.maxcpus *= num_drawers * num_books; + } + + data.expect_prefer_sockets.drawers = num_drawers; + data.expect_prefer_sockets.books = num_books; + data.expect_prefer_sockets.cpus *= num_drawers * num_books; + data.expect_prefer_sockets.max_cpus *= num_drawers * num_books; + + data.expect_prefer_cores.drawers = num_drawers; + data.expect_prefer_cores.books = num_books; + data.expect_prefer_cores.cpus *= num_drawers * num_books; + data.expect_prefer_cores.max_cpus *= num_drawers * num_books; + + smp_parse_test(ms, &data, true); + } + + for (i = 0; i < ARRAY_SIZE(data_with_drawers_books_invalid); i++) { + data = data_with_drawers_books_invalid[i]; + unsupported_params_init(mc, &data); + + smp_parse_test(ms, &data, false); + } + + object_unref(obj); +} + +static void test_full_topo(const void *opaque) +{ + const char *machine_type = opaque; + Object *obj = object_new(machine_type); + MachineState *ms = MACHINE(obj); + MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData data = {}; + unsigned int drawers = 5, books = 3, dies = 2, clusters = 7, multiplier; + int i; + + multiplier = drawers * books * dies * clusters; + for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { + data = data_generic_valid[i]; + unsupported_params_init(mc, &data); + + /* + * when drawers, books, dies and clusters parameters are omitted, + * they will be set as 1. + */ + data.expect_prefer_sockets.drawers = 1; + data.expect_prefer_sockets.books = 1; + data.expect_prefer_sockets.dies = 1; + data.expect_prefer_sockets.clusters = 1; + data.expect_prefer_cores.drawers = 1; + data.expect_prefer_cores.books = 1; + data.expect_prefer_cores.dies = 1; + data.expect_prefer_cores.clusters = 1; + + smp_parse_test(ms, &data, true); + + /* when drawers, books, dies and clusters parameters are specified. 
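With drawers=5, books=3, dies=2 and clusters=7 this loop scales each generic entry by 5 * 3 * 2 * 7 = 210, so a hypothetical base entry with cpus=8 and maxcpus=8 becomes 1680/1680, still well under MAX_CPUS (4096). The arithmetic, spelled out:

    #include <stdio.h>

    int main(void)
    {
        unsigned drawers = 5, books = 3, dies = 2, clusters = 7;
        unsigned multiplier = drawers * books * dies * clusters;  /* 210 */
        unsigned base_cpus = 8, base_maxcpus = 8;                 /* hypothetical entry */

        printf("multiplier = %u\n", multiplier);
        printf("scaled cpus = %u, scaled maxcpus = %u\n",
               base_cpus * multiplier, base_maxcpus * multiplier); /* 1680 / 1680 */
        return 0;
    }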
*/ + data.config.has_drawers = true; + data.config.drawers = drawers; + data.config.has_books = true; + data.config.books = books; + data.config.has_dies = true; + data.config.dies = dies; + data.config.has_clusters = true; + data.config.clusters = clusters; + + if (data.config.has_cpus) { + data.config.cpus *= multiplier; + } + if (data.config.has_maxcpus) { + data.config.maxcpus *= multiplier; + } + + data.expect_prefer_sockets.drawers = drawers; + data.expect_prefer_sockets.books = books; + data.expect_prefer_sockets.dies = dies; + data.expect_prefer_sockets.clusters = clusters; + data.expect_prefer_sockets.cpus *= multiplier; + data.expect_prefer_sockets.max_cpus *= multiplier; + + data.expect_prefer_cores.drawers = drawers; + data.expect_prefer_cores.books = books; + data.expect_prefer_cores.dies = dies; + data.expect_prefer_cores.clusters = clusters; + data.expect_prefer_cores.cpus *= multiplier; + data.expect_prefer_cores.max_cpus *= multiplier; + + smp_parse_test(ms, &data, true); + } + + for (i = 0; i < ARRAY_SIZE(data_full_topo_invalid); i++) { + data = data_full_topo_invalid[i]; + unsupported_params_init(mc, &data); + + smp_parse_test(ms, &data, false); + } + + for (i = 0; i < ARRAY_SIZE(data_zero_topo_invalid); i++) { + data = data_zero_topo_invalid[i]; + unsupported_params_init(mc, &data); + + smp_parse_test(ms, &data, false); + } + + object_unref(obj); +} + /* Type info of the tested machine */ static const TypeInfo smp_machine_types[] = { { @@ -760,6 +1292,22 @@ static const TypeInfo smp_machine_types[] = { .name = MACHINE_TYPE_NAME("smp-with-clusters"), .parent = TYPE_MACHINE, .class_init = machine_with_clusters_class_init, + }, { + .name = MACHINE_TYPE_NAME("smp-with-books"), + .parent = TYPE_MACHINE, + .class_init = machine_with_books_class_init, + }, { + .name = MACHINE_TYPE_NAME("smp-with-drawers"), + .parent = TYPE_MACHINE, + .class_init = machine_with_drawers_class_init, + }, { + .name = MACHINE_TYPE_NAME("smp-with-drawers-books"), + .parent = TYPE_MACHINE, + .class_init = machine_with_drawers_books_class_init, + }, { + .name = MACHINE_TYPE_NAME("smp-full-topo"), + .parent = TYPE_MACHINE, + .class_init = machine_full_topo_class_init, } }; @@ -783,6 +1331,18 @@ int main(int argc, char *argv[]) g_test_add_data_func("/test-smp-parse/with_clusters", MACHINE_TYPE_NAME("smp-with-clusters"), test_with_clusters); + g_test_add_data_func("/test-smp-parse/with_books", + MACHINE_TYPE_NAME("smp-with-books"), + test_with_books); + g_test_add_data_func("/test-smp-parse/with_drawers", + MACHINE_TYPE_NAME("smp-with-drawers"), + test_with_drawers); + g_test_add_data_func("/test-smp-parse/with_drawers_books", + MACHINE_TYPE_NAME("smp-with-drawers-books"), + test_with_drawers_books); + g_test_add_data_func("/test-smp-parse/full", + MACHINE_TYPE_NAME("smp-full-topo"), + test_full_topo); g_test_run(); diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c index 2146cfacd36..24032a02667 100644 --- a/tests/unit/test-throttle.c +++ b/tests/unit/test-throttle.c @@ -127,13 +127,13 @@ static void test_compute_wait(void) bkt.avg = 10; bkt.max = 200; for (i = 0; i < 22; i++) { - double units = bkt.max / 10; + double units = bkt.max / 10.0; bkt.level += units; bkt.burst_level += units; throttle_leak_bucket(&bkt, NANOSECONDS_PER_SECOND / 10); wait = throttle_compute_wait(&bkt); g_assert(double_cmp(bkt.burst_level, 0)); - g_assert(double_cmp(bkt.level, (i + 1) * (bkt.max - bkt.avg) / 10)); + g_assert(double_cmp(bkt.level, (i + 1) * (bkt.max - bkt.avg) / 10.0)); /* We can do bursts for 
the 2 seconds we have configured in * burst_length. We have 100 extra milliseconds of burst * because bkt.level has been leaking during this time. diff --git a/tests/unit/test-util-filemonitor.c b/tests/unit/test-util-filemonitor.c index a22de275955..02e67fc96ac 100644 --- a/tests/unit/test-util-filemonitor.c +++ b/tests/unit/test-util-filemonitor.c @@ -360,6 +360,14 @@ test_file_monitor_events(void) { .type = QFILE_MONITOR_TEST_OP_EVENT, .filesrc = "one.txt", .watchid = &watch4, .eventid = QFILE_MONITOR_EVENT_DELETED }, +#ifdef __FreeBSD__ + { .type = QFILE_MONITOR_TEST_OP_EVENT, + .filesrc = "two.txt", .watchid = &watch0, + .eventid = QFILE_MONITOR_EVENT_DELETED }, + { .type = QFILE_MONITOR_TEST_OP_EVENT, + .filesrc = "two.txt", .watchid = &watch2, + .eventid = QFILE_MONITOR_EVENT_DELETED }, +#endif { .type = QFILE_MONITOR_TEST_OP_EVENT, .filesrc = "two.txt", .watchid = &watch0, .eventid = QFILE_MONITOR_EVENT_CREATED }, diff --git a/tests/unit/test-vmstate.c b/tests/unit/test-vmstate.c index 0b7d5ecd683..63f28f26f45 100644 --- a/tests/unit/test-vmstate.c +++ b/tests/unit/test-vmstate.c @@ -24,7 +24,6 @@ #include "qemu/osdep.h" -#include "../migration/migration.h" #include "migration/vmstate.h" #include "migration/qemu-file-types.h" #include "../migration/qemu-file.h" @@ -197,7 +196,7 @@ static const VMStateDescription vmstate_simple_primitive = { .name = "simple/primitive", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(b_1, TestSimple), VMSTATE_BOOL(b_2, TestSimple), VMSTATE_UINT8(u8_1, TestSimple), @@ -299,7 +298,7 @@ static const VMStateDescription vmstate_simple_arr = { .name = "simple/array", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16_ARRAY(u16_1, TestSimpleArray, 3), VMSTATE_END_OF_LIST() } @@ -341,7 +340,7 @@ static const VMStateDescription vmstate_versioned = { .name = "test/versioned", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(a, TestStruct), VMSTATE_UINT32_V(b, TestStruct, 2), /* Versioned field in the middle, so * we catch bugs more easily. 
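The recurring (VMStateField[]) to (const VMStateField[]) change in this file const-qualifies the file-scope compound literals, presumably so the tables match a const-qualified fields pointer and can live in read-only storage. A minimal standalone illustration of the shape (the type names here are placeholders, not the migration API):

    #include <stdio.h>

    typedef struct Field {
        const char *name;
        int offset;
    } Field;

    typedef struct Description {
        const char *name;
        const Field *fields;             /* pointer-to-const field table */
    } Description;

    static const Description desc = {
        .name = "test/example",
        .fields = (const Field[]) {      /* file-scope compound literal: static storage */
            { .name = "a", .offset = 0 },
            { .name = "b", .offset = 4 },
            { .name = NULL },            /* end-of-list marker, VMSTATE style */
        },
    };

    int main(void)
    {
        for (const Field *f = desc.fields; f->name; f++) {
            printf("%s at offset %d\n", f->name, f->offset);
        }
        return 0;
    }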
@@ -412,7 +411,7 @@ static const VMStateDescription vmstate_skipping = { .name = "test/skip", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(a, TestStruct), VMSTATE_UINT32(b, TestStruct), VMSTATE_UINT32_TEST(c, TestStruct, test_skip), @@ -524,7 +523,7 @@ const VMStateDescription vmsd_tst = { .name = "test/tst", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(i, TestStructTriv), VMSTATE_END_OF_LIST() } @@ -542,7 +541,7 @@ const VMStateDescription vmsd_arps = { .name = "test/arps", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(ar, TestArrayOfPtrToStuct, AR_SIZE, 0, vmsd_tst, TestStructTriv), VMSTATE_END_OF_LIST() @@ -630,7 +629,7 @@ const VMStateDescription vmsd_arpp = { .name = "test/arps", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_OF_POINTER(ar, TestArrayOfPtrToInt, AR_SIZE, 0, vmstate_info_int32, int32_t*), VMSTATE_END_OF_LIST() @@ -685,7 +684,7 @@ static const VMStateDescription vmstate_q_element = { .name = "test/queue-element", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(b, TestQtailqElement), VMSTATE_UINT8(u8, TestQtailqElement), VMSTATE_END_OF_LIST() @@ -696,7 +695,7 @@ static const VMStateDescription vmstate_q = { .name = "test/queue", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT16(i16, TestQtailq), VMSTATE_QTAILQ_V(q, TestQtailq, 1, vmstate_q_element, TestQtailqElement, next), @@ -821,7 +820,7 @@ typedef struct TestGTreeInterval { .name = "interval", \ .version_id = 1, \ .minimum_version_id = 1, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT64(low, TestGTreeInterval), \ VMSTATE_UINT64(high, TestGTreeInterval), \ VMSTATE_END_OF_LIST() \ @@ -839,7 +838,7 @@ typedef struct TestGTreeMapping { .name = "mapping", \ .version_id = 1, \ .minimum_version_id = 1, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT64(phys_addr, TestGTreeMapping), \ VMSTATE_UINT32(flags, TestGTreeMapping), \ VMSTATE_END_OF_LIST() \ @@ -915,7 +914,7 @@ static const VMStateDescription vmstate_domain = { .version_id = 1, .minimum_version_id = 1, .pre_load = domain_preload, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(id, TestGTreeDomain), VMSTATE_GTREE_V(mappings, TestGTreeDomain, 1, vmstate_interval_mapping, @@ -940,7 +939,7 @@ static const VMStateDescription vmstate_qlist_element = { .name = "test/queue list", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, TestQListElement), VMSTATE_END_OF_LIST() } @@ -951,7 +950,7 @@ static const VMStateDescription vmstate_iommu = { .version_id = 1, .minimum_version_id = 1, .pre_load = iommu_preload, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(id, TestGTreeIOMMU), VMSTATE_GTREE_DIRECT_KEY_V(domains, TestGTreeIOMMU, 1, &vmstate_domain, TestGTreeDomain), @@ -963,7 +962,7 @@ static const VMStateDescription vmstate_container = { .name = "test/container/qlist", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_UINT32(id, TestQListContainer), VMSTATE_QLIST_V(list, TestQListContainer, 1, vmstate_qlist_element, TestQListElement, next), @@ -1414,7 +1413,7 @@ static int tmp_child_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_tmp_back_to_parent = { .name = "test/tmp_child_parent", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(f, TestStruct), VMSTATE_END_OF_LIST() } @@ -1424,7 +1423,7 @@ static const VMStateDescription vmstate_tmp_child = { .name = "test/tmp_child", .pre_save = tmp_child_pre_save, .post_load = tmp_child_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(diff, TmpTestStruct), VMSTATE_STRUCT_POINTER(parent, TmpTestStruct, vmstate_tmp_back_to_parent, TestStruct), @@ -1435,7 +1434,7 @@ static const VMStateDescription vmstate_tmp_child = { static const VMStateDescription vmstate_with_tmp = { .name = "test/with_tmp", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(a, TestStruct), VMSTATE_UINT64(d, TestStruct), VMSTATE_WITH_TMP(TestStruct, TmpTestStruct, vmstate_tmp_child), diff --git a/tests/vm/basevm.py b/tests/vm/basevm.py index e38159a6fda..4a1af04b9a7 100644 --- a/tests/vm/basevm.py +++ b/tests/vm/basevm.py @@ -423,6 +423,8 @@ def console_ssh_init(self, prompt, user, pw): def console_sshd_config(self, prompt): self.console_wait(prompt) self.console_send("echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config\n") + self.console_wait(prompt) + self.console_send("echo 'UseDNS no' >> /etc/ssh/sshd_config\n") for var in self.envvars: self.console_wait(prompt) self.console_send("echo 'AcceptEnv %s' >> /etc/ssh/sshd_config\n" % var) @@ -604,7 +606,7 @@ def get_default_jobs(): parser.add_argument("--build-qemu", help="build QEMU from source in guest") parser.add_argument("--build-target", - help="QEMU build target", default="check") + help="QEMU build target", default="all check") parser.add_argument("--build-path", default=None, help="Path of build directory, "\ "for using build tree QEMU binary. 
") diff --git a/tests/vm/freebsd b/tests/vm/freebsd index b581bd17fb7..1247f40a385 100755 --- a/tests/vm/freebsd +++ b/tests/vm/freebsd @@ -108,6 +108,7 @@ class FreeBSDVM(basevm.BaseVM): prompt = "root@freebsd:~ #" self.console_ssh_init(prompt, "root", self._config["root_pass"]) self.console_sshd_config(prompt) + self.console_wait_send(prompt, "service sshd reload\n") # setup virtio-blk #1 (tarfile) self.console_wait(prompt) diff --git a/tests/vm/netbsd b/tests/vm/netbsd index 649fcad3538..a3f6dd6b3c8 100755 --- a/tests/vm/netbsd +++ b/tests/vm/netbsd @@ -31,7 +31,6 @@ class NetBSDVM(basevm.BaseVM): "pkgconf", "xz", "python311", - "py311-expat", "ninja-build", # gnu tools diff --git a/tools/ebpf/rss.bpf.c b/tools/ebpf/rss.bpf.c index 20f227e2acc..9715d1170e4 100644 --- a/tools/ebpf/rss.bpf.c +++ b/tools/ebpf/rss.bpf.c @@ -81,6 +81,7 @@ struct { __uint(key_size, sizeof(__u32)); __uint(value_size, sizeof(struct rss_config_t)); __uint(max_entries, 1); + __uint(map_flags, BPF_F_MMAPABLE); } tap_rss_map_configurations SEC(".maps"); struct { @@ -88,6 +89,7 @@ struct { __uint(key_size, sizeof(__u32)); __uint(value_size, sizeof(struct toeplitz_key_data_t)); __uint(max_entries, 1); + __uint(map_flags, BPF_F_MMAPABLE); } tap_rss_map_toeplitz_key SEC(".maps"); struct { @@ -95,6 +97,7 @@ struct { __uint(key_size, sizeof(__u32)); __uint(value_size, sizeof(__u16)); __uint(max_entries, INDIRECTION_TABLE_SIZE); + __uint(map_flags, BPF_F_MMAPABLE); } tap_rss_map_indirection_table SEC(".maps"); static inline void net_rx_rss_add_chunk(__u8 *rss_input, size_t *bytes_written, @@ -317,7 +320,7 @@ static inline int parse_packet(struct __sk_buff *skb, info->in_src = ip.saddr; info->in_dst = ip.daddr; - info->is_fragmented = !!ip.frag_off; + info->is_fragmented = !!(bpf_ntohs(ip.frag_off) & (0x2000 | 0x1fff)); l4_protocol = ip.protocol; l4_offset = ip.ihl * 4; @@ -528,7 +531,7 @@ static inline __u32 calculate_rss_hash(struct __sk_buff *skb, return result; } -SEC("tun_rss_steering") +SEC("socket") int tun_rss_steering_prog(struct __sk_buff *skb) { diff --git a/trace/meson.build b/trace/meson.build index b0d31a67e68..c3412dc0ba5 100644 --- a/trace/meson.build +++ b/trace/meson.build @@ -64,7 +64,7 @@ trace_events_all = custom_target('trace-events-all', input: trace_events_files, command: [ 'cat', '@INPUT@' ], capture: true, - install: true, + install: get_option('trace_backends') != [ 'nop' ], install_dir: qemu_datadir) if 'ust' in get_option('trace_backends') diff --git a/ui/cocoa.m b/ui/cocoa.m index cd069da6965..25e0db9dd0b 100644 --- a/ui/cocoa.m +++ b/ui/cocoa.m @@ -54,6 +54,10 @@ #define MAC_OS_X_VERSION_10_13 101300 #endif +#ifndef MAC_OS_VERSION_14_0 +#define MAC_OS_VERSION_14_0 140000 +#endif + /* 10.14 deprecates NSOnState and NSOffState in favor of * NSControlStateValueOn/Off, which were introduced in 10.13. 
* Define for older versions @@ -89,7 +93,6 @@ static void cocoa_switch(DisplayChangeListener *dcl, static void cocoa_refresh(DisplayChangeListener *dcl); -static NSWindow *normalWindow; static const DisplayChangeListenerOps dcl_ops = { .dpy_name = "cocoa", .dpy_gfx_update = cocoa_update, @@ -99,12 +102,12 @@ static void cocoa_switch(DisplayChangeListener *dcl, static DisplayChangeListener dcl = { .ops = &dcl_ops, }; -static int last_buttons; +static QKbdState *kbd; static int cursor_hide = 1; static int left_command_key_enabled = 1; static bool swap_opt_cmd; -static bool stretch_video; +static CGInterpolationQuality zoom_interpolation = kCGInterpolationNone; static NSTextField *pauseLabel; static bool allow_events; @@ -113,33 +116,33 @@ static void cocoa_switch(DisplayChangeListener *dcl, static QemuClipboardInfo *cbinfo; static QemuEvent cbevent; -// Utility functions to run specified code block with iothread lock held +// Utility functions to run specified code block with the BQL held typedef void (^CodeBlock)(void); typedef bool (^BoolCodeBlock)(void); -static void with_iothread_lock(CodeBlock block) +static void with_bql(CodeBlock block) { - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); if (!locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } block(); if (!locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } } -static bool bool_with_iothread_lock(BoolCodeBlock block) +static bool bool_with_bql(BoolCodeBlock block) { - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); bool val; if (!locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } val = block(); if (!locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } return val; } @@ -305,19 +308,14 @@ static void handleAnyDeviceErrors(Error * err) @interface QemuCocoaView : NSView { QEMUScreen screen; - NSWindow *fullScreenWindow; - float cx,cy,cw,ch,cdx,cdy; pixman_image_t *pixman_image; - QKbdState *kbd; BOOL isMouseGrabbed; - BOOL isFullscreen; BOOL isAbsoluteEnabled; CFMachPortRef eventsTap; } - (void) switchSurface:(pixman_image_t *)image; - (void) grabMouse; - (void) ungrabMouse; -- (void) toggleFullScreen:(id)sender; - (void) setFullGrab:(id)sender; - (void) handleMonitorInput:(NSEvent *)event; - (bool) handleEvent:(NSEvent *)event; @@ -333,8 +331,6 @@ - (void) setAbsoluteEnabled:(BOOL)tIsAbsoluteEnabled; */ - (BOOL) isMouseGrabbed; - (BOOL) isAbsoluteEnabled; -- (float) cdx; -- (float) cdy; - (QEMUScreen) gscreen; - (void) raiseAllKeys; @end @@ -362,9 +358,24 @@ - (id)initWithFrame:(NSRect)frameRect self = [super initWithFrame:frameRect]; if (self) { + NSTrackingAreaOptions options = NSTrackingActiveInKeyWindow | + NSTrackingMouseEnteredAndExited | + NSTrackingMouseMoved | + NSTrackingInVisibleRect; + + NSTrackingArea *trackingArea = + [[NSTrackingArea alloc] initWithRect:CGRectZero + options:options + owner:self + userInfo:nil]; + + [self addTrackingArea:trackingArea]; + [trackingArea release]; screen.width = frameRect.size.width; screen.height = frameRect.size.height; - kbd = qkbd_state_init(dcl.con); +#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_14_0 + [self setClipsToBounds:YES]; +#endif } return self; @@ -378,8 +389,6 @@ - (void) dealloc pixman_image_unref(pixman_image); } - qkbd_state_free(kbd); - if (eventsTap) { CFRelease(eventsTap); } @@ -392,44 +401,23 @@ - (BOOL) isOpaque return YES; } -- (BOOL) screenContainsPoint:(NSPoint) p +- (void) viewDidMoveToWindow { - return (p.x > -1 && p.x < screen.width && p.y > -1 && p.y < screen.height); + [self 
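On the rss.bpf.c hunk earlier in this patch: the IPv4 frag_off word carries the DF bit (0x4000) and MF bit (0x2000) alongside the 13-bit fragment offset (0x1fff), and it arrives in network byte order, so the old !!ip.frag_off also flagged packets that merely had DF set. The new expression byte-swaps first and keeps only MF plus the offset. A standalone check of that mask:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* frag_off is assumed to be in host byte order here, i.e. after bpf_ntohs(). */
    static bool is_fragmented(uint16_t frag_off)
    {
        return frag_off & (0x2000 | 0x1fff);   /* MF set or non-zero offset */
    }

    int main(void)
    {
        assert(!is_fragmented(0x4000));   /* DF only: not a fragment */
        assert(is_fragmented(0x2000));    /* MF set: first fragment */
        assert(is_fragmented(0x00b9));    /* non-zero offset: later fragment */
        assert(!is_fragmented(0x0000));
        printf("fragment detection matches the new mask\n");
        return 0;
    }

With this mask a packet that only has DF set hashes like any other unfragmented packet, which appears to be the point of the change.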
resizeWindow]; } -/* Get location of event and convert to virtual screen coordinate */ -- (CGPoint) screenLocationOfEvent:(NSEvent *)ev +- (void) selectConsoleLocked:(unsigned int)index { - NSWindow *eventWindow = [ev window]; - // XXX: Use CGRect and -convertRectFromScreen: to support macOS 10.10 - CGRect r = CGRectZero; - r.origin = [ev locationInWindow]; - if (!eventWindow) { - if (!isFullscreen) { - return [[self window] convertRectFromScreen:r].origin; - } else { - CGPoint locationInSelfWindow = [[self window] convertRectFromScreen:r].origin; - CGPoint loc = [self convertPoint:locationInSelfWindow fromView:nil]; - if (stretch_video) { - loc.x /= cdx; - loc.y /= cdy; - } - return loc; - } - } else if ([[self window] isEqual:eventWindow]) { - if (!isFullscreen) { - return r.origin; - } else { - CGPoint loc = [self convertPoint:r.origin fromView:nil]; - if (stretch_video) { - loc.x /= cdx; - loc.y /= cdy; - } - return loc; - } - } else { - return [[self window] convertRectFromScreen:[eventWindow convertRectToScreen:r]].origin; + QemuConsole *con = qemu_console_lookup_by_index(index); + if (!con) { + return; } + + unregister_displaychangelistener(&dcl); + qkbd_state_switch_console(kbd, con); + dcl.con = con; + register_displaychangelistener(&dcl); + [self updateUIInfo]; } - (void) hideCursor @@ -455,7 +443,7 @@ - (void) drawRect:(NSRect) rect // get CoreGraphic context CGContextRef viewContextRef = [[NSGraphicsContext currentContext] CGContext]; - CGContextSetInterpolationQuality (viewContextRef, kCGInterpolationNone); + CGContextSetInterpolationQuality (viewContextRef, zoom_interpolation); CGContextSetShouldAntialias (viewContextRef, NO); // draw screen bitmap directly to Core Graphics context @@ -497,10 +485,8 @@ - (void) drawRect:(NSRect) rect [self getRectsBeingDrawn:&rectList count:&rectCount]; for (i = 0; i < rectCount; i++) { - clipRect.origin.x = rectList[i].origin.x / cdx; - clipRect.origin.y = (float)h - (rectList[i].origin.y + rectList[i].size.height) / cdy; - clipRect.size.width = rectList[i].size.width / cdx; - clipRect.size.height = rectList[i].size.height / cdy; + clipRect = rectList[i]; + clipRect.origin.y = (float)h - (clipRect.origin.y + clipRect.size.height); clipImageRef = CGImageCreateWithImageInRect( imageRef, clipRect @@ -513,42 +499,75 @@ - (void) drawRect:(NSRect) rect } } -- (void) setContentDimensions +- (NSSize)fixAspectRatio:(NSSize)max { - COCOA_DEBUG("QemuCocoaView: setContentDimensions\n"); + NSSize scaled; + NSSize fixed; - if (isFullscreen) { - cdx = [[NSScreen mainScreen] frame].size.width / (float)screen.width; - cdy = [[NSScreen mainScreen] frame].size.height / (float)screen.height; + scaled.width = screen.width * max.height; + scaled.height = screen.height * max.width; - /* stretches video, but keeps same aspect ratio */ - if (stretch_video == true) { - /* use smallest stretch value - prevents clipping on sides */ - if (MIN(cdx, cdy) == cdx) { - cdy = cdx; - } else { - cdx = cdy; - } - } else { /* No stretching */ - cdx = cdy = 1; - } - cw = screen.width * cdx; - ch = screen.height * cdy; - cx = ([[NSScreen mainScreen] frame].size.width - cw) / 2.0; - cy = ([[NSScreen mainScreen] frame].size.height - ch) / 2.0; + /* + * Here screen is our guest's output size, and max is the size of the + * largest possible area of the screen we can display on. 
+ * We want to scale up (screen.width x screen.height) by either: + * 1) max.height / screen.height + * 2) max.width / screen.width + * With the first scale factor the scale will result in an output height of + * max.height (i.e. we will fill the whole height of the available screen + * space and have black bars left and right) and with the second scale + * factor the scaling will result in an output width of max.width (i.e. we + * fill the whole width of the available screen space and have black bars + * top and bottom). We need to pick whichever keeps the whole of the guest + * output on the screen, which is to say the smaller of the two scale + * factors. + * To avoid doing more division than strictly necessary, instead of directly + * comparing scale factors 1 and 2 we instead calculate and compare those + * two scale factors multiplied by (screen.height * screen.width). + */ + if (scaled.width < scaled.height) { + fixed.width = scaled.width / screen.height; + fixed.height = max.height; } else { - cx = 0; - cy = 0; - cw = screen.width; - ch = screen.height; - cdx = 1.0; - cdy = 1.0; + fixed.width = max.width; + fixed.height = scaled.height / screen.width; } + + return fixed; +} + +- (NSSize) screenSafeAreaSize +{ + NSSize size = [[[self window] screen] frame].size; + NSEdgeInsets insets = [[[self window] screen] safeAreaInsets]; + size.width -= insets.left + insets.right; + size.height -= insets.top + insets.bottom; + return size; +} + +- (void) resizeWindow +{ + [[self window] setContentAspectRatio:NSMakeSize(screen.width, screen.height)]; + + if (!([[self window] styleMask] & NSWindowStyleMaskResizable)) { + [[self window] setContentSize:NSMakeSize(screen.width, screen.height)]; + [[self window] center]; + } else if ([[self window] styleMask] & NSWindowStyleMaskFullScreen) { + [[self window] setContentSize:[self fixAspectRatio:[self screenSafeAreaSize]]]; + [[self window] center]; + } else { + [[self window] setContentSize:[self fixAspectRatio:[self frame].size]]; + } +} + +- (void) updateBounds +{ + [self setBoundsSize:NSMakeSize(screen.width, screen.height)]; } - (void) updateUIInfoLocked { - /* Must be called with the iothread lock, i.e. via updateUIInfo */ + /* Must be called with the BQL, i.e. via updateUIInfo */ NSSize frameSize; QemuUIInfo info; @@ -561,9 +580,10 @@ - (void) updateUIInfoLocked CGDirectDisplayID display = [[description objectForKey:@"NSScreenNumber"] unsignedIntValue]; NSSize screenSize = [[[self window] screen] frame].size; CGSize screenPhysicalSize = CGDisplayScreenSize(display); + bool isFullscreen = ([[self window] styleMask] & NSWindowStyleMaskFullScreen) != 0; CVDisplayLinkRef displayLink; - frameSize = isFullscreen ? screenSize : [self frame].size; + frameSize = isFullscreen ? [self screenSafeAreaSize] : [self frame].size; if (!CVDisplayLinkCreateWithCGDisplay(display, &displayLink)) { CVTime period = CVDisplayLinkGetNominalOutputVideoRefreshPeriod(displayLink); @@ -605,36 +625,25 @@ - (void) updateUIInfo return; } - with_iothread_lock(^{ + with_bql(^{ [self updateUIInfoLocked]; }); } -- (void)viewDidMoveToWindow -{ - [self updateUIInfo]; -} - - (void) switchSurface:(pixman_image_t *)image { COCOA_DEBUG("QemuCocoaView: switchSurface\n"); int w = pixman_image_get_width(image); int h = pixman_image_get_height(image); - /* cdx == 0 means this is our very first surface, in which case we need - * to recalculate the content dimensions even if it happens to be the size - * of the initial empty window. 
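The fixAspectRatio: comment above compares the two candidate scale factors by cross-multiplication so that only one division is needed at the end. The same arithmetic in plain C, stripped of the Cocoa types:

    #include <assert.h>
    #include <stdio.h>

    struct size { double width, height; };

    static struct size fix_aspect_ratio(struct size guest, struct size max)
    {
        double scaled_w = guest.width * max.height;
        double scaled_h = guest.height * max.width;
        struct size fixed;

        if (scaled_w < scaled_h) {        /* height is the limiting dimension */
            fixed.width = scaled_w / guest.height;
            fixed.height = max.height;
        } else {                          /* width is the limiting dimension */
            fixed.width = max.width;
            fixed.height = scaled_h / guest.width;
        }
        return fixed;
    }

    int main(void)
    {
        struct size guest = { 1024, 768 }, area = { 2560, 1440 };
        struct size out = fix_aspect_ratio(guest, area);

        /* scaled 1.875x to 1920x1440, with black bars left and right */
        assert(out.width == 1920 && out.height == 1440);
        printf("%gx%g -> %gx%g\n", guest.width, guest.height, out.width, out.height);
        return 0;
    }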
- */ - bool isResize = (w != screen.width || h != screen.height || cdx == 0.0); - int oldh = screen.height; - if (isResize) { + if (w != screen.width || h != screen.height) { // Resize before we trigger the redraw, or we'll redraw at the wrong size COCOA_DEBUG("switchSurface: new size %d x %d\n", w, h); screen.width = w; screen.height = h; - [self setContentDimensions]; - [self setFrame:NSMakeRect(cx, cy, cw, ch)]; + [self resizeWindow]; + [self updateBounds]; } // update screenBuffer @@ -643,51 +652,6 @@ - (void) switchSurface:(pixman_image_t *)image } pixman_image = image; - - // update windows - if (isFullscreen) { - [[fullScreenWindow contentView] setFrame:[[NSScreen mainScreen] frame]]; - [normalWindow setFrame:NSMakeRect([normalWindow frame].origin.x, [normalWindow frame].origin.y - h + oldh, w, h + [normalWindow frame].size.height - oldh) display:NO animate:NO]; - } else { - if (qemu_name) - [normalWindow setTitle:[NSString stringWithFormat:@"QEMU %s", qemu_name]]; - [normalWindow setFrame:NSMakeRect([normalWindow frame].origin.x, [normalWindow frame].origin.y - h + oldh, w, h + [normalWindow frame].size.height - oldh) display:YES animate:NO]; - } - - if (isResize) { - [normalWindow center]; - } -} - -- (void) toggleFullScreen:(id)sender -{ - COCOA_DEBUG("QemuCocoaView: toggleFullScreen\n"); - - if (isFullscreen) { // switch from fullscreen to desktop - isFullscreen = FALSE; - [self ungrabMouse]; - [self setContentDimensions]; - [fullScreenWindow close]; - [normalWindow setContentView: self]; - [normalWindow makeKeyAndOrderFront: self]; - [NSMenu setMenuBarVisible:YES]; - } else { // switch from desktop to fullscreen - isFullscreen = TRUE; - [normalWindow orderOut: nil]; /* Hide the window */ - [self grabMouse]; - [self setContentDimensions]; - [NSMenu setMenuBarVisible:NO]; - fullScreenWindow = [[NSWindow alloc] initWithContentRect:[[NSScreen mainScreen] frame] - styleMask:NSWindowStyleMaskBorderless - backing:NSBackingStoreBuffered - defer:NO]; - [fullScreenWindow setAcceptsMouseMovedEvents: YES]; - [fullScreenWindow setHasShadow:NO]; - [fullScreenWindow setBackgroundColor: [NSColor blackColor]]; - [self setFrame:NSMakeRect(cx, cy, cw, ch)]; - [[fullScreenWindow contentView] addSubview: self]; - [fullScreenWindow makeKeyAndOrderFront:self]; - } } - (void) setFullGrab:(id)sender @@ -784,13 +748,14 @@ - (void) handleMonitorInput:(NSEvent *)event } if (keysym) { - qemu_text_console_put_keysym(NULL, keysym); + QemuTextConsole *con = QEMU_TEXT_CONSOLE(dcl.con); + qemu_text_console_put_keysym(con, keysym); } } - (bool) handleEvent:(NSEvent *)event { - return bool_with_iothread_lock(^{ + return bool_with_bql(^{ return [self handleEventLocked:event]; }); } @@ -799,11 +764,8 @@ - (bool) handleEventLocked:(NSEvent *)event { /* Return true if we handled the event, false if it should be given to OSX */ COCOA_DEBUG("QemuCocoaView: handleEvent\n"); - int buttons = 0; + InputButton button; int keycode = 0; - bool mouse_event = false; - // Location of event in virtual screen coordinates - NSPoint p = [self screenLocationOfEvent:event]; NSUInteger modifiers = [event modifierFlags]; /* @@ -947,7 +909,7 @@ - (bool) handleEventLocked:(NSEvent *)event } break; } - break; + return true; case NSEventTypeKeyDown: keycode = cocoa_keycode_to_qemu([event keyCode]); @@ -967,7 +929,7 @@ - (bool) handleEventLocked:(NSEvent *)event // enable graphic console case '1' ... 
'9': - console_select(key - '0' - 1); /* ascii math */ + [self selectConsoleLocked:key - '0' - 1]; /* ascii math */ return true; // release the mouse grab @@ -978,12 +940,12 @@ - (bool) handleEventLocked:(NSEvent *)event } } - if (qemu_console_is_graphic(NULL)) { + if (qemu_console_is_graphic(dcl.con)) { qkbd_state_key_event(kbd, keycode, true); } else { [self handleMonitorInput: event]; } - break; + return true; case NSEventTypeKeyUp: keycode = cocoa_keycode_to_qemu([event keyCode]); @@ -993,156 +955,154 @@ - (bool) handleEventLocked:(NSEvent *)event return true; } - if (qemu_console_is_graphic(NULL)) { + if (qemu_console_is_graphic(dcl.con)) { qkbd_state_key_event(kbd, keycode, false); } - break; - case NSEventTypeMouseMoved: - if (isAbsoluteEnabled) { - // Cursor re-entered into a window might generate events bound to screen coordinates - // and `nil` window property, and in full screen mode, current window might not be - // key window, where event location alone should suffice. - if (![self screenContainsPoint:p] || !([[self window] isKeyWindow] || isFullscreen)) { - if (isMouseGrabbed) { - [self ungrabMouse]; - } - } else { - if (!isMouseGrabbed) { - [self grabMouse]; - } - } - } - mouse_event = true; - break; - case NSEventTypeLeftMouseDown: - buttons |= MOUSE_EVENT_LBUTTON; - mouse_event = true; - break; - case NSEventTypeRightMouseDown: - buttons |= MOUSE_EVENT_RBUTTON; - mouse_event = true; - break; - case NSEventTypeOtherMouseDown: - buttons |= MOUSE_EVENT_MBUTTON; - mouse_event = true; - break; - case NSEventTypeLeftMouseDragged: - buttons |= MOUSE_EVENT_LBUTTON; - mouse_event = true; - break; - case NSEventTypeRightMouseDragged: - buttons |= MOUSE_EVENT_RBUTTON; - mouse_event = true; - break; - case NSEventTypeOtherMouseDragged: - buttons |= MOUSE_EVENT_MBUTTON; - mouse_event = true; - break; - case NSEventTypeLeftMouseUp: - mouse_event = true; - if (!isMouseGrabbed && [self screenContainsPoint:p]) { - /* - * In fullscreen mode, the window of cocoaView may not be the - * key window, therefore the position relative to the virtual - * screen alone will be sufficient. - */ - if(isFullscreen || [[self window] isKeyWindow]) { - [self grabMouse]; - } - } - break; - case NSEventTypeRightMouseUp: - mouse_event = true; - break; - case NSEventTypeOtherMouseUp: - mouse_event = true; - break; + return true; case NSEventTypeScrollWheel: /* * Send wheel events to the guest regardless of window focus. * This is in-line with standard Mac OS X UI behaviour. */ - /* - * We shouldn't have got a scroll event when deltaY and delta Y - * are zero, hence no harm in dropping the event - */ - if ([event deltaY] != 0 || [event deltaX] != 0) { /* Determine if this is a scroll up or scroll down event */ - if ([event deltaY] != 0) { - buttons = ([event deltaY] > 0) ? + if ([event deltaY] != 0) { + button = ([event deltaY] > 0) ? INPUT_BUTTON_WHEEL_UP : INPUT_BUTTON_WHEEL_DOWN; - } else if ([event deltaX] != 0) { - buttons = ([event deltaX] > 0) ? + } else if ([event deltaX] != 0) { + button = ([event deltaX] > 0) ? INPUT_BUTTON_WHEEL_LEFT : INPUT_BUTTON_WHEEL_RIGHT; - } - - qemu_input_queue_btn(dcl.con, buttons, true); - qemu_input_event_sync(); - qemu_input_queue_btn(dcl.con, buttons, false); - qemu_input_event_sync(); + } else { + /* + * We shouldn't have got a scroll event when deltaY and delta Y + * are zero, hence no harm in dropping the event + */ + return true; } - /* - * Since deltaX/deltaY also report scroll wheel events we prevent mouse - * movement code from executing. 
- */ - mouse_event = false; - break; + qemu_input_queue_btn(dcl.con, button, true); + qemu_input_event_sync(); + qemu_input_queue_btn(dcl.con, button, false); + qemu_input_event_sync(); + + return true; default: return false; } +} - if (mouse_event) { - /* Don't send button events to the guest unless we've got a - * mouse grab or window focus. If we have neither then this event - * is the user clicking on the background window to activate and - * bring us to the front, which will be done by the sendEvent - * call below. We definitely don't want to pass that click through - * to the guest. - */ - if ((isMouseGrabbed || [[self window] isKeyWindow]) && - (last_buttons != buttons)) { - static uint32_t bmap[INPUT_BUTTON__MAX] = { - [INPUT_BUTTON_LEFT] = MOUSE_EVENT_LBUTTON, - [INPUT_BUTTON_MIDDLE] = MOUSE_EVENT_MBUTTON, - [INPUT_BUTTON_RIGHT] = MOUSE_EVENT_RBUTTON - }; - qemu_input_update_buttons(dcl.con, bmap, last_buttons, buttons); - last_buttons = buttons; - } - if (isMouseGrabbed) { - if (isAbsoluteEnabled) { - /* Note that the origin for Cocoa mouse coords is bottom left, not top left. - * The check on screenContainsPoint is to avoid sending out of range values for - * clicks in the titlebar. - */ - if ([self screenContainsPoint:p]) { - qemu_input_queue_abs(dcl.con, INPUT_AXIS_X, p.x, 0, screen.width); - qemu_input_queue_abs(dcl.con, INPUT_AXIS_Y, screen.height - p.y, 0, screen.height); - } - } else { - qemu_input_queue_rel(dcl.con, INPUT_AXIS_X, (int)[event deltaX]); - qemu_input_queue_rel(dcl.con, INPUT_AXIS_Y, (int)[event deltaY]); - } +- (void) handleMouseEvent:(NSEvent *)event button:(InputButton)button down:(bool)down +{ + if (!isMouseGrabbed) { + return; + } + + with_bql(^{ + qemu_input_queue_btn(dcl.con, button, down); + }); + + [self handleMouseEvent:event]; +} + +- (void) handleMouseEvent:(NSEvent *)event +{ + if (!isMouseGrabbed) { + return; + } + + with_bql(^{ + if (isAbsoluteEnabled) { + CGFloat d = (CGFloat)screen.height / [self frame].size.height; + NSPoint p = [event locationInWindow]; + + /* Note that the origin for Cocoa mouse coords is bottom left, not top left. 
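The absolute-coordinate path in handleMouseEvent: relies on the window's content aspect ratio being locked to the guest's (see resizeWindow above), so one scale factor derived from the heights covers both axes, and only Y needs flipping for Cocoa's bottom-left origin. A standalone restatement of that mapping:

    #include <assert.h>
    #include <stdio.h>

    static void map_point(double px, double py,           /* view coords, origin bottom-left */
                          double view_w, double view_h,   /* view size */
                          double guest_w, double guest_h, /* guest resolution */
                          double *gx, double *gy)
    {
        double d = guest_h / view_h;

        assert(guest_w / view_w == d);   /* holds because the aspect ratio is locked */
        *gx = px * d;
        *gy = guest_h - py * d;          /* flip: guest origin is top-left */
    }

    int main(void)
    {
        double gx, gy;

        /* a 1024x768 guest shown in a 512x384 view: bottom-left corner */
        map_point(0, 0, 512, 384, 1024, 768, &gx, &gy);
        assert(gx == 0 && gy == 768);

        /* the centre of the view maps to the centre of the guest screen */
        map_point(256, 192, 512, 384, 1024, 768, &gx, &gy);
        assert(gx == 512 && gy == 384);

        printf("mapping behaves as expected\n");
        return 0;
    }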
*/ + qemu_input_queue_abs(dcl.con, INPUT_AXIS_X, p.x * d, 0, screen.width); + qemu_input_queue_abs(dcl.con, INPUT_AXIS_Y, screen.height - p.y * d, 0, screen.height); } else { - return false; + qemu_input_queue_rel(dcl.con, INPUT_AXIS_X, [event deltaX]); + qemu_input_queue_rel(dcl.con, INPUT_AXIS_Y, [event deltaY]); } + qemu_input_event_sync(); + }); +} + +- (void) mouseExited:(NSEvent *)event +{ + if (isAbsoluteEnabled && isMouseGrabbed) { + [self ungrabMouse]; } - return true; +} + +- (void) mouseEntered:(NSEvent *)event +{ + if (isAbsoluteEnabled && !isMouseGrabbed) { + [self grabMouse]; + } +} + +- (void) mouseMoved:(NSEvent *)event +{ + [self handleMouseEvent:event]; +} + +- (void) mouseDown:(NSEvent *)event +{ + [self handleMouseEvent:event button:INPUT_BUTTON_LEFT down:true]; +} + +- (void) rightMouseDown:(NSEvent *)event +{ + [self handleMouseEvent:event button:INPUT_BUTTON_RIGHT down:true]; +} + +- (void) otherMouseDown:(NSEvent *)event +{ + [self handleMouseEvent:event button:INPUT_BUTTON_MIDDLE down:true]; +} + +- (void) mouseDragged:(NSEvent *)event +{ + [self handleMouseEvent:event]; +} + +- (void) rightMouseDragged:(NSEvent *)event +{ + [self handleMouseEvent:event]; +} + +- (void) otherMouseDragged:(NSEvent *)event +{ + [self handleMouseEvent:event]; +} + +- (void) mouseUp:(NSEvent *)event +{ + if (!isMouseGrabbed) { + [self grabMouse]; + } + + [self handleMouseEvent:event button:INPUT_BUTTON_LEFT down:false]; +} + +- (void) rightMouseUp:(NSEvent *)event +{ + [self handleMouseEvent:event button:INPUT_BUTTON_RIGHT down:false]; +} + +- (void) otherMouseUp:(NSEvent *)event +{ + [self handleMouseEvent:event button:INPUT_BUTTON_MIDDLE down:false]; } - (void) grabMouse { COCOA_DEBUG("QemuCocoaView: grabMouse\n"); - if (!isFullscreen) { - if (qemu_name) - [normalWindow setTitle:[NSString stringWithFormat:@"QEMU %s - (Press " UC_CTRL_KEY " " UC_ALT_KEY " G to release Mouse)", qemu_name]]; - else - [normalWindow setTitle:@"QEMU - (Press " UC_CTRL_KEY " " UC_ALT_KEY " G to release Mouse)"]; - } + if (qemu_name) + [[self window] setTitle:[NSString stringWithFormat:@"QEMU %s - (Press " UC_CTRL_KEY " " UC_ALT_KEY " G to release Mouse)", qemu_name]]; + else + [[self window] setTitle:@"QEMU - (Press " UC_CTRL_KEY " " UC_ALT_KEY " G to release Mouse)"]; [self hideCursor]; CGAssociateMouseAndMouseCursorPosition(isAbsoluteEnabled); isMouseGrabbed = TRUE; // while isMouseGrabbed = TRUE, QemuCocoaApp sends all events to [cocoaView handleEvent:] @@ -1152,15 +1112,14 @@ - (void) ungrabMouse { COCOA_DEBUG("QemuCocoaView: ungrabMouse\n"); - if (!isFullscreen) { - if (qemu_name) - [normalWindow setTitle:[NSString stringWithFormat:@"QEMU %s", qemu_name]]; - else - [normalWindow setTitle:@"QEMU"]; - } + if (qemu_name) + [[self window] setTitle:[NSString stringWithFormat:@"QEMU %s", qemu_name]]; + else + [[self window] setTitle:@"QEMU"]; [self unhideCursor]; CGAssociateMouseAndMouseCursorPosition(TRUE); isMouseGrabbed = FALSE; + [self raiseAllButtons]; } - (void) setAbsoluteEnabled:(BOOL)tIsAbsoluteEnabled { @@ -1171,8 +1130,6 @@ - (void) setAbsoluteEnabled:(BOOL)tIsAbsoluteEnabled { } - (BOOL) isMouseGrabbed {return isMouseGrabbed;} - (BOOL) isAbsoluteEnabled {return isAbsoluteEnabled;} -- (float) cdx {return cdx;} -- (float) cdy {return cdy;} - (QEMUScreen) gscreen {return screen;} /* @@ -1182,10 +1139,19 @@ - (QEMUScreen) gscreen {return screen;} */ - (void) raiseAllKeys { - with_iothread_lock(^{ + with_bql(^{ qkbd_state_lift_all_keys(kbd); }); } + +- (void) raiseAllButtons +{ + with_bql(^{ + 
qemu_input_queue_btn(dcl.con, INPUT_BUTTON_LEFT, false); + qemu_input_queue_btn(dcl.con, INPUT_BUTTON_RIGHT, false); + qemu_input_queue_btn(dcl.con, INPUT_BUTTON_MIDDLE, false); + }); +} @end @@ -1200,7 +1166,6 @@ @interface QemuCocoaAppController : NSObject { } - (void)doToggleFullScreen:(id)sender; -- (void)toggleFullScreen:(id)sender; - (void)showQEMUDoc:(id)sender; - (void)zoomToFit:(id) sender; - (void)displayConsole:(id)sender; @@ -1221,6 +1186,8 @@ - (void)adjustSpeed:(id)sender; @implementation QemuCocoaAppController - (id) init { + NSWindow *window; + COCOA_DEBUG("QemuCocoaAppController: init\n"); self = [super init]; @@ -1234,19 +1201,20 @@ - (id) init } // create a window - normalWindow = [[NSWindow alloc] initWithContentRect:[cocoaView frame] + window = [[NSWindow alloc] initWithContentRect:[cocoaView frame] styleMask:NSWindowStyleMaskTitled|NSWindowStyleMaskMiniaturizable|NSWindowStyleMaskClosable backing:NSBackingStoreBuffered defer:NO]; - if(!normalWindow) { + if(!window) { error_report("(cocoa) can't create window"); exit(1); } - [normalWindow setAcceptsMouseMovedEvents:YES]; - [normalWindow setTitle:@"QEMU"]; - [normalWindow setContentView:cocoaView]; - [normalWindow makeKeyAndOrderFront:self]; - [normalWindow center]; - [normalWindow setDelegate: self]; + [window setAcceptsMouseMovedEvents:YES]; + [window setCollectionBehavior:NSWindowCollectionBehaviorFullScreenPrimary]; + [window setTitle:qemu_name ? [NSString stringWithFormat:@"QEMU %s", qemu_name] : @"QEMU"]; + [window setContentView:cocoaView]; + [window makeKeyAndOrderFront:self]; + [window center]; + [window setDelegate: self]; /* Used for displaying pause on the screen */ pauseLabel = [NSTextField new]; @@ -1282,7 +1250,7 @@ - (void)applicationWillTerminate:(NSNotification *)aNotification { COCOA_DEBUG("QemuCocoaAppController: applicationWillTerminate\n"); - with_iothread_lock(^{ + with_bql(^{ shutdown_action = SHUTDOWN_ACTION_POWEROFF; qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_UI); }); @@ -1312,8 +1280,20 @@ - (void)windowDidChangeScreen:(NSNotification *)notification [cocoaView updateUIInfo]; } +- (void)windowDidEnterFullScreen:(NSNotification *)notification +{ + [cocoaView grabMouse]; +} + +- (void)windowDidExitFullScreen:(NSNotification *)notification +{ + [cocoaView resizeWindow]; + [cocoaView ungrabMouse]; +} + - (void)windowDidResize:(NSNotification *)notification { + [cocoaView updateBounds]; [cocoaView updateUIInfo]; } @@ -1330,6 +1310,14 @@ - (BOOL)windowShouldClose:(id)sender return NO; } +- (NSApplicationPresentationOptions) window:(NSWindow *)window + willUseFullScreenPresentationOptions:(NSApplicationPresentationOptions)proposedOptions; + +{ + return (proposedOptions & ~(NSApplicationPresentationAutoHideDock | NSApplicationPresentationAutoHideMenuBar)) | + NSApplicationPresentationHideDock | NSApplicationPresentationHideMenuBar; +} + /* * Called when QEMU goes into the background. 
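The window:willUseFullScreenPresentationOptions: override above uses the usual clear-then-set flags pattern so the dock and menu bar stay fully hidden in fullscreen rather than auto-hiding. A toy version of the same bit manipulation, with made-up flag values:

    #include <assert.h>
    #include <stdio.h>

    enum {
        AUTO_HIDE_DOCK     = 1 << 0,
        AUTO_HIDE_MENU_BAR = 1 << 1,
        HIDE_DOCK          = 1 << 2,
        HIDE_MENU_BAR      = 1 << 3,
        SOME_OTHER_OPTION  = 1 << 4,
    };

    static unsigned adjust(unsigned proposed)
    {
        /* drop the auto-hide variants, force the hide variants */
        return (proposed & ~(AUTO_HIDE_DOCK | AUTO_HIDE_MENU_BAR))
               | HIDE_DOCK | HIDE_MENU_BAR;
    }

    int main(void)
    {
        unsigned adjusted = adjust(AUTO_HIDE_DOCK | AUTO_HIDE_MENU_BAR | SOME_OTHER_OPTION);

        assert(!(adjusted & (AUTO_HIDE_DOCK | AUTO_HIDE_MENU_BAR)));
        assert(adjusted & HIDE_DOCK);
        assert(adjusted & HIDE_MENU_BAR);
        assert(adjusted & SOME_OTHER_OPTION);   /* unrelated options pass through */
        printf("presentation options adjusted\n");
        return 0;
    }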
Note that * [-NSWindowDelegate windowDidResignKey:] is used here instead of @@ -1349,14 +1337,7 @@ - (void) windowDidResignKey: (NSNotification *)aNotification */ - (void) doToggleFullScreen:(id)sender { - [self toggleFullScreen:(id)sender]; -} - -- (void)toggleFullScreen:(id)sender -{ - COCOA_DEBUG("QemuCocoaAppController: toggleFullScreen\n"); - - [cocoaView toggleFullScreen:sender]; + [[cocoaView window] toggleFullScreen:sender]; } - (void) setFullGrab:(id)sender @@ -1403,10 +1384,20 @@ - (void)showQEMUDoc:(id)sender /* Stretches video to fit host monitor size */ - (void)zoomToFit:(id) sender { - stretch_video = !stretch_video; - if (stretch_video == true) { + NSWindowStyleMask styleMask = [[cocoaView window] styleMask] ^ NSWindowStyleMaskResizable; + + [[cocoaView window] setStyleMask:styleMask]; + [sender setState:styleMask & NSWindowStyleMaskResizable ? NSControlStateValueOn : NSControlStateValueOff]; + [cocoaView resizeWindow]; +} + +- (void)toggleZoomInterpolation:(id) sender +{ + if (zoom_interpolation == kCGInterpolationNone) { + zoom_interpolation = kCGInterpolationLow; [sender setState: NSControlStateValueOn]; } else { + zoom_interpolation = kCGInterpolationNone; [sender setState: NSControlStateValueOff]; } } @@ -1414,13 +1405,15 @@ - (void)zoomToFit:(id) sender /* Displays the console on the screen */ - (void)displayConsole:(id)sender { - console_select([sender tag]); + with_bql(^{ + [cocoaView selectConsoleLocked:[sender tag]]; + }); } /* Pause the guest */ - (void)pauseQEMU:(id)sender { - with_iothread_lock(^{ + with_bql(^{ qmp_stop(NULL); }); [sender setEnabled: NO]; @@ -1431,7 +1424,7 @@ - (void)pauseQEMU:(id)sender /* Resume running the guest operating system */ - (void)resumeQEMU:(id) sender { - with_iothread_lock(^{ + with_bql(^{ qmp_cont(NULL); }); [sender setEnabled: NO]; @@ -1444,8 +1437,8 @@ - (void)displayPause { /* Coordinates have to be calculated each time because the window can change its size */ int xCoord, yCoord, width, height; - xCoord = ([normalWindow frame].size.width - [pauseLabel frame].size.width)/2; - yCoord = [normalWindow frame].size.height - [pauseLabel frame].size.height - ([pauseLabel frame].size.height * .5); + xCoord = ([cocoaView frame].size.width - [pauseLabel frame].size.width)/2; + yCoord = [cocoaView frame].size.height - [pauseLabel frame].size.height - ([pauseLabel frame].size.height * .5); width = [pauseLabel frame].size.width; height = [pauseLabel frame].size.height; [pauseLabel setFrame: NSMakeRect(xCoord, yCoord, width, height)]; @@ -1461,7 +1454,7 @@ - (void)removePause /* Restarts QEMU */ - (void)restartQEMU:(id)sender { - with_iothread_lock(^{ + with_bql(^{ qmp_system_reset(NULL); }); } @@ -1469,7 +1462,7 @@ - (void)restartQEMU:(id)sender /* Powers down QEMU */ - (void)powerDownQEMU:(id)sender { - with_iothread_lock(^{ + with_bql(^{ qmp_system_powerdown(NULL); }); } @@ -1488,7 +1481,7 @@ - (void)ejectDeviceMedia:(id)sender } __block Error *err = NULL; - with_iothread_lock(^{ + with_bql(^{ qmp_eject([drive cStringUsingEncoding: NSASCIIStringEncoding], NULL, false, false, &err); }); @@ -1523,7 +1516,7 @@ - (void)changeDeviceMedia:(id)sender } __block Error *err = NULL; - with_iothread_lock(^{ + with_bql(^{ qmp_blockdev_change_medium([drive cStringUsingEncoding: NSASCIIStringEncoding], NULL, @@ -1605,7 +1598,7 @@ - (void)adjustSpeed:(id)sender // get the throttle percentage throttle_pct = [sender tag]; - with_iothread_lock(^{ + with_bql(^{ cpu_throttle_set(throttle_pct); }); COCOA_DEBUG("cpu throttling at %d%c\n", 
cpu_throttle_get_percentage(), '%'); @@ -1671,7 +1664,10 @@ static void create_initial_menus(void) menu = [[NSMenu alloc] initWithTitle:@"View"]; [menu addItem: [[[NSMenuItem alloc] initWithTitle:@"Enter Fullscreen" action:@selector(doToggleFullScreen:) keyEquivalent:@"f"] autorelease]]; // Fullscreen menuItem = [[[NSMenuItem alloc] initWithTitle:@"Zoom To Fit" action:@selector(zoomToFit:) keyEquivalent:@""] autorelease]; - [menuItem setState: stretch_video ? NSControlStateValueOn : NSControlStateValueOff]; + [menuItem setState: [[cocoaView window] styleMask] & NSWindowStyleMaskResizable ? NSControlStateValueOn : NSControlStateValueOff]; + [menu addItem: menuItem]; + menuItem = [[[NSMenuItem alloc] initWithTitle:@"Zoom Interpolation" action:@selector(toggleZoomInterpolation:) keyEquivalent:@""] autorelease]; + [menuItem setState: zoom_interpolation == kCGInterpolationLow ? NSControlStateValueOn : NSControlStateValueOff]; [menu addItem: menuItem]; menuItem = [[[NSMenuItem alloc] initWithTitle:@"View" action:nil keyEquivalent:@""] autorelease]; [menuItem setSubmenu:menu]; @@ -1819,7 +1815,7 @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t return; } - with_iothread_lock(^{ + with_bql(^{ QemuClipboardInfo *info = qemu_clipboard_info_ref(cbinfo); qemu_event_reset(&cbevent); qemu_clipboard_request(info, QEMU_CLIPBOARD_TYPE_TEXT); @@ -1827,9 +1823,9 @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t while (info == cbinfo && info->types[QEMU_CLIPBOARD_TYPE_TEXT].available && info->types[QEMU_CLIPBOARD_TYPE_TEXT].data == NULL) { - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_event_wait(&cbevent); - qemu_mutex_lock_iothread(); + bql_lock(); } if (info == cbinfo) { @@ -1927,9 +1923,9 @@ static void cocoa_clipboard_request(QemuClipboardInfo *info, int status; COCOA_DEBUG("Second thread: calling qemu_default_main()\n"); - qemu_mutex_lock_iothread(); + bql_lock(); status = qemu_default_main(); - qemu_mutex_unlock_iothread(); + bql_unlock(); COCOA_DEBUG("Second thread: qemu_default_main() returned, exiting\n"); [cbowner release]; exit(status); @@ -1941,7 +1937,7 @@ static int cocoa_main(void) COCOA_DEBUG("Entered %s()\n", __func__); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_thread_create(&thread, "qemu_main", call_qemu_main, NULL, QEMU_THREAD_DETACHED); @@ -1962,16 +1958,7 @@ static void cocoa_update(DisplayChangeListener *dcl, COCOA_DEBUG("qemu_cocoa: cocoa_update\n"); dispatch_async(dispatch_get_main_queue(), ^{ - NSRect rect; - if ([cocoaView cdx] == 1.0) { - rect = NSMakeRect(x, [cocoaView gscreen].height - y - h, w, h); - } else { - rect = NSMakeRect( - x * [cocoaView cdx], - ([cocoaView gscreen].height - y - h) * [cocoaView cdy], - w * [cocoaView cdx], - h * [cocoaView cdy]); - } + NSRect rect = NSMakeRect(x, [cocoaView gscreen].height - y - h, w, h); [cocoaView setNeedsDisplayInRect:rect]; }); } @@ -1990,7 +1977,6 @@ static void cocoa_switch(DisplayChangeListener *dcl, pixman_image_ref(image); dispatch_async(dispatch_get_main_queue(), ^{ - [cocoaView updateUIInfo]; [cocoaView switchSurface:image]; }); } @@ -2000,7 +1986,7 @@ static void cocoa_refresh(DisplayChangeListener *dcl) NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; COCOA_DEBUG("qemu_cocoa: cocoa_refresh\n"); - graphic_hw_update(NULL); + graphic_hw_update(dcl->con); if (qemu_input_is_absolute(dcl->con)) { dispatch_async(dispatch_get_main_queue(), ^{ @@ -2048,8 +2034,7 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) 
/* if fullscreen mode is to be used */ if (opts->has_full_screen && opts->full_screen) { - [NSApp activateIgnoringOtherApps: YES]; - [controller toggleFullScreen: nil]; + [[cocoaView window] toggleFullScreen: nil]; } if (opts->u.cocoa.has_full_grab && opts->u.cocoa.full_grab) { [controller setFullGrab: nil]; @@ -2067,7 +2052,11 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) } if (opts->u.cocoa.has_zoom_to_fit && opts->u.cocoa.zoom_to_fit) { - stretch_video = true; + [cocoaView window].styleMask |= NSWindowStyleMaskResizable; + } + + if (opts->u.cocoa.has_zoom_interpolation && opts->u.cocoa.zoom_interpolation) { + zoom_interpolation = kCGInterpolationLow; } create_initial_menus(); @@ -2075,14 +2064,18 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) * Create the menu entries which depend on QEMU state (for consoles * and removable devices). These make calls back into QEMU functions, * which is OK because at this point we know that the second thread - * holds the iothread lock and is synchronously waiting for us to + * holds the BQL and is synchronously waiting for us to * finish. */ add_console_menu_entries(); addRemovableDevicesMenuItems(); + dcl.con = qemu_console_lookup_default(); + kbd = qkbd_state_init(dcl.con); + // register vga output callbacks register_displaychangelistener(&dcl); + [cocoaView updateUIInfo]; qemu_event_init(&cbevent, false); cbowner = [[QemuCocoaPasteboardTypeOwner alloc] init]; diff --git a/ui/console-priv.h b/ui/console-priv.h index 88569ed2cc4..43ceb8122f1 100644 --- a/ui/console-priv.h +++ b/ui/console-priv.h @@ -35,7 +35,7 @@ struct QemuConsole { QTAILQ_ENTRY(QemuConsole) next; }; -void qemu_text_console_select(QemuTextConsole *c); +void qemu_text_console_update_size(QemuTextConsole *c); const char * qemu_text_console_get_label(QemuTextConsole *c); void qemu_text_console_update_cursor(void); void qemu_text_console_handle_keysym(QemuTextConsole *s, int keysym); diff --git a/ui/console-vc-stubs.c b/ui/console-vc-stubs.c index 2afc52329f0..b63e2fb2345 100644 --- a/ui/console-vc-stubs.c +++ b/ui/console-vc-stubs.c @@ -10,7 +10,7 @@ #include "chardev/char.h" #include "ui/console-priv.h" -void qemu_text_console_select(QemuTextConsole *c) +void qemu_text_console_update_size(QemuTextConsole *c) { } diff --git a/ui/console-vc.c b/ui/console-vc.c index 9c13cc2981b..899fa11c948 100644 --- a/ui/console-vc.c +++ b/ui/console-vc.c @@ -958,10 +958,9 @@ static void vc_chr_set_echo(Chardev *chr, bool echo) drv->console->echo = echo; } -void qemu_text_console_select(QemuTextConsole *c) +void qemu_text_console_update_size(QemuTextConsole *c) { dpy_text_resize(QEMU_CONSOLE(c), c->width, c->height); - qemu_text_console_update_cursor(); } static void vc_chr_open(Chardev *chr, @@ -990,8 +989,8 @@ static void vc_chr_open(Chardev *chr, trace_console_txt_new(width, height); if (width == 0 || height == 0) { s = QEMU_TEXT_CONSOLE(object_new(TYPE_QEMU_TEXT_CONSOLE)); - width = qemu_console_get_width(NULL, 80 * FONT_WIDTH); - height = qemu_console_get_height(NULL, 24 * FONT_HEIGHT); + width = 80 * FONT_WIDTH; + height = 24 * FONT_HEIGHT; } else { s = QEMU_TEXT_CONSOLE(object_new(TYPE_QEMU_FIXED_TEXT_CONSOLE)); } diff --git a/ui/console.c b/ui/console.c index 832055675c5..43226c5c145 100644 --- a/ui/console.c +++ b/ui/console.c @@ -66,7 +66,6 @@ struct DisplayState { }; static DisplayState *display_state; -static QemuConsole *active_console; static QTAILQ_HEAD(, QemuConsole) consoles = QTAILQ_HEAD_INITIALIZER(consoles); @@ -135,7 +134,6 @@ 
void graphic_hw_update_done(QemuConsole *con) void graphic_hw_update(QemuConsole *con) { bool async = false; - con = con ? con : active_console; if (!con) { return; } @@ -209,9 +207,6 @@ void qemu_console_set_window_id(QemuConsole *con, int window_id) void graphic_hw_invalidate(QemuConsole *con) { - if (!con) { - con = active_console; - } if (con && con->hw_ops->invalidate) { con->hw_ops->invalidate(con->hw); } @@ -219,9 +214,6 @@ void graphic_hw_invalidate(QemuConsole *con) void graphic_hw_text_update(QemuConsole *con, console_ch_t *chardata) { - if (!con) { - con = active_console; - } if (con && con->hw_ops->text_update) { con->hw_ops->text_update(con->hw, chardata); } @@ -265,12 +257,12 @@ static void dpy_gfx_update_texture(QemuConsole *con, DisplaySurface *surface, } static void displaychangelistener_display_console(DisplayChangeListener *dcl, - QemuConsole *con, Error **errp) { static const char nodev[] = "This VM has no graphic display device."; static DisplaySurface *dummy; + QemuConsole *con = dcl->con; if (!con || !console_compatible_with(con, dcl, errp)) { if (!dummy) { @@ -305,39 +297,8 @@ static void displaychangelistener_display_console(DisplayChangeListener *dcl, } } -void console_select(unsigned int index) -{ - DisplayChangeListener *dcl; - QemuConsole *s; - - trace_console_select(index); - s = qemu_console_lookup_by_index(index); - if (s) { - DisplayState *ds = s->ds; - - active_console = s; - QLIST_FOREACH (dcl, &ds->listeners, next) { - if (dcl->con != NULL) { - continue; - } - displaychangelistener_display_console(dcl, s, NULL); - } - - if (QEMU_IS_TEXT_CONSOLE(s)) { - qemu_text_console_select(QEMU_TEXT_CONSOLE(s)); - } - } -} - void qemu_text_console_put_keysym(QemuTextConsole *s, int keysym) { - if (!s) { - if (!QEMU_IS_TEXT_CONSOLE(active_console)) { - return; - } - s = QEMU_TEXT_CONSOLE(active_console); - } - qemu_text_console_handle_keysym(s, keysym); } @@ -392,11 +353,6 @@ qemu_console_register(QemuConsole *c) { int i; - if (!active_console || (!QEMU_IS_GRAPHIC_CONSOLE(active_console) && - QEMU_IS_GRAPHIC_CONSOLE(c))) { - active_console = c; - } - if (QTAILQ_EMPTY(&consoles)) { c->index = 0; QTAILQ_INSERT_TAIL(&consoles, c, next); @@ -751,8 +707,6 @@ dcl_set_graphic_cursor(DisplayChangeListener *dcl, QemuGraphicConsole *con) void register_displaychangelistener(DisplayChangeListener *dcl) { - QemuConsole *con; - assert(!dcl->ds); trace_displaychangelistener_register(dcl, dcl->ops->dpy_name); @@ -761,13 +715,12 @@ void register_displaychangelistener(DisplayChangeListener *dcl) gui_setup_refresh(dcl->ds); if (dcl->con) { dcl->con->dcls++; - con = dcl->con; - } else { - con = active_console; } - displaychangelistener_display_console(dcl, con, dcl->con ? 
&error_fatal : NULL); - if (QEMU_IS_GRAPHIC_CONSOLE(con)) { - dcl_set_graphic_cursor(dcl, QEMU_GRAPHIC_CONSOLE(con)); + displaychangelistener_display_console(dcl, &error_fatal); + if (QEMU_IS_GRAPHIC_CONSOLE(dcl->con)) { + dcl_set_graphic_cursor(dcl, QEMU_GRAPHIC_CONSOLE(dcl->con)); + } else if (QEMU_IS_TEXT_CONSOLE(dcl->con)) { + qemu_text_console_update_size(QEMU_TEXT_CONSOLE(dcl->con)); } qemu_text_console_update_cursor(); } @@ -805,9 +758,6 @@ static void dpy_set_ui_info_timer(void *opaque) bool dpy_ui_info_supported(const QemuConsole *con) { - if (con == NULL) { - con = active_console; - } if (con == NULL) { return false; } @@ -819,19 +769,11 @@ const QemuUIInfo *dpy_get_ui_info(const QemuConsole *con) { assert(dpy_ui_info_supported(con)); - if (con == NULL) { - con = active_console; - } - return &con->ui_info; } int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info, bool delay) { - if (con == NULL) { - con = active_console; - } - if (!dpy_ui_info_supported(con)) { return -1; } @@ -870,7 +812,7 @@ void dpy_gfx_update(QemuConsole *con, int x, int y, int w, int h) } dpy_gfx_update_texture(con, con->surface, x, y, w, h); QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? dcl->con : active_console)) { + if (con != dcl->con) { continue; } if (dcl->ops->dpy_gfx_update) { @@ -916,7 +858,7 @@ void dpy_gfx_replace_surface(QemuConsole *con, con->surface = new_surface; dpy_gfx_create_texture(con, new_surface); QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? dcl->con : active_console)) { + if (con != dcl->con) { continue; } displaychangelistener_gfx_switch(dcl, new_surface, surface ? FALSE : TRUE); @@ -970,7 +912,7 @@ void dpy_text_cursor(QemuConsole *con, int x, int y) return; } QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? dcl->con : active_console)) { + if (con != dcl->con) { continue; } if (dcl->ops->dpy_text_cursor) { @@ -988,7 +930,7 @@ void dpy_text_update(QemuConsole *con, int x, int y, int w, int h) return; } QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? dcl->con : active_console)) { + if (con != dcl->con) { continue; } if (dcl->ops->dpy_text_update) { @@ -1006,7 +948,7 @@ void dpy_text_resize(QemuConsole *con, int w, int h) return; } QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? dcl->con : active_console)) { + if (con != dcl->con) { continue; } if (dcl->ops->dpy_text_resize) { @@ -1028,7 +970,7 @@ void dpy_mouse_set(QemuConsole *c, int x, int y, int on) return; } QLIST_FOREACH(dcl, &s->listeners, next) { - if (c != (dcl->con ? dcl->con : active_console)) { + if (c != dcl->con) { continue; } if (dcl->ops->dpy_mouse_set) { @@ -1049,7 +991,7 @@ void dpy_cursor_define(QemuConsole *c, QEMUCursor *cursor) return; } QLIST_FOREACH(dcl, &s->listeners, next) { - if (c != (dcl->con ? dcl->con : active_console)) { + if (c != dcl->con) { continue; } if (dcl->ops->dpy_cursor_define) { @@ -1099,7 +1041,7 @@ void dpy_gl_scanout_disable(QemuConsole *con) con->scanout.kind = SCANOUT_NONE; } QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? dcl->con : active_console)) { + if (con != dcl->con) { continue; } if (dcl->ops->dpy_gl_scanout_disable) { @@ -1126,7 +1068,7 @@ void dpy_gl_scanout_texture(QemuConsole *con, x, y, width, height, d3d_tex2d, }; QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? 
dcl->con : active_console)) { + if (con != dcl->con) { continue; } if (dcl->ops->dpy_gl_scanout_texture) { @@ -1148,7 +1090,7 @@ void dpy_gl_scanout_dmabuf(QemuConsole *con, con->scanout.kind = SCANOUT_DMABUF; con->scanout.dmabuf = dmabuf; QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? dcl->con : active_console)) { + if (con != dcl->con) { continue; } if (dcl->ops->dpy_gl_scanout_dmabuf) { @@ -1164,7 +1106,7 @@ void dpy_gl_cursor_dmabuf(QemuConsole *con, QemuDmaBuf *dmabuf, DisplayChangeListener *dcl; QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? dcl->con : active_console)) { + if (con != dcl->con) { continue; } if (dcl->ops->dpy_gl_cursor_dmabuf) { @@ -1181,7 +1123,7 @@ void dpy_gl_cursor_position(QemuConsole *con, DisplayChangeListener *dcl; QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? dcl->con : active_console)) { + if (con != dcl->con) { continue; } if (dcl->ops->dpy_gl_cursor_position) { @@ -1197,7 +1139,7 @@ void dpy_gl_release_dmabuf(QemuConsole *con, DisplayChangeListener *dcl; QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? dcl->con : active_console)) { + if (con != dcl->con) { continue; } if (dcl->ops->dpy_gl_release_dmabuf) { @@ -1216,7 +1158,7 @@ void dpy_gl_update(QemuConsole *con, graphic_hw_gl_block(con, true); QLIST_FOREACH(dcl, &s->listeners, next) { - if (con != (dcl->con ? dcl->con : active_console)) { + if (con != dcl->con) { continue; } if (dcl->ops->dpy_gl_update) { @@ -1325,6 +1267,18 @@ void graphic_console_close(QemuConsole *con) dpy_gfx_replace_surface(con, surface); } +QemuConsole *qemu_console_lookup_default(void) +{ + QemuConsole *con; + + QTAILQ_FOREACH(con, &consoles, next) { + if (QEMU_IS_GRAPHIC_CONSOLE(con)) { + return con; + } + } + return QTAILQ_FIRST(&consoles); +} + QemuConsole *qemu_console_lookup_by_index(unsigned int index) { QemuConsole *con; @@ -1403,30 +1357,21 @@ static QemuConsole *qemu_graphic_console_lookup_unused(void) QEMUCursor *qemu_console_get_cursor(QemuConsole *con) { - if (con == NULL) { - con = active_console; - } return QEMU_IS_GRAPHIC_CONSOLE(con) ? QEMU_GRAPHIC_CONSOLE(con)->cursor : NULL; } bool qemu_console_is_visible(QemuConsole *con) { - return (con == active_console) || (con->dcls > 0); + return con->dcls > 0; } bool qemu_console_is_graphic(QemuConsole *con) { - if (con == NULL) { - con = active_console; - } return con && QEMU_IS_GRAPHIC_CONSOLE(con); } bool qemu_console_is_fixedsize(QemuConsole *con) { - if (con == NULL) { - con = active_console; - } return con && (QEMU_IS_GRAPHIC_CONSOLE(con) || QEMU_IS_FIXED_TEXT_CONSOLE(con)); } @@ -1493,17 +1438,11 @@ char *qemu_console_get_label(QemuConsole *con) int qemu_console_get_index(QemuConsole *con) { - if (con == NULL) { - con = active_console; - } return con ? 
con->index : -1; } uint32_t qemu_console_get_head(QemuConsole *con) { - if (con == NULL) { - con = active_console; - } if (con == NULL) { return -1; } @@ -1515,9 +1454,6 @@ uint32_t qemu_console_get_head(QemuConsole *con) int qemu_console_get_width(QemuConsole *con, int fallback) { - if (con == NULL) { - con = active_console; - } if (con == NULL) { return fallback; } @@ -1535,9 +1471,6 @@ int qemu_console_get_width(QemuConsole *con, int fallback) int qemu_console_get_height(QemuConsole *con, int fallback) { - if (con == NULL) { - con = active_console; - } if (con == NULL) { return fallback; } diff --git a/ui/curses.c b/ui/curses.c index 8bde8c5cf7c..ec61615f7c1 100644 --- a/ui/curses.c +++ b/ui/curses.c @@ -98,7 +98,7 @@ static void curses_update(DisplayChangeListener *dcl, static void curses_calc_pad(void) { - if (qemu_console_is_fixedsize(NULL)) { + if (qemu_console_is_fixedsize(dcl->con)) { width = gwidth; height = gheight; } else { @@ -201,7 +201,7 @@ static void curses_cursor_position(DisplayChangeListener *dcl, curs_set(1); /* it seems that curs_set(1) must always be called before * curs_set(2) for the latter to have effect */ - if (!qemu_console_is_graphic(NULL)) { + if (!qemu_console_is_graphic(dcl->con)) { curs_set(2); } return; @@ -274,11 +274,11 @@ static void curses_refresh(DisplayChangeListener *dcl) clear(); refresh(); curses_calc_pad(); - graphic_hw_invalidate(NULL); + graphic_hw_invalidate(dcl->con); invalidate = 0; } - graphic_hw_text_update(NULL, screen); + graphic_hw_text_update(dcl->con, screen); while (1) { /* while there are any pending key strokes to process */ @@ -318,11 +318,16 @@ static void curses_refresh(DisplayChangeListener *dcl) /* process keys reserved for qemu */ if (keycode >= QEMU_KEY_CONSOLE0 && keycode < QEMU_KEY_CONSOLE0 + 9) { - erase(); - wnoutrefresh(stdscr); - console_select(keycode - QEMU_KEY_CONSOLE0); - - invalidate = 1; + QemuConsole *con = qemu_console_lookup_by_index(keycode - QEMU_KEY_CONSOLE0); + if (con) { + erase(); + wnoutrefresh(stdscr); + unregister_displaychangelistener(dcl); + dcl->con = con; + register_displaychangelistener(dcl); + + invalidate = 1; + } continue; } } @@ -354,45 +359,45 @@ static void curses_refresh(DisplayChangeListener *dcl) if (keycode == -1) continue; - if (qemu_console_is_graphic(NULL)) { + if (qemu_console_is_graphic(dcl->con)) { /* since terminals don't know about key press and release * events, we need to emit both for each key received */ if (keycode & SHIFT) { - qemu_input_event_send_key_number(NULL, SHIFT_CODE, true); + qemu_input_event_send_key_number(dcl->con, SHIFT_CODE, true); qemu_input_event_send_key_delay(0); } if (keycode & CNTRL) { - qemu_input_event_send_key_number(NULL, CNTRL_CODE, true); + qemu_input_event_send_key_number(dcl->con, CNTRL_CODE, true); qemu_input_event_send_key_delay(0); } if (keycode & ALT) { - qemu_input_event_send_key_number(NULL, ALT_CODE, true); + qemu_input_event_send_key_number(dcl->con, ALT_CODE, true); qemu_input_event_send_key_delay(0); } if (keycode & ALTGR) { - qemu_input_event_send_key_number(NULL, GREY | ALT_CODE, true); + qemu_input_event_send_key_number(dcl->con, GREY | ALT_CODE, true); qemu_input_event_send_key_delay(0); } - qemu_input_event_send_key_number(NULL, keycode & KEY_MASK, true); + qemu_input_event_send_key_number(dcl->con, keycode & KEY_MASK, true); qemu_input_event_send_key_delay(0); - qemu_input_event_send_key_number(NULL, keycode & KEY_MASK, false); + qemu_input_event_send_key_number(dcl->con, keycode & KEY_MASK, false); 
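/*
 * Illustrative sketch (not from the patch): with the global active_console
 * and console_select() removed, a front end that wants to follow a
 * different console re-binds its own DisplayChangeListener, as the curses
 * hunk above (and the VNC hunk further down) do. This assumes the usual
 * "ui/console.h" and "ui/kbd-state.h" declarations are in scope;
 * backend_switch_console() and its parameters are hypothetical
 * placeholders for whatever state the backend already holds.
 */
static void backend_switch_console(DisplayChangeListener *dcl,
                                   QKbdState *kbd, unsigned int index)
{
    QemuConsole *con = qemu_console_lookup_by_index(index);

    if (!con) {
        return;                               /* no such console: keep the current binding */
    }
    unregister_displaychangelistener(dcl);    /* detach from the old console */
    if (kbd) {
        qkbd_state_switch_console(kbd, con);  /* lift held keys, retarget keyboard state */
    }
    dcl->con = con;                           /* bind the listener to the new console */
    register_displaychangelistener(dcl);      /* redisplay/resize via the new console */
}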
qemu_input_event_send_key_delay(0); if (keycode & ALTGR) { - qemu_input_event_send_key_number(NULL, GREY | ALT_CODE, false); + qemu_input_event_send_key_number(dcl->con, GREY | ALT_CODE, false); qemu_input_event_send_key_delay(0); } if (keycode & ALT) { - qemu_input_event_send_key_number(NULL, ALT_CODE, false); + qemu_input_event_send_key_number(dcl->con, ALT_CODE, false); qemu_input_event_send_key_delay(0); } if (keycode & CNTRL) { - qemu_input_event_send_key_number(NULL, CNTRL_CODE, false); + qemu_input_event_send_key_number(dcl->con, CNTRL_CODE, false); qemu_input_event_send_key_delay(0); } if (keycode & SHIFT) { - qemu_input_event_send_key_number(NULL, SHIFT_CODE, false); + qemu_input_event_send_key_number(dcl->con, SHIFT_CODE, false); qemu_input_event_send_key_delay(0); } } else { @@ -400,7 +405,7 @@ static void curses_refresh(DisplayChangeListener *dcl) if (keysym == -1) keysym = chr; - qemu_text_console_put_keysym(NULL, keysym); + qemu_text_console_put_keysym(QEMU_TEXT_CONSOLE(dcl->con), keysym); } } } @@ -798,6 +803,7 @@ static void curses_display_init(DisplayState *ds, DisplayOptions *opts) curses_winch_init(); dcl = g_new0(DisplayChangeListener, 1); + dcl->con = qemu_console_lookup_default(); dcl->ops = &dcl_ops; register_displaychangelistener(dcl); diff --git a/ui/dbus-display1.xml b/ui/dbus-display1.xml index f0e2fac2127..ce35d64eea1 100644 --- a/ui/dbus-display1.xml +++ b/ui/dbus-display1.xml @@ -71,7 +71,7 @@ :dbus:iface:`org.qemu.Display1.Listener` interface. --> - + @@ -370,7 +370,7 @@ - + - + @@ -715,7 +715,7 @@ :dbus:iface:`org.qemu.Display1.AudioInListener` interface. --> - + @@ -976,7 +976,7 @@ The current handler, if any, will be replaced. --> - + diff --git a/ui/dbus-listener.c b/ui/dbus-listener.c index 18f556aa73a..4a0a5d78f93 100644 --- a/ui/dbus-listener.c +++ b/ui/dbus-listener.c @@ -83,6 +83,9 @@ struct _DBusDisplayListener { egl_fb fb; #endif #endif + + guint dbus_filter; + guint32 out_serial_to_discard; }; G_DEFINE_TYPE(DBusDisplayListener, dbus_display_listener, G_TYPE_OBJECT) @@ -90,6 +93,12 @@ G_DEFINE_TYPE(DBusDisplayListener, dbus_display_listener, G_TYPE_OBJECT) static void dbus_gfx_update(DisplayChangeListener *dcl, int x, int y, int w, int h); +static void ddl_discard_pending_messages(DBusDisplayListener *ddl) +{ + ddl->out_serial_to_discard = g_dbus_connection_get_last_serial( + g_dbus_proxy_get_connection(G_DBUS_PROXY(ddl->proxy))); +} + #ifdef CONFIG_OPENGL static void dbus_scanout_disable(DisplayChangeListener *dcl) { @@ -276,6 +285,8 @@ static void dbus_scanout_dmabuf(DisplayChangeListener *dcl, return; } + ddl_discard_pending_messages(ddl); + /* FIXME: add missing x/y/w/h support */ qemu_dbus_display1_listener_call_scanout_dmabuf( ddl->proxy, @@ -323,6 +334,8 @@ static bool dbus_scanout_map(DBusDisplayListener *ddl) return false; } + ddl_discard_pending_messages(ddl); + if (!qemu_dbus_display1_listener_win32_map_call_scanout_map_sync( ddl->map_proxy, GPOINTER_TO_UINT(target_handle), @@ -384,6 +397,8 @@ dbus_scanout_share_d3d_texture( return false; } + ddl_discard_pending_messages(ddl); + qemu_dbus_display1_listener_win32_d3d11_call_scanout_texture2d( ddl->d3d11_proxy, GPOINTER_TO_INT(target_handle), @@ -630,11 +645,28 @@ static void dbus_gfx_update_sub(DBusDisplayListener *ddl, DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL); } +static void ddl_scanout(DBusDisplayListener *ddl) +{ + GVariant *v_data; + + v_data = g_variant_new_from_data( + G_VARIANT_TYPE("ay"), surface_data(ddl->ds), + surface_stride(ddl->ds) * surface_height(ddl->ds), TRUE, + 
(GDestroyNotify)pixman_image_unref, pixman_image_ref(ddl->ds->image)); + + ddl_discard_pending_messages(ddl); + + qemu_dbus_display1_listener_call_scanout( + ddl->proxy, surface_width(ddl->ds), surface_height(ddl->ds), + surface_stride(ddl->ds), surface_format(ddl->ds), v_data, + G_DBUS_CALL_FLAGS_NONE, DBUS_DEFAULT_TIMEOUT, NULL, NULL, + g_object_ref(ddl)); +} + static void dbus_gfx_update(DisplayChangeListener *dcl, int x, int y, int w, int h) { DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); - GVariant *v_data; assert(ddl->ds); @@ -652,23 +684,7 @@ static void dbus_gfx_update(DisplayChangeListener *dcl, #endif if (x == 0 && y == 0 && w == surface_width(ddl->ds) && h == surface_height(ddl->ds)) { - v_data = g_variant_new_from_data( - G_VARIANT_TYPE("ay"), - surface_data(ddl->ds), - surface_stride(ddl->ds) * surface_height(ddl->ds), - TRUE, - (GDestroyNotify)pixman_image_unref, - pixman_image_ref(ddl->ds->image)); - qemu_dbus_display1_listener_call_scanout( - ddl->proxy, - surface_width(ddl->ds), - surface_height(ddl->ds), - surface_stride(ddl->ds), - surface_format(ddl->ds), - v_data, - G_DBUS_CALL_FLAGS_NONE, - DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL); - return; + return ddl_scanout(ddl); } dbus_gfx_update_sub(ddl, x, y, w, h); @@ -964,6 +980,28 @@ dbus_display_listener_setup_shared_map(DBusDisplayListener *ddl) #endif } +static GDBusMessage * +dbus_filter(GDBusConnection *connection, + GDBusMessage *message, + gboolean incoming, + gpointer user_data) +{ + DBusDisplayListener *ddl = DBUS_DISPLAY_LISTENER(user_data); + guint32 serial; + + if (incoming) { + return message; + } + + serial = g_dbus_message_get_serial(message); + if (serial <= ddl->out_serial_to_discard) { + trace_dbus_filter(serial, ddl->out_serial_to_discard); + return NULL; + } + + return message; +} + DBusDisplayListener * dbus_display_listener_new(const char *bus_name, GDBusConnection *conn, @@ -988,6 +1026,7 @@ dbus_display_listener_new(const char *bus_name, return NULL; } + ddl->dbus_filter = g_dbus_connection_add_filter(conn, dbus_filter, g_object_ref(ddl), g_object_unref); ddl->bus_name = g_strdup(bus_name); ddl->conn = conn; ddl->console = console; diff --git a/ui/kbd-state.c b/ui/kbd-state.c index 62d42a7a22e..52ed28b8a89 100644 --- a/ui/kbd-state.c +++ b/ui/kbd-state.c @@ -117,6 +117,12 @@ void qkbd_state_lift_all_keys(QKbdState *kbd) } } +void qkbd_state_switch_console(QKbdState *kbd, QemuConsole *con) +{ + qkbd_state_lift_all_keys(kbd); + kbd->con = con; +} + void qkbd_state_set_delay(QKbdState *kbd, int delay_ms) { kbd->key_delay_ms = delay_ms; diff --git a/ui/meson.build b/ui/meson.build index 0f09d31c609..a5ce22a678b 100644 --- a/ui/meson.build +++ b/ui/meson.build @@ -25,10 +25,9 @@ endif system_ss.add([spice_headers, files('spice-module.c')]) system_ss.add(when: spice_protocol, if_true: files('vdagent.c')) -system_ss.add(when: 'CONFIG_LINUX', if_true: files( - 'input-linux.c', - 'udmabuf.c', -)) +if host_os == 'linux' + system_ss.add(files('input-linux.c', 'udmabuf.c')) +endif system_ss.add(when: cocoa, if_true: files('cocoa.m')) vnc_ss = ss.source_set() @@ -76,7 +75,7 @@ endif if dbus_display dbus_ss = ss.source_set() env = environment() - env.set('TARGETOS', targetos) + env.set('HOST_OS', host_os) xml = custom_target('dbus-display preprocess', input: 'dbus-display1.xml', output: 'dbus-display1.xml', @@ -91,8 +90,7 @@ if dbus_display '--interface-prefix', 'org.qemu.', '--c-namespace', 'QemuDBus', '--generate-c-code', '@BASENAME@']) - dbus_display1_lib = 
static_library('dbus-display1', dbus_display1, dependencies: gio) - dbus_display1_dep = declare_dependency(link_with: dbus_display1_lib, sources: dbus_display1[0]) + dbus_display1_dep = declare_dependency(sources: dbus_display1, dependencies: gio) dbus_ss.add(when: [gio, dbus_display1_dep], if_true: [files( 'dbus-chardev.c', @@ -106,7 +104,9 @@ if dbus_display endif if gtk.found() - system_ss.add(when: 'CONFIG_WIN32', if_true: files('win32-kbd-hook.c')) + if host_os == 'windows' + system_ss.add(files('win32-kbd-hook.c')) + endif gtk_ss = ss.source_set() gtk_ss.add(gtk, vte, pixman, files('gtk.c')) @@ -120,7 +120,9 @@ if gtk.found() endif if sdl.found() - system_ss.add(when: 'CONFIG_WIN32', if_true: files('win32-kbd-hook.c')) + if host_os == 'windows' + system_ss.add(files('win32-kbd-hook.c')) + endif sdl_ss = ss.source_set() sdl_ss.add(sdl, sdl_image, pixman, glib, files( diff --git a/ui/spice-core.c b/ui/spice-core.c index db21db2c942..15be640286b 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -42,7 +42,7 @@ /* core bits */ static SpiceServer *spice_server; -static Notifier migration_state; +static NotifierWithReturn migration_state; static const char *auth = "spice"; static char *auth_passwd; static time_t auth_expires = TIME_MAX; @@ -217,12 +217,12 @@ static void channel_event(int event, SpiceChannelEventInfo *info) * not do that. It isn't that easy to fix it in spice and even * when it is fixed we still should cover the already released * spice versions. So detect that we've been called from another - * thread and grab the iothread lock if so before calling qemu + * thread and grab the BQL if so before calling qemu * functions. */ bool need_lock = !qemu_thread_is_self(&me); if (need_lock) { - qemu_mutex_lock_iothread(); + bql_lock(); } if (info->flags & SPICE_CHANNEL_EVENT_FLAG_ADDR_EXT) { @@ -260,7 +260,7 @@ static void channel_event(int event, SpiceChannelEventInfo *info) } if (need_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } qapi_free_SpiceServerInfo(server); @@ -568,24 +568,23 @@ static SpiceInfo *qmp_query_spice_real(Error **errp) return info; } -static void migration_state_notifier(Notifier *notifier, void *data) +static int migration_state_notifier(NotifierWithReturn *notifier, + MigrationEvent *e, Error **errp) { - MigrationState *s = data; - if (!spice_have_target_host) { - return; + return 0; } - if (migration_in_setup(s)) { + if (e->type == MIG_EVENT_PRECOPY_SETUP) { spice_server_migrate_start(spice_server); - } else if (migration_has_finished(s) || - migration_in_postcopy_after_devices(s)) { + } else if (e->type == MIG_EVENT_PRECOPY_DONE) { spice_server_migrate_end(spice_server, true); spice_have_target_host = false; - } else if (migration_has_failed(s)) { + } else if (e->type == MIG_EVENT_PRECOPY_FAILED) { spice_server_migrate_end(spice_server, false); spice_have_target_host = false; } + return 0; } int qemu_spice_migrate_info(const char *hostname, int port, int tls_port, diff --git a/ui/trace-events b/ui/trace-events index 16c35c9fd6f..e6a28943036 100644 --- a/ui/trace-events +++ b/ui/trace-events @@ -161,6 +161,7 @@ dbus_clipboard_register(const char *bus_name) "peer %s" dbus_clipboard_unregister(const char *bus_name) "peer %s" dbus_scanout_texture(uint32_t tex_id, bool backing_y_0_top, uint32_t backing_width, uint32_t backing_height, uint32_t x, uint32_t y, uint32_t w, uint32_t h) "tex_id:%u y0top:%d back:%ux%u %u+%u-%ux%u" dbus_gl_gfx_switch(void *p) "surf: %p" +dbus_filter(unsigned int serial, unsigned int filter) "serial=%u (<= %u)" # 
egl-helpers.c egl_init_d3d11_device(void *p) "d3d device: %p" diff --git a/ui/vnc.c b/ui/vnc.c index 3b2c71e6537..b3fd78022b1 100644 --- a/ui/vnc.c +++ b/ui/vnc.c @@ -1872,12 +1872,16 @@ static void do_key_event(VncState *vs, int down, int keycode, int sym) /* QEMU console switch */ switch (qcode) { case Q_KEY_CODE_1 ... Q_KEY_CODE_9: /* '1' to '9' keys */ - if (vs->vd->dcl.con == NULL && down && + if (down && qkbd_state_modifier_get(vs->vd->kbd, QKBD_MOD_CTRL) && qkbd_state_modifier_get(vs->vd->kbd, QKBD_MOD_ALT)) { - /* Reset the modifiers sent to the current console */ - qkbd_state_lift_all_keys(vs->vd->kbd); - console_select(qcode - Q_KEY_CODE_1); + QemuConsole *con = qemu_console_lookup_by_index(qcode - Q_KEY_CODE_1); + if (con) { + unregister_displaychangelistener(&vs->vd->dcl); + qkbd_state_switch_console(vs->vd->kbd, con); + vs->vd->dcl.con = con; + register_displaychangelistener(&vs->vd->dcl); + } return; } default: @@ -1931,7 +1935,8 @@ static void do_key_event(VncState *vs, int down, int keycode, int sym) } qkbd_state_key_event(vs->vd->kbd, qcode, down); - if (!qemu_console_is_graphic(NULL)) { + if (!qemu_console_is_graphic(vs->vd->dcl.con)) { + QemuTextConsole *con = QEMU_TEXT_CONSOLE(vs->vd->dcl.con); bool numlock = qkbd_state_modifier_get(vs->vd->kbd, QKBD_MOD_NUMLOCK); bool control = qkbd_state_modifier_get(vs->vd->kbd, QKBD_MOD_CTRL); /* QEMU console emulation */ @@ -1945,88 +1950,88 @@ static void do_key_event(VncState *vs, int down, int keycode, int sym) case 0xb8: /* Right ALT */ break; case 0xc8: - qemu_text_console_put_keysym(NULL, QEMU_KEY_UP); + qemu_text_console_put_keysym(con, QEMU_KEY_UP); break; case 0xd0: - qemu_text_console_put_keysym(NULL, QEMU_KEY_DOWN); + qemu_text_console_put_keysym(con, QEMU_KEY_DOWN); break; case 0xcb: - qemu_text_console_put_keysym(NULL, QEMU_KEY_LEFT); + qemu_text_console_put_keysym(con, QEMU_KEY_LEFT); break; case 0xcd: - qemu_text_console_put_keysym(NULL, QEMU_KEY_RIGHT); + qemu_text_console_put_keysym(con, QEMU_KEY_RIGHT); break; case 0xd3: - qemu_text_console_put_keysym(NULL, QEMU_KEY_DELETE); + qemu_text_console_put_keysym(con, QEMU_KEY_DELETE); break; case 0xc7: - qemu_text_console_put_keysym(NULL, QEMU_KEY_HOME); + qemu_text_console_put_keysym(con, QEMU_KEY_HOME); break; case 0xcf: - qemu_text_console_put_keysym(NULL, QEMU_KEY_END); + qemu_text_console_put_keysym(con, QEMU_KEY_END); break; case 0xc9: - qemu_text_console_put_keysym(NULL, QEMU_KEY_PAGEUP); + qemu_text_console_put_keysym(con, QEMU_KEY_PAGEUP); break; case 0xd1: - qemu_text_console_put_keysym(NULL, QEMU_KEY_PAGEDOWN); + qemu_text_console_put_keysym(con, QEMU_KEY_PAGEDOWN); break; case 0x47: - qemu_text_console_put_keysym(NULL, numlock ? '7' : QEMU_KEY_HOME); + qemu_text_console_put_keysym(con, numlock ? '7' : QEMU_KEY_HOME); break; case 0x48: - qemu_text_console_put_keysym(NULL, numlock ? '8' : QEMU_KEY_UP); + qemu_text_console_put_keysym(con, numlock ? '8' : QEMU_KEY_UP); break; case 0x49: - qemu_text_console_put_keysym(NULL, numlock ? '9' : QEMU_KEY_PAGEUP); + qemu_text_console_put_keysym(con, numlock ? '9' : QEMU_KEY_PAGEUP); break; case 0x4b: - qemu_text_console_put_keysym(NULL, numlock ? '4' : QEMU_KEY_LEFT); + qemu_text_console_put_keysym(con, numlock ? '4' : QEMU_KEY_LEFT); break; case 0x4c: - qemu_text_console_put_keysym(NULL, '5'); + qemu_text_console_put_keysym(con, '5'); break; case 0x4d: - qemu_text_console_put_keysym(NULL, numlock ? '6' : QEMU_KEY_RIGHT); + qemu_text_console_put_keysym(con, numlock ? 
'6' : QEMU_KEY_RIGHT); break; case 0x4f: - qemu_text_console_put_keysym(NULL, numlock ? '1' : QEMU_KEY_END); + qemu_text_console_put_keysym(con, numlock ? '1' : QEMU_KEY_END); break; case 0x50: - qemu_text_console_put_keysym(NULL, numlock ? '2' : QEMU_KEY_DOWN); + qemu_text_console_put_keysym(con, numlock ? '2' : QEMU_KEY_DOWN); break; case 0x51: - qemu_text_console_put_keysym(NULL, numlock ? '3' : QEMU_KEY_PAGEDOWN); + qemu_text_console_put_keysym(con, numlock ? '3' : QEMU_KEY_PAGEDOWN); break; case 0x52: - qemu_text_console_put_keysym(NULL, '0'); + qemu_text_console_put_keysym(con, '0'); break; case 0x53: - qemu_text_console_put_keysym(NULL, numlock ? '.' : QEMU_KEY_DELETE); + qemu_text_console_put_keysym(con, numlock ? '.' : QEMU_KEY_DELETE); break; case 0xb5: - qemu_text_console_put_keysym(NULL, '/'); + qemu_text_console_put_keysym(con, '/'); break; case 0x37: - qemu_text_console_put_keysym(NULL, '*'); + qemu_text_console_put_keysym(con, '*'); break; case 0x4a: - qemu_text_console_put_keysym(NULL, '-'); + qemu_text_console_put_keysym(con, '-'); break; case 0x4e: - qemu_text_console_put_keysym(NULL, '+'); + qemu_text_console_put_keysym(con, '+'); break; case 0x9c: - qemu_text_console_put_keysym(NULL, '\n'); + qemu_text_console_put_keysym(con, '\n'); break; default: if (control) { - qemu_text_console_put_keysym(NULL, sym & 0x1f); + qemu_text_console_put_keysym(con, sym & 0x1f); } else { - qemu_text_console_put_keysym(NULL, sym); + qemu_text_console_put_keysym(con, sym); } break; } @@ -2044,7 +2049,7 @@ static void key_event(VncState *vs, int down, uint32_t sym) int keycode; int lsym = sym; - if (lsym >= 'A' && lsym <= 'Z' && qemu_console_is_graphic(NULL)) { + if (lsym >= 'A' && lsym <= 'Z' && qemu_console_is_graphic(vs->vd->dcl.con)) { lsym = lsym - 'A' + 'a'; } @@ -2144,16 +2149,16 @@ static void set_encodings(VncState *vs, int32_t *encodings, size_t n_encodings) vs->vnc_encoding = enc; break; case VNC_ENCODING_HEXTILE: - vs->features |= VNC_FEATURE_HEXTILE_MASK; + vnc_set_feature(vs, VNC_FEATURE_HEXTILE); vs->vnc_encoding = enc; break; case VNC_ENCODING_TIGHT: - vs->features |= VNC_FEATURE_TIGHT_MASK; + vnc_set_feature(vs, VNC_FEATURE_TIGHT); vs->vnc_encoding = enc; break; #ifdef CONFIG_PNG case VNC_ENCODING_TIGHT_PNG: - vs->features |= VNC_FEATURE_TIGHT_PNG_MASK; + vnc_set_feature(vs, VNC_FEATURE_TIGHT_PNG); vs->vnc_encoding = enc; break; #endif @@ -2163,57 +2168,57 @@ static void set_encodings(VncState *vs, int32_t *encodings, size_t n_encodings) * So prioritize ZRLE, even if the client hints that it prefers * ZLIB. 
*/ - if ((vs->features & VNC_FEATURE_ZRLE_MASK) == 0) { - vs->features |= VNC_FEATURE_ZLIB_MASK; + if (!vnc_has_feature(vs, VNC_FEATURE_ZRLE)) { + vnc_set_feature(vs, VNC_FEATURE_ZLIB); vs->vnc_encoding = enc; } break; case VNC_ENCODING_ZRLE: - vs->features |= VNC_FEATURE_ZRLE_MASK; + vnc_set_feature(vs, VNC_FEATURE_ZRLE); vs->vnc_encoding = enc; break; case VNC_ENCODING_ZYWRLE: - vs->features |= VNC_FEATURE_ZYWRLE_MASK; + vnc_set_feature(vs, VNC_FEATURE_ZYWRLE); vs->vnc_encoding = enc; break; case VNC_ENCODING_DESKTOPRESIZE: - vs->features |= VNC_FEATURE_RESIZE_MASK; + vnc_set_feature(vs, VNC_FEATURE_RESIZE); break; case VNC_ENCODING_DESKTOP_RESIZE_EXT: - vs->features |= VNC_FEATURE_RESIZE_EXT_MASK; + vnc_set_feature(vs, VNC_FEATURE_RESIZE_EXT); break; case VNC_ENCODING_POINTER_TYPE_CHANGE: - vs->features |= VNC_FEATURE_POINTER_TYPE_CHANGE_MASK; + vnc_set_feature(vs, VNC_FEATURE_POINTER_TYPE_CHANGE); break; case VNC_ENCODING_RICH_CURSOR: - vs->features |= VNC_FEATURE_RICH_CURSOR_MASK; + vnc_set_feature(vs, VNC_FEATURE_RICH_CURSOR); break; case VNC_ENCODING_ALPHA_CURSOR: - vs->features |= VNC_FEATURE_ALPHA_CURSOR_MASK; + vnc_set_feature(vs, VNC_FEATURE_ALPHA_CURSOR); break; case VNC_ENCODING_EXT_KEY_EVENT: send_ext_key_event_ack(vs); break; case VNC_ENCODING_AUDIO: if (vs->vd->audio_state) { - vs->features |= VNC_FEATURE_AUDIO_MASK; + vnc_set_feature(vs, VNC_FEATURE_AUDIO); send_ext_audio_ack(vs); } break; case VNC_ENCODING_WMVi: - vs->features |= VNC_FEATURE_WMVI_MASK; + vnc_set_feature(vs, VNC_FEATURE_WMVI); break; case VNC_ENCODING_LED_STATE: - vs->features |= VNC_FEATURE_LED_STATE_MASK; + vnc_set_feature(vs, VNC_FEATURE_LED_STATE); break; case VNC_ENCODING_XVP: if (vs->vd->power_control) { - vs->features |= VNC_FEATURE_XVP_MASK; + vnc_set_feature(vs, VNC_FEATURE_XVP); send_xvp_message(vs, VNC_XVP_CODE_INIT); } break; case VNC_ENCODING_CLIPBOARD_EXT: - vs->features |= VNC_FEATURE_CLIPBOARD_EXT_MASK; + vnc_set_feature(vs, VNC_FEATURE_CLIPBOARD_EXT); vnc_server_cut_text_caps(vs); break; case VNC_ENCODING_COMPRESSLEVEL0 ... 
VNC_ENCODING_COMPRESSLEVEL0 + 9: @@ -4205,7 +4210,7 @@ void vnc_display_open(const char *id, Error **errp) goto fail; } } else { - con = NULL; + con = qemu_console_lookup_default(); } if (con != vd->dcl.con) { diff --git a/ui/vnc.h b/ui/vnc.h index 96d19dce199..4521dc88f79 100644 --- a/ui/vnc.h +++ b/ui/vnc.h @@ -467,23 +467,6 @@ enum VncFeatures { VNC_FEATURE_AUDIO, }; -#define VNC_FEATURE_RESIZE_MASK (1 << VNC_FEATURE_RESIZE) -#define VNC_FEATURE_RESIZE_EXT_MASK (1 << VNC_FEATURE_RESIZE_EXT) -#define VNC_FEATURE_HEXTILE_MASK (1 << VNC_FEATURE_HEXTILE) -#define VNC_FEATURE_POINTER_TYPE_CHANGE_MASK (1 << VNC_FEATURE_POINTER_TYPE_CHANGE) -#define VNC_FEATURE_WMVI_MASK (1 << VNC_FEATURE_WMVI) -#define VNC_FEATURE_TIGHT_MASK (1 << VNC_FEATURE_TIGHT) -#define VNC_FEATURE_ZLIB_MASK (1 << VNC_FEATURE_ZLIB) -#define VNC_FEATURE_RICH_CURSOR_MASK (1 << VNC_FEATURE_RICH_CURSOR) -#define VNC_FEATURE_ALPHA_CURSOR_MASK (1 << VNC_FEATURE_ALPHA_CURSOR) -#define VNC_FEATURE_TIGHT_PNG_MASK (1 << VNC_FEATURE_TIGHT_PNG) -#define VNC_FEATURE_ZRLE_MASK (1 << VNC_FEATURE_ZRLE) -#define VNC_FEATURE_ZYWRLE_MASK (1 << VNC_FEATURE_ZYWRLE) -#define VNC_FEATURE_LED_STATE_MASK (1 << VNC_FEATURE_LED_STATE) -#define VNC_FEATURE_XVP_MASK (1 << VNC_FEATURE_XVP) -#define VNC_FEATURE_CLIPBOARD_EXT_MASK (1 << VNC_FEATURE_CLIPBOARD_EXT) -#define VNC_FEATURE_AUDIO_MASK (1 << VNC_FEATURE_AUDIO) - /* Client -> Server message IDs */ #define VNC_MSG_CLIENT_SET_PIXEL_FORMAT 0 @@ -599,6 +582,11 @@ static inline uint32_t vnc_has_feature(VncState *vs, int feature) { return (vs->features & (1 << feature)); } +static inline void vnc_set_feature(VncState *vs, enum VncFeatures feature) +{ + vs->features |= (1 << feature); +} + /* Framebuffer */ void vnc_framebuffer_update(VncState *vs, int x, int y, int w, int h, int32_t encoding); diff --git a/util/aio-posix.c b/util/aio-posix.c index 7f2c99729d4..266c9dd35fa 100644 --- a/util/aio-posix.c +++ b/util/aio-posix.c @@ -777,8 +777,7 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns, aio_notify(ctx); } -void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch, - Error **errp) +void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch) { /* * No thread synchronization here, it doesn't matter if an incorrect value diff --git a/util/aio-win32.c b/util/aio-win32.c index 948ef47a4d3..d144f9391fb 100644 --- a/util/aio-win32.c +++ b/util/aio-win32.c @@ -438,7 +438,6 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns, } } -void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch, - Error **errp) +void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch) { } diff --git a/util/async.c b/util/async.c index 8f90ddc3047..0467890052a 100644 --- a/util/async.c +++ b/util/async.c @@ -94,13 +94,15 @@ static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags) } aio_notify(ctx); - /* - * Workaround for record/replay. - * vCPU execution should be suspended when new BH is set. - * This is needed to avoid guest timeouts caused - * by the long cycles of the execution. - */ - icount_notify_exit(); + if (unlikely(icount_enabled())) { + /* + * Workaround for record/replay. + * vCPU execution should be suspended when new BH is set. + * This is needed to avoid guest timeouts caused + * by the long cycles of the execution. 
+ */ + icount_notify_exit(); + } } /* Only called from aio_bh_poll() and aio_ctx_finalize() */ @@ -562,12 +564,10 @@ static void co_schedule_bh_cb(void *opaque) Coroutine *co = QSLIST_FIRST(&straight); QSLIST_REMOVE_HEAD(&straight, co_scheduled_next); trace_aio_co_schedule_bh_cb(ctx, co); - aio_context_acquire(ctx); /* Protected by write barrier in qemu_aio_coroutine_enter */ qatomic_set(&co->scheduled, NULL); qemu_aio_coroutine_enter(ctx, co); - aio_context_release(ctx); } } @@ -707,9 +707,7 @@ void aio_co_enter(AioContext *ctx, Coroutine *co) assert(self != co); QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next); } else { - aio_context_acquire(ctx); qemu_aio_coroutine_enter(ctx, co); - aio_context_release(ctx); } } @@ -723,16 +721,6 @@ void aio_context_unref(AioContext *ctx) g_source_unref(&ctx->source); } -void aio_context_acquire(AioContext *ctx) -{ - qemu_rec_mutex_lock(&ctx->lock); -} - -void aio_context_release(AioContext *ctx) -{ - qemu_rec_mutex_unlock(&ctx->lock); -} - QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext) AioContext *qemu_get_current_aio_context(void) @@ -741,7 +729,7 @@ AioContext *qemu_get_current_aio_context(void) if (ctx) { return ctx; } - if (qemu_mutex_iothread_locked()) { + if (bql_locked()) { /* Possibly in a vCPU thread. */ return qemu_get_aio_context(); } diff --git a/util/chardev_open.c b/util/chardev_open.c new file mode 100644 index 00000000000..f7764297882 --- /dev/null +++ b/util/chardev_open.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2019, Mellanox Technologies. All rights reserved. + * Copyright (C) 2023 Intel Corporation. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Authors: Yi Liu + * + * Copied from + * https://github.com/linux-rdma/rdma-core/blob/master/util/open_cdev.c + * + */ + +#include "qemu/osdep.h" +#include "qemu/chardev_open.h" + +static int open_cdev_internal(const char *path, dev_t cdev) +{ + struct stat st; + int fd; + + fd = qemu_open_old(path, O_RDWR); + if (fd == -1) { + return -1; + } + if (fstat(fd, &st) || !S_ISCHR(st.st_mode) || + (cdev != 0 && st.st_rdev != cdev)) { + close(fd); + return -1; + } + return fd; +} + +static int open_cdev_robust(dev_t cdev) +{ + g_autofree char *devpath = NULL; + + /* + * This assumes that udev is being used and is creating the /dev/char/ + * symlinks. + */ + devpath = g_strdup_printf("/dev/char/%u:%u", major(cdev), minor(cdev)); + return open_cdev_internal(devpath, cdev); +} + +int open_cdev(const char *devpath, dev_t cdev) +{ + int fd; + + fd = open_cdev_internal(devpath, cdev); + if (fd == -1 && cdev != 0) { + return open_cdev_robust(cdev); + } + return fd; +} diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c index 16054c5ede3..b0d68bdc44d 100644 --- a/util/fdmon-io_uring.c +++ b/util/fdmon-io_uring.c @@ -180,7 +180,7 @@ static void add_poll_remove_sqe(AioContext *ctx, AioHandler *node) struct io_uring_sqe *sqe = get_sqe(ctx); #ifdef LIBURING_HAVE_DATA64 - io_uring_prep_poll_remove(sqe, (__u64)(uintptr_t)node); + io_uring_prep_poll_remove(sqe, (uintptr_t)node); #else io_uring_prep_poll_remove(sqe, node); #endif diff --git a/util/fifo8.c b/util/fifo8.c index d4d1c135e03..4e01b532d9d 100644 --- a/util/fifo8.c +++ b/util/fifo8.c @@ -66,19 +66,37 @@ uint8_t fifo8_pop(Fifo8 *fifo) return ret; } -const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num) +static const uint8_t *fifo8_peekpop_buf(Fifo8 *fifo, uint32_t max, + uint32_t *numptr, bool do_pop) { uint8_t *ret; + uint32_t num; assert(max > 0 && max <= fifo->num); - *num = MIN(fifo->capacity - fifo->head, max); + num = MIN(fifo->capacity - fifo->head, max); ret = &fifo->data[fifo->head]; - fifo->head += *num; - fifo->head %= fifo->capacity; - fifo->num -= *num; + + if (do_pop) { + fifo->head += num; + fifo->head %= fifo->capacity; + fifo->num -= num; + } + if (numptr) { + *numptr = num; + } return ret; } +const uint8_t *fifo8_peek_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr) +{ + return fifo8_peekpop_buf(fifo, max, numptr, false); +} + +const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr) +{ + return fifo8_peekpop_buf(fifo, max, numptr, true); +} + void fifo8_reset(Fifo8 *fifo) { fifo->num = 0; @@ -109,7 +127,7 @@ const VMStateDescription vmstate_fifo8 = { .name = "Fifo8", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VBUFFER_UINT32(data, Fifo8, 1, NULL, capacity), VMSTATE_UINT32(head, Fifo8), VMSTATE_UINT32(num, Fifo8), diff --git a/util/main-loop.c b/util/main-loop.c index 797b640c415..a0386cfeb60 100644 --- a/util/main-loop.c +++ b/util/main-loop.c @@ -192,10 +192,7 @@ static void main_loop_update_params(EventLoopBase *base, Error **errp) return; } - aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp); - if (*errp) { - return; - } + aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch); aio_context_set_thread_pool_params(qemu_aio_context, base->thread_pool_min, base->thread_pool_max, errp); @@ -302,13 +299,13 @@ static int os_host_main_loop_wait(int64_t timeout) glib_pollfds_fill(&timeout); - qemu_mutex_unlock_iothread(); + bql_unlock(); replay_mutex_unlock(); ret = qemu_poll_ns((GPollFD 
*)gpollfds->data, gpollfds->len, timeout); replay_mutex_lock(); - qemu_mutex_lock_iothread(); + bql_lock(); glib_pollfds_poll(); @@ -517,7 +514,7 @@ static int os_host_main_loop_wait(int64_t timeout) poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout); - qemu_mutex_unlock_iothread(); + bql_unlock(); replay_mutex_unlock(); @@ -525,7 +522,7 @@ static int os_host_main_loop_wait(int64_t timeout) replay_mutex_lock(); - qemu_mutex_lock_iothread(); + bql_lock(); if (g_poll_ret > 0) { for (i = 0; i < w->num; i++) { w->revents[i] = poll_fds[n_poll_fds + i].revents; diff --git a/util/meson.build b/util/meson.build index c2322ef6e71..0ef9886be04 100644 --- a/util/meson.build +++ b/util/meson.build @@ -3,28 +3,31 @@ util_ss.add(files('thread-context.c'), numa) if not config_host_data.get('CONFIG_ATOMIC64') util_ss.add(files('atomic64.c')) endif -util_ss.add(when: 'CONFIG_POSIX', if_true: files('aio-posix.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('fdmon-poll.c')) -if config_host_data.get('CONFIG_EPOLL_CREATE1') - util_ss.add(files('fdmon-epoll.c')) +if host_os != 'windows' + util_ss.add(files('aio-posix.c')) + util_ss.add(files('fdmon-poll.c')) + if config_host_data.get('CONFIG_EPOLL_CREATE1') + util_ss.add(files('fdmon-epoll.c')) + endif + util_ss.add(files('compatfd.c')) + util_ss.add(files('event_notifier-posix.c')) + util_ss.add(files('mmap-alloc.c')) + freebsd_dep = [] + if host_os == 'freebsd' + freebsd_dep = util + endif + util_ss.add(files('oslib-posix.c'), freebsd_dep) + util_ss.add(files('qemu-thread-posix.c')) + util_ss.add(files('memfd.c')) + util_ss.add(files('drm.c')) +else + util_ss.add(files('aio-win32.c')) + util_ss.add(files('event_notifier-win32.c')) + util_ss.add(files('oslib-win32.c')) + util_ss.add(files('qemu-thread-win32.c')) + util_ss.add(winmm, pathcch) endif util_ss.add(when: linux_io_uring, if_true: files('fdmon-io_uring.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('compatfd.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('event_notifier-posix.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('mmap-alloc.c')) -freebsd_dep = [] -if targetos == 'freebsd' - freebsd_dep = util -endif -util_ss.add(when: 'CONFIG_POSIX', if_true: [files('oslib-posix.c'), freebsd_dep]) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('qemu-thread-posix.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('memfd.c')) -util_ss.add(when: 'CONFIG_WIN32', if_true: files('aio-win32.c')) -util_ss.add(when: 'CONFIG_WIN32', if_true: files('event_notifier-win32.c')) -util_ss.add(when: 'CONFIG_WIN32', if_true: files('oslib-win32.c')) -util_ss.add(when: 'CONFIG_WIN32', if_true: files('qemu-thread-win32.c')) -util_ss.add(when: 'CONFIG_WIN32', if_true: winmm) -util_ss.add(when: 'CONFIG_WIN32', if_true: pathcch) if glib_has_gslice util_ss.add(files('qtree.c')) endif @@ -56,7 +59,6 @@ util_ss.add(files('reserved-region.c')) util_ss.add(files('stats64.c')) util_ss.add(files('systemd.c')) util_ss.add(files('transactions.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('drm.c')) util_ss.add(files('guest-random.c')) util_ss.add(files('yank.c')) util_ss.add(files('int128.c')) @@ -71,7 +73,9 @@ endif if have_system util_ss.add(files('crc-ccitt.c')) util_ss.add(when: gio, if_true: files('dbus.c')) - util_ss.add(when: 'CONFIG_LINUX', if_true: files('userfaultfd.c')) + if host_os == 'linux' + util_ss.add(files('userfaultfd.c')) + endif endif if have_block or have_ga @@ -92,9 +96,6 @@ if have_block util_ss.add(files('iova-tree.c')) util_ss.add(files('iov.c', 'uri.c')) 
util_ss.add(files('nvdimm-utils.c')) - util_ss.add(when: 'CONFIG_LINUX', if_true: [ - files('vhost-user-server.c'), vhost_user - ]) util_ss.add(files('block-helpers.c')) util_ss.add(files('qemu-coroutine-sleep.c')) util_ss.add(files('qemu-co-shared-resource.c')) @@ -103,11 +104,19 @@ if have_block util_ss.add(files('throttle.c')) util_ss.add(files('timed-average.c')) if config_host_data.get('CONFIG_INOTIFY1') - util_ss.add(files('filemonitor-inotify.c')) + freebsd_dep = [] + if host_os == 'freebsd' + freebsd_dep = inotify + endif + util_ss.add(files('filemonitor-inotify.c'), freebsd_dep) else util_ss.add(files('filemonitor-stub.c')) endif - util_ss.add(when: 'CONFIG_LINUX', if_true: files('vfio-helpers.c')) + if host_os == 'linux' + util_ss.add(files('vhost-user-server.c'), vhost_user) + util_ss.add(files('vfio-helpers.c')) + util_ss.add(files('chardev_open.c')) + endif endif if cpu == 'aarch64' diff --git a/util/notify.c b/util/notify.c index 76bab212ae9..c6e158ffb33 100644 --- a/util/notify.c +++ b/util/notify.c @@ -61,13 +61,14 @@ void notifier_with_return_remove(NotifierWithReturn *notifier) QLIST_REMOVE(notifier, node); } -int notifier_with_return_list_notify(NotifierWithReturnList *list, void *data) +int notifier_with_return_list_notify(NotifierWithReturnList *list, void *data, + Error **errp) { NotifierWithReturn *notifier, *next; int ret = 0; QLIST_FOREACH_SAFE(notifier, &list->notifiers, node, next) { - ret = notifier->notify(notifier, data); + ret = notifier->notify(notifier, data, errp); if (ret != 0) { break; } diff --git a/util/oslib-posix.c b/util/oslib-posix.c index e86fd64e099..e76441695bd 100644 --- a/util/oslib-posix.c +++ b/util/oslib-posix.c @@ -42,6 +42,7 @@ #include "qemu/cutils.h" #include "qemu/units.h" #include "qemu/thread-context.h" +#include "qemu/main-loop.h" #ifdef CONFIG_LINUX #include @@ -63,11 +64,15 @@ struct MemsetThread; +static QLIST_HEAD(, MemsetContext) memset_contexts = + QLIST_HEAD_INITIALIZER(memset_contexts); + typedef struct MemsetContext { bool all_threads_created; bool any_thread_failed; struct MemsetThread *threads; int num_threads; + QLIST_ENTRY(MemsetContext) next; } MemsetContext; struct MemsetThread { @@ -412,19 +417,44 @@ static inline int get_memset_num_threads(size_t hpagesize, size_t numpages, return ret; } +static int wait_and_free_mem_prealloc_context(MemsetContext *context) +{ + int i, ret = 0, tmp; + + for (i = 0; i < context->num_threads; i++) { + tmp = (uintptr_t)qemu_thread_join(&context->threads[i].pgthread); + + if (tmp) { + ret = tmp; + } + } + g_free(context->threads); + g_free(context); + return ret; +} + static int touch_all_pages(char *area, size_t hpagesize, size_t numpages, - int max_threads, ThreadContext *tc, + int max_threads, ThreadContext *tc, bool async, bool use_madv_populate_write) { static gsize initialized = 0; - MemsetContext context = { - .num_threads = get_memset_num_threads(hpagesize, numpages, max_threads), - }; + MemsetContext *context = g_malloc0(sizeof(MemsetContext)); size_t numpages_per_thread, leftover; void *(*touch_fn)(void *); - int ret = 0, i = 0; + int ret, i = 0; char *addr = area; + /* + * Asynchronous preallocation is only allowed when using MADV_POPULATE_WRITE + * and prealloc context for thread placement. 
+ */ + if (!use_madv_populate_write || !tc) { + async = false; + } + + context->num_threads = + get_memset_num_threads(hpagesize, numpages, max_threads); + if (g_once_init_enter(&initialized)) { qemu_mutex_init(&page_mutex); qemu_cond_init(&page_cond); @@ -432,63 +462,104 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages, } if (use_madv_populate_write) { - /* Avoid creating a single thread for MADV_POPULATE_WRITE */ - if (context.num_threads == 1) { + /* + * Avoid creating a single thread for MADV_POPULATE_WRITE when + * preallocating synchronously. + */ + if (context->num_threads == 1 && !async) { + ret = 0; if (qemu_madvise(area, hpagesize * numpages, QEMU_MADV_POPULATE_WRITE)) { - return -errno; + ret = -errno; } - return 0; + g_free(context); + return ret; } touch_fn = do_madv_populate_write_pages; } else { touch_fn = do_touch_pages; } - context.threads = g_new0(MemsetThread, context.num_threads); - numpages_per_thread = numpages / context.num_threads; - leftover = numpages % context.num_threads; - for (i = 0; i < context.num_threads; i++) { - context.threads[i].addr = addr; - context.threads[i].numpages = numpages_per_thread + (i < leftover); - context.threads[i].hpagesize = hpagesize; - context.threads[i].context = &context; + context->threads = g_new0(MemsetThread, context->num_threads); + numpages_per_thread = numpages / context->num_threads; + leftover = numpages % context->num_threads; + for (i = 0; i < context->num_threads; i++) { + context->threads[i].addr = addr; + context->threads[i].numpages = numpages_per_thread + (i < leftover); + context->threads[i].hpagesize = hpagesize; + context->threads[i].context = context; if (tc) { - thread_context_create_thread(tc, &context.threads[i].pgthread, + thread_context_create_thread(tc, &context->threads[i].pgthread, "touch_pages", - touch_fn, &context.threads[i], + touch_fn, &context->threads[i], QEMU_THREAD_JOINABLE); } else { - qemu_thread_create(&context.threads[i].pgthread, "touch_pages", - touch_fn, &context.threads[i], + qemu_thread_create(&context->threads[i].pgthread, "touch_pages", + touch_fn, &context->threads[i], QEMU_THREAD_JOINABLE); } - addr += context.threads[i].numpages * hpagesize; + addr += context->threads[i].numpages * hpagesize; + } + + if (async) { + /* + * async requests currently require the BQL. Add it to the list and kick + * preallocation off during qemu_finish_async_prealloc_mem(). + */ + assert(bql_locked()); + QLIST_INSERT_HEAD(&memset_contexts, context, next); + return 0; } if (!use_madv_populate_write) { - sigbus_memset_context = &context; + sigbus_memset_context = context; } qemu_mutex_lock(&page_mutex); - context.all_threads_created = true; + context->all_threads_created = true; qemu_cond_broadcast(&page_cond); qemu_mutex_unlock(&page_mutex); - for (i = 0; i < context.num_threads; i++) { - int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread); + ret = wait_and_free_mem_prealloc_context(context); + + if (!use_madv_populate_write) { + sigbus_memset_context = NULL; + } + return ret; +} + +bool qemu_finish_async_prealloc_mem(Error **errp) +{ + int ret = 0, tmp; + MemsetContext *context, *next_context; + + /* Waiting for preallocation requires the BQL. 
*/ + assert(bql_locked()); + if (QLIST_EMPTY(&memset_contexts)) { + return true; + } + qemu_mutex_lock(&page_mutex); + QLIST_FOREACH(context, &memset_contexts, next) { + context->all_threads_created = true; + } + qemu_cond_broadcast(&page_cond); + qemu_mutex_unlock(&page_mutex); + + QLIST_FOREACH_SAFE(context, &memset_contexts, next, next_context) { + QLIST_REMOVE(context, next); + tmp = wait_and_free_mem_prealloc_context(context); if (tmp) { ret = tmp; } } - if (!use_madv_populate_write) { - sigbus_memset_context = NULL; + if (ret) { + error_setg_errno(errp, -ret, + "qemu_prealloc_mem: preallocating memory failed"); + return false; } - g_free(context.threads); - - return ret; + return true; } static bool madv_populate_write_possible(char *area, size_t pagesize) @@ -497,8 +568,8 @@ static bool madv_populate_write_possible(char *area, size_t pagesize) errno != EINVAL; } -void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, - ThreadContext *tc, Error **errp) +bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, + ThreadContext *tc, bool async, Error **errp) { static gsize initialized; int ret; @@ -506,6 +577,7 @@ void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, size_t numpages = DIV_ROUND_UP(sz, hpagesize); bool use_madv_populate_write; struct sigaction act; + bool rv = true; /* * Sense on every invocation, as MADV_POPULATE_WRITE cannot be used for @@ -534,16 +606,17 @@ void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, qemu_mutex_unlock(&sigbus_mutex); error_setg_errno(errp, errno, "qemu_prealloc_mem: failed to install signal handler"); - return; + return false; } } /* touch pages simultaneously */ - ret = touch_all_pages(area, hpagesize, numpages, max_threads, tc, + ret = touch_all_pages(area, hpagesize, numpages, max_threads, tc, async, use_madv_populate_write); if (ret) { error_setg_errno(errp, -ret, "qemu_prealloc_mem: preallocating memory failed"); + rv = false; } if (!use_madv_populate_write) { @@ -555,6 +628,7 @@ void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, } qemu_mutex_unlock(&sigbus_mutex); } + return rv; } char *qemu_get_pid_name(pid_t pid) diff --git a/util/oslib-win32.c b/util/oslib-win32.c index 55b0189dc30..b623830d624 100644 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@ -264,8 +264,8 @@ int getpagesize(void) return system_info.dwPageSize; } -void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, - ThreadContext *tc, Error **errp) +bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, + ThreadContext *tc, bool async, Error **errp) { int i; size_t pagesize = qemu_real_host_page_size(); @@ -274,6 +274,14 @@ void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, for (i = 0; i < sz / pagesize; i++) { memset(area + pagesize * i, 0, 1); } + + return true; +} + +bool qemu_finish_async_prealloc_mem(Error **errp) +{ + /* async prealloc not supported, there is nothing to finish */ + return true; } char *qemu_get_pid_name(pid_t pid) diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c index 5fd2dbaf8bb..eb4eebefdfb 100644 --- a/util/qemu-coroutine.c +++ b/util/qemu-coroutine.c @@ -18,39 +18,200 @@ #include "qemu/atomic.h" #include "qemu/coroutine_int.h" #include "qemu/coroutine-tls.h" +#include "qemu/cutils.h" #include "block/aio.h" -/** - * The minimal batch size is always 64, coroutines from the release_pool are - * reused as soon as there are 64 coroutines in it. 
The maximum pool size starts - * with 64 and is increased on demand so that coroutines are not deleted even if - * they are not immediately reused. - */ enum { - POOL_MIN_BATCH_SIZE = 64, - POOL_INITIAL_MAX_SIZE = 64, + COROUTINE_POOL_BATCH_MAX_SIZE = 128, }; -/** Free list to speed up creation */ -static QSLIST_HEAD(, Coroutine) release_pool = QSLIST_HEAD_INITIALIZER(pool); -static unsigned int pool_max_size = POOL_INITIAL_MAX_SIZE; -static unsigned int release_pool_size; +/* + * Coroutine creation and deletion is expensive so a pool of unused coroutines + * is kept as a cache. When the pool has coroutines available, they are + * recycled instead of creating new ones from scratch. Coroutines are added to + * the pool upon termination. + * + * The pool is global but each thread maintains a small local pool to avoid + * global pool contention. Threads fetch and return batches of coroutines from + * the global pool to maintain their local pool. The local pool holds up to two + * batches whereas the maximum size of the global pool is controlled by the + * qemu_coroutine_inc_pool_size() API. + * + * .-----------------------------------. + * | Batch 1 | Batch 2 | Batch 3 | ... | global_pool + * `-----------------------------------' + * + * .-------------------. + * | Batch 1 | Batch 2 | per-thread local_pool (maximum 2 batches) + * `-------------------' + */ +typedef struct CoroutinePoolBatch { + /* Batches are kept in a list */ + QSLIST_ENTRY(CoroutinePoolBatch) next; + + /* This batch holds up to @COROUTINE_POOL_BATCH_MAX_SIZE coroutines */ + QSLIST_HEAD(, Coroutine) list; + unsigned int size; +} CoroutinePoolBatch; + +typedef QSLIST_HEAD(, CoroutinePoolBatch) CoroutinePool; + +/* Host operating system limit on number of pooled coroutines */ +static unsigned int global_pool_hard_max_size; -typedef QSLIST_HEAD(, Coroutine) CoroutineQSList; -QEMU_DEFINE_STATIC_CO_TLS(CoroutineQSList, alloc_pool); -QEMU_DEFINE_STATIC_CO_TLS(unsigned int, alloc_pool_size); -QEMU_DEFINE_STATIC_CO_TLS(Notifier, coroutine_pool_cleanup_notifier); +static QemuMutex global_pool_lock; /* protects the following variables */ +static CoroutinePool global_pool = QSLIST_HEAD_INITIALIZER(global_pool); +static unsigned int global_pool_size; +static unsigned int global_pool_max_size = COROUTINE_POOL_BATCH_MAX_SIZE; -static void coroutine_pool_cleanup(Notifier *n, void *value) +QEMU_DEFINE_STATIC_CO_TLS(CoroutinePool, local_pool); +QEMU_DEFINE_STATIC_CO_TLS(Notifier, local_pool_cleanup_notifier); + +static CoroutinePoolBatch *coroutine_pool_batch_new(void) +{ + CoroutinePoolBatch *batch = g_new(CoroutinePoolBatch, 1); + + QSLIST_INIT(&batch->list); + batch->size = 0; + return batch; +} + +static void coroutine_pool_batch_delete(CoroutinePoolBatch *batch) { Coroutine *co; Coroutine *tmp; - CoroutineQSList *alloc_pool = get_ptr_alloc_pool(); - QSLIST_FOREACH_SAFE(co, alloc_pool, pool_next, tmp) { - QSLIST_REMOVE_HEAD(alloc_pool, pool_next); + QSLIST_FOREACH_SAFE(co, &batch->list, pool_next, tmp) { + QSLIST_REMOVE_HEAD(&batch->list, pool_next); qemu_coroutine_delete(co); } + g_free(batch); +} + +static void local_pool_cleanup(Notifier *n, void *value) +{ + CoroutinePool *local_pool = get_ptr_local_pool(); + CoroutinePoolBatch *batch; + CoroutinePoolBatch *tmp; + + QSLIST_FOREACH_SAFE(batch, local_pool, next, tmp) { + QSLIST_REMOVE_HEAD(local_pool, next); + coroutine_pool_batch_delete(batch); + } +} + +/* Ensure the atexit notifier is registered */ +static void local_pool_cleanup_init_once(void) +{ + Notifier *notifier = 
get_ptr_local_pool_cleanup_notifier(); + if (!notifier->notify) { + notifier->notify = local_pool_cleanup; + qemu_thread_atexit_add(notifier); + } +} + +/* Helper to get the next unused coroutine from the local pool */ +static Coroutine *coroutine_pool_get_local(void) +{ + CoroutinePool *local_pool = get_ptr_local_pool(); + CoroutinePoolBatch *batch = QSLIST_FIRST(local_pool); + Coroutine *co; + + if (unlikely(!batch)) { + return NULL; + } + + co = QSLIST_FIRST(&batch->list); + QSLIST_REMOVE_HEAD(&batch->list, pool_next); + batch->size--; + + if (batch->size == 0) { + QSLIST_REMOVE_HEAD(local_pool, next); + coroutine_pool_batch_delete(batch); + } + return co; +} + +/* Get the next batch from the global pool */ +static void coroutine_pool_refill_local(void) +{ + CoroutinePool *local_pool = get_ptr_local_pool(); + CoroutinePoolBatch *batch; + + WITH_QEMU_LOCK_GUARD(&global_pool_lock) { + batch = QSLIST_FIRST(&global_pool); + + if (batch) { + QSLIST_REMOVE_HEAD(&global_pool, next); + global_pool_size -= batch->size; + } + } + + if (batch) { + QSLIST_INSERT_HEAD(local_pool, batch, next); + local_pool_cleanup_init_once(); + } +} + +/* Add a batch of coroutines to the global pool */ +static void coroutine_pool_put_global(CoroutinePoolBatch *batch) +{ + WITH_QEMU_LOCK_GUARD(&global_pool_lock) { + unsigned int max = MIN(global_pool_max_size, + global_pool_hard_max_size); + + if (global_pool_size < max) { + QSLIST_INSERT_HEAD(&global_pool, batch, next); + + /* Overshooting the max pool size is allowed */ + global_pool_size += batch->size; + return; + } + } + + /* The global pool was full, so throw away this batch */ + coroutine_pool_batch_delete(batch); +} + +/* Get the next unused coroutine from the pool or return NULL */ +static Coroutine *coroutine_pool_get(void) +{ + Coroutine *co; + + co = coroutine_pool_get_local(); + if (!co) { + coroutine_pool_refill_local(); + co = coroutine_pool_get_local(); + } + return co; +} + +static void coroutine_pool_put(Coroutine *co) +{ + CoroutinePool *local_pool = get_ptr_local_pool(); + CoroutinePoolBatch *batch = QSLIST_FIRST(local_pool); + + if (unlikely(!batch)) { + batch = coroutine_pool_batch_new(); + QSLIST_INSERT_HEAD(local_pool, batch, next); + local_pool_cleanup_init_once(); + } + + if (unlikely(batch->size >= COROUTINE_POOL_BATCH_MAX_SIZE)) { + CoroutinePoolBatch *next = QSLIST_NEXT(batch, next); + + /* Is the local pool full? */ + if (next) { + QSLIST_REMOVE_HEAD(local_pool, next); + coroutine_pool_put_global(batch); + } + + batch = coroutine_pool_batch_new(); + QSLIST_INSERT_HEAD(local_pool, batch, next); + } + + QSLIST_INSERT_HEAD(&batch->list, co, pool_next); + batch->size++; } Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque) @@ -58,31 +219,7 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque) Coroutine *co = NULL; if (IS_ENABLED(CONFIG_COROUTINE_POOL)) { - CoroutineQSList *alloc_pool = get_ptr_alloc_pool(); - - co = QSLIST_FIRST(alloc_pool); - if (!co) { - if (release_pool_size > POOL_MIN_BATCH_SIZE) { - /* Slow path; a good place to register the destructor, too. */ - Notifier *notifier = get_ptr_coroutine_pool_cleanup_notifier(); - if (!notifier->notify) { - notifier->notify = coroutine_pool_cleanup; - qemu_thread_atexit_add(notifier); - } - - /* This is not exact; there could be a little skew between - * release_pool_size and the actual size of release_pool. But - * it is just a heuristic, it does not need to be perfect. 
- */ - set_alloc_pool_size(qatomic_xchg(&release_pool_size, 0)); - QSLIST_MOVE_ATOMIC(alloc_pool, &release_pool); - co = QSLIST_FIRST(alloc_pool); - } - } - if (co) { - QSLIST_REMOVE_HEAD(alloc_pool, pool_next); - set_alloc_pool_size(get_alloc_pool_size() - 1); - } + co = coroutine_pool_get(); } if (!co) { @@ -100,19 +237,10 @@ static void coroutine_delete(Coroutine *co) co->caller = NULL; if (IS_ENABLED(CONFIG_COROUTINE_POOL)) { - if (release_pool_size < qatomic_read(&pool_max_size) * 2) { - QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next); - qatomic_inc(&release_pool_size); - return; - } - if (get_alloc_pool_size() < qatomic_read(&pool_max_size)) { - QSLIST_INSERT_HEAD(get_ptr_alloc_pool(), co, pool_next); - set_alloc_pool_size(get_alloc_pool_size() + 1); - return; - } + coroutine_pool_put(co); + } else { + qemu_coroutine_delete(co); } - - qemu_coroutine_delete(co); } void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co) @@ -223,10 +351,51 @@ AioContext *qemu_coroutine_get_aio_context(Coroutine *co) void qemu_coroutine_inc_pool_size(unsigned int additional_pool_size) { - qatomic_add(&pool_max_size, additional_pool_size); + QEMU_LOCK_GUARD(&global_pool_lock); + global_pool_max_size += additional_pool_size; } void qemu_coroutine_dec_pool_size(unsigned int removing_pool_size) { - qatomic_sub(&pool_max_size, removing_pool_size); + QEMU_LOCK_GUARD(&global_pool_lock); + global_pool_max_size -= removing_pool_size; +} + +static unsigned int get_global_pool_hard_max_size(void) +{ +#ifdef __linux__ + g_autofree char *contents = NULL; + int max_map_count; + + /* + * Linux processes can have up to max_map_count virtual memory areas + * (VMAs). mmap(2), mprotect(2), etc fail with ENOMEM beyond this limit. We + * must limit the coroutine pool to a safe size to avoid running out of + * VMAs. + */ + if (g_file_get_contents("/proc/sys/vm/max_map_count", &contents, NULL, + NULL) && + qemu_strtoi(contents, NULL, 10, &max_map_count) == 0) { + /* + * This is an upper bound that avoids exceeding max_map_count. Leave a + * fixed amount for non-coroutine users like library dependencies, + * vhost-user, etc. Each coroutine takes up 2 VMAs so halve the + * remaining amount. 
+ */ + if (max_map_count > 5000) { + return (max_map_count - 5000) / 2; + } else { + /* Disable the global pool but threads still have local pools */ + return 0; + } + } +#endif + + return UINT_MAX; +} + +static void __attribute__((constructor)) qemu_coroutine_init(void) +{ + qemu_mutex_init(&global_pool_lock); + global_pool_hard_max_size = get_global_pool_hard_max_size(); } diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c index 83e84b11869..60c44b2b56b 100644 --- a/util/qemu-sockets.c +++ b/util/qemu-sockets.c @@ -1464,7 +1464,8 @@ SocketAddress *socket_address_flatten(SocketAddressLegacy *addr_legacy) break; case SOCKET_ADDRESS_TYPE_FD: addr->type = SOCKET_ADDRESS_TYPE_FD; - QAPI_CLONE_MEMBERS(String, &addr->u.fd, addr_legacy->u.fd.data); + QAPI_CLONE_MEMBERS(FdSocketAddress, &addr->u.fd, + addr_legacy->u.fd.data); break; default: abort(); diff --git a/util/qsp.c b/util/qsp.c index 2fe3764906c..6b783e2e7f8 100644 --- a/util/qsp.c +++ b/util/qsp.c @@ -124,7 +124,7 @@ static const char * const qsp_typenames[] = { [QSP_CONDVAR] = "condvar", }; -QemuMutexLockFunc qemu_bql_mutex_lock_func = qemu_mutex_lock_impl; +QemuMutexLockFunc bql_mutex_lock_func = qemu_mutex_lock_impl; QemuMutexLockFunc qemu_mutex_lock_func = qemu_mutex_lock_impl; QemuMutexTrylockFunc qemu_mutex_trylock_func = qemu_mutex_trylock_impl; QemuRecMutexLockFunc qemu_rec_mutex_lock_func = qemu_rec_mutex_lock_impl; @@ -439,7 +439,7 @@ void qsp_enable(void) { qatomic_set(&qemu_mutex_lock_func, qsp_mutex_lock); qatomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock); - qatomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock); + qatomic_set(&bql_mutex_lock_func, qsp_bql_mutex_lock); qatomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock); qatomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock); qatomic_set(&qemu_cond_wait_func, qsp_cond_wait); @@ -450,7 +450,7 @@ void qsp_disable(void) { qatomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl); qatomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl); - qatomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl); + qatomic_set(&bql_mutex_lock_func, qemu_mutex_lock_impl); qatomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl); qatomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl); qatomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl); diff --git a/util/rcu.c b/util/rcu.c index e587bcc4831..fa32c942e4b 100644 --- a/util/rcu.c +++ b/util/rcu.c @@ -283,24 +283,24 @@ static void *call_rcu_thread(void *opaque) qatomic_sub(&rcu_call_count, n); synchronize_rcu(); - qemu_mutex_lock_iothread(); + bql_lock(); while (n > 0) { node = try_dequeue(); while (!node) { - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_event_reset(&rcu_call_ready_event); node = try_dequeue(); if (!node) { qemu_event_wait(&rcu_call_ready_event); node = try_dequeue(); } - qemu_mutex_lock_iothread(); + bql_lock(); } n--; node->func(node); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } abort(); } @@ -337,13 +337,13 @@ static void drain_rcu_callback(struct rcu_head *node) void drain_call_rcu(void) { struct rcu_drain rcu_drain; - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); memset(&rcu_drain, 0, sizeof(struct rcu_drain)); qemu_event_init(&rcu_drain.drain_complete_event, false); if (locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } @@ -365,7 +365,7 @@ void drain_call_rcu(void) qatomic_dec(&in_drain_call_rcu); if (locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } } @@ -409,7 +409,7 @@ static void 
rcu_init_complete(void) qemu_event_init(&rcu_call_ready_event, false); - /* The caller is assumed to have iothread lock, so the call_rcu thread + /* The caller is assumed to have BQL, so the call_rcu thread * must have been quiescent even after forking, just recreate it. */ qemu_thread_create(&thread, "call_rcu", call_rcu_thread, diff --git a/util/uri.c b/util/uri.c index dcb33052364..573174bf475 100644 --- a/util/uri.c +++ b/util/uri.c @@ -163,19 +163,6 @@ static void uri_clean(URI *uri); ((*(p) == '+')) || ((*(p) == ',')) || ((*(p) == ';')) || \ ((*(p) == '=')) || ((*(p) == '\''))) -/* - * gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" - */ -#define ISA_GEN_DELIM(p) \ - (((*(p) == ':')) || ((*(p) == '/')) || ((*(p) == '?')) || \ - ((*(p) == '#')) || ((*(p) == '[')) || ((*(p) == ']')) || \ - ((*(p) == '@'))) - -/* - * reserved = gen-delims / sub-delims - */ -#define ISA_RESERVED(p) (ISA_GEN_DELIM(p) || (ISA_SUB_DELIM(p))) - /* * unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" */ @@ -267,7 +254,7 @@ static int rfc3986_parse_fragment(URI *uri, const char **str) if (uri->cleanup & 2) { uri->fragment = g_strndup(*str, cur - *str); } else { - uri->fragment = uri_string_unescape(*str, cur - *str, NULL); + uri->fragment = g_uri_unescape_segment(*str, cur, NULL); } } *str = cur; @@ -368,7 +355,7 @@ static int rfc3986_parse_user_info(URI *uri, const char **str) if (uri->cleanup & 2) { uri->user = g_strndup(*str, cur - *str); } else { - uri->user = uri_string_unescape(*str, cur - *str, NULL); + uri->user = g_uri_unescape_segment(*str, cur, NULL); } } *str = cur; @@ -496,7 +483,7 @@ static int rfc3986_parse_host(URI *uri, const char **str) if (uri->cleanup & 2) { uri->server = g_strndup(host, cur - host); } else { - uri->server = uri_string_unescape(host, cur - host, NULL); + uri->server = g_uri_unescape_segment(host, cur, NULL); } } else { uri->server = NULL; @@ -614,7 +601,7 @@ static int rfc3986_parse_path_ab_empty(URI *uri, const char **str) if (uri->cleanup & 2) { uri->path = g_strndup(*str, cur - *str); } else { - uri->path = uri_string_unescape(*str, cur - *str, NULL); + uri->path = g_uri_unescape_segment(*str, cur, NULL); } } else { uri->path = NULL; @@ -663,7 +650,7 @@ static int rfc3986_parse_path_absolute(URI *uri, const char **str) if (uri->cleanup & 2) { uri->path = g_strndup(*str, cur - *str); } else { - uri->path = uri_string_unescape(*str, cur - *str, NULL); + uri->path = g_uri_unescape_segment(*str, cur, NULL); } } else { uri->path = NULL; @@ -709,7 +696,7 @@ static int rfc3986_parse_path_rootless(URI *uri, const char **str) if (uri->cleanup & 2) { uri->path = g_strndup(*str, cur - *str); } else { - uri->path = uri_string_unescape(*str, cur - *str, NULL); + uri->path = g_uri_unescape_segment(*str, cur, NULL); } } else { uri->path = NULL; @@ -755,7 +742,7 @@ static int rfc3986_parse_path_no_scheme(URI *uri, const char **str) if (uri->cleanup & 2) { uri->path = g_strndup(*str, cur - *str); } else { - uri->path = uri_string_unescape(*str, cur - *str, NULL); + uri->path = g_uri_unescape_segment(*str, cur, NULL); } } else { uri->path = NULL; @@ -1349,846 +1336,12 @@ void uri_free(URI *uri) g_free(uri); } -/************************************************************************ - * * - * Helper functions * - * * - ************************************************************************/ - -/** - * normalize_uri_path: - * @path: pointer to the path string - * - * Applies the 5 normalization steps to a path string--that is, RFC 2396 - * Section 5.2, steps 6.c through 6.g. 
- * - * Normalization occurs directly on the string, no new allocation is done - * - * Returns 0 or an error code - */ -static int normalize_uri_path(char *path) -{ - char *cur, *out; - - if (path == NULL) { - return -1; - } - - /* Skip all initial "/" chars. We want to get to the beginning of the - * first non-empty segment. - */ - cur = path; - while (cur[0] == '/') { - ++cur; - } - if (cur[0] == '\0') { - return 0; - } - - /* Keep everything we've seen so far. */ - out = cur; - - /* - * Analyze each segment in sequence for cases (c) and (d). - */ - while (cur[0] != '\0') { - /* - * c) All occurrences of "./", where "." is a complete path segment, - * are removed from the buffer string. - */ - if ((cur[0] == '.') && (cur[1] == '/')) { - cur += 2; - /* '//' normalization should be done at this point too */ - while (cur[0] == '/') { - cur++; - } - continue; - } - - /* - * d) If the buffer string ends with "." as a complete path segment, - * that "." is removed. - */ - if ((cur[0] == '.') && (cur[1] == '\0')) { - break; - } - - /* Otherwise keep the segment. */ - while (cur[0] != '/') { - if (cur[0] == '\0') { - goto done_cd; - } - (out++)[0] = (cur++)[0]; - } - /* nomalize // */ - while ((cur[0] == '/') && (cur[1] == '/')) { - cur++; - } - - (out++)[0] = (cur++)[0]; - } -done_cd: - out[0] = '\0'; - - /* Reset to the beginning of the first segment for the next sequence. */ - cur = path; - while (cur[0] == '/') { - ++cur; - } - if (cur[0] == '\0') { - return 0; - } - - /* - * Analyze each segment in sequence for cases (e) and (f). - * - * e) All occurrences of "/../", where is a - * complete path segment not equal to "..", are removed from the - * buffer string. Removal of these path segments is performed - * iteratively, removing the leftmost matching pattern on each - * iteration, until no matching pattern remains. - * - * f) If the buffer string ends with "/..", where - * is a complete path segment not equal to "..", that - * "/.." is removed. - * - * To satisfy the "iterative" clause in (e), we need to collapse the - * string every time we find something that needs to be removed. Thus, - * we don't need to keep two pointers into the string: we only need a - * "current position" pointer. - */ - while (1) { - char *segp, *tmp; - - /* At the beginning of each iteration of this loop, "cur" points to - * the first character of the segment we want to examine. - */ - - /* Find the end of the current segment. */ - segp = cur; - while ((segp[0] != '/') && (segp[0] != '\0')) { - ++segp; - } - - /* If this is the last segment, we're done (we need at least two - * segments to meet the criteria for the (e) and (f) cases). - */ - if (segp[0] == '\0') { - break; - } - - /* If the first segment is "..", or if the next segment _isn't_ "..", - * keep this segment and try the next one. - */ - ++segp; - if (((cur[0] == '.') && (cur[1] == '.') && (segp == cur + 3)) || - ((segp[0] != '.') || (segp[1] != '.') || - ((segp[2] != '/') && (segp[2] != '\0')))) { - cur = segp; - continue; - } - - /* If we get here, remove this segment and the next one and back up - * to the previous segment (if there is one), to implement the - * "iteratively" clause. It's pretty much impossible to back up - * while maintaining two pointers into the buffer, so just compact - * the whole buffer now. - */ - - /* If this is the end of the buffer, we're done. 
*/ - if (segp[2] == '\0') { - cur[0] = '\0'; - break; - } - /* Valgrind complained, strcpy(cur, segp + 3); */ - /* string will overlap, do not use strcpy */ - tmp = cur; - segp += 3; - while ((*tmp++ = *segp++) != 0) { - /* No further work */ - } - - /* If there are no previous segments, then keep going from here. */ - segp = cur; - while ((segp > path) && ((--segp)[0] == '/')) { - /* No further work */ - } - if (segp == path) { - continue; - } - - /* "segp" is pointing to the end of a previous segment; find it's - * start. We need to back up to the previous segment and start - * over with that to handle things like "foo/bar/../..". If we - * don't do this, then on the first pass we'll remove the "bar/..", - * but be pointing at the second ".." so we won't realize we can also - * remove the "foo/..". - */ - cur = segp; - while ((cur > path) && (cur[-1] != '/')) { - --cur; - } - } - out[0] = '\0'; - - /* - * g) If the resulting buffer string still begins with one or more - * complete path segments of "..", then the reference is - * considered to be in error. Implementations may handle this - * error by retaining these components in the resolved path (i.e., - * treating them as part of the final URI), by removing them from - * the resolved path (i.e., discarding relative levels above the - * root), or by avoiding traversal of the reference. - * - * We discard them from the final path. - */ - if (path[0] == '/') { - cur = path; - while ((cur[0] == '/') && (cur[1] == '.') && (cur[2] == '.') && - ((cur[3] == '/') || (cur[3] == '\0'))) { - cur += 3; - } - - if (cur != path) { - out = path; - while (cur[0] != '\0') { - (out++)[0] = (cur++)[0]; - } - out[0] = 0; - } - } - - return 0; -} - -static int is_hex(char c) -{ - if (((c >= '0') && (c <= '9')) || ((c >= 'a') && (c <= 'f')) || - ((c >= 'A') && (c <= 'F'))) { - return 1; - } - return 0; -} - -/** - * uri_string_unescape: - * @str: the string to unescape - * @len: the length in bytes to unescape (or <= 0 to indicate full string) - * @target: optional destination buffer - * - * Unescaping routine, but does not check that the string is an URI. The - * output is a direct unsigned char translation of %XX values (no encoding) - * Note that the length of the result can only be smaller or same size as - * the input string. 
- * - * Returns a copy of the string, but unescaped, will return NULL only in case - * of error - */ -char *uri_string_unescape(const char *str, int len, char *target) -{ - char *ret, *out; - const char *in; - - if (str == NULL) { - return NULL; - } - if (len <= 0) { - len = strlen(str); - } - if (len < 0) { - return NULL; - } - - if (target == NULL) { - ret = g_malloc(len + 1); - } else { - ret = target; - } - in = str; - out = ret; - while (len > 0) { - if ((len > 2) && (*in == '%') && (is_hex(in[1])) && (is_hex(in[2]))) { - in++; - if ((*in >= '0') && (*in <= '9')) { - *out = (*in - '0'); - } else if ((*in >= 'a') && (*in <= 'f')) { - *out = (*in - 'a') + 10; - } else if ((*in >= 'A') && (*in <= 'F')) { - *out = (*in - 'A') + 10; - } - in++; - if ((*in >= '0') && (*in <= '9')) { - *out = *out * 16 + (*in - '0'); - } else if ((*in >= 'a') && (*in <= 'f')) { - *out = *out * 16 + (*in - 'a') + 10; - } else if ((*in >= 'A') && (*in <= 'F')) { - *out = *out * 16 + (*in - 'A') + 10; - } - in++; - len -= 3; - out++; - } else { - *out++ = *in++; - len--; - } - } - *out = 0; - return ret; -} - -/** - * uri_string_escape: - * @str: string to escape - * @list: exception list string of chars not to escape - * - * This routine escapes a string to hex, ignoring reserved characters (a-z) - * and the characters in the exception list. - * - * Returns a new escaped string or NULL in case of error. - */ -char *uri_string_escape(const char *str, const char *list) -{ - char *ret, ch; - char *temp; - const char *in; - int len, out; - - if (str == NULL) { - return NULL; - } - if (str[0] == 0) { - return g_strdup(str); - } - len = strlen(str); - if (!(len > 0)) { - return NULL; - } - - len += 20; - ret = g_malloc(len); - in = str; - out = 0; - while (*in != 0) { - if (len - out <= 3) { - temp = realloc2n(ret, &len); - ret = temp; - } - - ch = *in; - - if ((ch != '@') && (!IS_UNRESERVED(ch)) && (!strchr(list, ch))) { - unsigned char val; - ret[out++] = '%'; - val = ch >> 4; - if (val <= 9) { - ret[out++] = '0' + val; - } else { - ret[out++] = 'A' + val - 0xA; - } - val = ch & 0xF; - if (val <= 9) { - ret[out++] = '0' + val; - } else { - ret[out++] = 'A' + val - 0xA; - } - in++; - } else { - ret[out++] = *in++; - } - } - ret[out] = 0; - return ret; -} - /************************************************************************ * * * Public functions * * * ************************************************************************/ -/** - * uri_resolve: - * @URI: the URI instance found in the document - * @base: the base value - * - * Computes he final URI of the reference done by checking that - * the given URI is valid, and building the final URI using the - * base URI. This is processed according to section 5.2 of the - * RFC 2396 - * - * 5.2. Resolving Relative References to Absolute Form - * - * Returns a new URI string (to be freed by the caller) or NULL in case - * of error. - */ -char *uri_resolve(const char *uri, const char *base) -{ - char *val = NULL; - int ret, len, indx, cur, out; - URI *ref = NULL; - URI *bas = NULL; - URI *res = NULL; - - /* - * 1) The URI reference is parsed into the potential four components and - * fragment identifier, as described in Section 4.3. - * - * NOTE that a completely empty URI is treated by modern browsers - * as a reference to "." rather than as a synonym for the current - * URI. Should we do that here? 
- */ - if (uri == NULL) { - ret = -1; - } else { - if (*uri) { - ref = uri_new(); - ret = uri_parse_into(ref, uri); - } else { - ret = 0; - } - } - if (ret != 0) { - goto done; - } - if ((ref != NULL) && (ref->scheme != NULL)) { - /* - * The URI is absolute don't modify. - */ - val = g_strdup(uri); - goto done; - } - if (base == NULL) { - ret = -1; - } else { - bas = uri_new(); - ret = uri_parse_into(bas, base); - } - if (ret != 0) { - if (ref) { - val = uri_to_string(ref); - } - goto done; - } - if (ref == NULL) { - /* - * the base fragment must be ignored - */ - g_free(bas->fragment); - bas->fragment = NULL; - val = uri_to_string(bas); - goto done; - } - - /* - * 2) If the path component is empty and the scheme, authority, and - * query components are undefined, then it is a reference to the - * current document and we are done. Otherwise, the reference URI's - * query and fragment components are defined as found (or not found) - * within the URI reference and not inherited from the base URI. - * - * NOTE that in modern browsers, the parsing differs from the above - * in the following aspect: the query component is allowed to be - * defined while still treating this as a reference to the current - * document. - */ - res = uri_new(); - if ((ref->scheme == NULL) && (ref->path == NULL) && - ((ref->authority == NULL) && (ref->server == NULL))) { - res->scheme = g_strdup(bas->scheme); - if (bas->authority != NULL) { - res->authority = g_strdup(bas->authority); - } else if (bas->server != NULL) { - res->server = g_strdup(bas->server); - res->user = g_strdup(bas->user); - res->port = bas->port; - } - res->path = g_strdup(bas->path); - if (ref->query != NULL) { - res->query = g_strdup(ref->query); - } else { - res->query = g_strdup(bas->query); - } - res->fragment = g_strdup(ref->fragment); - goto step_7; - } - - /* - * 3) If the scheme component is defined, indicating that the reference - * starts with a scheme name, then the reference is interpreted as an - * absolute URI and we are done. Otherwise, the reference URI's - * scheme is inherited from the base URI's scheme component. - */ - if (ref->scheme != NULL) { - val = uri_to_string(ref); - goto done; - } - res->scheme = g_strdup(bas->scheme); - - res->query = g_strdup(ref->query); - res->fragment = g_strdup(ref->fragment); - - /* - * 4) If the authority component is defined, then the reference is a - * network-path and we skip to step 7. Otherwise, the reference - * URI's authority is inherited from the base URI's authority - * component, which will also be undefined if the URI scheme does not - * use an authority component. - */ - if ((ref->authority != NULL) || (ref->server != NULL)) { - if (ref->authority != NULL) { - res->authority = g_strdup(ref->authority); - } else { - res->server = g_strdup(ref->server); - res->user = g_strdup(ref->user); - res->port = ref->port; - } - res->path = g_strdup(ref->path); - goto step_7; - } - if (bas->authority != NULL) { - res->authority = g_strdup(bas->authority); - } else if (bas->server != NULL) { - res->server = g_strdup(bas->server); - res->user = g_strdup(bas->user); - res->port = bas->port; - } - - /* - * 5) If the path component begins with a slash character ("/"), then - * the reference is an absolute-path and we skip to step 7. - */ - if ((ref->path != NULL) && (ref->path[0] == '/')) { - res->path = g_strdup(ref->path); - goto step_7; - } - - /* - * 6) If this step is reached, then we are resolving a relative-path - * reference. 
The relative path needs to be merged with the base - * URI's path. Although there are many ways to do this, we will - * describe a simple method using a separate string buffer. - * - * Allocate a buffer large enough for the result string. - */ - len = 2; /* extra / and 0 */ - if (ref->path != NULL) { - len += strlen(ref->path); - } - if (bas->path != NULL) { - len += strlen(bas->path); - } - res->path = g_malloc(len); - res->path[0] = 0; - - /* - * a) All but the last segment of the base URI's path component is - * copied to the buffer. In other words, any characters after the - * last (right-most) slash character, if any, are excluded. - */ - cur = 0; - out = 0; - if (bas->path != NULL) { - while (bas->path[cur] != 0) { - while ((bas->path[cur] != 0) && (bas->path[cur] != '/')) { - cur++; - } - if (bas->path[cur] == 0) { - break; - } - - cur++; - while (out < cur) { - res->path[out] = bas->path[out]; - out++; - } - } - } - res->path[out] = 0; - - /* - * b) The reference's path component is appended to the buffer - * string. - */ - if (ref->path != NULL && ref->path[0] != 0) { - indx = 0; - /* - * Ensure the path includes a '/' - */ - if ((out == 0) && (bas->server != NULL)) { - res->path[out++] = '/'; - } - while (ref->path[indx] != 0) { - res->path[out++] = ref->path[indx++]; - } - } - res->path[out] = 0; - - /* - * Steps c) to h) are really path normalization steps - */ - normalize_uri_path(res->path); - -step_7: - - /* - * 7) The resulting URI components, including any inherited from the - * base URI, are recombined to give the absolute form of the URI - * reference. - */ - val = uri_to_string(res); - -done: - uri_free(ref); - uri_free(bas); - uri_free(res); - return val; -} - -/** - * uri_resolve_relative: - * @URI: the URI reference under consideration - * @base: the base value - * - * Expresses the URI of the reference in terms relative to the - * base. Some examples of this operation include: - * base = "http://site1.com/docs/book1.html" - * URI input URI returned - * docs/pic1.gif pic1.gif - * docs/img/pic1.gif img/pic1.gif - * img/pic1.gif ../img/pic1.gif - * http://site1.com/docs/pic1.gif pic1.gif - * http://site2.com/docs/pic1.gif http://site2.com/docs/pic1.gif - * - * base = "docs/book1.html" - * URI input URI returned - * docs/pic1.gif pic1.gif - * docs/img/pic1.gif img/pic1.gif - * img/pic1.gif ../img/pic1.gif - * http://site1.com/docs/pic1.gif http://site1.com/docs/pic1.gif - * - * - * Note: if the URI reference is really weird or complicated, it may be - * worthwhile to first convert it into a "nice" one by calling - * uri_resolve (using 'base') before calling this routine, - * since this routine (for reasonable efficiency) assumes URI has - * already been through some validation. - * - * Returns a new URI string (to be freed by the caller) or NULL in case - * error. 
- */ -char *uri_resolve_relative(const char *uri, const char *base) -{ - char *val = NULL; - int ret; - int ix; - int pos = 0; - int nbslash = 0; - int len; - URI *ref = NULL; - URI *bas = NULL; - char *bptr, *uptr, *vptr; - int remove_path = 0; - - if ((uri == NULL) || (*uri == 0)) { - return NULL; - } - - /* - * First parse URI into a standard form - */ - ref = uri_new(); - /* If URI not already in "relative" form */ - if (uri[0] != '.') { - ret = uri_parse_into(ref, uri); - if (ret != 0) { - goto done; /* Error in URI, return NULL */ - } - } else { - ref->path = g_strdup(uri); - } - - /* - * Next parse base into the same standard form - */ - if ((base == NULL) || (*base == 0)) { - val = g_strdup(uri); - goto done; - } - bas = uri_new(); - if (base[0] != '.') { - ret = uri_parse_into(bas, base); - if (ret != 0) { - goto done; /* Error in base, return NULL */ - } - } else { - bas->path = g_strdup(base); - } - - /* - * If the scheme / server on the URI differs from the base, - * just return the URI - */ - if ((ref->scheme != NULL) && - ((bas->scheme == NULL) || (strcmp(bas->scheme, ref->scheme)) || - (strcmp(bas->server, ref->server)))) { - val = g_strdup(uri); - goto done; - } - if (bas->path == ref->path || - (bas->path && ref->path && !strcmp(bas->path, ref->path))) { - val = g_strdup(""); - goto done; - } - if (bas->path == NULL) { - val = g_strdup(ref->path); - goto done; - } - if (ref->path == NULL) { - ref->path = (char *)"/"; - remove_path = 1; - } - - /* - * At this point (at last!) we can compare the two paths - * - * First we take care of the special case where either of the - * two path components may be missing (bug 316224) - */ - if (bas->path == NULL) { - if (ref->path != NULL) { - uptr = ref->path; - if (*uptr == '/') { - uptr++; - } - /* exception characters from uri_to_string */ - val = uri_string_escape(uptr, "/;&=+$,"); - } - goto done; - } - bptr = bas->path; - if (ref->path == NULL) { - for (ix = 0; bptr[ix] != 0; ix++) { - if (bptr[ix] == '/') { - nbslash++; - } - } - uptr = NULL; - len = 1; /* this is for a string terminator only */ - } else { - /* - * Next we compare the two strings and find where they first differ - */ - if ((ref->path[pos] == '.') && (ref->path[pos + 1] == '/')) { - pos += 2; - } - if ((*bptr == '.') && (bptr[1] == '/')) { - bptr += 2; - } else if ((*bptr == '/') && (ref->path[pos] != '/')) { - bptr++; - } - while ((bptr[pos] == ref->path[pos]) && (bptr[pos] != 0)) { - pos++; - } - - if (bptr[pos] == ref->path[pos]) { - val = g_strdup(""); - goto done; /* (I can't imagine why anyone would do this) */ - } - - /* - * In URI, "back up" to the last '/' encountered. 
This will be the - * beginning of the "unique" suffix of URI - */ - ix = pos; - if ((ref->path[ix] == '/') && (ix > 0)) { - ix--; - } else if ((ref->path[ix] == 0) && (ix > 1) - && (ref->path[ix - 1] == '/')) { - ix -= 2; - } - for (; ix > 0; ix--) { - if (ref->path[ix] == '/') { - break; - } - } - if (ix == 0) { - uptr = ref->path; - } else { - ix++; - uptr = &ref->path[ix]; - } - - /* - * In base, count the number of '/' from the differing point - */ - if (bptr[pos] != ref->path[pos]) { /* check for trivial URI == base */ - for (; bptr[ix] != 0; ix++) { - if (bptr[ix] == '/') { - nbslash++; - } - } - } - len = strlen(uptr) + 1; - } - - if (nbslash == 0) { - if (uptr != NULL) { - /* exception characters from uri_to_string */ - val = uri_string_escape(uptr, "/;&=+$,"); - } - goto done; - } - - /* - * Allocate just enough space for the returned string - - * length of the remainder of the URI, plus enough space - * for the "../" groups, plus one for the terminator - */ - val = g_malloc(len + 3 * nbslash); - vptr = val; - /* - * Put in as many "../" as needed - */ - for (; nbslash > 0; nbslash--) { - *vptr++ = '.'; - *vptr++ = '.'; - *vptr++ = '/'; - } - /* - * Finish up with the end of the URI - */ - if (uptr != NULL) { - if ((vptr > val) && (len > 0) && (uptr[0] == '/') && - (vptr[-1] == '/')) { - memcpy(vptr, uptr + 1, len - 1); - vptr[len - 2] = 0; - } else { - memcpy(vptr, uptr, len); - vptr[len - 1] = 0; - } - } else { - vptr[len - 1] = 0; - } - - /* escape the freshly-built path */ - vptr = val; - /* exception characters from uri_to_string */ - val = uri_string_escape(vptr, "/;&=+$,"); - g_free(vptr); - -done: - /* - * Free the working variables - */ - if (remove_path != 0) { - ref->path = NULL; - } - uri_free(ref); - uri_free(bas); - - return val; -} - /* * Utility functions to help parse and assemble query strings. */ @@ -2274,14 +1427,14 @@ struct QueryParams *query_params_parse(const char *query) * and consistent with CGI.pm we assume value is "". */ else if (!eq) { - name = uri_string_unescape(query, end - query, NULL); + name = g_uri_unescape_segment(query, end, NULL); value = NULL; } /* Or if we have "name=" here (works around annoying * problem when calling uri_string_unescape with len = 0). */ else if (eq + 1 == end) { - name = uri_string_unescape(query, eq - query, NULL); + name = g_uri_unescape_segment(query, eq, NULL); value = g_new0(char, 1); } /* If the '=' character is at the beginning then we have @@ -2293,8 +1446,8 @@ struct QueryParams *query_params_parse(const char *query) /* Otherwise it's "name=value". */ else { - name = uri_string_unescape(query, eq - query, NULL); - value = uri_string_unescape(eq + 1, end - (eq + 1), NULL); + name = g_uri_unescape_segment(query, eq, NULL); + value = g_uri_unescape_segment(eq + 1, end, NULL); } /* Append to the parameter set. 
*/ diff --git a/util/userfaultfd.c b/util/userfaultfd.c index fdff4867e8b..1b2fa949d4d 100644 --- a/util/userfaultfd.c +++ b/util/userfaultfd.c @@ -18,7 +18,6 @@ #include #include #include -#include typedef enum { UFFD_UNINITIALIZED = 0, diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c index a9a48fffb87..3bfb1ad3ec1 100644 --- a/util/vhost-user-server.c +++ b/util/vhost-user-server.c @@ -360,10 +360,7 @@ static void vu_accept(QIONetListener *listener, QIOChannelSocket *sioc, qio_channel_set_follow_coroutine_ctx(server->ioc, true); - /* Attaching the AioContext starts the vu_client_trip coroutine */ - aio_context_acquire(server->ctx); vhost_user_server_attach_aio_context(server, server->ctx); - aio_context_release(server->ctx); } /* server->ctx acquired by caller */ diff --git a/util/yank.c b/util/yank.c index abf47c346d1..eaac50539c5 100644 --- a/util/yank.c +++ b/util/yank.c @@ -35,7 +35,7 @@ typedef struct YankInstanceEntry YankInstanceEntry; /* * This lock protects the yank_instance_list below. Because it's taken by * OOB-capable commands, it must be "fast", i.e. it may only be held for a - * bounded, short time. See docs/devel/qapi-code-gen.txt for additional + * bounded, short time. See docs/devel/qapi-code-gen.rst for additional * information. */ static QemuMutex yank_lock;
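
To make the reworked preallocation API above easier to follow, here is a minimal caller sketch based on the signatures introduced in this patch: the bool-returning qemu_prealloc_mem() with its new async flag, and qemu_finish_async_prealloc_mem() for reaping queued requests. The helper prealloc_areas() and its parameters are illustrative only and not part of the patch; real callers supply their own fd, ThreadContext and error handling.

/* Illustrative sketch only -- not part of this patch. */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"   /* bql_locked() */
#include "qapi/error.h"

/*
 * Start preallocation of several areas without waiting for each one,
 * then join all worker threads in a single step.  qemu_prealloc_mem()
 * only honours async=true when MADV_POPULATE_WRITE and a ThreadContext
 * are in use; otherwise it silently falls back to synchronous behaviour.
 */
static bool prealloc_areas(char **areas, size_t *sizes, int nr_areas,
                           int max_threads, ThreadContext *tc, Error **errp)
{
    /* Async requests are queued under the BQL (see touch_all_pages()). */
    assert(bql_locked());

    for (int i = 0; i < nr_areas; i++) {
        /*
         * fd = -1 here stands for anonymous memory in this sketch;
         * async=true means worker threads are created but not joined yet.
         */
        if (!qemu_prealloc_mem(-1, areas[i], sizes[i], max_threads, tc,
                               true, errp)) {
            return false;
        }
    }

    /* Join every queued request and report the first failure, if any. */
    return qemu_finish_async_prealloc_mem(errp);
}

On Windows the new qemu_finish_async_prealloc_mem() stub above simply returns true, so a caller written in this style stays portable across both oslib implementations.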