From d7feb432f70b9a258f9b07ccc27d7c21b13337e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20=C5=BByjewski?= Date: Tue, 25 Apr 2023 16:56:57 +0200 Subject: [PATCH] Add patches for Intel TXT support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tomasz Żyjewski Signed-off-by: Sergii Dmytruk --- ...intel_txt.h-constants-and-accessors-.patch | 325 +++++++++ ...t-add-MLE-header-and-new-entry-point.patch | 107 +++ ...ly-add-early-TXT-tests-and-restore-M.patch | 302 ++++++++ ...table.h-Secure-Launch-Resource-Table.patch | 283 ++++++++ 1304-xen-arch-x86-reserve-TXT-memory.patch | 234 +++++++ 1305-x86-intel_txt.c-restore-boot-MTRRs.patch | 120 ++++ 1306-x86-sha1.c-add-file.patch | 232 +++++++ ...or-early-hashing-and-extending-PCRs-.patch | 645 ++++++++++++++++++ ...oot-choose-AP-stack-based-on-APIC-ID.patch | 165 +++++ 1309-x86-smpboot.c-TXT-AP-bringup.patch | 184 +++++ ...ccess-x86_cpu_to_apicid-directly-use.patch | 265 +++++++ ...p-x86_cpu_to_apicid-use-cpu_data-cpu.patch | 156 +++++ ...-x86-smp-move-stack_base-to-cpu_data.patch | 222 ++++++ ...x86-smp-call-x2apic_ap_setup-earlier.patch | 48 ++ ...n-protect-against-recurrent-machine_.patch | 54 ++ ...ch-x86-smp-drop-booting_cpu-variable.patch | 121 ++++ ...-arch-x86-smp-make-cpu_state-per-CPU.patch | 219 ++++++ ...ove-MONITOR-MWAIT-loop-for-TXT-AP-br.patch | 81 +++ ...-t-send-INIT-SIPI-SIPI-if-AP-is-alre.patch | 82 +++ ...mp-start-APs-in-parallel-during-boot.patch | 128 ++++ 1320-x86-sha256.c-add-file.patch | 281 ++++++++ ...m.c-support-extending-PCRs-of-TPM2.0.patch | 637 +++++++++++++++++ ...tpm.c-implement-event-log-for-TPM2.0.patch | 257 +++++++ 1323-arch-x86-process-DRTM-policy.patch | 272 ++++++++ xen.spec.in | 26 + 25 files changed, 5446 insertions(+) create mode 100644 1300-x86-include-asm-intel_txt.h-constants-and-accessors-.patch create mode 100644 1301-x86-boot-add-MLE-header-and-new-entry-point.patch create mode 100644 1302-x86-boot-txt_early-add-early-TXT-tests-and-restore-M.patch create mode 100644 1303-include-xen-slr_table.h-Secure-Launch-Resource-Table.patch create mode 100644 1304-xen-arch-x86-reserve-TXT-memory.patch create mode 100644 1305-x86-intel_txt.c-restore-boot-MTRRs.patch create mode 100644 1306-x86-sha1.c-add-file.patch create mode 100644 1307-x86-tpm.c-code-for-early-hashing-and-extending-PCRs-.patch create mode 100644 1308-x86-boot-choose-AP-stack-based-on-APIC-ID.patch create mode 100644 1309-x86-smpboot.c-TXT-AP-bringup.patch create mode 100644 1310-arch-x86-don-t-access-x86_cpu_to_apicid-directly-use.patch create mode 100644 1311-arch-x86-smp-drop-x86_cpu_to_apicid-use-cpu_data-cpu.patch create mode 100644 1312-arch-x86-smp-move-stack_base-to-cpu_data.patch create mode 100644 1313-arch-x86-smp-call-x2apic_ap_setup-earlier.patch create mode 100644 1314-arch-x86-shutdown-protect-against-recurrent-machine_.patch create mode 100644 1315-arch-x86-smp-drop-booting_cpu-variable.patch create mode 100644 1316-arch-x86-smp-make-cpu_state-per-CPU.patch create mode 100644 1317-arch-x86-smp-remove-MONITOR-MWAIT-loop-for-TXT-AP-br.patch create mode 100644 1318-arch-x86-smp-don-t-send-INIT-SIPI-SIPI-if-AP-is-alre.patch create mode 100644 1319-arch-x86-smp-start-APs-in-parallel-during-boot.patch create mode 100644 1320-x86-sha256.c-add-file.patch create mode 100644 1321-x86-tpm.c-support-extending-PCRs-of-TPM2.0.patch create mode 100644 1322-x86-tpm.c-implement-event-log-for-TPM2.0.patch create mode 100644 1323-arch-x86-process-DRTM-policy.patch diff --git 
a/1300-x86-include-asm-intel_txt.h-constants-and-accessors-.patch b/1300-x86-include-asm-intel_txt.h-constants-and-accessors-.patch
new file mode 100644
index 00000000..523f6963
--- /dev/null
+++ b/1300-x86-include-asm-intel_txt.h-constants-and-accessors-.patch
@@ -0,0 +1,325 @@
+From 9e66d00f31b03b91375acbaa3f952e1dfa31475f Mon Sep 17 00:00:00 2001
+From: Krystian Hebel
+Date: Mon, 17 Apr 2023 20:10:13 +0200
+Subject: [PATCH 1300/1323] x86/include/asm/intel_txt.h: constants and
+ accessors for TXT registers and heap
+
+The file contains the TXT register space base addresses, register
+offsets, error codes and inline functions for accessing structures
+stored on the TXT heap.
+
+Signed-off-by: Krystian Hebel
+Signed-off-by: Sergii Dmytruk
+---
+ xen/arch/x86/include/asm/intel_txt.h | 262 +++++++++++++++++++++++++++
+ xen/arch/x86/tboot.c | 18 +-
+ 2 files changed, 263 insertions(+), 17 deletions(-)
+ create mode 100644 xen/arch/x86/include/asm/intel_txt.h
+
+diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h
+new file mode 100644
+index 0000000000..61dbdb63ec
+--- /dev/null
++++ b/xen/arch/x86/include/asm/intel_txt.h
+@@ -0,0 +1,262 @@
++/*
++ * TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE)
++ */
++#define TXT_PUB_CONFIG_REGS_BASE 0xfed30000
++#define TXT_PRIV_CONFIG_REGS_BASE 0xfed20000
++
++/* Number of pages for each config regs space. */
++#define NR_TXT_CONFIG_PAGES ((TXT_PUB_CONFIG_REGS_BASE - \
++ TXT_PRIV_CONFIG_REGS_BASE) >> PAGE_SHIFT)
++
++/* Offsets from pub/priv config space. */
++#define TXTCR_STS 0x0000
++#define TXTCR_ESTS 0x0008
++#define TXTCR_ERRORCODE 0x0030
++#define TXTCR_CMD_RESET 0x0038
++#define TXTCR_CMD_CLOSE_PRIVATE 0x0048
++#define TXTCR_DIDVID 0x0110
++#define TXTCR_VER_EMIF 0x0200
++#define TXTCR_CMD_UNLOCK_MEM_CONFIG 0x0218
++#define TXTCR_SINIT_BASE 0x0270
++#define TXTCR_SINIT_SIZE 0x0278
++#define TXTCR_MLE_JOIN 0x0290
++#define TXTCR_HEAP_BASE 0x0300
++#define TXTCR_HEAP_SIZE 0x0308
++#define TXTCR_SCRATCHPAD 0x0378
++#define TXTCR_CMD_OPEN_LOCALITY1 0x0380
++#define TXTCR_CMD_CLOSE_LOCALITY1 0x0388
++#define TXTCR_CMD_OPEN_LOCALITY2 0x0390
++#define TXTCR_CMD_CLOSE_LOCALITY2 0x0398
++#define TXTCR_CMD_SECRETS 0x08e0
++#define TXTCR_CMD_NO_SECRETS 0x08e8
++#define TXTCR_E2STS 0x08f0
++
++/*
++ * Secure Launch Defined Error Codes used in MLE-initiated TXT resets.
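++ * (One of these values is written to TXTCR_ERRORCODE by txt_reset() below
++ * before the reset is triggered, so the cause can be inspected afterwards.)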
++ * ++ * TXT Specification ++ * Appendix I ACM Error Codes ++ */ ++#define SLAUNCH_ERROR_GENERIC 0xc0008001 ++#define SLAUNCH_ERROR_TPM_INIT 0xc0008002 ++#define SLAUNCH_ERROR_TPM_INVALID_LOG20 0xc0008003 ++#define SLAUNCH_ERROR_TPM_LOGGING_FAILED 0xc0008004 ++#define SLAUNCH_ERROR_REGION_STRADDLE_4GB 0xc0008005 ++#define SLAUNCH_ERROR_TPM_EXTEND 0xc0008006 ++#define SLAUNCH_ERROR_MTRR_INV_VCNT 0xc0008007 ++#define SLAUNCH_ERROR_MTRR_INV_DEF_TYPE 0xc0008008 ++#define SLAUNCH_ERROR_MTRR_INV_BASE 0xc0008009 ++#define SLAUNCH_ERROR_MTRR_INV_MASK 0xc000800a ++#define SLAUNCH_ERROR_MSR_INV_MISC_EN 0xc000800b ++#define SLAUNCH_ERROR_INV_AP_INTERRUPT 0xc000800c ++#define SLAUNCH_ERROR_INTEGER_OVERFLOW 0xc000800d ++#define SLAUNCH_ERROR_HEAP_WALK 0xc000800e ++#define SLAUNCH_ERROR_HEAP_MAP 0xc000800f ++#define SLAUNCH_ERROR_REGION_ABOVE_4GB 0xc0008010 ++#define SLAUNCH_ERROR_HEAP_INVALID_DMAR 0xc0008011 ++#define SLAUNCH_ERROR_HEAP_DMAR_SIZE 0xc0008012 ++#define SLAUNCH_ERROR_HEAP_DMAR_MAP 0xc0008013 ++#define SLAUNCH_ERROR_HI_PMR_BASE 0xc0008014 ++#define SLAUNCH_ERROR_HI_PMR_SIZE 0xc0008015 ++#define SLAUNCH_ERROR_LO_PMR_BASE 0xc0008016 ++#define SLAUNCH_ERROR_LO_PMR_MLE 0xc0008017 ++#define SLAUNCH_ERROR_INITRD_TOO_BIG 0xc0008018 ++#define SLAUNCH_ERROR_HEAP_ZERO_OFFSET 0xc0008019 ++#define SLAUNCH_ERROR_WAKE_BLOCK_TOO_SMALL 0xc000801a ++#define SLAUNCH_ERROR_MLE_BUFFER_OVERLAP 0xc000801b ++#define SLAUNCH_ERROR_BUFFER_BEYOND_PMR 0xc000801c ++#define SLAUNCH_ERROR_OS_SINIT_BAD_VERSION 0xc000801d ++#define SLAUNCH_ERROR_EVENTLOG_MAP 0xc000801e ++#define SLAUNCH_ERROR_TPM_NUMBER_ALGS 0xc000801f ++#define SLAUNCH_ERROR_TPM_UNKNOWN_DIGEST 0xc0008020 ++#define SLAUNCH_ERROR_TPM_INVALID_EVENT 0xc0008021 ++ ++#define SLAUNCH_BOOTLOADER_MAGIC 0x4c534254 ++ ++#ifndef __ASSEMBLY__ ++ ++/* We need to differentiate between pre- and post paging enabled. */ ++#ifdef __BOOT_DEFS_H__ ++#define _txt(x) _p(x) ++#else ++#include ++#include // __va() ++#define _txt(x) __va(x) ++#endif ++ ++/* ++ * Always use private space as some of registers are either read-only or not ++ * present in public space. 
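++ * Note: the private space is only accessible between GETSEC[SENTER] and a
++ * write to TXTCR_CMD_CLOSE_PRIVATE, i.e. while in the measured environment.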
++ */ ++static inline volatile uint64_t read_txt_reg(int reg_no) ++{ ++ volatile uint64_t *reg = _txt(TXT_PRIV_CONFIG_REGS_BASE + reg_no); ++ return *reg; ++} ++ ++static inline void write_txt_reg(int reg_no, uint64_t val) ++{ ++ volatile uint64_t *reg = _txt(TXT_PRIV_CONFIG_REGS_BASE + reg_no); ++ *reg = val; ++ /* This serves as TXT register barrier */ ++ (void)read_txt_reg(TXTCR_ESTS); ++} ++ ++static inline void txt_reset(uint32_t error) ++{ ++ write_txt_reg(TXTCR_ERRORCODE, error); ++ write_txt_reg(TXTCR_CMD_NO_SECRETS, 1); ++ write_txt_reg(TXTCR_CMD_UNLOCK_MEM_CONFIG, 1); ++ write_txt_reg(TXTCR_CMD_RESET, 1); ++ while (1); ++} ++ ++/* ++ * Secure Launch defined OS/MLE TXT Heap table ++ */ ++struct txt_os_mle_data { ++ uint32_t version; ++ uint32_t boot_params_addr; ++ uint32_t slrt; ++ uint32_t txt_info; ++ uint32_t ap_wake_block; ++ uint32_t ap_wake_block_size; ++ uint8_t mle_scratch[64]; ++} __packed; ++ ++/* ++ * TXT specification defined BIOS data TXT Heap table ++ */ ++struct txt_bios_data { ++ uint32_t version; /* Currently 5 for TPM 1.2 and 6 for TPM 2.0 */ ++ uint32_t bios_sinit_size; ++ uint64_t reserved1; ++ uint64_t reserved2; ++ uint32_t num_logical_procs; ++ /* Versions >= 3 && < 5 */ ++ uint32_t sinit_flags; ++ /* Versions >= 5 with updates in version 6 */ ++ uint32_t mle_flags; ++ /* Versions >= 4 */ ++ /* Ext Data Elements */ ++} __packed; ++ ++/* ++ * TXT specification defined OS/SINIT TXT Heap table ++ */ ++struct txt_os_sinit_data { ++ uint32_t version; /* Currently 6 for TPM 1.2 and 7 for TPM 2.0 */ ++ uint32_t flags; /* Reserved in version 6 */ ++ uint64_t mle_ptab; ++ uint64_t mle_size; ++ uint64_t mle_hdr_base; ++ uint64_t vtd_pmr_lo_base; ++ uint64_t vtd_pmr_lo_size; ++ uint64_t vtd_pmr_hi_base; ++ uint64_t vtd_pmr_hi_size; ++ uint64_t lcp_po_base; ++ uint64_t lcp_po_size; ++ uint32_t capabilities; ++ /* Version = 5 */ ++ uint64_t efi_rsdt_ptr; /* RSD*P* in versions >= 6 */ ++ /* Versions >= 6 */ ++ /* Ext Data Elements */ ++} __packed; ++ ++/* ++ * TXT specification defined SINIT/MLE TXT Heap table ++ */ ++struct txt_sinit_mle_data { ++ uint32_t version; /* Current values are 6 through 9 */ ++ /* Versions <= 8, fields until lcp_policy_control must be 0 for >= 9 */ ++ uint8_t bios_acm_id[20]; ++ uint32_t edx_senter_flags; ++ uint64_t mseg_valid; ++ uint8_t sinit_hash[20]; ++ uint8_t mle_hash[20]; ++ uint8_t stm_hash[20]; ++ uint8_t lcp_policy_hash[20]; ++ uint32_t lcp_policy_control; ++ /* Versions >= 7 */ ++ uint32_t rlp_wakeup_addr; ++ uint32_t reserved; ++ uint32_t num_of_sinit_mdrs; ++ uint32_t sinit_mdrs_table_offset; ++ uint32_t sinit_vtd_dmar_table_size; ++ uint32_t sinit_vtd_dmar_table_offset; ++ /* Versions >= 8 */ ++ uint32_t processor_scrtm_status; ++ /* Versions >= 9 */ ++ /* Ext Data Elements */ ++} __packed; ++ ++/* ++ * Functions to extract data from the Intel TXT Heap Memory. 
The layout ++ * of the heap is as follows: ++ * +---------------------------------+ ++ * | Size Bios Data table (uint64_t) | ++ * +---------------------------------+ ++ * | Bios Data table | ++ * +---------------------------------+ ++ * | Size OS MLE table (uint64_t) | ++ * +---------------------------------+ ++ * | OS MLE table | ++ * +-------------------------------- + ++ * | Size OS SINIT table (uint64_t) | ++ * +---------------------------------+ ++ * | OS SINIT table | ++ * +---------------------------------+ ++ * | Size SINIT MLE table (uint64_t) | ++ * +---------------------------------+ ++ * | SINIT MLE table | ++ * +---------------------------------+ ++ * ++ * NOTE: the table size fields include the 8 byte size field itself. ++ */ ++static inline uint64_t txt_bios_data_size(void *heap) ++{ ++ return *((uint64_t *)heap); ++} ++ ++static inline void *txt_bios_data_start(void *heap) ++{ ++ return heap + sizeof(uint64_t); ++} ++ ++static inline uint64_t txt_os_mle_data_size(void *heap) ++{ ++ return *((uint64_t *)(heap + txt_bios_data_size(heap))); ++} ++ ++static inline void *txt_os_mle_data_start(void *heap) ++{ ++ return heap + txt_bios_data_size(heap) + sizeof(uint64_t); ++} ++ ++static inline uint64_t txt_os_sinit_data_size(void *heap) ++{ ++ return *((uint64_t *)(heap + txt_bios_data_size(heap) + ++ txt_os_mle_data_size(heap))); ++} ++ ++static inline void *txt_os_sinit_data_start(void *heap) ++{ ++ return heap + txt_bios_data_size(heap) + ++ txt_os_mle_data_size(heap) + sizeof(uint64_t); ++} ++ ++static inline uint64_t txt_sinit_mle_data_size(void *heap) ++{ ++ return *((uint64_t *)(heap + txt_bios_data_size(heap) + ++ txt_os_mle_data_size(heap) + ++ txt_os_sinit_data_size(heap))); ++} ++ ++static inline void *txt_sinit_mle_data_start(void *heap) ++{ ++ return heap + txt_bios_data_size(heap) + ++ txt_os_mle_data_size(heap) + ++ txt_os_sinit_data_size(heap) + sizeof(uint64_t); ++} ++ ++#endif /* __ASSEMBLY__ */ +diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c +index fe1abfdf08..94a2f27cf4 100644 +--- a/xen/arch/x86/tboot.c ++++ b/xen/arch/x86/tboot.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + + /* tboot= */ +@@ -31,23 +32,6 @@ static vmac_t frametable_mac; /* MAC for frame table during S3 */ + static uint64_t __initdata txt_heap_base, __initdata txt_heap_size; + static uint64_t __initdata sinit_base, __initdata sinit_size; + +-/* +- * TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE) +- */ +- +-#define TXT_PUB_CONFIG_REGS_BASE 0xfed30000 +-#define TXT_PRIV_CONFIG_REGS_BASE 0xfed20000 +- +-/* # pages for each config regs space - used by fixmap */ +-#define NR_TXT_CONFIG_PAGES ((TXT_PUB_CONFIG_REGS_BASE - \ +- TXT_PRIV_CONFIG_REGS_BASE) >> PAGE_SHIFT) +- +-/* offsets from pub/priv config space */ +-#define TXTCR_SINIT_BASE 0x0270 +-#define TXTCR_SINIT_SIZE 0x0278 +-#define TXTCR_HEAP_BASE 0x0300 +-#define TXTCR_HEAP_SIZE 0x0308 +- + #define SHA1_SIZE 20 + typedef uint8_t sha1_hash_t[SHA1_SIZE]; + +-- +2.44.0 + diff --git a/1301-x86-boot-add-MLE-header-and-new-entry-point.patch b/1301-x86-boot-add-MLE-header-and-new-entry-point.patch new file mode 100644 index 00000000..cf37ea83 --- /dev/null +++ b/1301-x86-boot-add-MLE-header-and-new-entry-point.patch @@ -0,0 +1,107 @@ +From ec5d2105bbfa80fd9f6e807b6ac672edaf6ccc75 Mon Sep 17 00:00:00 2001 +From: Kacper Stojek +Date: Wed, 31 Aug 2022 15:03:51 +0200 +Subject: [PATCH 1301/1323] x86/boot: add MLE header and new entry point + +MLE header is used with Intel TXT, 
together with MB2 headers.
+The entry point differs, but only to distinguish this path from other
+entries by moving a magic number into EAX. The execution environment is
+similar to that of Multiboot 2 and the code falls through to MB2's
+entry point.
+
+Signed-off-by: Kacper Stojek
+Signed-off-by: Krystian Hebel
+---
+ docs/hypervisor-guide/x86/how-xen-boots.rst | 5 +++
+ xen/arch/x86/boot/head.S | 45 +++++++++++++++++++++
+ 2 files changed, 50 insertions(+)
+
+diff --git a/docs/hypervisor-guide/x86/how-xen-boots.rst b/docs/hypervisor-guide/x86/how-xen-boots.rst
+index ca77d7c8a3..eb60a1cd80 100644
+--- a/docs/hypervisor-guide/x86/how-xen-boots.rst
++++ b/docs/hypervisor-guide/x86/how-xen-boots.rst
+@@ -55,6 +55,11 @@ If ``CONFIG_PVH_GUEST`` was selected at build time, an Elf note is included
+ which indicates the ability to use the PVH boot protocol, and registers
+ ``__pvh_start`` as the entrypoint, entered in 32bit mode.
+
++The MLE header is used with Intel TXT, together with MB2 headers. The entry
++point differs, but only to distinguish this path from other entries by moving
++a magic number into EAX. The execution environment is similar to that of
++Multiboot 2 and the code falls through to ``start``.
++
+
+ xen.gz
+ ~~~~~~
+diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
+index 245c859dd7..d9a4290ec1 100644
+--- a/xen/arch/x86/boot/head.S
++++ b/xen/arch/x86/boot/head.S
+@@ -3,6 +3,7 @@
+ #include
+ #include
+ #include
++#include <asm/intel_txt.h>
+ #include
+ #include
+ #include
+@@ -113,6 +114,25 @@ multiboot2_header:
+ .size multiboot2_header, . - multiboot2_header
+ .type multiboot2_header, @object
+
++ .balign 16
++mle_header:
++ .long 0x9082ac5a /* UUID0 */
++ .long 0x74a7476f /* UUID1 */
++ .long 0xa2555c0f /* UUID2 */
++ .long 0x42b651cb /* UUID3 */
++ .long 0x00000034 /* MLE header size */
++ .long 0x00020002 /* MLE version 2.2 */
++ .long (slaunch_stub_entry - start) /* Linear entry point of MLE (SINIT virt. address) */
++ .long 0x00000000 /* First valid page of MLE */
++ .long 0x00000000 /* Offset within binary of first byte of MLE */
++ .long (_end - start) /* Offset within binary of last byte + 1 of MLE */
++ .long 0x00000223 /* Bit vector of MLE-supported capabilities */
++ .long 0x00000000 /* Starting linear address of command line (unused) */
++ .long 0x00000000 /* Ending linear address of command line (unused) */
++
++ .size mle_header, .-mle_header
++ .type mle_header, @object
++
+ .section .init.rodata, "a", @progbits
+
+ .Lbad_cpu_msg: .asciz "ERR: Not a 64-bit CPU!"
+@@ -428,6 +448,31 @@ __pvh_start:
+
+ #endif /* CONFIG_PVH_GUEST */
+
++ /*
++ * Entry point for TrenchBoot Secure Launch on Intel TXT platforms.
++ *
++ * CPU is in 32b protected mode with paging disabled. On entry:
++ * - %ebx = %ebp = SINIT physical base address
++ * - %edx = SENTER control flags
++ * - stack pointer is undefined
++ * - CS is flat 4GB code segment
++ * - DS, ES and SS are flat 4GB data segments
++ *
++ * Additional restrictions:
++ * - some MSRs are partially cleared, among them IA32_MISC_ENABLE, so
++ * some capabilities might be reported as disabled even if they are
++ * supported by CPU
++ * - interrupts (including NMIs and SMIs) are disabled and must be
++ * enabled later
++ * - trying to enter real mode results in reset
++ * - APs must be brought up by MONITOR or GETSEC[WAKEUP], depending on
++ * which is supported by a given SINIT ACM
++ */
++slaunch_stub_entry:
++ movl $SLAUNCH_BOOTLOADER_MAGIC,%eax
++
++ /* Fall through to Multiboot entry point.
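++ * __start is placed right after this stub, so no jump is needed; the
++ * magic value left in EAX is what later tells __start that this is a
++ * Secure Launch boot rather than a plain Multiboot 2 one.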
*/
+
+ __start:
+ cld
+ cli
+--
+2.44.0
+
diff --git a/1302-x86-boot-txt_early-add-early-TXT-tests-and-restore-M.patch b/1302-x86-boot-txt_early-add-early-TXT-tests-and-restore-M.patch
new file mode 100644
index 00000000..baddade3
--- /dev/null
+++ b/1302-x86-boot-txt_early-add-early-TXT-tests-and-restore-M.patch
@@ -0,0 +1,302 @@
+From 29938842d7552d8d14d107bcfa55d0e9e625900b Mon Sep 17 00:00:00 2001
+From: Krystian Hebel
+Date: Mon, 17 Apr 2023 20:09:54 +0200
+Subject: [PATCH 1302/1323] x86/boot/txt_early: add early TXT tests and restore
+ MBI pointer
+
+These tests validate that important parts of memory are protected
+against DMA attacks, including Xen and the MBI. Modules can be tested
+later, when it is possible to report issues to the user before invoking
+TXT reset.
+
+TPM event log validation is temporarily disabled due to an issue with
+its allocation by the bootloader (GRUB), which will need to be modified
+to address this. Ultimately the event log will also have to be validated
+early, as it is used immediately after these tests to hold MBI
+measurements. See the larger comment in verify_pmr_ranges().
+
+Signed-off-by: Krystian Hebel
+Signed-off-by: Sergii Dmytruk
+---
+ xen/arch/x86/Makefile | 1 +
+ xen/arch/x86/boot/Makefile | 2 +-
+ xen/arch/x86/boot/head.S | 25 +++++
+ xen/arch/x86/boot/txt_early.c | 131 +++++++++++++++++++++++++++
+ xen/arch/x86/include/asm/intel_txt.h | 26 ++++++
+ xen/arch/x86/intel_txt.c | 11 +++
+ 6 files changed, 195 insertions(+), 1 deletion(-)
+ create mode 100644 xen/arch/x86/boot/txt_early.c
+ create mode 100644 xen/arch/x86/intel_txt.c
+
+diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
+index 4eec765106..674e5a3289 100644
+--- a/xen/arch/x86/Makefile
++++ b/xen/arch/x86/Makefile
+@@ -56,6 +56,7 @@ obj-y += percpu.o
+ obj-y += physdev.o
+ obj-$(CONFIG_COMPAT) += x86_64/physdev.o
+ obj-y += psr.o
++obj-y += intel_txt.o
+ obj-y += setup.o
+ obj-y += shutdown.o
+ obj-y += smp.o
+diff --git a/xen/arch/x86/boot/Makefile b/xen/arch/x86/boot/Makefile
+index d6bc8fc084..34df17664a 100644
+--- a/xen/arch/x86/boot/Makefile
++++ b/xen/arch/x86/boot/Makefile
+@@ -1,6 +1,6 @@
+ obj-bin-y += head.o
+
+-head-bin-objs := cmdline.o reloc.o
++head-bin-objs := cmdline.o reloc.o txt_early.o
+
+ nocov-y += $(head-bin-objs)
+ noubsan-y += $(head-bin-objs)
+diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
+index d9a4290ec1..422197af88 100644
+--- a/xen/arch/x86/boot/head.S
++++ b/xen/arch/x86/boot/head.S
+@@ -501,6 +501,10 @@ __start:
+ /* Bootloaders may set multiboot{1,2}.mem_lower to a nonzero value. */
+ xor %edx,%edx
+
++ /* Check for TrenchBoot slaunch bootloader. */
++ cmp $SLAUNCH_BOOTLOADER_MAGIC,%eax
++ je .Lslaunch_proto
++
+ /* Check for Multiboot2 bootloader. */
+ cmp $MULTIBOOT2_BOOTLOADER_MAGIC,%eax
+ je .Lmultiboot2_proto
+@@ -516,6 +520,23 @@ __start:
+ cmovnz MB_mem_lower(%ebx),%edx
+ jmp trampoline_bios_setup
+
++.Lslaunch_proto:
++ /* Save information that TrenchBoot slaunch was used. */
++ movb $1, sym_esi(slaunch_active)
++
++ /* Push arguments to stack and call txt_early_tests(). */
++ push $sym_offs(__2M_rwdata_end) /* end of target image */
++ push $sym_offs(_start) /* target base address */
++ push %esi /* load base address */
++ call txt_early_tests
++
++ /*
++ * txt_early_tests() returns MBI address, move it to EBX, move magic
++ * number expected by Multiboot 2 to EAX and fall through.
++ */
++ movl %eax,%ebx
++ movl $MULTIBOOT2_BOOTLOADER_MAGIC,%eax
++
+ .Lmultiboot2_proto:
+ /* Skip Multiboot2 information fixed part.
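+ * (the 8-byte total_size/reserved header that precedes the tags).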
*/ + lea (MB2_fixed_sizeof+MULTIBOOT2_TAG_ALIGN-1)(%ebx),%ecx +@@ -837,6 +858,10 @@ cmdline_parse_early: + reloc: + .incbin "reloc.bin" + ++ ALIGN ++txt_early_tests: ++ .incbin "txt_early.bin" ++ + ENTRY(trampoline_start) + #include "trampoline.S" + ENTRY(trampoline_end) +diff --git a/xen/arch/x86/boot/txt_early.c b/xen/arch/x86/boot/txt_early.c +new file mode 100644 +index 0000000000..c52a600e8d +--- /dev/null ++++ b/xen/arch/x86/boot/txt_early.c +@@ -0,0 +1,131 @@ ++/* ++ * Copyright (c) 2022-2023 3mdeb Sp. z o.o. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program. If not, see . ++ */ ++ ++/* ++ * This entry point is entered from xen/arch/x86/boot/head.S with Xen base at ++ * 0x4(%esp). A pointer to MBI is returned in %eax. ++ */ ++asm ( ++ " .text \n" ++ " .globl _start \n" ++ "_start: \n" ++ " jmp txt_early_tests \n" ++ ); ++ ++#include "defs.h" ++#include "../include/asm/intel_txt.h" ++ ++static void verify_pmr_ranges(struct txt_os_mle_data *os_mle, ++ struct txt_os_sinit_data *os_sinit, ++ uint32_t load_base_addr, uint32_t tgt_base_addr, ++ uint32_t xen_size) ++{ ++ int check_high_pmr = 0; ++ ++ /* Verify the value of the low PMR base. It should always be 0. */ ++ if (os_sinit->vtd_pmr_lo_base != 0) ++ txt_reset(SLAUNCH_ERROR_LO_PMR_BASE); ++ ++ /* ++ * Low PMR size should not be 0 on current platforms. There is an ongoing ++ * transition to TPR-based DMA protection instead of PMR-based; this is not ++ * yet supported by the code. ++ */ ++ if (os_sinit->vtd_pmr_lo_size == 0) ++ txt_reset(SLAUNCH_ERROR_LO_PMR_BASE); ++ ++ /* Check if regions overlap. Treat regions with no hole between as error. */ ++ if (os_sinit->vtd_pmr_hi_size != 0 && ++ os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size) ++ txt_reset(SLAUNCH_ERROR_HI_PMR_BASE); ++ ++ /* All regions accessed by 32b code must be below 4G. */ ++ if (os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <= 0x100000000ull) ++ check_high_pmr = 1; ++ ++ /* ++ * ACM checks that TXT heap and MLE memory is protected against DMA. We have ++ * to check if MBI and whole Xen memory is protected. The latter is done in ++ * case bootloader failed to set whole image as MLE and to make sure that ++ * both pre- and post-relocation code is protected. ++ */ ++ ++ /* Check if all of Xen before relocation is covered by PMR. */ ++ if (!is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr)) ++ txt_reset(SLAUNCH_ERROR_LO_PMR_MLE); ++ ++ /* Check if all of Xen after relocation is covered by PMR. */ ++ if (load_base_addr != tgt_base_addr && ++ !is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr)) ++ txt_reset(SLAUNCH_ERROR_LO_PMR_MLE); ++ ++ /* Check if MBI is covered by PMR. MBI starts with 'uint32_t total_size'. */ ++ if (!is_in_pmr(os_sinit, os_mle->boot_params_addr, ++ *(uint32_t *)os_mle->boot_params_addr, check_high_pmr)) ++ txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR); ++ ++ /* Check if TPM event log (if present) is covered by PMR. 
*/
++ /*
++ * FIXME: currently commented out as GRUB allocates it in a hole between
++ * PMR and reserved RAM, due to 2MB resolution of PMR. There are no other
++ * easy-to-use DMA protection mechanisms that would allow protecting that
++ * part of memory. TPR (TXT DMA Protection Range) gives 1MB resolution, but
++ * it still wouldn't be enough.
++ *
++ * One possible solution would be for GRUB to allocate the log at a lower
++ * address, but this would further increase memory space fragmentation.
++ * Another option is to align PMR up instead of down, making PMR cover part
++ * of the reserved region, but it is unclear what the consequences may be.
++ *
++ * In tboot this issue was resolved by reserving leftover chunks of memory
++ * in e820 and/or UEFI memory map. This is also a valid solution, but would
++ * require more changes to GRUB than the ones listed above, as the event log
++ * is allocated much earlier than PMRs.
++ */
++ /*
++ if (os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 &&
++ !is_in_pmr(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
++ check_high_pmr))
++ txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
++ */
++}
++
++uint32_t __stdcall txt_early_tests(uint32_t load_base_addr,
++ uint32_t tgt_base_addr,
++ uint32_t tgt_end_addr)
++{
++ void *txt_heap;
++ struct txt_os_mle_data *os_mle;
++ struct txt_os_sinit_data *os_sinit;
++ uint32_t size = tgt_end_addr - tgt_base_addr;
++
++ /* Clear the TXT error registers for a clean start of day */
++ write_txt_reg(TXTCR_ERRORCODE, 0);
++
++ txt_heap = _p(read_txt_reg(TXTCR_HEAP_BASE));
++
++ if (txt_os_mle_data_size(txt_heap) < sizeof(*os_mle) ||
++ txt_os_sinit_data_size(txt_heap) < sizeof(*os_sinit))
++ txt_reset(SLAUNCH_ERROR_GENERIC);
++
++ os_mle = txt_os_mle_data_start(txt_heap);
++ os_sinit = txt_os_sinit_data_start(txt_heap);
++
++ verify_pmr_ranges(os_mle, os_sinit, load_base_addr, tgt_base_addr, size);
++
++ return os_mle->boot_params_addr;
++}
+diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h
+index 61dbdb63ec..3a1af8da39 100644
+--- a/xen/arch/x86/include/asm/intel_txt.h
++++ b/xen/arch/x86/include/asm/intel_txt.h
+@@ -75,6 +75,8 @@
+
+ #ifndef __ASSEMBLY__
+
++extern bool slaunch_active;
++
+ /* We need to differentiate between pre- and post paging enabled. */
+ #ifdef __BOOT_DEFS_H__
+ #define _txt(x) _p(x)
+@@ -259,4 +261,28 @@ static inline void *txt_sinit_mle_data_start(void *heap)
+ txt_os_sinit_data_size(heap) + sizeof(uint64_t);
+ }
+
++static inline int is_in_pmr(struct txt_os_sinit_data *os_sinit, uint64_t base,
++ uint32_t size, int check_high)
++{
++ /* Check for size overflow. */
++ if (base + size < base)
++ txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
++
++ /* Low range always starts at 0, so its size is also end address.
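++ * E.g. vtd_pmr_lo_size == 0x80000000 means [0, 2 GiB) is protected.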
*/ ++ if (base >= os_sinit->vtd_pmr_lo_base && ++ base + size <= os_sinit->vtd_pmr_lo_size) ++ return 1; ++ ++ if (check_high && os_sinit->vtd_pmr_hi_size != 0) { ++ if (os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size < ++ os_sinit->vtd_pmr_hi_size) ++ txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW); ++ if (base >= os_sinit->vtd_pmr_hi_base && ++ base + size <= os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size) ++ return 1; ++ } ++ ++ return 0; ++} ++ + #endif /* __ASSEMBLY__ */ +diff --git a/xen/arch/x86/intel_txt.c b/xen/arch/x86/intel_txt.c +new file mode 100644 +index 0000000000..d23727cc82 +--- /dev/null ++++ b/xen/arch/x86/intel_txt.c +@@ -0,0 +1,11 @@ ++#include ++#include ++#include ++#include ++ ++bool __initdata slaunch_active; ++ ++static void __maybe_unused compile_time_checks(void) ++{ ++ BUILD_BUG_ON(sizeof(slaunch_active) != 1); ++} +-- +2.44.0 + diff --git a/1303-include-xen-slr_table.h-Secure-Launch-Resource-Table.patch b/1303-include-xen-slr_table.h-Secure-Launch-Resource-Table.patch new file mode 100644 index 00000000..55af9dba --- /dev/null +++ b/1303-include-xen-slr_table.h-Secure-Launch-Resource-Table.patch @@ -0,0 +1,283 @@ +From 7513504592207d25ebd16cb0840399f31bb03954 Mon Sep 17 00:00:00 2001 +From: Sergii Dmytruk +Date: Sun, 29 Oct 2023 00:29:30 +0300 +Subject: [PATCH 1303/1323] include/xen/slr_table.h: Secure Launch Resource + Table definitions + +The file provides constants, structures and several helper functions for +parsing SLRT. + +slr_add_entry() and slr_init_table() were omitted to not have issues +with memcpy() usage (it comes from different places for different +translation units). + +Signed-off-by: Sergii Dmytruk +--- + xen/include/xen/slr_table.h | 255 ++++++++++++++++++++++++++++++++++++ + 1 file changed, 255 insertions(+) + create mode 100644 xen/include/xen/slr_table.h + +diff --git a/xen/include/xen/slr_table.h b/xen/include/xen/slr_table.h +new file mode 100644 +index 0000000000..74dd27b2e4 +--- /dev/null ++++ b/xen/include/xen/slr_table.h +@@ -0,0 +1,255 @@ ++/* SPDX-License-Identifier: GPL-3.0 */ ++ ++/* ++ * Copyright (C) 2023 Oracle and/or its affiliates. 
++ * ++ * Secure Launch Resource Table definitions ++ */ ++ ++#ifndef _SLR_TABLE_H ++#define _SLR_TABLE_H ++ ++#define UEFI_SLR_TABLE_GUID \ ++ { 0x877a9b2a, 0x0385, 0x45d1, { 0xa0, 0x34, 0x9d, 0xac, 0x9c, 0x9e, 0x56, 0x5f }} ++ ++/* SLR table header values */ ++#define SLR_TABLE_MAGIC 0x4452544d ++#define SLR_TABLE_REVISION 1 ++ ++/* Current revisions for the policy and UEFI config */ ++#define SLR_POLICY_REVISION 1 ++#define SLR_UEFI_CONFIG_REVISION 1 ++ ++/* SLR defined architectures */ ++#define SLR_INTEL_TXT 1 ++#define SLR_AMD_SKINIT 2 ++ ++/* SLR defined bootloaders */ ++#define SLR_BOOTLOADER_INVALID 0 ++#define SLR_BOOTLOADER_GRUB 1 ++ ++/* Log formats */ ++#define SLR_DRTM_TPM12_LOG 1 ++#define SLR_DRTM_TPM20_LOG 2 ++ ++/* DRTM Policy Entry Flags */ ++#define SLR_POLICY_FLAG_MEASURED 0x1 ++#define SLR_POLICY_IMPLICIT_SIZE 0x2 ++ ++/* Array Lengths */ ++#define TPM_EVENT_INFO_LENGTH 32 ++#define TXT_VARIABLE_MTRRS_LENGTH 32 ++ ++/* Tags */ ++#define SLR_ENTRY_INVALID 0x0000 ++#define SLR_ENTRY_DL_INFO 0x0001 ++#define SLR_ENTRY_LOG_INFO 0x0002 ++#define SLR_ENTRY_DRTM_POLICY 0x0003 ++#define SLR_ENTRY_INTEL_INFO 0x0004 ++#define SLR_ENTRY_AMD_INFO 0x0005 ++#define SLR_ENTRY_ARM_INFO 0x0006 ++#define SLR_ENTRY_UEFI_INFO 0x0007 ++#define SLR_ENTRY_UEFI_CONFIG 0x0008 ++#define SLR_ENTRY_END 0xffff ++ ++/* Entity Types */ ++#define SLR_ET_UNSPECIFIED 0x0000 ++#define SLR_ET_SLRT 0x0001 ++#define SLR_ET_BOOT_PARAMS 0x0002 ++#define SLR_ET_SETUP_DATA 0x0003 ++#define SLR_ET_CMDLINE 0x0004 ++#define SLR_ET_UEFI_MEMMAP 0x0005 ++#define SLR_ET_RAMDISK 0x0006 ++#define SLR_ET_MULTIBOOT2_INFO 0x0007 ++#define SLR_ET_MULTIBOOT2_MODULE 0x0008 ++#define SLR_ET_TXT_OS2MLE 0x0010 ++#define SLR_ET_UNUSED 0xffff ++ ++/* ++ * Primary SLR Table Header ++ */ ++struct slr_table ++{ ++ uint32_t magic; ++ uint16_t revision; ++ uint16_t architecture; ++ uint32_t size; ++ uint32_t max_size; ++ /* entries[] */ ++} __packed; ++ ++/* ++ * Common SLRT Table Header ++ */ ++struct slr_entry_hdr ++{ ++ uint16_t tag; ++ uint16_t size; ++} __packed; ++ ++/* ++ * Boot loader context ++ */ ++struct slr_bl_context ++{ ++ uint16_t bootloader; ++ uint16_t reserved; ++ uint64_t context; ++} __packed; ++ ++/* ++ * DRTM Dynamic Launch Configuration ++ */ ++struct slr_entry_dl_info ++{ ++ struct slr_entry_hdr hdr; ++ struct slr_bl_context bl_context; ++ uint64_t dl_handler; ++ uint64_t dce_base; ++ uint32_t dce_size; ++ uint64_t dlme_entry; ++} __packed; ++ ++/* ++ * TPM Log Information ++ */ ++struct slr_entry_log_info ++{ ++ struct slr_entry_hdr hdr; ++ uint16_t format; ++ uint16_t reserved; ++ uint64_t addr; ++ uint32_t size; ++} __packed; ++ ++/* ++ * DRTM Measurement Policy ++ */ ++struct slr_entry_policy ++{ ++ struct slr_entry_hdr hdr; ++ uint16_t revision; ++ uint16_t nr_entries; ++ /* policy_entries[] */ ++} __packed; ++ ++/* ++ * DRTM Measurement Entry ++ */ ++struct slr_policy_entry ++{ ++ uint16_t pcr; ++ uint16_t entity_type; ++ uint16_t flags; ++ uint16_t reserved; ++ uint64_t entity; ++ uint64_t size; ++ char evt_info[TPM_EVENT_INFO_LENGTH]; ++} __packed; ++ ++/* ++ * Secure Launch defined MTRR saving structures ++ */ ++struct slr_txt_mtrr_pair ++{ ++ uint64_t mtrr_physbase; ++ uint64_t mtrr_physmask; ++} __packed; ++ ++struct slr_txt_mtrr_state ++{ ++ uint64_t default_mem_type; ++ uint64_t mtrr_vcnt; ++ struct slr_txt_mtrr_pair mtrr_pair[TXT_VARIABLE_MTRRS_LENGTH]; ++} __packed; ++ ++/* ++ * Intel TXT Info table ++ */ ++struct slr_entry_intel_info ++{ ++ struct slr_entry_hdr hdr; ++ uint64_t 
saved_misc_enable_msr; ++ struct slr_txt_mtrr_state saved_bsp_mtrrs; ++} __packed; ++ ++/* ++ * AMD SKINIT Info table ++ */ ++struct slr_entry_amd_info ++{ ++ struct slr_entry_hdr hdr; ++} __packed; ++ ++/* ++ * ARM DRTM Info table ++ */ ++struct slr_entry_arm_info ++{ ++ struct slr_entry_hdr hdr; ++} __packed; ++ ++struct slr_entry_uefi_config ++{ ++ struct slr_entry_hdr hdr; ++ uint16_t revision; ++ uint16_t nr_entries; ++ /* uefi_cfg_entries[] */ ++} __packed; ++ ++struct slr_uefi_cfg_entry ++{ ++ uint16_t pcr; ++ uint16_t reserved; ++ uint64_t cfg; /* address or value */ ++ uint32_t size; ++ char evt_info[TPM_EVENT_INFO_LENGTH]; ++} __packed; ++ ++static inline void * ++slr_end_of_entries(struct slr_table *table) ++{ ++ return (uint8_t *)table + table->size; ++} ++ ++static inline struct slr_entry_hdr * ++slr_next_entry(struct slr_table *table, struct slr_entry_hdr *curr) ++{ ++ struct slr_entry_hdr *next = (struct slr_entry_hdr *) ++ ((uint8_t *)curr + curr->size); ++ ++ if ( (void *)next >= slr_end_of_entries(table) ) ++ return NULL; ++ if ( next->tag == SLR_ENTRY_END ) ++ return NULL; ++ ++ return next; ++} ++ ++static inline struct slr_entry_hdr * ++slr_next_entry_by_tag (struct slr_table *table, ++ struct slr_entry_hdr *entry, ++ uint16_t tag) ++{ ++ if ( !entry ) /* Start from the beginning */ ++ entry = (struct slr_entry_hdr *)((uint8_t *)table + sizeof(*table)); ++ ++ for ( ; ; ) ++ { ++ if ( entry->tag == tag ) ++ return entry; ++ ++ entry = slr_next_entry(table, entry); ++ if ( !entry ) ++ return NULL; ++ } ++ ++ return NULL; ++} ++ ++/* ++ * slr_add_entry() and slr_init_table() were omitted to not have issues with ++ * memcpy() usage. ++ */ ++ ++#endif /* _SLR_TABLE_H */ +-- +2.44.0 + diff --git a/1304-xen-arch-x86-reserve-TXT-memory.patch b/1304-xen-arch-x86-reserve-TXT-memory.patch new file mode 100644 index 00000000..0c07016c --- /dev/null +++ b/1304-xen-arch-x86-reserve-TXT-memory.patch @@ -0,0 +1,234 @@ +From 6992cf06190dce1aad3e20125b240c149b253328 Mon Sep 17 00:00:00 2001 +From: Kacper Stojek +Date: Fri, 2 Sep 2022 08:11:43 +0200 +Subject: [PATCH 1304/1323] xen/arch/x86: reserve TXT memory + +TXT heap is marked as reserved in e820 to protect against being allocated +and overwritten. + +Signed-off-by: Kacper Stojek +Signed-off-by: Krystian Hebel +Signed-off-by: Sergii Dmytruk +--- + xen/arch/x86/include/asm/intel_txt.h | 39 ++++++++++++ + xen/arch/x86/include/asm/mm.h | 3 + + xen/arch/x86/intel_txt.c | 91 ++++++++++++++++++++++++++++ + xen/arch/x86/setup.c | 12 +++- + 4 files changed, 142 insertions(+), 3 deletions(-) + +diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h +index 3a1af8da39..75a550a8a0 100644 +--- a/xen/arch/x86/include/asm/intel_txt.h ++++ b/xen/arch/x86/include/asm/intel_txt.h +@@ -86,6 +86,8 @@ extern bool slaunch_active; + #define _txt(x) __va(x) + #endif + ++#include ++ + /* + * Always use private space as some of registers are either read-only or not + * present in public space. +@@ -285,4 +287,41 @@ static inline int is_in_pmr(struct txt_os_sinit_data *os_sinit, uint64_t base, + return 0; + } + ++/* ++ * This helper function is used to map memory using L2 page tables by aligning ++ * mapped regions to 2MB. This way page allocator (which at this point isn't ++ * yet initialized) isn't needed for creating new L1 mappings. The function ++ * also checks and skips memory already mapped by the prebuilt tables. 
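++ * (i.e. everything below PREBUILT_MAP_LIMIT, the first 2 MiB).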
++ *
++ * There is no unmap_l2() because the function is meant for code that
++ * accesses TXT registers and the TXT heap; soon afterwards Xen rebuilds
++ * its memory maps, effectively dropping all existing mappings.
++ */
++extern int map_l2(unsigned long paddr, unsigned long size);
++
++/* evt_log is a physical address and the caller must map it to virtual, if
++ * needed. */
++static inline void find_evt_log(void **evt_log, uint32_t *evt_log_size)
++{
++ struct txt_os_mle_data *os_mle;
++ struct slr_table *slrt;
++ struct slr_entry_log_info *log_info;
++
++ os_mle = txt_os_mle_data_start(_txt(read_txt_reg(TXTCR_HEAP_BASE)));
++ slrt = _txt(os_mle->slrt);
++
++ log_info = (struct slr_entry_log_info *)
++ slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_LOG_INFO);
++ if ( log_info != NULL ) {
++ *evt_log = _p(log_info->addr);
++ *evt_log_size = log_info->size;
++ } else {
++ *evt_log = NULL;
++ *evt_log_size = 0;
++ }
++}
++
++extern void map_txt_mem_regions(void);
++extern void protect_txt_mem_regions(void);
++
+ #endif /* __ASSEMBLY__ */
+diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
+index d723c7c38f..989fb75038 100644
+--- a/xen/arch/x86/include/asm/mm.h
++++ b/xen/arch/x86/include/asm/mm.h
+@@ -98,6 +98,9 @@
+ #define _PGC_need_scrub _PGC_allocated
+ #define PGC_need_scrub PGC_allocated
+
++/* How much of the directmap is prebuilt at compile time. */
++#define PREBUILT_MAP_LIMIT (1 << L2_PAGETABLE_SHIFT)
++
+ #ifndef CONFIG_BIGMEM
+ /*
+ * This definition is solely for the use in struct page_info (and
+diff --git a/xen/arch/x86/intel_txt.c b/xen/arch/x86/intel_txt.c
+index d23727cc82..f7b5d3e576 100644
+--- a/xen/arch/x86/intel_txt.c
++++ b/xen/arch/x86/intel_txt.c
+@@ -1,7 +1,15 @@
+ #include
+ #include
+ #include
++#include
++#include
++#include
++#include
+ #include
++#include
++#include
++
++static uint64_t __initdata txt_heap_base, txt_heap_size;
+
+ bool __initdata slaunch_active;
+
+@@ -9,3 +17,86 @@ static void __maybe_unused compile_time_checks(void)
+ {
+ BUILD_BUG_ON(sizeof(slaunch_active) != 1);
+ }
++
++int __init map_l2(unsigned long paddr, unsigned long size)
++{
++ unsigned long aligned_paddr = paddr & ~((1ULL << L2_PAGETABLE_SHIFT) - 1);
++ unsigned long pages = ((paddr + size) - aligned_paddr);
++ pages = ROUNDUP(pages, 1ULL << L2_PAGETABLE_SHIFT) >> PAGE_SHIFT;
++
++ if ( (aligned_paddr + pages * PAGE_SIZE) <= PREBUILT_MAP_LIMIT )
++ return 0;
++
++ if ( aligned_paddr < PREBUILT_MAP_LIMIT ) {
++ pages -= (PREBUILT_MAP_LIMIT - aligned_paddr) >> PAGE_SHIFT;
++ aligned_paddr = PREBUILT_MAP_LIMIT;
++ }
++
++ return map_pages_to_xen((unsigned long)__va(aligned_paddr),
++ maddr_to_mfn(aligned_paddr),
++ pages, PAGE_HYPERVISOR);
++}
++
++void __init map_txt_mem_regions(void)
++{
++ void *evt_log_addr;
++ uint32_t evt_log_size;
++
++ map_l2(TXT_PRIV_CONFIG_REGS_BASE, NR_TXT_CONFIG_PAGES * PAGE_SIZE);
++
++ txt_heap_base = read_txt_reg(TXTCR_HEAP_BASE);
++ BUG_ON(txt_heap_base == 0);
++
++ txt_heap_size = read_txt_reg(TXTCR_HEAP_SIZE);
++ BUG_ON(txt_heap_size == 0);
++
++ map_l2(txt_heap_base, txt_heap_size);
++
++ find_evt_log(&evt_log_addr, &evt_log_size);
++ map_l2((unsigned long)evt_log_addr, evt_log_size);
++}
++
++void __init protect_txt_mem_regions(void)
++{
++ void *evt_log_addr;
++ uint32_t evt_log_size;
++
++ uint64_t sinit_base, sinit_size;
++
++ /* TXT Heap */
++ BUG_ON(txt_heap_base == 0);
++ printk("SLAUNCH: reserving TXT heap (%#lx - %#lx)\n", txt_heap_base,
++ txt_heap_base + txt_heap_size);
++ e820_change_range_type(&e820_raw,
txt_heap_base, ++ txt_heap_base + txt_heap_size, ++ E820_RAM, E820_RESERVED); ++ ++ /* TXT TPM Event Log */ ++ find_evt_log(&evt_log_addr, &evt_log_size); ++ if ( evt_log_addr != 0 ) { ++ printk("SLAUNCH: reserving event log (%#lx - %#lx)\n", ++ (uint64_t)evt_log_addr, ++ (uint64_t)evt_log_addr + evt_log_size); ++ e820_change_range_type(&e820_raw, (uint64_t)evt_log_addr, ++ (uint64_t)evt_log_addr + evt_log_size, ++ E820_RAM, E820_RESERVED); ++ } ++ ++ sinit_base = read_txt_reg(TXTCR_SINIT_BASE); ++ BUG_ON(sinit_base == 0); ++ ++ sinit_size = read_txt_reg(TXTCR_SINIT_SIZE); ++ BUG_ON(sinit_size == 0); ++ ++ /* SINIT */ ++ printk("SLAUNCH: reserving SINIT memory (%#lx - %#lx)\n", sinit_base, ++ sinit_base + sinit_size); ++ e820_change_range_type(&e820_raw, sinit_base, ++ sinit_base + sinit_size, ++ E820_RAM, E820_RESERVED); ++ ++ /* TXT Private Space */ ++ e820_change_range_type(&e820_raw, TXT_PRIV_CONFIG_REGS_BASE, ++ TXT_PRIV_CONFIG_REGS_BASE + NR_TXT_CONFIG_PAGES * PAGE_SIZE, ++ E820_RAM, E820_UNUSABLE); ++} +diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c +index 0c00ea875d..b20d182d98 100644 +--- a/xen/arch/x86/setup.c ++++ b/xen/arch/x86/setup.c +@@ -55,6 +55,7 @@ + #include + #include + #include ++#include + + /* opt_nosmp: If true, secondary processors are ignored. */ + static bool __initdata opt_nosmp; +@@ -849,9 +850,6 @@ static struct domain *__init create_dom0(const module_t *image, + return d; + } + +-/* How much of the directmap is prebuilt at compile time. */ +-#define PREBUILT_MAP_LIMIT (1 << L2_PAGETABLE_SHIFT) +- + void __init noreturn __start_xen(unsigned long mbi_p) + { + char *memmap_type = NULL; +@@ -1170,6 +1168,14 @@ void __init noreturn __start_xen(unsigned long mbi_p) + #endif + } + ++ if ( slaunch_active ) ++ { ++ /* Prepare for TXT-related code. */ ++ map_txt_mem_regions(); ++ /* Reserve TXT heap and SINIT. */ ++ protect_txt_mem_regions(); ++ } ++ + /* Sanitise the raw E820 map to produce a final clean version. */ + max_page = raw_max_page = init_e820(memmap_type, &e820_raw); + +-- +2.44.0 + diff --git a/1305-x86-intel_txt.c-restore-boot-MTRRs.patch b/1305-x86-intel_txt.c-restore-boot-MTRRs.patch new file mode 100644 index 00000000..eff71e3b --- /dev/null +++ b/1305-x86-intel_txt.c-restore-boot-MTRRs.patch @@ -0,0 +1,120 @@ +From 6630052ac492aa1a942e3e10b30c5fa20f4c21dd Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Wed, 19 Oct 2022 19:52:24 +0200 +Subject: [PATCH 1305/1323] x86/intel_txt.c: restore boot MTRRs + +In preparation for TXT SENTER call, GRUB had to modify MTRR settings +to be UC for everything except SINIT ACM. Old values are restored +from SLRT where they were saved by the bootloader. + +Signed-off-by: Krystian Hebel +Signed-off-by: Sergii Dmytruk +--- + xen/arch/x86/e820.c | 4 ++ + xen/arch/x86/include/asm/intel_txt.h | 1 + + xen/arch/x86/intel_txt.c | 59 ++++++++++++++++++++++++++++ + 3 files changed, 64 insertions(+) + +diff --git a/xen/arch/x86/e820.c b/xen/arch/x86/e820.c +index c5911cf48d..f836fec189 100644 +--- a/xen/arch/x86/e820.c ++++ b/xen/arch/x86/e820.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + + /* + * opt_mem: Limit maximum address of physical RAM. 
+@@ -455,6 +456,9 @@ static uint64_t __init mtrr_top_of_ram(void) + rdmsrl(MSR_MTRRcap, mtrr_cap); + rdmsrl(MSR_MTRRdefType, mtrr_def); + ++ if ( slaunch_active ) ++ txt_restore_mtrrs(e820_verbose); ++ + if ( e820_verbose ) + printk(" MTRR cap: %"PRIx64" type: %"PRIx64"\n", mtrr_cap, mtrr_def); + +diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h +index 75a550a8a0..c902cadcfb 100644 +--- a/xen/arch/x86/include/asm/intel_txt.h ++++ b/xen/arch/x86/include/asm/intel_txt.h +@@ -323,5 +323,6 @@ static inline void find_evt_log(void **evt_log, uint32_t *evt_log_size) + + extern void map_txt_mem_regions(void); + extern void protect_txt_mem_regions(void); ++extern void txt_restore_mtrrs(bool e820_verbose); + + #endif /* __ASSEMBLY__ */ +diff --git a/xen/arch/x86/intel_txt.c b/xen/arch/x86/intel_txt.c +index f7b5d3e576..66897c43dc 100644 +--- a/xen/arch/x86/intel_txt.c ++++ b/xen/arch/x86/intel_txt.c +@@ -100,3 +100,62 @@ void __init protect_txt_mem_regions(void) + TXT_PRIV_CONFIG_REGS_BASE + NR_TXT_CONFIG_PAGES * PAGE_SIZE, + E820_RAM, E820_UNUSABLE); + } ++ ++void __init txt_restore_mtrrs(bool e820_verbose) ++{ ++ struct txt_os_mle_data *os_mle; ++ struct slr_table *slrt; ++ struct slr_entry_intel_info *intel_info; ++ int os_mle_size; ++ uint64_t mtrr_cap, mtrr_def, base, mask; ++ unsigned int i; ++ ++ os_mle_size = txt_os_mle_data_size(__va(txt_heap_base)); ++ os_mle = txt_os_mle_data_start(__va(txt_heap_base)); ++ ++ if ( os_mle_size < sizeof(*os_mle) ) ++ panic("OS-MLE too small\n"); ++ ++ rdmsrl(MSR_MTRRcap, mtrr_cap); ++ rdmsrl(MSR_MTRRdefType, mtrr_def); ++ ++ if ( e820_verbose ) { ++ printk("MTRRs set previously for SINIT ACM:\n"); ++ printk(" MTRR cap: %"PRIx64" type: %"PRIx64"\n", mtrr_cap, mtrr_def); ++ ++ for ( i = 0; i < (uint8_t)mtrr_cap; i++ ) ++ { ++ rdmsrl(MSR_IA32_MTRR_PHYSBASE(i), base); ++ rdmsrl(MSR_IA32_MTRR_PHYSMASK(i), mask); ++ ++ printk(" MTRR[%d]: base %"PRIx64" mask %"PRIx64"\n", ++ i, base, mask); ++ } ++ } ++ ++ slrt = __va(os_mle->slrt); ++ intel_info = (struct slr_entry_intel_info *) ++ slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO); ++ ++ if ( (mtrr_cap & 0xFF) != intel_info->saved_bsp_mtrrs.mtrr_vcnt ) { ++ printk("Bootloader saved %ld MTRR values, but there should be %ld\n", ++ intel_info->saved_bsp_mtrrs.mtrr_vcnt, mtrr_cap & 0xFF); ++ /* Choose the smaller one to be on the safe side. */ ++ mtrr_cap = (mtrr_cap & 0xFF) > intel_info->saved_bsp_mtrrs.mtrr_vcnt ? ++ intel_info->saved_bsp_mtrrs.mtrr_vcnt : mtrr_cap; ++ } ++ ++ /* Restore MTRRs saved by bootloader. */ ++ wrmsrl(MSR_MTRRdefType, intel_info->saved_bsp_mtrrs.default_mem_type); ++ ++ for ( i = 0; i < (uint8_t)mtrr_cap; i++ ) ++ { ++ base = intel_info->saved_bsp_mtrrs.mtrr_pair[i].mtrr_physbase; ++ mask = intel_info->saved_bsp_mtrrs.mtrr_pair[i].mtrr_physmask; ++ wrmsrl(MSR_IA32_MTRR_PHYSBASE(i), base); ++ wrmsrl(MSR_IA32_MTRR_PHYSMASK(i), mask); ++ } ++ ++ if ( e820_verbose ) ++ printk("Restored MTRRs:\n"); /* Printed by caller, mtrr_top_of_ram(). */ ++} +-- +2.44.0 + diff --git a/1306-x86-sha1.c-add-file.patch b/1306-x86-sha1.c-add-file.patch new file mode 100644 index 00000000..23148da9 --- /dev/null +++ b/1306-x86-sha1.c-add-file.patch @@ -0,0 +1,232 @@ +From ebb2df4a3acb0d2c25c4ae394d297bf22b6d8037 Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Tue, 25 Oct 2022 16:04:17 +0200 +Subject: [PATCH 1306/1323] x86/sha1.c: add file + +File comes from [1] and is licensed under MIT License. 
Only enough +changes to make it compile under Xen and to swap endianness of result +were made to the original file. + +[1] https://www.nayuki.io/page/fast-sha1-hash-implementation-in-x86-assembly + +Signed-off-by: Krystian Hebel +Signed-off-by: Sergii Dmytruk +--- + xen/lib/Makefile | 1 + + xen/lib/sha1.c | 192 +++++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 193 insertions(+) + create mode 100644 xen/lib/sha1.c + +diff --git a/xen/lib/Makefile b/xen/lib/Makefile +index b311ea739c..3820e0bfbb 100644 +--- a/xen/lib/Makefile ++++ b/xen/lib/Makefile +@@ -28,6 +28,7 @@ lib-y += strrchr.o + lib-y += strsep.o + lib-y += strspn.o + lib-y += strstr.o ++lib-$(CONFIG_X86) += sha1.o + lib-$(CONFIG_X86) += xxhash32.o + lib-$(CONFIG_X86) += xxhash64.o + +diff --git a/xen/lib/sha1.c b/xen/lib/sha1.c +new file mode 100644 +index 0000000000..f62305b423 +--- /dev/null ++++ b/xen/lib/sha1.c +@@ -0,0 +1,192 @@ ++/* ++ * SHA-1 hash in C ++ * ++ * Copyright (c) 2021 Project Nayuki. (MIT License) ++ * https://www.nayuki.io/page/fast-sha1-hash-implementation-in-x86-assembly ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy of ++ * this software and associated documentation files (the "Software"), to deal in ++ * the Software without restriction, including without limitation the rights to ++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of ++ * the Software, and to permit persons to whom the Software is furnished to do so, ++ * subject to the following conditions: ++ * - The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * - The Software is provided "as is", without warranty of any kind, express or ++ * implied, including but not limited to the warranties of merchantability, ++ * fitness for a particular purpose and noninfringement. In no event shall the ++ * authors or copyright holders be liable for any claim, damages or other ++ * liability, whether in an action of contract, tort or otherwise, arising from, ++ * out of or in connection with the Software or the use or other dealings in the ++ * Software. 
++ */ ++ ++#include ++#include ++ ++#define UINT32_C(v) v ## U ++ ++#define BLOCK_LEN 64 // In bytes ++#define STATE_LEN 5 // In words ++ ++static void sha1_compress(const uint8_t block[static 64], uint32_t state[static 5]) { ++ #define ROTL32(x, n) (((0U + (x)) << (n)) | ((x) >> (32 - (n)))) // Assumes that x is uint32_t and 0 < n < 32 ++ ++ #define LOADSCHEDULE(i) \ ++ schedule[i] = (uint32_t)block[i * 4 + 0] << 24 \ ++ | (uint32_t)block[i * 4 + 1] << 16 \ ++ | (uint32_t)block[i * 4 + 2] << 8 \ ++ | (uint32_t)block[i * 4 + 3] << 0; ++ ++ #define SCHEDULE(i) \ ++ temp = schedule[(i - 3) & 0xF] ^ schedule[(i - 8) & 0xF] ^ schedule[(i - 14) & 0xF] ^ schedule[(i - 16) & 0xF]; \ ++ schedule[i & 0xF] = ROTL32(temp, 1); ++ ++ #define ROUND0a(a, b, c, d, e, i) LOADSCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) | (~b & d)) , i, 0x5A827999) ++ #define ROUND0b(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) | (~b & d)) , i, 0x5A827999) ++ #define ROUND1(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, (b ^ c ^ d) , i, 0x6ED9EBA1) ++ #define ROUND2(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) ^ (b & d) ^ (c & d)), i, 0x8F1BBCDC) ++ #define ROUND3(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, (b ^ c ^ d) , i, 0xCA62C1D6) ++ ++ #define ROUNDTAIL(a, b, e, f, i, k) \ ++ e = 0U + e + ROTL32(a, 5) + f + UINT32_C(k) + schedule[i & 0xF]; \ ++ b = ROTL32(b, 30); ++ ++ uint32_t a = state[0]; ++ uint32_t b = state[1]; ++ uint32_t c = state[2]; ++ uint32_t d = state[3]; ++ uint32_t e = state[4]; ++ ++ uint32_t schedule[16]; ++ uint32_t temp; ++ ROUND0a(a, b, c, d, e, 0) ++ ROUND0a(e, a, b, c, d, 1) ++ ROUND0a(d, e, a, b, c, 2) ++ ROUND0a(c, d, e, a, b, 3) ++ ROUND0a(b, c, d, e, a, 4) ++ ROUND0a(a, b, c, d, e, 5) ++ ROUND0a(e, a, b, c, d, 6) ++ ROUND0a(d, e, a, b, c, 7) ++ ROUND0a(c, d, e, a, b, 8) ++ ROUND0a(b, c, d, e, a, 9) ++ ROUND0a(a, b, c, d, e, 10) ++ ROUND0a(e, a, b, c, d, 11) ++ ROUND0a(d, e, a, b, c, 12) ++ ROUND0a(c, d, e, a, b, 13) ++ ROUND0a(b, c, d, e, a, 14) ++ ROUND0a(a, b, c, d, e, 15) ++ ROUND0b(e, a, b, c, d, 16) ++ ROUND0b(d, e, a, b, c, 17) ++ ROUND0b(c, d, e, a, b, 18) ++ ROUND0b(b, c, d, e, a, 19) ++ ROUND1(a, b, c, d, e, 20) ++ ROUND1(e, a, b, c, d, 21) ++ ROUND1(d, e, a, b, c, 22) ++ ROUND1(c, d, e, a, b, 23) ++ ROUND1(b, c, d, e, a, 24) ++ ROUND1(a, b, c, d, e, 25) ++ ROUND1(e, a, b, c, d, 26) ++ ROUND1(d, e, a, b, c, 27) ++ ROUND1(c, d, e, a, b, 28) ++ ROUND1(b, c, d, e, a, 29) ++ ROUND1(a, b, c, d, e, 30) ++ ROUND1(e, a, b, c, d, 31) ++ ROUND1(d, e, a, b, c, 32) ++ ROUND1(c, d, e, a, b, 33) ++ ROUND1(b, c, d, e, a, 34) ++ ROUND1(a, b, c, d, e, 35) ++ ROUND1(e, a, b, c, d, 36) ++ ROUND1(d, e, a, b, c, 37) ++ ROUND1(c, d, e, a, b, 38) ++ ROUND1(b, c, d, e, a, 39) ++ ROUND2(a, b, c, d, e, 40) ++ ROUND2(e, a, b, c, d, 41) ++ ROUND2(d, e, a, b, c, 42) ++ ROUND2(c, d, e, a, b, 43) ++ ROUND2(b, c, d, e, a, 44) ++ ROUND2(a, b, c, d, e, 45) ++ ROUND2(e, a, b, c, d, 46) ++ ROUND2(d, e, a, b, c, 47) ++ ROUND2(c, d, e, a, b, 48) ++ ROUND2(b, c, d, e, a, 49) ++ ROUND2(a, b, c, d, e, 50) ++ ROUND2(e, a, b, c, d, 51) ++ ROUND2(d, e, a, b, c, 52) ++ ROUND2(c, d, e, a, b, 53) ++ ROUND2(b, c, d, e, a, 54) ++ ROUND2(a, b, c, d, e, 55) ++ ROUND2(e, a, b, c, d, 56) ++ ROUND2(d, e, a, b, c, 57) ++ ROUND2(c, d, e, a, b, 58) ++ ROUND2(b, c, d, e, a, 59) ++ ROUND3(a, b, c, d, e, 60) ++ ROUND3(e, a, b, c, d, 61) ++ ROUND3(d, e, a, b, c, 62) ++ ROUND3(c, d, e, a, b, 63) ++ ROUND3(b, c, d, e, a, 64) ++ ROUND3(a, b, c, d, e, 65) ++ ROUND3(e, a, b, c, d, 66) ++ ROUND3(d, e, a, b, c, 67) ++ 
ROUND3(c, d, e, a, b, 68)
++ ROUND3(b, c, d, e, a, 69)
++ ROUND3(a, b, c, d, e, 70)
++ ROUND3(e, a, b, c, d, 71)
++ ROUND3(d, e, a, b, c, 72)
++ ROUND3(c, d, e, a, b, 73)
++ ROUND3(b, c, d, e, a, 74)
++ ROUND3(a, b, c, d, e, 75)
++ ROUND3(e, a, b, c, d, 76)
++ ROUND3(d, e, a, b, c, 77)
++ ROUND3(c, d, e, a, b, 78)
++ ROUND3(b, c, d, e, a, 79)
++
++ state[0] = 0U + state[0] + a;
++ state[1] = 0U + state[1] + b;
++ state[2] = 0U + state[2] + c;
++ state[3] = 0U + state[3] + d;
++ state[4] = 0U + state[4] + e;
++}
++
++/* Full message hasher */
++
++void sha1_hash(const uint8_t message[], size_t len, uint32_t hash[static STATE_LEN]) {
++ uint8_t block[BLOCK_LEN] = {0};
++ size_t rem;
++ size_t off;
++
++ hash[0] = UINT32_C(0x67452301);
++ hash[1] = UINT32_C(0xEFCDAB89);
++ hash[2] = UINT32_C(0x98BADCFE);
++ hash[3] = UINT32_C(0x10325476);
++ hash[4] = UINT32_C(0xC3D2E1F0);
++
++ #define LENGTH_SIZE 8 // In bytes
++
++ for (off = 0; len - off >= BLOCK_LEN; off += BLOCK_LEN)
++ sha1_compress(&message[off], hash);
++
++ rem = len - off;
++
++ if (rem > 0)
++ memcpy(block, &message[off], rem);
++
++ block[rem] = 0x80;
++ rem++;
++ if (BLOCK_LEN - rem < LENGTH_SIZE) {
++ sha1_compress(block, hash);
++ memset(block, 0, sizeof(block));
++ }
++
++ block[BLOCK_LEN - 1] = (uint8_t)((len & 0x1FU) << 3);
++ len >>= 5;
++ for (int i = 1; i < LENGTH_SIZE; i++, len >>= 8)
++ block[BLOCK_LEN - 1 - i] = (uint8_t)(len & 0xFFU);
++ sha1_compress(block, hash);
++
++ hash[0] = __builtin_bswap32(hash[0]);
++ hash[1] = __builtin_bswap32(hash[1]);
++ hash[2] = __builtin_bswap32(hash[2]);
++ hash[3] = __builtin_bswap32(hash[3]);
++ hash[4] = __builtin_bswap32(hash[4]);
++}
+--
+2.44.0
+
diff --git a/1307-x86-tpm.c-code-for-early-hashing-and-extending-PCRs-.patch b/1307-x86-tpm.c-code-for-early-hashing-and-extending-PCRs-.patch
new file mode 100644
index 00000000..6a012950
--- /dev/null
+++ b/1307-x86-tpm.c-code-for-early-hashing-and-extending-PCRs-.patch
@@ -0,0 +1,645 @@
+From 35c139100241f5afa5b53df6a79bccc6a6765379 Mon Sep 17 00:00:00 2001
+From: Krystian Hebel
+Date: Fri, 21 Oct 2022 18:46:33 +0200
+Subject: [PATCH 1307/1323] x86/tpm.c: code for early hashing and extending
+ PCRs (for TPM1.2)
+
+This file is built twice: once for early 32b mode without paging, to
+measure the MBI, and once for 64b code, to measure the dom0 kernel and
+initramfs. Since the MBI is small, the first case uses the TPM to do
+the hashing. The kernel and initramfs, on the other hand, are too big;
+sending them to the TPM would take multiple minutes.
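+
+Roughly, the split looks like this (an illustrative sketch only, not
+the code itself; __EARLY_TPM__ is the macro added to XEN_CFLAGS for the
+32b copy, see the Makefile change below):
+
+    #ifdef __EARLY_TPM__
+    /* 32b, no paging: stream the small MBI to the TPM and let it do the
+     * hashing itself (TPM_ORD_SHA1Start/Update/CompleteExtend). */
+    #else
+    /* 64b: hash kernel/initramfs in software and send only the final
+     * digest to the TPM (TPM_ORD_Extend). */
+    #endif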
+ +Signed-off-by: Krystian Hebel +Signed-off-by: Sergii Dmytruk +--- + xen/arch/x86/Makefile | 1 + + xen/arch/x86/boot/Makefile | 6 +- + xen/arch/x86/boot/head.S | 12 +- + xen/arch/x86/include/asm/intel_txt.h | 16 + + xen/arch/x86/tpm.c | 429 +++++++++++++++++++++++++++ + xen/include/xen/sha1.h | 11 + + xen/lib/sha1.c | 33 ++- + 7 files changed, 491 insertions(+), 17 deletions(-) + create mode 100644 xen/arch/x86/tpm.c + create mode 100644 xen/include/xen/sha1.h + +diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile +index 674e5a3289..76606f7617 100644 +--- a/xen/arch/x86/Makefile ++++ b/xen/arch/x86/Makefile +@@ -65,6 +65,7 @@ obj-y += spec_ctrl.o + obj-y += srat.o + obj-y += string.o + obj-y += time.o ++obj-y += tpm.o + obj-y += traps.o + obj-y += tsx.o + obj-y += usercopy.o +diff --git a/xen/arch/x86/boot/Makefile b/xen/arch/x86/boot/Makefile +index 34df17664a..913fa9d2c6 100644 +--- a/xen/arch/x86/boot/Makefile ++++ b/xen/arch/x86/boot/Makefile +@@ -1,6 +1,6 @@ + obj-bin-y += head.o + +-head-bin-objs := cmdline.o reloc.o txt_early.o ++head-bin-objs := cmdline.o reloc.o txt_early.o tpm_early.o + + nocov-y += $(head-bin-objs) + noubsan-y += $(head-bin-objs) +@@ -29,6 +29,10 @@ LDFLAGS_DIRECT += $(LDFLAGS_DIRECT-y) + %.bin: %.lnk + $(OBJCOPY) -j .text -O binary $< $@ + ++$(obj)/tpm_early.o: XEN_CFLAGS += -D__EARLY_TPM__ ++$(obj)/tpm_early.o: $(src)/../tpm.c FORCE ++ $(call if_changed_rule,cc_o_c) ++ + %.lnk: %.o $(src)/build32.lds + $(LD) $(subst x86_64,i386,$(LDFLAGS_DIRECT)) -N -T $(filter %.lds,$^) -o $@ $< + +diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S +index 422197af88..873d068d09 100644 +--- a/xen/arch/x86/boot/head.S ++++ b/xen/arch/x86/boot/head.S +@@ -531,10 +531,14 @@ __start: + call txt_early_tests + + /* +- * txt_early_tests() returns MBI address, move it to EBX, move magic +- * number expected by Multiboot 2 to EAX and fall through. ++ * txt_early_tests() returns MBI address, pass it to tpm_extend_mbi() ++ * and store for later in EBX. + */ ++ push %eax + movl %eax,%ebx ++ call tpm_extend_mbi ++ ++ /* Move magic number expected by Multiboot 2 to EAX and fall through. */ + movl $MULTIBOOT2_BOOTLOADER_MAGIC,%eax + + .Lmultiboot2_proto: +@@ -862,6 +866,10 @@ reloc: + txt_early_tests: + .incbin "txt_early.bin" + ++ ALIGN ++tpm_extend_mbi: ++ .incbin "tpm_early.bin" ++ + ENTRY(trampoline_start) + #include "trampoline.S" + ENTRY(trampoline_end) +diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h +index c902cadcfb..50b2f84def 100644 +--- a/xen/arch/x86/include/asm/intel_txt.h ++++ b/xen/arch/x86/include/asm/intel_txt.h +@@ -325,4 +325,20 @@ extern void map_txt_mem_regions(void); + extern void protect_txt_mem_regions(void); + extern void txt_restore_mtrrs(bool e820_verbose); + ++#define DRTM_LOC 2 ++#define DRTM_CODE_PCR 17 ++#define DRTM_DATA_PCR 18 ++ ++/* ++ * Secure Launch event log entry type. The TXT specification defines the ++ * base event value as 0x400 for DRTM values. 
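++ * The SLAUNCH_* types below are TrenchBoot Secure Launch-defined values
++ * on top of that base, not values from the TXT specification itself.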
++ */ ++#define TXT_EVTYPE_BASE 0x400 ++#define TXT_EVTYPE_SLAUNCH (TXT_EVTYPE_BASE + 0x102) ++#define TXT_EVTYPE_SLAUNCH_START (TXT_EVTYPE_BASE + 0x103) ++#define TXT_EVTYPE_SLAUNCH_END (TXT_EVTYPE_BASE + 0x104) ++ ++void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size, ++ uint32_t type, uint8_t *log_data, unsigned log_data_size); ++ + #endif /* __ASSEMBLY__ */ +diff --git a/xen/arch/x86/tpm.c b/xen/arch/x86/tpm.c +new file mode 100644 +index 0000000000..8281796d20 +--- /dev/null ++++ b/xen/arch/x86/tpm.c +@@ -0,0 +1,429 @@ ++/* ++ * Copyright (c) 2022 3mdeb Sp. z o.o. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program. If not, see . ++ */ ++ ++#ifdef __EARLY_TPM__ ++/* ++ * This entry point is entered from xen/arch/x86/boot/head.S with MBI base at ++ * 0x4(%esp). ++ */ ++asm ( ++ " .text \n" ++ " .globl _start \n" ++ "_start: \n" ++ " jmp tpm_extend_mbi \n" ++ ); ++ ++#include "boot/defs.h" ++#include "include/asm/intel_txt.h" ++#ifdef __va ++#error "__va defined in non-paged mode!" ++#endif ++#define __va(x) _p(x) ++ ++/* ++ * The implementation is necessary if compiler chooses to not use an inline ++ * builtin. ++ */ ++void *memcpy(void *dest, const void *src, size_t n) ++{ ++ const uint8_t *s = src; ++ uint8_t *d = dest; ++ ++ while ( n-- ) ++ *d++ = *s++; ++ ++ return dest; ++} ++ ++#else /* __EARLY_TPM__ */ ++ ++#include ++#include ++#include ++#include ++ ++#endif /* __EARLY_TPM__ */ ++ ++#include ++ ++#define TPM_TIS_BASE 0xFED40000 ++#define TPM_LOC_REG(loc, reg) (0x1000 * (loc) + (reg)) ++ ++#define TPM_ACCESS_(x) TPM_LOC_REG(x, 0x00) ++#define ACCESS_REQUEST_USE (1 << 1) ++#define ACCESS_ACTIVE_LOCALITY (1 << 5) ++#define TPM_INTF_CAPABILITY_(x) TPM_LOC_REG(x, 0x14) ++#define INTF_VERSION_MASK 0x70000000 ++#define TPM_STS_(x) TPM_LOC_REG(x, 0x18) ++#define TPM_FAMILY_MASK 0x0C000000 ++#define STS_DATA_AVAIL (1 << 4) ++#define STS_TPM_GO (1 << 5) ++#define STS_COMMAND_READY (1 << 6) ++#define STS_VALID (1 << 7) ++#define TPM_DATA_FIFO_(x) TPM_LOC_REG(x, 0x24) ++ ++#define swap16(x) __builtin_bswap16(x) ++#define swap32(x) __builtin_bswap32(x) ++#define memcpy(d, s, n) __builtin_memcpy(d, s, n) ++ ++static inline volatile uint32_t tis_read32(unsigned reg) ++{ ++ return *(volatile uint32_t *)__va(TPM_TIS_BASE + reg); ++} ++ ++static inline volatile uint8_t tis_read8(unsigned reg) ++{ ++ return *(volatile uint8_t *)__va(TPM_TIS_BASE + reg); ++} ++ ++static inline void tis_write8(unsigned reg, uint8_t val) ++{ ++ *(volatile uint8_t *)__va(TPM_TIS_BASE + reg) = val; ++} ++ ++static inline void request_locality(unsigned loc) ++{ ++ tis_write8(TPM_ACCESS_(loc), ACCESS_REQUEST_USE); ++ /* Check that locality was actually activated. 
*/ ++ while ( (tis_read8(TPM_ACCESS_(loc)) & ACCESS_ACTIVE_LOCALITY) == 0 ); ++} ++ ++static inline void relinquish_locality(unsigned loc) ++{ ++ tis_write8(TPM_ACCESS_(loc), ACCESS_ACTIVE_LOCALITY); ++} ++ ++static void send_cmd(unsigned loc, uint8_t *buf, unsigned i_size, ++ unsigned *o_size) ++{ ++ /* ++ * Value of "data available" bit counts only when "valid" field is set as ++ * well. ++ */ ++ const unsigned data_avail = STS_VALID | STS_DATA_AVAIL; ++ ++ unsigned i; ++ ++ /* Make sure TPM can accept a command. */ ++ if ( (tis_read8(TPM_STS_(loc)) & STS_COMMAND_READY) == 0 ) { ++ /* Abort current command. */ ++ tis_write8(TPM_STS_(loc), STS_COMMAND_READY); ++ /* Wait until TPM is ready for a new one. */ ++ while ( (tis_read8(TPM_STS_(loc)) & STS_COMMAND_READY) == 0 ); ++ } ++ ++ for ( i = 0; i < i_size; i++ ) ++ tis_write8(TPM_DATA_FIFO_(loc), buf[i]); ++ ++ tis_write8(TPM_STS_(loc), STS_TPM_GO); ++ ++ /* Wait for the first byte of response. */ ++ while ( (tis_read8(TPM_STS_(loc)) & data_avail) != data_avail); ++ ++ for ( i = 0; i < *o_size && tis_read8(TPM_STS_(loc)) & data_avail; i++ ) ++ buf[i] = tis_read8(TPM_DATA_FIFO_(loc)); ++ ++ if ( i < *o_size ) ++ *o_size = i; ++ ++ tis_write8(TPM_STS_(loc), STS_COMMAND_READY); ++} ++ ++static inline bool is_tpm12(void) ++{ ++ /* ++ * If one of these conditions is true: ++ * - INTF_CAPABILITY_x.interfaceVersion is 0 (TIS <= 1.21) ++ * - INTF_CAPABILITY_x.interfaceVersion is 2 (TIS == 1.3) ++ * - STS_x.tpmFamily is 0 ++ * we're dealing with TPM1.2. ++ */ ++ uint32_t intf_version = tis_read32(TPM_INTF_CAPABILITY_(0)) ++ & INTF_VERSION_MASK; ++ return (intf_version == 0x00000000 || intf_version == 0x20000000 || ++ (tis_read32(TPM_STS_(0)) & TPM_FAMILY_MASK) == 0); ++} ++ ++/****************************** TPM1.2 specific *******************************/ ++#define TPM_ORD_Extend 0x00000014 ++#define TPM_ORD_SHA1Start 0x000000A0 ++#define TPM_ORD_SHA1Update 0x000000A1 ++#define TPM_ORD_SHA1CompleteExtend 0x000000A3 ++ ++#define TPM_TAG_RQU_COMMAND 0x00C1 ++#define TPM_TAG_RSP_COMMAND 0x00C4 ++ ++/* All fields of following structs are big endian. 
*/ ++struct tpm_cmd_hdr { ++ uint16_t tag; ++ uint32_t paramSize; ++ uint32_t ordinal; ++} __packed; ++ ++struct tpm_rsp_hdr { ++ uint16_t tag; ++ uint32_t paramSize; ++ uint32_t returnCode; ++} __packed; ++ ++struct extend_cmd { ++ struct tpm_cmd_hdr h; ++ uint32_t pcrNum; ++ uint8_t inDigest[SHA1_DIGEST_SIZE]; ++} __packed; ++ ++struct extend_rsp { ++ struct tpm_rsp_hdr h; ++ uint8_t outDigest[SHA1_DIGEST_SIZE]; ++} __packed; ++ ++struct sha1_start_cmd { ++ struct tpm_cmd_hdr h; ++} __packed; ++ ++struct sha1_start_rsp { ++ struct tpm_rsp_hdr h; ++ uint32_t maxNumBytes; ++} __packed; ++ ++struct sha1_update_cmd { ++ struct tpm_cmd_hdr h; ++ uint32_t numBytes; /* Must be a multiple of 64 */ ++ uint8_t hashData[]; ++} __packed; ++ ++struct sha1_update_rsp { ++ struct tpm_rsp_hdr h; ++} __packed; ++ ++struct sha1_complete_extend_cmd { ++ struct tpm_cmd_hdr h; ++ uint32_t pcrNum; ++ uint32_t hashDataSize; /* 0-64, inclusive */ ++ uint8_t hashData[]; ++} __packed; ++ ++struct sha1_complete_extend_rsp { ++ struct tpm_rsp_hdr h; ++ uint8_t hashValue[SHA1_DIGEST_SIZE]; ++ uint8_t outDigest[SHA1_DIGEST_SIZE]; ++} __packed; ++ ++struct TPM12_PCREvent { ++ uint32_t PCRIndex; ++ uint32_t Type; ++ uint8_t Digest[SHA1_DIGEST_SIZE]; ++ uint32_t Size; ++ uint8_t Data[]; ++}; ++ ++struct txt_ev_log_container_12 { ++ char Signature[20]; /* "TXT Event Container", null-terminated */ ++ uint8_t Reserved[12]; ++ uint8_t ContainerVerMajor; ++ uint8_t ContainerVerMinor; ++ uint8_t PCREventVerMajor; ++ uint8_t PCREventVerMinor; ++ uint32_t ContainerSize; /* Allocated size */ ++ uint32_t PCREventsOffset; ++ uint32_t NextEventOffset; ++ struct TPM12_PCREvent PCREvents[]; ++}; ++ ++#ifdef __EARLY_TPM__ ++/* ++ * TPM1.2 is required to support commands of up to 1101 bytes, vendors rarely ++ * go above that. Limit maximum size of block of data to be hashed to 1024. 
++ */ ++#define MAX_HASH_BLOCK 1024 ++#define CMD_RSP_BUF_SIZE (sizeof(struct sha1_update_cmd) + MAX_HASH_BLOCK) ++ ++union cmd_rsp { ++ struct sha1_start_cmd start_c; ++ struct sha1_start_rsp start_r; ++ struct sha1_update_cmd update_c; ++ struct sha1_update_rsp update_r; ++ struct sha1_complete_extend_cmd finish_c; ++ struct sha1_complete_extend_rsp finish_r; ++ uint8_t buf[CMD_RSP_BUF_SIZE]; ++}; ++ ++static void tpm12_hash_extend(unsigned loc, uint8_t *buf, unsigned size, ++ unsigned pcr, uint8_t *out_digest) ++{ ++ union cmd_rsp cmd_rsp; ++ unsigned max_bytes = MAX_HASH_BLOCK; ++ unsigned o_size = sizeof(cmd_rsp); ++ ++ request_locality(loc); ++ ++ cmd_rsp.start_c = (struct sha1_start_cmd) { ++ .h.tag = swap16(TPM_TAG_RQU_COMMAND), ++ .h.paramSize = swap32(sizeof(struct sha1_start_cmd)), ++ .h.ordinal = swap32(TPM_ORD_SHA1Start), ++ }; ++ ++ send_cmd(loc, cmd_rsp.buf, sizeof(struct sha1_start_cmd), &o_size); ++ ++ // assert (o_size >= sizeof(struct sha1_start_rsp)); ++ ++ if ( max_bytes > swap32(cmd_rsp.start_r.maxNumBytes) ) ++ max_bytes = swap32(cmd_rsp.start_r.maxNumBytes); ++ ++ while ( size > 64 ) { ++ if ( size < max_bytes ) ++ max_bytes = size & ~(64 - 1); ++ ++ o_size = sizeof(cmd_rsp); ++ ++ cmd_rsp.update_c = (struct sha1_update_cmd){ ++ .h.tag = swap16(TPM_TAG_RQU_COMMAND), ++ .h.paramSize = swap32(sizeof(struct sha1_update_cmd) + max_bytes), ++ .h.ordinal = swap32(TPM_ORD_SHA1Update), ++ .numBytes = swap32(max_bytes), ++ }; ++ memcpy(cmd_rsp.update_c.hashData, buf, max_bytes); ++ ++ send_cmd(loc, cmd_rsp.buf, sizeof(struct sha1_update_cmd) + max_bytes, ++ &o_size); ++ ++ // assert (o_size >= sizeof(struct sha1_update_rsp)); ++ ++ size -= max_bytes; ++ buf += max_bytes; ++ } ++ ++ o_size = sizeof(cmd_rsp); ++ ++ cmd_rsp.finish_c = (struct sha1_complete_extend_cmd) { ++ .h.tag = swap16(TPM_TAG_RQU_COMMAND), ++ .h.paramSize = swap32(sizeof(struct sha1_complete_extend_cmd) + size), ++ .h.ordinal = swap32(TPM_ORD_SHA1CompleteExtend), ++ .pcrNum = swap32(pcr), ++ .hashDataSize = swap32(size), ++ }; ++ memcpy(cmd_rsp.finish_c.hashData, buf, size); ++ ++ send_cmd(loc, cmd_rsp.buf, sizeof(struct sha1_complete_extend_cmd) + size, ++ &o_size); ++ ++ // assert (o_size >= sizeof(struct sha1_complete_extend_rsp)); ++ ++ relinquish_locality(loc); ++ ++ if ( out_digest != NULL ) ++ memcpy(out_digest, cmd_rsp.finish_r.hashValue, SHA1_DIGEST_SIZE); ++} ++ ++#else ++ ++union cmd_rsp { ++ struct extend_cmd extend_c; ++ struct extend_rsp extend_r; ++}; ++ ++static void tpm12_hash_extend(unsigned loc, uint8_t *buf, unsigned size, ++ unsigned pcr, uint8_t *out_digest) ++{ ++ union cmd_rsp cmd_rsp; ++ unsigned o_size = sizeof(cmd_rsp); ++ ++ sha1_hash(buf, size, out_digest); ++ ++ request_locality(loc); ++ ++ cmd_rsp.extend_c = (struct extend_cmd) { ++ .h.tag = swap16(TPM_TAG_RQU_COMMAND), ++ .h.paramSize = swap32(sizeof(struct extend_cmd)), ++ .h.ordinal = swap32(TPM_ORD_Extend), ++ .pcrNum = swap32(pcr), ++ }; ++ ++ memcpy(cmd_rsp.extend_c.inDigest, out_digest, SHA1_DIGEST_SIZE); ++ ++ send_cmd(loc, (uint8_t *)&cmd_rsp, sizeof(struct extend_cmd), &o_size); ++ ++ relinquish_locality(loc); ++} ++ ++#endif /* __EARLY_TPM__ */ ++ ++static void *create_log_event12(struct txt_ev_log_container_12 *evt_log, ++ uint32_t evt_log_size, uint32_t pcr, ++ uint32_t type, uint8_t *data, ++ unsigned data_size) ++{ ++ struct TPM12_PCREvent *new_entry; ++ ++ new_entry = (void *)(((uint8_t *)evt_log) + evt_log->NextEventOffset); ++ ++ /* ++ * Check if there is enough space left for new entry. 
++ * Note: it is possible to introduce a gap in event log if entry with big ++ * data_size is followed by another entry with smaller data. Maybe we should ++ * cap the event log size in such case? ++ */ ++ if ( evt_log->NextEventOffset + sizeof(struct TPM12_PCREvent) + data_size ++ > evt_log_size ) ++ return NULL; ++ ++ evt_log->NextEventOffset += sizeof(struct TPM12_PCREvent) + data_size; ++ ++ new_entry->PCRIndex = pcr; ++ new_entry->Type = type; ++ new_entry->Size = data_size; ++ ++ if ( data && data_size > 0 ) ++ memcpy(new_entry->Data, data, data_size); ++ ++ return new_entry->Digest; ++} ++ ++/************************** end of TPM1.2 specific ****************************/ ++ ++void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size, ++ uint32_t type, uint8_t *log_data, unsigned log_data_size) ++{ ++ void *evt_log_addr; ++ uint32_t evt_log_size; ++ ++ find_evt_log(&evt_log_addr, &evt_log_size); ++ evt_log_addr = __va(evt_log_addr); ++ ++ if ( is_tpm12() ) { ++ uint8_t sha1_digest[SHA1_DIGEST_SIZE]; ++ ++ struct txt_ev_log_container_12 *evt_log = evt_log_addr; ++ void *entry_digest = create_log_event12(evt_log, evt_log_size, pcr, ++ type, log_data, log_data_size); ++ ++ /* We still need to write computed hash somewhere. */ ++ if ( entry_digest == NULL ) ++ entry_digest = sha1_digest; ++ ++ tpm12_hash_extend(loc, buf, size, pcr, entry_digest); ++ } ++} ++ ++#ifdef __EARLY_TPM__ ++void __stdcall tpm_extend_mbi(uint32_t *mbi) ++{ ++ /* MBI starts with uint32_t total_size. */ ++ tpm_hash_extend(DRTM_LOC, DRTM_DATA_PCR, (uint8_t *)mbi, *mbi, ++ TXT_EVTYPE_SLAUNCH, NULL, 0); ++} ++#endif +diff --git a/xen/include/xen/sha1.h b/xen/include/xen/sha1.h +new file mode 100644 +index 0000000000..4f6c392b96 +--- /dev/null ++++ b/xen/include/xen/sha1.h +@@ -0,0 +1,11 @@ ++#ifndef __XEN_SHA1_H ++#define __XEN_SHA1_H ++ ++#include ++ ++#define SHA1_DIGEST_SIZE 20 ++ ++void sha1_hash(const uint8_t message[], size_t len, ++ uint8_t hash[static SHA1_DIGEST_SIZE]); ++ ++#endif /* !__XEN_SHA1_H */ +diff --git a/xen/lib/sha1.c b/xen/lib/sha1.c +index f62305b423..3404e26ba2 100644 +--- a/xen/lib/sha1.c ++++ b/xen/lib/sha1.c +@@ -21,6 +21,7 @@ + * Software. 
+ */ + ++#include + #include + #include + +@@ -150,21 +151,23 @@ static void sha1_compress(const uint8_t block[static 64], uint32_t state[static + + /* Full message hasher */ + +-void sha1_hash(const uint8_t message[], size_t len, uint32_t hash[static STATE_LEN]) { ++void sha1_hash(const uint8_t message[], size_t len, uint8_t hash[static SHA1_DIGEST_SIZE]) { + uint8_t block[BLOCK_LEN] = {0}; + size_t rem; + size_t off; + +- hash[0] = UINT32_C(0x67452301); +- hash[1] = UINT32_C(0xEFCDAB89); +- hash[2] = UINT32_C(0x98BADCFE); +- hash[3] = UINT32_C(0x10325476); +- hash[4] = UINT32_C(0xC3D2E1F0); ++ uint32_t state[STATE_LEN] = { ++ UINT32_C(0x67452301), ++ UINT32_C(0xEFCDAB89), ++ UINT32_C(0x98BADCFE), ++ UINT32_C(0x10325476), ++ UINT32_C(0xC3D2E1F0), ++ }; + + #define LENGTH_SIZE 8 // In bytes + + for (off = 0; len - off >= BLOCK_LEN; off += BLOCK_LEN) +- sha1_compress(&message[off], hash); ++ sha1_compress(&message[off], state); + + rem = len - off; + +@@ -174,7 +177,7 @@ void sha1_hash(const uint8_t message[], size_t len, uint32_t hash[static STATE_L + block[rem] = 0x80; + rem++; + if (BLOCK_LEN - rem < LENGTH_SIZE) { +- sha1_compress(block, hash); ++ sha1_compress(block, state); + memset(block, 0, sizeof(block)); + } + +@@ -182,11 +185,13 @@ void sha1_hash(const uint8_t message[], size_t len, uint32_t hash[static STATE_L + len >>= 5; + for (int i = 1; i < LENGTH_SIZE; i++, len >>= 8) + block[BLOCK_LEN - 1 - i] = (uint8_t)(len & 0xFFU); +- sha1_compress(block, hash); ++ sha1_compress(block, state); + +- hash[0] = __builtin_bswap32(hash[0]); +- hash[1] = __builtin_bswap32(hash[1]); +- hash[2] = __builtin_bswap32(hash[2]); +- hash[3] = __builtin_bswap32(hash[3]); +- hash[4] = __builtin_bswap32(hash[4]); ++ state[0] = __builtin_bswap32(state[0]); ++ state[1] = __builtin_bswap32(state[1]); ++ state[2] = __builtin_bswap32(state[2]); ++ state[3] = __builtin_bswap32(state[3]); ++ state[4] = __builtin_bswap32(state[4]); ++ ++ memcpy(hash, state, sizeof(state)); + } +-- +2.44.0 + diff --git a/1308-x86-boot-choose-AP-stack-based-on-APIC-ID.patch b/1308-x86-boot-choose-AP-stack-based-on-APIC-ID.patch new file mode 100644 index 00000000..cc3c9aad --- /dev/null +++ b/1308-x86-boot-choose-AP-stack-based-on-APIC-ID.patch @@ -0,0 +1,165 @@ +From c1579e33a85024000037cc0baf993f18d041c80d Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Wed, 16 Nov 2022 15:03:07 +0100 +Subject: [PATCH 1308/1323] x86/boot: choose AP stack based on APIC ID + +This is made as the first step of making parallel AP bring-up possible. +It should be enough for pre-C code. + +Parallel AP bring-up is necessary because TXT by design releases all APs +at once. In addition to that it reduces number of IPIs (and more +importantly, delays between them) required to start all logical +processors. This results in significant reduction of boot time, even +when DRTM is not used, with performance gain growing with the number of +logical CPUs. 
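+
+In C terms, the pre-C assembly added here does roughly the following
+(a readability sketch only; the real lookup is the loop in
+xen/arch/x86/boot/x86_64.S, which halts instead of returning NULL):
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    extern uint32_t x86_cpu_to_apicid[];  /* APIC ID per Xen CPU index */
+    extern void *stack_base[];            /* pre-allocated AP stacks */
+
+    static void *stack_for(uint32_t my_apicid, unsigned int nr_cpus)
+    {
+        unsigned int i;
+
+        for ( i = 0; i < nr_cpus; i++ )
+            if ( x86_cpu_to_apicid[i] == my_apicid )
+                return stack_base[i];     /* i is our Xen CPU index */
+
+        return NULL;                      /* unknown APIC ID: asm hlt()s */
+    }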
+ +Signed-off-by: Krystian Hebel +Signed-off-by: Sergii Dmytruk +--- + xen/arch/x86/boot/head.S | 1 + + xen/arch/x86/boot/trampoline.S | 20 ++++++++++++++++++++ + xen/arch/x86/boot/x86_64.S | 28 +++++++++++++++++++++++++++- + xen/arch/x86/include/asm/apicdef.h | 4 ++++ + xen/arch/x86/include/asm/msr-index.h | 3 +++ + xen/arch/x86/setup.c | 7 +++++++ + 6 files changed, 62 insertions(+), 1 deletion(-) + +diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S +index 873d068d09..b2ded95b0a 100644 +--- a/xen/arch/x86/boot/head.S ++++ b/xen/arch/x86/boot/head.S +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + #include + #include + +diff --git a/xen/arch/x86/boot/trampoline.S b/xen/arch/x86/boot/trampoline.S +index cdecf949b4..2df4c61f94 100644 +--- a/xen/arch/x86/boot/trampoline.S ++++ b/xen/arch/x86/boot/trampoline.S +@@ -72,6 +72,26 @@ trampoline_protmode_entry: + mov $X86_CR4_PAE,%ecx + mov %ecx,%cr4 + ++ /* ++ * Get APIC ID while we're in non-paged mode. Start by checking if ++ * x2APIC is enabled. ++ */ ++ mov $MSR_APIC_BASE, %ecx ++ rdmsr ++ and $APIC_BASE_EXTD, %eax ++ jnz .Lx2apic ++ ++ /* Not x2APIC, read from MMIO */ ++ mov $(APIC_DEFAULT_PHYS_BASE + APIC_ID), %esp ++ shr $24, %esp ++ jmp 1f ++ ++.Lx2apic: ++ mov $(MSR_X2APIC_FIRST + (APIC_ID >> MSR_X2APIC_SHIFT)), %ecx ++ rdmsr ++ mov %eax, %esp ++1: ++ + /* Load pagetable base register. */ + mov $sym_offs(idle_pg_table),%eax + add bootsym_rel(trampoline_xen_phys_start,4,%eax) +diff --git a/xen/arch/x86/boot/x86_64.S b/xen/arch/x86/boot/x86_64.S +index 5d12937a0e..ad2f5058f0 100644 +--- a/xen/arch/x86/boot/x86_64.S ++++ b/xen/arch/x86/boot/x86_64.S +@@ -15,7 +15,33 @@ ENTRY(__high_start) + mov $XEN_MINIMAL_CR4,%rcx + mov %rcx,%cr4 + +- mov stack_start(%rip),%rsp ++ test %ebx,%ebx ++ cmovz stack_start(%rip), %rsp ++ jz .L_stack_set ++ ++ /* APs only: get stack base from APIC ID saved in %esp. */ ++ mov $-1, %rax ++ lea x86_cpu_to_apicid(%rip), %rcx ++1: ++ add $1, %rax ++ cmp $NR_CPUS, %eax ++ jb 2f ++ hlt ++2: ++ cmp %esp, (%rcx, %rax, 4) ++ jne 1b ++ ++ /* %eax is now Xen CPU index. */ ++ lea stack_base(%rip), %rcx ++ mov (%rcx, %rax, 8), %rsp ++ ++ test %rsp,%rsp ++ jnz 1f ++ hlt ++1: ++ add $(STACK_SIZE - CPUINFO_sizeof), %rsp ++ ++.L_stack_set: + + /* Reset EFLAGS (subsumes CLI and CLD). */ + pushq $0 +diff --git a/xen/arch/x86/include/asm/apicdef.h b/xen/arch/x86/include/asm/apicdef.h +index 2440d83c8d..251d7f5465 100644 +--- a/xen/arch/x86/include/asm/apicdef.h ++++ b/xen/arch/x86/include/asm/apicdef.h +@@ -129,6 +129,10 @@ + + #define MAX_IO_APICS 128 + ++#ifndef __ASSEMBLY__ ++ + extern bool x2apic_enabled; + ++#endif /* !__ASSEMBLY__ */ ++ + #endif +diff --git a/xen/arch/x86/include/asm/msr-index.h b/xen/arch/x86/include/asm/msr-index.h +index 871cfe70bd..0d0232b4b5 100644 +--- a/xen/arch/x86/include/asm/msr-index.h ++++ b/xen/arch/x86/include/asm/msr-index.h +@@ -163,6 +163,9 @@ + #define MSR_X2APIC_FIRST 0x00000800 + #define MSR_X2APIC_LAST 0x000008ff + ++/* MSR offset can be obtained by shifting MMIO offset this number of bits to the right. */ ++#define MSR_X2APIC_SHIFT 4 ++ + #define MSR_X2APIC_TPR 0x00000808 + #define MSR_X2APIC_PPR 0x0000080a + #define MSR_X2APIC_EOI 0x0000080b +diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c +index b20d182d98..e814c76c10 100644 +--- a/xen/arch/x86/setup.c ++++ b/xen/arch/x86/setup.c +@@ -1907,6 +1907,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) + */ + if ( !pv_shim ) + { ++ /* Separate loop to make parallel AP bringup possible. 
*/ + for_each_present_cpu ( i ) + { + /* Set up cpu_to_node[]. */ +@@ -1914,6 +1915,12 @@ void __init noreturn __start_xen(unsigned long mbi_p) + /* Set up node_to_cpumask based on cpu_to_node[]. */ + numa_add_cpu(i); + ++ if ( stack_base[i] == NULL ) ++ stack_base[i] = cpu_alloc_stack(i); ++ } ++ ++ for_each_present_cpu ( i ) ++ { + if ( (park_offline_cpus || num_online_cpus() < max_cpus) && + !cpu_online(i) ) + { +-- +2.44.0 + diff --git a/1309-x86-smpboot.c-TXT-AP-bringup.patch b/1309-x86-smpboot.c-TXT-AP-bringup.patch new file mode 100644 index 00000000..d50d31fb --- /dev/null +++ b/1309-x86-smpboot.c-TXT-AP-bringup.patch @@ -0,0 +1,184 @@ +From 6b7785618da8206bc28257b85e2470ea2e8b6bd2 Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Wed, 16 Nov 2022 15:06:18 +0100 +Subject: [PATCH 1309/1323] x86/smpboot.c: TXT AP bringup + +On Intel TXT, APs are started in one of two ways, depending on ACM +which reports it in its information table. In both cases, all APs are +started simultaneously after BSP requests them to do so. Two possible +ways are: +- GETSEC[WAKEUP] instruction, +- MONITOR address. + +GETSEC[WAKEUP] requires versions >= 7 of SINIT to MLE Data, but there is +no clear mapping of that version with regard to processor family and +it's not known which CPUs actually use it. It could have been designed +for TXT support on CPUs that lack MONITOR/MWAIT, because GETSEC[WAKEUP] +seems to be more complicated, in software and hardware alike. + +This patch implements only MONITOR approach, GETSEC[WAKEUP] support will +be added later once more details and means of testing are available and +if there is a practical need for it. + +With this patch, every AP goes through assembly part, and only when in +start_secondary() in C they re-enter MONITOR/MWAIT iff they are not the +AP that was asked to boot. The same address is reused for simplicity, +and on next wakeup call APs don't have to go through assembly part +again (GDT, paging, stack setting). + +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/boot/trampoline.S | 19 ++++++++++- + xen/arch/x86/include/asm/intel_txt.h | 6 ++++ + xen/arch/x86/smpboot.c | 49 ++++++++++++++++++++++++++++ + 3 files changed, 73 insertions(+), 1 deletion(-) + +diff --git a/xen/arch/x86/boot/trampoline.S b/xen/arch/x86/boot/trampoline.S +index 2df4c61f94..4b5808b9cb 100644 +--- a/xen/arch/x86/boot/trampoline.S ++++ b/xen/arch/x86/boot/trampoline.S +@@ -59,6 +59,16 @@ GLOBAL(trampoline_realmode_entry) + ljmpl $BOOT_CS32,$bootsym_rel(trampoline_protmode_entry,6) + + .code32 ++GLOBAL(txt_ap_entry) ++ /* ++ * APs enter here in protected mode without paging. GDT is set in JOIN ++ * structure, it points to trampoline_gdt. Interrupts are disabled by ++ * TXT (including NMI and SMI), so IDT doesn't matter at this point. ++ * The only missing point is telling that we are AP by saving non-zero ++ * value in EBX. ++ */ ++ mov $1, %ebx ++ + trampoline_protmode_entry: + /* Set up a few descriptors: on entry only CS is guaranteed good. */ + mov $BOOT_DS,%eax +@@ -143,7 +153,7 @@ start64: + .word 0 + idt_48: .word 0, 0, 0 # base = limit = 0 + +-trampoline_gdt: ++GLOBAL(trampoline_gdt) + .word 0 /* 0x0000: unused (reused for GDTR) */ + gdt_48: + .word .Ltrampoline_gdt_end - trampoline_gdt - 1 +@@ -154,6 +164,13 @@ gdt_48: + .quad 0x00cf93000000ffff /* 0x0018: ring 0 data */ + .quad 0x00009b000000ffff /* 0x0020: real-mode code @ BOOT_TRAMPOLINE */ + .quad 0x000093000000ffff /* 0x0028: real-mode data @ BOOT_TRAMPOLINE */ ++ /* ++ * Intel TXT requires these two in exact order. 
This isn't compatible ++ * with order required by syscall, so we have duplicated entries... ++ * If order ever changes, update selector numbers in asm/intel_txt.h. ++ */ ++ .quad 0x00cf9b000000ffff /* 0x0030: ring 0 code, 32-bit mode */ ++ .quad 0x00cf93000000ffff /* 0x0038: ring 0 data */ + .Ltrampoline_gdt_end: + + /* Relocations for trampoline Real Mode segments. */ +diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h +index 50b2f84def..61d950b3ba 100644 +--- a/xen/arch/x86/include/asm/intel_txt.h ++++ b/xen/arch/x86/include/asm/intel_txt.h +@@ -73,10 +73,16 @@ + + #define SLAUNCH_BOOTLOADER_MAGIC 0x4c534254 + ++#define TXT_AP_BOOT_CS 0x0030 ++#define TXT_AP_BOOT_DS 0x0038 ++ + #ifndef __ASSEMBLY__ + + extern bool slaunch_active; + ++extern char txt_ap_entry[]; ++extern uint32_t trampoline_gdt[]; ++ + /* We need to differentiate between pre- and post paging enabled. */ + #ifdef __BOOT_DEFS_H__ + #define _txt(x) _p(x) +diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c +index 41d9454d53..9993b5a71d 100644 +--- a/xen/arch/x86/smpboot.c ++++ b/xen/arch/x86/smpboot.c +@@ -39,6 +39,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -331,6 +332,29 @@ void start_secondary(void *unused) + */ + unsigned int cpu = booting_cpu; + ++ if ( slaunch_active ) { ++ uint64_t misc_enable; ++ uint32_t my_apicid; ++ struct txt_sinit_mle_data *sinit_mle = ++ txt_sinit_mle_data_start(__va(read_txt_reg(TXTCR_HEAP_BASE))); ++ ++ /* TXT released us with MONITOR disabled in IA32_MISC_ENABLE. */ ++ rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); ++ wrmsrl(MSR_IA32_MISC_ENABLE, ++ misc_enable | MSR_IA32_MISC_ENABLE_MONITOR_ENABLE); ++ ++ /* get_apic_id() reads from x2APIC if it thinks it is enabled. */ ++ x2apic_ap_setup(); ++ my_apicid = get_apic_id(); ++ ++ while ( my_apicid != x86_cpu_to_apicid[cpu] ) { ++ asm volatile ("monitor; xor %0,%0; mwait" ++ :: "a"(__va(sinit_mle->rlp_wakeup_addr)), "c"(0), ++ "d"(0) : "memory"); ++ cpu = booting_cpu; ++ } ++ } ++ + /* Critical region without IDT or TSS. Any fault is deadly! */ + + set_current(idle_vcpu[cpu]); +@@ -424,6 +448,28 @@ void start_secondary(void *unused) + startup_cpu_idle_loop(); + } + ++static int slaunch_wake_aps(unsigned long trampoline_rm) ++{ ++ struct txt_sinit_mle_data *sinit_mle = ++ txt_sinit_mle_data_start(__va(read_txt_reg(TXTCR_HEAP_BASE))); ++ uint32_t *wakeup_addr = __va(sinit_mle->rlp_wakeup_addr); ++#define trampoline_relative(x) (trampoline_rm + ((char *)(x) - trampoline_realmode_entry)) ++ uint32_t join[4] = { ++ trampoline_gdt[1], /* GDT limit */ ++ trampoline_relative(trampoline_gdt), /* GDT base */ ++ TXT_AP_BOOT_CS, /* CS selector, DS = CS+8 */ ++ trampoline_relative(txt_ap_entry) /* EIP */ ++ }; ++ ++ write_txt_reg(TXTCR_MLE_JOIN, __pa(join)); ++ ++ smp_mb(); ++ ++ *wakeup_addr = 1; ++ ++ return 0; ++} ++ + static int wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) + { + unsigned long send_status = 0, accept_status = 0; +@@ -446,6 +492,9 @@ static int wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) + if ( tboot_in_measured_env() && !tboot_wake_ap(phys_apicid, start_eip) ) + return 0; + ++ if ( slaunch_active ) ++ return slaunch_wake_aps(start_eip); ++ + /* + * Be paranoid about clearing APIC errors. 
+ */ +-- +2.44.0 + diff --git a/1310-arch-x86-don-t-access-x86_cpu_to_apicid-directly-use.patch b/1310-arch-x86-don-t-access-x86_cpu_to_apicid-directly-use.patch new file mode 100644 index 00000000..932c6c7a --- /dev/null +++ b/1310-arch-x86-don-t-access-x86_cpu_to_apicid-directly-use.patch @@ -0,0 +1,265 @@ +From 9549e17fe46935aeebe9b726865a5b70fdd69a22 Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Thu, 1 Jun 2023 16:05:18 +0200 +Subject: [PATCH 1310/1323] arch/x86: don't access x86_cpu_to_apicid[] + directly, use cpu_physical_id(cpu) + +This is done in preparation to move data from x86_cpu_to_apicid[] +elsewhere. + +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/acpi/cpu_idle.c | 4 ++-- + xen/arch/x86/acpi/lib.c | 2 +- + xen/arch/x86/apic.c | 2 +- + xen/arch/x86/cpu/mwait-idle.c | 4 ++-- + xen/arch/x86/domain.c | 2 +- + xen/arch/x86/mpparse.c | 6 +++--- + xen/arch/x86/numa.c | 2 +- + xen/arch/x86/platform_hypercall.c | 2 +- + xen/arch/x86/setup.c | 14 +++++++------- + xen/arch/x86/smpboot.c | 6 +++--- + xen/arch/x86/spec_ctrl.c | 2 +- + xen/arch/x86/sysctl.c | 2 +- + 12 files changed, 24 insertions(+), 24 deletions(-) + +diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c +index 557bc6ef86..c01942198f 100644 +--- a/xen/arch/x86/acpi/cpu_idle.c ++++ b/xen/arch/x86/acpi/cpu_idle.c +@@ -1256,7 +1256,7 @@ int get_cpu_id(u32 acpi_id) + + for ( i = 0; i < nr_cpu_ids; i++ ) + { +- if ( apic_id == x86_cpu_to_apicid[i] ) ++ if ( apic_id == cpu_physical_id(i) ) + return i; + } + +@@ -1362,7 +1362,7 @@ long set_cx_pminfo(uint32_t acpi_id, struct xen_processor_power *power) + + if ( !cpu_online(cpu_id) ) + { +- uint32_t apic_id = x86_cpu_to_apicid[cpu_id]; ++ uint32_t apic_id = cpu_physical_id(cpu_id); + + /* + * If we've just learned of more available C states, wake the CPU if +diff --git a/xen/arch/x86/acpi/lib.c b/xen/arch/x86/acpi/lib.c +index 43831b92d1..55d0ef919c 100644 +--- a/xen/arch/x86/acpi/lib.c ++++ b/xen/arch/x86/acpi/lib.c +@@ -89,7 +89,7 @@ unsigned int acpi_get_processor_id(unsigned int cpu) + { + unsigned int acpiid, apicid; + +- if ((apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID) ++ if ((apicid = cpu_physical_id(cpu)) == BAD_APICID) + return INVALID_ACPIID; + + for (acpiid = 0; acpiid < ARRAY_SIZE(x86_acpiid_to_apicid); acpiid++) +diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c +index fcd8d5e8b2..2ac86a3774 100644 +--- a/xen/arch/x86/apic.c ++++ b/xen/arch/x86/apic.c +@@ -985,7 +985,7 @@ __next: + */ + if (boot_cpu_physical_apicid == -1U) + boot_cpu_physical_apicid = get_apic_id(); +- x86_cpu_to_apicid[0] = get_apic_id(); ++ cpu_physical_id(0) = get_apic_id(); + + ioapic_init(); + } +diff --git a/xen/arch/x86/cpu/mwait-idle.c b/xen/arch/x86/cpu/mwait-idle.c +index ffdc6fb2fc..1124f2be3d 100644 +--- a/xen/arch/x86/cpu/mwait-idle.c ++++ b/xen/arch/x86/cpu/mwait-idle.c +@@ -1213,8 +1213,8 @@ static void __init ivt_idle_state_table_update(void) + unsigned int cpu, max_apicid = boot_cpu_physical_apicid; + + for_each_present_cpu(cpu) +- if (max_apicid < x86_cpu_to_apicid[cpu]) +- max_apicid = x86_cpu_to_apicid[cpu]; ++ if (max_apicid < cpu_physical_id(cpu)) ++ max_apicid = cpu_physical_id(cpu); + switch (apicid_to_socket(max_apicid)) { + case 0: case 1: + /* 1 and 2 socket systems use default ivt_cstates */ +diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c +index aca9fa310c..9a8cc4e92e 100644 +--- a/xen/arch/x86/domain.c ++++ b/xen/arch/x86/domain.c +@@ -1538,7 +1538,7 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, 
XEN_GUEST_HANDLE_PARAM(void) arg) + break; + + cpu_id.phys_id = +- (uint64_t)x86_cpu_to_apicid[v->vcpu_id] | ++ (uint64_t)cpu_physical_id(v->vcpu_id) | + ((uint64_t)acpi_get_processor_id(v->vcpu_id) << 32); + + rc = -EFAULT; +diff --git a/xen/arch/x86/mpparse.c b/xen/arch/x86/mpparse.c +index d8ccab2449..b8cabebe7b 100644 +--- a/xen/arch/x86/mpparse.c ++++ b/xen/arch/x86/mpparse.c +@@ -187,7 +187,7 @@ static int MP_processor_info_x(struct mpc_config_processor *m, + " Processor with apicid %i ignored\n", apicid); + return cpu; + } +- x86_cpu_to_apicid[cpu] = apicid; ++ cpu_physical_id(cpu) = apicid; + cpumask_set_cpu(cpu, &cpu_present_map); + } + +@@ -822,12 +822,12 @@ void mp_unregister_lapic(uint32_t apic_id, uint32_t cpu) + if (!cpu || (apic_id == boot_cpu_physical_apicid)) + return; + +- if (x86_cpu_to_apicid[cpu] != apic_id) ++ if (cpu_physical_id(cpu) != apic_id) + return; + + physid_clear(apic_id, phys_cpu_present_map); + +- x86_cpu_to_apicid[cpu] = BAD_APICID; ++ cpu_physical_id(cpu) = BAD_APICID; + cpumask_clear_cpu(cpu, &cpu_present_map); + } + +diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c +index 322157fab7..22ff0923d4 100644 +--- a/xen/arch/x86/numa.c ++++ b/xen/arch/x86/numa.c +@@ -340,7 +340,7 @@ void __init init_cpu_to_node(void) + + for ( i = 0; i < nr_cpu_ids; i++ ) + { +- u32 apicid = x86_cpu_to_apicid[i]; ++ u32 apicid = cpu_physical_id(i); + if ( apicid == BAD_APICID ) + continue; + node = apicid < MAX_LOCAL_APIC ? apicid_to_node[apicid] : NUMA_NO_NODE; +diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c +index e7deee2268..a63751fff0 100644 +--- a/xen/arch/x86/platform_hypercall.c ++++ b/xen/arch/x86/platform_hypercall.c +@@ -588,7 +588,7 @@ ret_t do_platform_op( + } + else + { +- g_info->apic_id = x86_cpu_to_apicid[g_info->xen_cpuid]; ++ g_info->apic_id = cpu_physical_id(g_info->xen_cpuid); + g_info->acpi_id = acpi_get_processor_id(g_info->xen_cpuid); + ASSERT(g_info->apic_id != BAD_APICID); + g_info->flags = 0; +diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c +index e814c76c10..9d6ae23ec6 100644 +--- a/xen/arch/x86/setup.c ++++ b/xen/arch/x86/setup.c +@@ -317,7 +317,7 @@ static void __init init_idle_domain(void) + void srat_detect_node(int cpu) + { + nodeid_t node; +- u32 apicid = x86_cpu_to_apicid[cpu]; ++ u32 apicid = cpu_physical_id(cpu); + + node = apicid < MAX_LOCAL_APIC ? apicid_to_node[apicid] : NUMA_NO_NODE; + if ( node == NUMA_NO_NODE ) +@@ -344,7 +344,7 @@ static void __init normalise_cpu_order(void) + + for_each_present_cpu ( i ) + { +- apicid = x86_cpu_to_apicid[i]; ++ apicid = cpu_physical_id(i); + min_diff = min_cpu = ~0u; + + /* +@@ -355,12 +355,12 @@ static void __init normalise_cpu_order(void) + j < nr_cpu_ids; + j = cpumask_next(j, &cpu_present_map) ) + { +- diff = x86_cpu_to_apicid[j] ^ apicid; ++ diff = cpu_physical_id(j) ^ apicid; + while ( diff & (diff-1) ) + diff &= diff-1; + if ( (diff < min_diff) || + ((diff == min_diff) && +- (x86_cpu_to_apicid[j] < x86_cpu_to_apicid[min_cpu])) ) ++ (cpu_physical_id(j) < cpu_physical_id(min_cpu))) ) + { + min_diff = diff; + min_cpu = j; +@@ -376,9 +376,9 @@ static void __init normalise_cpu_order(void) + + /* Switch the best-matching CPU with the next CPU in logical order. 
*/ + j = cpumask_next(i, &cpu_present_map); +- apicid = x86_cpu_to_apicid[min_cpu]; +- x86_cpu_to_apicid[min_cpu] = x86_cpu_to_apicid[j]; +- x86_cpu_to_apicid[j] = apicid; ++ apicid = cpu_physical_id(min_cpu); ++ cpu_physical_id(min_cpu) = cpu_physical_id(j); ++ cpu_physical_id(j) = apicid; + } + } + +diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c +index 9993b5a71d..a7bcf4d415 100644 +--- a/xen/arch/x86/smpboot.c ++++ b/xen/arch/x86/smpboot.c +@@ -347,7 +347,7 @@ void start_secondary(void *unused) + x2apic_ap_setup(); + my_apicid = get_apic_id(); + +- while ( my_apicid != x86_cpu_to_apicid[cpu] ) { ++ while ( my_apicid != cpu_physical_id(cpu) ) { + asm volatile ("monitor; xor %0,%0; mwait" + :: "a"(__va(sinit_mle->rlp_wakeup_addr)), "c"(0), + "d"(0) : "memory"); +@@ -1211,7 +1211,7 @@ void __init smp_prepare_cpus(void) + print_cpu_info(0); + + boot_cpu_physical_apicid = get_apic_id(); +- x86_cpu_to_apicid[0] = boot_cpu_physical_apicid; ++ cpu_physical_id(0) = boot_cpu_physical_apicid; + + stack_base[0] = (void *)((unsigned long)stack_start & ~(STACK_SIZE - 1)); + +@@ -1426,7 +1426,7 @@ int __cpu_up(unsigned int cpu) + { + int apicid, ret; + +- if ( (apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID ) ++ if ( (apicid = cpu_physical_id(cpu)) == BAD_APICID ) + return -ENODEV; + + if ( (!x2apic_enabled && apicid >= APIC_ALL_CPUS) || +diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c +index 0ff3c895ac..38375f44dd 100644 +--- a/xen/arch/x86/spec_ctrl.c ++++ b/xen/arch/x86/spec_ctrl.c +@@ -601,7 +601,7 @@ static bool __init check_smt_enabled(void) + * has a non-zero thread id component indicates that SMT is active. + */ + for_each_present_cpu ( cpu ) +- if ( x86_cpu_to_apicid[cpu] & (boot_cpu_data.x86_num_siblings - 1) ) ++ if ( cpu_physical_id(cpu) & (boot_cpu_data.x86_num_siblings - 1) ) + return true; + + return false; +diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c +index 42dc360ad6..de9b75c1ec 100644 +--- a/xen/arch/x86/sysctl.c ++++ b/xen/arch/x86/sysctl.c +@@ -59,7 +59,7 @@ static long cf_check smt_up_down_helper(void *data) + for_each_present_cpu ( cpu ) + { + /* Skip primary siblings (those whose thread id is 0). */ +- if ( !(x86_cpu_to_apicid[cpu] & sibling_mask) ) ++ if ( !(cpu_physical_id(cpu) & sibling_mask) ) + continue; + + if ( !up && core_parking_remove(cpu) ) +-- +2.44.0 + diff --git a/1311-arch-x86-smp-drop-x86_cpu_to_apicid-use-cpu_data-cpu.patch b/1311-arch-x86-smp-drop-x86_cpu_to_apicid-use-cpu_data-cpu.patch new file mode 100644 index 00000000..97dbe1d5 --- /dev/null +++ b/1311-arch-x86-smp-drop-x86_cpu_to_apicid-use-cpu_data-cpu.patch @@ -0,0 +1,156 @@ +From a4dad19b79f4fb45eab220bdbad8bef98114a17f Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Thu, 1 Jun 2023 17:01:59 +0200 +Subject: [PATCH 1311/1323] arch/x86/smp: drop x86_cpu_to_apicid, use + cpu_data[cpu].apicid instead + +Both fields held the same data. 
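+
+Callers are unaffected because they go through the existing
+cpu_physical_id() accessor, which this patch simply repoints at
+cpu_data[] (excerpt of the change below):
+
+    /* before, in asm/smp.h: */
+    #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+    /* after, in asm/processor.h: */
+    #define cpu_physical_id(cpu) cpu_data[cpu].apicid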
+ +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/boot/x86_64.S | 8 +++++--- + xen/arch/x86/include/asm/asm_defns.h | 2 +- + xen/arch/x86/include/asm/processor.h | 2 ++ + xen/arch/x86/include/asm/smp.h | 4 ---- + xen/arch/x86/numa.c | 15 +++++++-------- + xen/arch/x86/smpboot.c | 8 ++++---- + xen/arch/x86/x86_64/asm-offsets.c | 4 +++- + 7 files changed, 22 insertions(+), 21 deletions(-) + +diff --git a/xen/arch/x86/boot/x86_64.S b/xen/arch/x86/boot/x86_64.S +index ad2f5058f0..ce280c914a 100644 +--- a/xen/arch/x86/boot/x86_64.S ++++ b/xen/arch/x86/boot/x86_64.S +@@ -20,15 +20,17 @@ ENTRY(__high_start) + jz .L_stack_set + + /* APs only: get stack base from APIC ID saved in %esp. */ +- mov $-1, %rax +- lea x86_cpu_to_apicid(%rip), %rcx ++ mov $0, %rax ++ lea cpu_data(%rip), %rcx ++ /* cpu_data[0] is BSP, skip it. */ + 1: + add $1, %rax ++ add $CPUINFO_X86_sizeof, %rcx + cmp $NR_CPUS, %eax + jb 2f + hlt + 2: +- cmp %esp, (%rcx, %rax, 4) ++ cmp %esp, CPUINFO_X86_apicid(%rcx) + jne 1b + + /* %eax is now Xen CPU index. */ +diff --git a/xen/arch/x86/include/asm/asm_defns.h b/xen/arch/x86/include/asm/asm_defns.h +index d9431180cf..3437b47a26 100644 +--- a/xen/arch/x86/include/asm/asm_defns.h ++++ b/xen/arch/x86/include/asm/asm_defns.h +@@ -158,7 +158,7 @@ register unsigned long current_stack_pointer asm("rsp"); + #endif + + #define CPUINFO_FEATURE_OFFSET(feature) \ +- (CPUINFO_features + (cpufeat_word(feature) * 4)) ++ (CPUINFO_X86_features + (cpufeat_word(feature) * 4)) + + #else + +diff --git a/xen/arch/x86/include/asm/processor.h b/xen/arch/x86/include/asm/processor.h +index f8f757a8a8..a64557a27b 100644 +--- a/xen/arch/x86/include/asm/processor.h ++++ b/xen/arch/x86/include/asm/processor.h +@@ -149,6 +149,8 @@ extern struct cpuinfo_x86 boot_cpu_data; + extern struct cpuinfo_x86 cpu_data[]; + #define current_cpu_data cpu_data[smp_processor_id()] + ++#define cpu_physical_id(cpu) cpu_data[cpu].apicid ++ + extern bool probe_cpuid_faulting(void); + extern void ctxt_switch_levelling(const struct vcpu *next); + extern void (*ctxt_switch_masking)(const struct vcpu *next); +diff --git a/xen/arch/x86/include/asm/smp.h b/xen/arch/x86/include/asm/smp.h +index 41a3b6a0da..b0cefdf814 100644 +--- a/xen/arch/x86/include/asm/smp.h ++++ b/xen/arch/x86/include/asm/smp.h +@@ -39,10 +39,6 @@ extern void (*mtrr_hook) (void); + + extern void zap_low_mappings(void); + +-extern u32 x86_cpu_to_apicid[]; +- +-#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] +- + #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) + extern void cpu_exit_clear(unsigned int cpu); + extern void cpu_uninit(unsigned int cpu); +diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c +index 22ff0923d4..393e4108c7 100644 +--- a/xen/arch/x86/numa.c ++++ b/xen/arch/x86/numa.c +@@ -324,14 +324,13 @@ custom_param("numa", numa_setup); + /* + * Setup early cpu_to_node. + * +- * Populate cpu_to_node[] only if x86_cpu_to_apicid[], +- * and apicid_to_node[] tables have valid entries for a CPU. +- * This means we skip cpu_to_node[] initialisation for NUMA +- * emulation and faking node case (when running a kernel compiled +- * for NUMA on a non NUMA box), which is OK as cpu_to_node[] +- * is already initialized in a round robin manner at numa_init_array, +- * prior to this call, and this initialization is good enough +- * for the fake NUMA cases. ++ * Populate cpu_to_node[] only if cpu_data[], and apicid_to_node[] ++ * tables have valid entries for a CPU. 
This means we skip ++ * cpu_to_node[] initialisation for NUMA emulation and faking node ++ * case (when running a kernel compiled for NUMA on a non NUMA box), ++ * which is OK as cpu_to_node[] is already initialized in a round ++ * robin manner at numa_init_array, prior to this call, and this ++ * initialization is good enough for the fake NUMA cases. + */ + void __init init_cpu_to_node(void) + { +diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c +index a7bcf4d415..8b636322d1 100644 +--- a/xen/arch/x86/smpboot.c ++++ b/xen/arch/x86/smpboot.c +@@ -73,10 +73,8 @@ unsigned int __read_mostly nr_sockets; + cpumask_t **__read_mostly socket_cpumask; + static cpumask_t *secondary_socket_cpumask; + +-struct cpuinfo_x86 cpu_data[NR_CPUS]; +- +-u32 x86_cpu_to_apicid[NR_CPUS] __read_mostly = +- { [0 ... NR_CPUS-1] = BAD_APICID }; ++struct cpuinfo_x86 cpu_data[NR_CPUS] = ++ { [0 ... NR_CPUS-1] .apicid = BAD_APICID }; + + static int cpu_error; + static enum cpu_state { +@@ -93,7 +91,9 @@ void *stack_base[NR_CPUS]; + + void initialize_cpu_data(unsigned int cpu) + { ++ uint32_t apicid = cpu_physical_id(cpu); + cpu_data[cpu] = boot_cpu_data; ++ cpu_physical_id(cpu) = apicid; + } + + static bool smp_store_cpu_info(unsigned int id) +diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c +index 287dac101a..6e7a8eb1a3 100644 +--- a/xen/arch/x86/x86_64/asm-offsets.c ++++ b/xen/arch/x86/x86_64/asm-offsets.c +@@ -159,7 +159,9 @@ void __dummy__(void) + OFFSET(IRQSTAT_softirq_pending, irq_cpustat_t, __softirq_pending); + BLANK(); + +- OFFSET(CPUINFO_features, struct cpuinfo_x86, x86_capability); ++ OFFSET(CPUINFO_X86_features, struct cpuinfo_x86, x86_capability); ++ OFFSET(CPUINFO_X86_apicid, struct cpuinfo_x86, apicid); ++ DEFINE(CPUINFO_X86_sizeof, sizeof(struct cpuinfo_x86)); + BLANK(); + + OFFSET(MB_flags, multiboot_info_t, flags); +-- +2.44.0 + diff --git a/1312-arch-x86-smp-move-stack_base-to-cpu_data.patch b/1312-arch-x86-smp-move-stack_base-to-cpu_data.patch new file mode 100644 index 00000000..b096f79c --- /dev/null +++ b/1312-arch-x86-smp-move-stack_base-to-cpu_data.patch @@ -0,0 +1,222 @@ +From 305728b23643c43cb52730474dbca6a4274e4be7 Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Thu, 1 Jun 2023 19:27:22 +0200 +Subject: [PATCH 1312/1323] arch/x86/smp: move stack_base to cpu_data + +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/boot/x86_64.S | 5 ++--- + xen/arch/x86/include/asm/processor.h | 1 + + xen/arch/x86/include/asm/smp.h | 2 +- + xen/arch/x86/setup.c | 6 +++--- + xen/arch/x86/smpboot.c | 25 +++++++++++++------------ + xen/arch/x86/tboot.c | 4 ++-- + xen/arch/x86/traps.c | 4 ++-- + xen/arch/x86/x86_64/asm-offsets.c | 1 + + xen/include/xen/smp.h | 2 -- + 9 files changed, 25 insertions(+), 25 deletions(-) + +diff --git a/xen/arch/x86/boot/x86_64.S b/xen/arch/x86/boot/x86_64.S +index ce280c914a..61040315f9 100644 +--- a/xen/arch/x86/boot/x86_64.S ++++ b/xen/arch/x86/boot/x86_64.S +@@ -33,9 +33,8 @@ ENTRY(__high_start) + cmp %esp, CPUINFO_X86_apicid(%rcx) + jne 1b + +- /* %eax is now Xen CPU index. */ +- lea stack_base(%rip), %rcx +- mov (%rcx, %rax, 8), %rsp ++ /* %rcx is now cpu_data[cpu], read stack base from it. 
*/ ++ mov CPUINFO_X86_stack_base(%rcx), %rsp + + test %rsp,%rsp + jnz 1f +diff --git a/xen/arch/x86/include/asm/processor.h b/xen/arch/x86/include/asm/processor.h +index a64557a27b..4e78a19afe 100644 +--- a/xen/arch/x86/include/asm/processor.h ++++ b/xen/arch/x86/include/asm/processor.h +@@ -137,6 +137,7 @@ struct cpuinfo_x86 { + __u32 phys_proc_id; /* package ID of each logical CPU */ + __u32 cpu_core_id; /* core ID of each logical CPU*/ + __u32 compute_unit_id; /* AMD compute unit ID of each logical CPU */ ++ void *stack_base; + unsigned short x86_clflush_size; + } __cacheline_aligned; + +diff --git a/xen/arch/x86/include/asm/smp.h b/xen/arch/x86/include/asm/smp.h +index b0cefdf814..4432a8ea1b 100644 +--- a/xen/arch/x86/include/asm/smp.h ++++ b/xen/arch/x86/include/asm/smp.h +@@ -76,7 +76,7 @@ extern cpumask_t **socket_cpumask; + * by certain scheduling code only. + */ + #define get_cpu_current(cpu) \ +- (get_cpu_info_from_stack((unsigned long)stack_base[cpu])->current_vcpu) ++ (get_cpu_info_from_stack((unsigned long)cpu_data[cpu].stack_base)->current_vcpu) + + extern unsigned int disabled_cpus; + extern bool unaccounted_cpus; +diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c +index 9d6ae23ec6..4ce475db08 100644 +--- a/xen/arch/x86/setup.c ++++ b/xen/arch/x86/setup.c +@@ -683,7 +683,7 @@ static void __init noreturn reinit_bsp_stack(void) + /* Update SYSCALL trampolines */ + percpu_traps_init(); + +- stack_base[0] = stack; ++ cpu_data[0].stack_base = stack; + + rc = setup_cpu_root_pgt(0); + if ( rc ) +@@ -1915,8 +1915,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) + /* Set up node_to_cpumask based on cpu_to_node[]. */ + numa_add_cpu(i); + +- if ( stack_base[i] == NULL ) +- stack_base[i] = cpu_alloc_stack(i); ++ if ( cpu_data[i].stack_base == NULL ) ++ cpu_data[i].stack_base = cpu_alloc_stack(i); + } + + for_each_present_cpu ( i ) +diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c +index 8b636322d1..490204038a 100644 +--- a/xen/arch/x86/smpboot.c ++++ b/xen/arch/x86/smpboot.c +@@ -87,13 +87,15 @@ static enum cpu_state { + } cpu_state; + #define set_cpu_state(state) do { smp_mb(); cpu_state = (state); } while (0) + +-void *stack_base[NR_CPUS]; +- + void initialize_cpu_data(unsigned int cpu) + { + uint32_t apicid = cpu_physical_id(cpu); ++ void *stack = cpu_data[cpu].stack_base; ++ + cpu_data[cpu] = boot_cpu_data; ++ + cpu_physical_id(cpu) = apicid; ++ cpu_data[cpu].stack_base = stack; + } + + static bool smp_store_cpu_info(unsigned int id) +@@ -636,8 +638,6 @@ static int do_boot_cpu(int apicid, int cpu) + printk("Booting processor %d/%d eip %lx\n", + cpu, apicid, start_eip); + +- stack_start = stack_base[cpu] + STACK_SIZE - sizeof(struct cpu_info); +- + /* This grunge runs the startup process for the targeted processor. */ + + set_cpu_state(CPU_STATE_INIT); +@@ -913,7 +913,7 @@ int setup_cpu_root_pgt(unsigned int cpu) + + /* Install direct map page table entries for stack, IDT, and TSS. 
*/ + for ( off = rc = 0; !rc && off < STACK_SIZE; off += PAGE_SIZE ) +- rc = clone_mapping(__va(__pa(stack_base[cpu])) + off, rpt); ++ rc = clone_mapping(__va(__pa(cpu_data[cpu].stack_base)) + off, rpt); + + if ( !rc ) + rc = clone_mapping(idt_tables[cpu], rpt); +@@ -1064,10 +1064,10 @@ static void cpu_smpboot_free(unsigned int cpu, bool remove) + FREE_XENHEAP_PAGE(per_cpu(gdt, cpu)); + FREE_XENHEAP_PAGE(idt_tables[cpu]); + +- if ( stack_base[cpu] ) ++ if ( cpu_data[cpu].stack_base ) + { +- memguard_unguard_stack(stack_base[cpu]); +- FREE_XENHEAP_PAGES(stack_base[cpu], STACK_ORDER); ++ memguard_unguard_stack(cpu_data[cpu].stack_base); ++ FREE_XENHEAP_PAGES(cpu_data[cpu].stack_base, STACK_ORDER); + } + } + } +@@ -1101,11 +1101,11 @@ static int cpu_smpboot_alloc(unsigned int cpu) + if ( node != NUMA_NO_NODE ) + memflags = MEMF_node(node); + +- if ( stack_base[cpu] == NULL && +- (stack_base[cpu] = cpu_alloc_stack(cpu)) == NULL ) ++ if ( cpu_data[cpu].stack_base == NULL && ++ (cpu_data[cpu].stack_base = cpu_alloc_stack(cpu)) == NULL ) + goto out; + +- info = get_cpu_info_from_stack((unsigned long)stack_base[cpu]); ++ info = get_cpu_info_from_stack((unsigned long)cpu_data[cpu].stack_base); + info->processor_id = cpu; + info->per_cpu_offset = __per_cpu_offset[cpu]; + +@@ -1213,7 +1213,8 @@ void __init smp_prepare_cpus(void) + boot_cpu_physical_apicid = get_apic_id(); + cpu_physical_id(0) = boot_cpu_physical_apicid; + +- stack_base[0] = (void *)((unsigned long)stack_start & ~(STACK_SIZE - 1)); ++ cpu_data[0].stack_base = (void *) ++ ((unsigned long)stack_start & ~(STACK_SIZE - 1)); + + set_nr_sockets(); + +diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c +index 94a2f27cf4..9730f12b06 100644 +--- a/xen/arch/x86/tboot.c ++++ b/xen/arch/x86/tboot.c +@@ -238,9 +238,9 @@ static int mfn_in_guarded_stack(unsigned long mfn) + + for ( i = 0; i < nr_cpu_ids; i++ ) + { +- if ( !stack_base[i] ) ++ if ( !cpu_data[i].stack_base ) + continue; +- p = (void *)((unsigned long)stack_base[i] + STACK_SIZE - ++ p = (void *)((unsigned long)cpu_data[i].stack_base + STACK_SIZE - + PRIMARY_STACK_SIZE - PAGE_SIZE); + if ( mfn == virt_to_mfn(p) ) + return -1; +diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c +index e65cc60041..7471da5d2e 100644 +--- a/xen/arch/x86/traps.c ++++ b/xen/arch/x86/traps.c +@@ -623,9 +623,9 @@ void show_stack_overflow(unsigned int cpu, const struct cpu_user_regs *regs) + unsigned long curr_stack_base = esp & ~(STACK_SIZE - 1); + unsigned long esp_top, esp_bottom; + +- if ( _p(curr_stack_base) != stack_base[cpu] ) ++ if ( _p(curr_stack_base) != cpu_data[cpu].stack_base ) + printk("Current stack base %p differs from expected %p\n", +- _p(curr_stack_base), stack_base[cpu]); ++ _p(curr_stack_base), cpu_data[cpu].stack_base); + + esp_bottom = (esp | (STACK_SIZE - 1)) + 1; + esp_top = esp_bottom - PRIMARY_STACK_SIZE; +diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c +index 6e7a8eb1a3..d5d1f7b024 100644 +--- a/xen/arch/x86/x86_64/asm-offsets.c ++++ b/xen/arch/x86/x86_64/asm-offsets.c +@@ -161,6 +161,7 @@ void __dummy__(void) + + OFFSET(CPUINFO_X86_features, struct cpuinfo_x86, x86_capability); + OFFSET(CPUINFO_X86_apicid, struct cpuinfo_x86, apicid); ++ OFFSET(CPUINFO_X86_stack_base, struct cpuinfo_x86, stack_base); + DEFINE(CPUINFO_X86_sizeof, sizeof(struct cpuinfo_x86)); + BLANK(); + +diff --git a/xen/include/xen/smp.h b/xen/include/xen/smp.h +index 0a9219173f..994fdc4742 100644 +--- a/xen/include/xen/smp.h ++++ b/xen/include/xen/smp.h +@@ -67,8 +67,6 
@@ void smp_send_call_function_mask(const cpumask_t *mask); + + int alloc_cpu_id(void); + +-extern void *stack_base[NR_CPUS]; +- + void initialize_cpu_data(unsigned int cpu); + int setup_cpu_root_pgt(unsigned int cpu); + +-- +2.44.0 + diff --git a/1313-arch-x86-smp-call-x2apic_ap_setup-earlier.patch b/1313-arch-x86-smp-call-x2apic_ap_setup-earlier.patch new file mode 100644 index 00000000..29f4a234 --- /dev/null +++ b/1313-arch-x86-smp-call-x2apic_ap_setup-earlier.patch @@ -0,0 +1,48 @@ +From 6948345e4f505c7c40128b5beca28ba350f4cbc8 Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Tue, 13 Jun 2023 15:44:36 +0200 +Subject: [PATCH 1313/1323] arch/x86/smp: call x2apic_ap_setup() earlier + +It used to be called from smp_callin(), however BUG_ON() was invoked on +multiple occasions before that. It may end up calling machine_restart() +which tries to get APIC ID for CPU running this code. If BSP detected +that x2APIC is enabled, get_apic_id() will try to use it for all CPUs. +Enabling x2APIC on secondary CPUs earlier protects against an endless +loop of #GP exceptions caused by attempts to read IA32_X2APIC_APICID +MSR while x2APIC is disabled in IA32_APIC_BASE. + +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/smpboot.c | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c +index 490204038a..7b8829dc78 100644 +--- a/xen/arch/x86/smpboot.c ++++ b/xen/arch/x86/smpboot.c +@@ -196,7 +196,6 @@ static void smp_callin(void) + * update until we finish. We are free to set up this CPU: first the APIC. + */ + Dprintk("CALLIN, before setup_local_APIC().\n"); +- x2apic_ap_setup(); + setup_local_APIC(false); + + /* Save our processor parameters. */ +@@ -386,6 +385,14 @@ void start_secondary(void *unused) + get_cpu_info()->xen_cr3 = 0; + get_cpu_info()->pv_cr3 = 0; + ++ /* ++ * BUG_ON() used in load_system_tables() and later code may end up calling ++ * machine_restart() which tries to get APIC ID for CPU running this code. ++ * If BSP detected that x2APIC is enabled, get_apic_id() will try to use it ++ * for _all_ CPUs. Enable x2APIC on secondary CPUs now so we won't end up ++ * with endless #GP loop. ++ */ ++ x2apic_ap_setup(); + load_system_tables(); + + /* Full exception support from here on in. */ +-- +2.44.0 + diff --git a/1314-arch-x86-shutdown-protect-against-recurrent-machine_.patch b/1314-arch-x86-shutdown-protect-against-recurrent-machine_.patch new file mode 100644 index 00000000..901c73e3 --- /dev/null +++ b/1314-arch-x86-shutdown-protect-against-recurrent-machine_.patch @@ -0,0 +1,54 @@ +From 19ab5f48334eb5ad6862f27f9e39b12147512577 Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Tue, 13 Jun 2023 15:56:12 +0200 +Subject: [PATCH 1314/1323] arch/x86/shutdown: protect against recurrent + machine_restart() + +If multiple CPUs called machine_restart() before actual restart took +place, but after boot CPU declared itself not online, ASSERT in +on_selected_cpus() will fail. Few calls later execution would end up +in machine_restart() again, with another frame on call stack for new +exception. + +To protect against running out of stack, code checks if boot CPU is +still online before calling on_selected_cpus(). 
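+
+The resulting guard boils down to (simplified excerpt of the change
+below):
+
+    if ( cpu_online(0) )
+        on_selected_cpus(cpumask_of(0), __machine_restart,
+                         &delay_millisecs, 0);
+    for ( ; ; )
+        halt();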
+ +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/shutdown.c | 20 +++++++++++++++++--- + 1 file changed, 17 insertions(+), 3 deletions(-) + +diff --git a/xen/arch/x86/shutdown.c b/xen/arch/x86/shutdown.c +index 7619544d14..32c70505ed 100644 +--- a/xen/arch/x86/shutdown.c ++++ b/xen/arch/x86/shutdown.c +@@ -577,9 +577,23 @@ void machine_restart(unsigned int delay_millisecs) + /* Ensure we are the boot CPU. */ + if ( get_apic_id() != boot_cpu_physical_apicid ) + { +- /* Send IPI to the boot CPU (logical cpu 0). */ +- on_selected_cpus(cpumask_of(0), __machine_restart, +- &delay_millisecs, 0); ++ /* ++ * Send IPI to the boot CPU (logical cpu 0). ++ * ++ * If multiple CPUs called machine_restart() before actual restart ++ * took place, but after boot CPU declared itself not online, ASSERT ++ * in on_selected_cpus() will fail. Few calls later we would end up ++ * here again, with another frame on call stack for new exception. ++ * To protect against running out of stack, check if boot CPU is ++ * online. ++ * ++ * Note this is not an atomic operation, so it is possible for ++ * on_selected_cpus() to be called once after boot CPU is offline ++ * before we hit halt() below. ++ */ ++ if ( cpu_online(0) ) ++ on_selected_cpus(cpumask_of(0), __machine_restart, ++ &delay_millisecs, 0); + for ( ; ; ) + halt(); + } +-- +2.44.0 + diff --git a/1315-arch-x86-smp-drop-booting_cpu-variable.patch b/1315-arch-x86-smp-drop-booting_cpu-variable.patch new file mode 100644 index 00000000..35b3e8fa --- /dev/null +++ b/1315-arch-x86-smp-drop-booting_cpu-variable.patch @@ -0,0 +1,121 @@ +From 16c8ccc734174260d92dec48deb2edf9207258e6 Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Tue, 13 Jun 2023 18:58:21 +0200 +Subject: [PATCH 1315/1323] arch/x86/smp: drop booting_cpu variable + +CPU id is obtained as a side effect of searching for appropriate +stack for AP. It can be used as a parameter to start_secondary(). +Coincidentally this also makes further work on making AP bring-up +code parallel easier. + +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/boot/x86_64.S | 13 +++++++++---- + xen/arch/x86/smpboot.c | 16 +++++----------- + 2 files changed, 14 insertions(+), 15 deletions(-) + +diff --git a/xen/arch/x86/boot/x86_64.S b/xen/arch/x86/boot/x86_64.S +index 61040315f9..a5f1f681c9 100644 +--- a/xen/arch/x86/boot/x86_64.S ++++ b/xen/arch/x86/boot/x86_64.S +@@ -20,20 +20,24 @@ ENTRY(__high_start) + jz .L_stack_set + + /* APs only: get stack base from APIC ID saved in %esp. */ +- mov $0, %rax ++ mov $0, %rbx + lea cpu_data(%rip), %rcx + /* cpu_data[0] is BSP, skip it. */ + 1: +- add $1, %rax ++ add $1, %rbx + add $CPUINFO_X86_sizeof, %rcx +- cmp $NR_CPUS, %eax ++ cmp $NR_CPUS, %rbx + jb 2f + hlt + 2: + cmp %esp, CPUINFO_X86_apicid(%rcx) + jne 1b + +- /* %rcx is now cpu_data[cpu], read stack base from it. */ ++ /* ++ * At this point: ++ * - %rcx is cpu_data[cpu], read stack base from it, ++ * - %rbx (callee-save) is Xen cpu number, pass it to start_secondary(). ++ */ + mov CPUINFO_X86_stack_base(%rcx), %rsp + + test %rsp,%rsp +@@ -101,6 +105,7 @@ ENTRY(__high_start) + .L_ap_cet_done: + #endif /* CONFIG_XEN_SHSTK || CONFIG_XEN_IBT */ + ++ mov %rbx, %rdi + call start_secondary + BUG /* start_secondary() shouldn't return. 
*/ + +diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c +index 7b8829dc78..63e7245c7f 100644 +--- a/xen/arch/x86/smpboot.c ++++ b/xen/arch/x86/smpboot.c +@@ -234,8 +234,6 @@ static void smp_callin(void) + cpu_relax(); + } + +-static int booting_cpu; +- + /* CPUs for which sibling maps can be computed. */ + static cpumask_t cpu_sibling_setup_map; + +@@ -323,15 +321,14 @@ static void set_cpu_sibling_map(unsigned int cpu) + } + } + +-void start_secondary(void *unused) ++void start_secondary(unsigned int cpu) + { + struct cpu_info *info = get_cpu_info(); + + /* +- * Dont put anything before smp_callin(), SMP booting is so fragile that we ++ * Don't put anything before smp_callin(), SMP booting is so fragile that we + * want to limit the things done here to the most necessary things. + */ +- unsigned int cpu = booting_cpu; + + if ( slaunch_active ) { + uint64_t misc_enable; +@@ -352,7 +349,6 @@ void start_secondary(void *unused) + asm volatile ("monitor; xor %0,%0; mwait" + :: "a"(__va(sinit_mle->rlp_wakeup_addr)), "c"(0), + "d"(0) : "memory"); +- cpu = booting_cpu; + } + } + +@@ -381,9 +377,9 @@ void start_secondary(void *unused) + */ + spin_debug_disable(); + +- get_cpu_info()->use_pv_cr3 = false; +- get_cpu_info()->xen_cr3 = 0; +- get_cpu_info()->pv_cr3 = 0; ++ info->use_pv_cr3 = false; ++ info->xen_cr3 = 0; ++ info->pv_cr3 = 0; + + /* + * BUG_ON() used in load_system_tables() and later code may end up calling +@@ -632,8 +628,6 @@ static int do_boot_cpu(int apicid, int cpu) + */ + mtrr_save_state(); + +- booting_cpu = cpu; +- + start_eip = bootsym_phys(trampoline_realmode_entry); + + /* start_eip needs be page aligned, and below the 1M boundary. */ +-- +2.44.0 + diff --git a/1316-arch-x86-smp-make-cpu_state-per-CPU.patch b/1316-arch-x86-smp-make-cpu_state-per-CPU.patch new file mode 100644 index 00000000..2e661d94 --- /dev/null +++ b/1316-arch-x86-smp-make-cpu_state-per-CPU.patch @@ -0,0 +1,219 @@ +From 67021dc55f12c551505b9bf61eefd5989f9f3291 Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Fri, 16 Jun 2023 12:18:23 +0200 +Subject: [PATCH 1316/1323] arch/x86/smp: make cpu_state per-CPU + +This will be used for parallel AP bring-up. + +CPU_STATE_INIT changed direction. It was previously set by BSP and never +consumed by AP. Now it signals that AP got through assembly part of +initialization and waits for BSP to call notifiers that set up data +structures required for further initialization. + +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/include/asm/processor.h | 1 + + xen/arch/x86/smpboot.c | 80 +++++++++++++++++----------- + 2 files changed, 49 insertions(+), 32 deletions(-) + +diff --git a/xen/arch/x86/include/asm/processor.h b/xen/arch/x86/include/asm/processor.h +index 4e78a19afe..592ff38bae 100644 +--- a/xen/arch/x86/include/asm/processor.h ++++ b/xen/arch/x86/include/asm/processor.h +@@ -138,6 +138,7 @@ struct cpuinfo_x86 { + __u32 cpu_core_id; /* core ID of each logical CPU*/ + __u32 compute_unit_id; /* AMD compute unit ID of each logical CPU */ + void *stack_base; ++ unsigned int cpu_state; + unsigned short x86_clflush_size; + } __cacheline_aligned; + +diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c +index 63e7245c7f..5478a6b76b 100644 +--- a/xen/arch/x86/smpboot.c ++++ b/xen/arch/x86/smpboot.c +@@ -77,15 +77,18 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] = + { [0 ... 
NR_CPUS-1] .apicid = BAD_APICID }; + + static int cpu_error; +-static enum cpu_state { ++enum cpu_state { + CPU_STATE_DYING, /* slave -> master: I am dying */ + CPU_STATE_DEAD, /* slave -> master: I am completely dead */ +- CPU_STATE_INIT, /* master -> slave: Early bringup phase 1 */ +- CPU_STATE_CALLOUT, /* master -> slave: Early bringup phase 2 */ ++ CPU_STATE_INIT, /* slave -> master: Early bringup phase 1 completed */ ++ CPU_STATE_CALLOUT, /* master -> slave: Start early bringup phase 2 */ + CPU_STATE_CALLIN, /* slave -> master: Completed phase 2 */ + CPU_STATE_ONLINE /* master -> slave: Go fully online now. */ +-} cpu_state; +-#define set_cpu_state(state) do { smp_mb(); cpu_state = (state); } while (0) ++}; ++#define set_cpu_state(cpu, state) do { \ ++ smp_mb(); \ ++ cpu_data[cpu].cpu_state = (state); \ ++} while (0) + + void initialize_cpu_data(unsigned int cpu) + { +@@ -180,16 +183,7 @@ static void synchronize_tsc_slave(unsigned int slave) + static void smp_callin(void) + { + unsigned int cpu = smp_processor_id(); +- int i, rc; +- +- /* Wait 2s total for startup. */ +- Dprintk("Waiting for CALLOUT.\n"); +- for ( i = 0; cpu_state != CPU_STATE_CALLOUT; i++ ) +- { +- BUG_ON(i >= 200); +- cpu_relax(); +- mdelay(10); +- } ++ int rc; + + /* + * The boot CPU has finished the init stage and is spinning on cpu_state +@@ -225,12 +219,12 @@ static void smp_callin(void) + } + + /* Allow the master to continue. */ +- set_cpu_state(CPU_STATE_CALLIN); ++ set_cpu_state(cpu, CPU_STATE_CALLIN); + + synchronize_tsc_slave(cpu); + + /* And wait for our final Ack. */ +- while ( cpu_state != CPU_STATE_ONLINE ) ++ while ( cpu_data[cpu].cpu_state != CPU_STATE_ONLINE ) + cpu_relax(); + } + +@@ -325,6 +319,9 @@ void start_secondary(unsigned int cpu) + { + struct cpu_info *info = get_cpu_info(); + ++ /* Tell BSP that we are awake. */ ++ set_cpu_state(cpu, CPU_STATE_INIT); ++ + /* + * Don't put anything before smp_callin(), SMP booting is so fragile that we + * want to limit the things done here to the most necessary things. +@@ -354,6 +351,10 @@ void start_secondary(unsigned int cpu) + + /* Critical region without IDT or TSS. Any fault is deadly! */ + ++ /* Wait until data set up by CPU_UP_PREPARE notifiers is ready. */ ++ while ( cpu_data[cpu].cpu_state != CPU_STATE_CALLOUT ) ++ cpu_relax(); ++ + set_current(idle_vcpu[cpu]); + this_cpu(curr_vcpu) = idle_vcpu[cpu]; + rdmsrl(MSR_EFER, this_cpu(efer)); +@@ -641,26 +642,35 @@ static int do_boot_cpu(int apicid, int cpu) + + /* This grunge runs the startup process for the targeted processor. */ + +- set_cpu_state(CPU_STATE_INIT); +- + /* Starting actual IPI sequence... */ + boot_error = wakeup_secondary_cpu(apicid, start_eip); + + if ( !boot_error ) + { +- /* Allow AP to start initializing. */ +- set_cpu_state(CPU_STATE_CALLOUT); +- Dprintk("After Callout %d.\n", cpu); +- +- /* Wait 5s total for a response. */ +- for ( timeout = 0; timeout < 50000; timeout++ ) ++ /* Wait 2s total for a response. */ ++ for ( timeout = 0; timeout < 20000; timeout++ ) + { +- if ( cpu_state != CPU_STATE_CALLOUT ) ++ if ( cpu_data[cpu].cpu_state == CPU_STATE_INIT ) + break; + udelay(100); + } + +- if ( cpu_state == CPU_STATE_CALLIN ) ++ if ( cpu_data[cpu].cpu_state == CPU_STATE_INIT ) ++ { ++ /* Allow AP to start initializing. */ ++ set_cpu_state(cpu, CPU_STATE_CALLOUT); ++ Dprintk("After Callout %d.\n", cpu); ++ ++ /* Wait 5s total for a response. 
*/ ++ for ( timeout = 0; timeout < 500000; timeout++ ) ++ { ++ if ( cpu_data[cpu].cpu_state != CPU_STATE_CALLOUT ) ++ break; ++ udelay(10); ++ } ++ } ++ ++ if ( cpu_data[cpu].cpu_state == CPU_STATE_CALLIN ) + { + /* number CPUs logically, starting from 1 (BSP is 0) */ + Dprintk("OK.\n"); +@@ -668,7 +678,7 @@ static int do_boot_cpu(int apicid, int cpu) + synchronize_tsc_master(cpu); + Dprintk("CPU has booted.\n"); + } +- else if ( cpu_state == CPU_STATE_DEAD ) ++ else if ( cpu_data[cpu].cpu_state == CPU_STATE_DEAD ) + { + smp_rmb(); + rc = cpu_error; +@@ -739,7 +749,7 @@ unsigned long alloc_stub_page(unsigned int cpu, unsigned long *mfn) + void cpu_exit_clear(unsigned int cpu) + { + cpu_uninit(cpu); +- set_cpu_state(CPU_STATE_DEAD); ++ set_cpu_state(cpu, CPU_STATE_DEAD); + } + + static int clone_mapping(const void *ptr, root_pgentry_t *rpt) +@@ -1217,6 +1227,12 @@ void __init smp_prepare_cpus(void) + cpu_data[0].stack_base = (void *) + ((unsigned long)stack_start & ~(STACK_SIZE - 1)); + ++ /* Set state as CALLOUT so APs won't change it in initialize_cpu_data() */ ++ boot_cpu_data.cpu_state = CPU_STATE_CALLOUT; ++ ++ /* Not really used anywhere, but set it just in case. */ ++ set_cpu_state(0, CPU_STATE_ONLINE); ++ + set_nr_sockets(); + + socket_cpumask = xzalloc_array(cpumask_t *, nr_sockets); +@@ -1318,7 +1334,7 @@ void __cpu_disable(void) + { + int cpu = smp_processor_id(); + +- set_cpu_state(CPU_STATE_DYING); ++ set_cpu_state(cpu, CPU_STATE_DYING); + + local_irq_disable(); + clear_local_APIC(); +@@ -1343,7 +1359,7 @@ void __cpu_die(unsigned int cpu) + unsigned int i = 0; + enum cpu_state seen_state; + +- while ( (seen_state = cpu_state) != CPU_STATE_DEAD ) ++ while ( (seen_state = cpu_data[cpu].cpu_state) != CPU_STATE_DEAD ) + { + BUG_ON(seen_state != CPU_STATE_DYING); + mdelay(100); +@@ -1444,7 +1460,7 @@ int __cpu_up(unsigned int cpu) + + time_latch_stamps(); + +- set_cpu_state(CPU_STATE_ONLINE); ++ set_cpu_state(cpu, CPU_STATE_ONLINE); + while ( !cpu_online(cpu) ) + { + cpu_relax(); +-- +2.44.0 + diff --git a/1317-arch-x86-smp-remove-MONITOR-MWAIT-loop-for-TXT-AP-br.patch b/1317-arch-x86-smp-remove-MONITOR-MWAIT-loop-for-TXT-AP-br.patch new file mode 100644 index 00000000..13ec3e6b --- /dev/null +++ b/1317-arch-x86-smp-remove-MONITOR-MWAIT-loop-for-TXT-AP-br.patch @@ -0,0 +1,81 @@ +From 7deddb4e27b7f4b89d80b1d5a44210f0cfe386e7 Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Fri, 16 Jun 2023 14:31:27 +0200 +Subject: [PATCH 1317/1323] arch/x86/smp: remove MONITOR/MWAIT loop for TXT AP + bringup + +This is no longer necessary, since AP loops on cpu_state and CPU +index is passed as argument. + +In addition, move TXT JOIN structure to static data. There is no +guarantee that it would be consumed before it is overwritten on BSP +stack. + +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/smpboot.c | 42 +++++++++++++----------------------------- + 1 file changed, 13 insertions(+), 29 deletions(-) + +diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c +index 5478a6b76b..85c5d14414 100644 +--- a/xen/arch/x86/smpboot.c ++++ b/xen/arch/x86/smpboot.c +@@ -327,28 +327,6 @@ void start_secondary(unsigned int cpu) + * want to limit the things done here to the most necessary things. + */ + +- if ( slaunch_active ) { +- uint64_t misc_enable; +- uint32_t my_apicid; +- struct txt_sinit_mle_data *sinit_mle = +- txt_sinit_mle_data_start(__va(read_txt_reg(TXTCR_HEAP_BASE))); +- +- /* TXT released us with MONITOR disabled in IA32_MISC_ENABLE. 
*/ +- rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); +- wrmsrl(MSR_IA32_MISC_ENABLE, +- misc_enable | MSR_IA32_MISC_ENABLE_MONITOR_ENABLE); +- +- /* get_apic_id() reads from x2APIC if it thinks it is enabled. */ +- x2apic_ap_setup(); +- my_apicid = get_apic_id(); +- +- while ( my_apicid != cpu_physical_id(cpu) ) { +- asm volatile ("monitor; xor %0,%0; mwait" +- :: "a"(__va(sinit_mle->rlp_wakeup_addr)), "c"(0), +- "d"(0) : "memory"); +- } +- } +- + /* Critical region without IDT or TSS. Any fault is deadly! */ + + /* Wait until data set up by CPU_UP_PREPARE notifiers is ready. */ +@@ -459,13 +437,19 @@ static int slaunch_wake_aps(unsigned long trampoline_rm) + struct txt_sinit_mle_data *sinit_mle = + txt_sinit_mle_data_start(__va(read_txt_reg(TXTCR_HEAP_BASE))); + uint32_t *wakeup_addr = __va(sinit_mle->rlp_wakeup_addr); +-#define trampoline_relative(x) (trampoline_rm + ((char *)(x) - trampoline_realmode_entry)) +- uint32_t join[4] = { +- trampoline_gdt[1], /* GDT limit */ +- trampoline_relative(trampoline_gdt), /* GDT base */ +- TXT_AP_BOOT_CS, /* CS selector, DS = CS+8 */ +- trampoline_relative(txt_ap_entry) /* EIP */ +- }; ++ static uint32_t join[4] = {0}; ++ ++ /* Check if already started. */ ++ if ( join[0] != 0) ++ return -1; ++ ++#define trampoline_relative(x) \ ++ (trampoline_rm + ((char *)(x) - trampoline_realmode_entry)) ++ ++ join[0] = trampoline_gdt[1]; /* GDT limit */ ++ join[1] = trampoline_relative(trampoline_gdt); /* GDT base */ ++ join[2] = TXT_AP_BOOT_CS; /* CS selector, DS = CS+8 */ ++ join[3] = trampoline_relative(txt_ap_entry); /* EIP */ + + write_txt_reg(TXTCR_MLE_JOIN, __pa(join)); + +-- +2.44.0 + diff --git a/1318-arch-x86-smp-don-t-send-INIT-SIPI-SIPI-if-AP-is-alre.patch b/1318-arch-x86-smp-don-t-send-INIT-SIPI-SIPI-if-AP-is-alre.patch new file mode 100644 index 00000000..8d682d38 --- /dev/null +++ b/1318-arch-x86-smp-don-t-send-INIT-SIPI-SIPI-if-AP-is-alre.patch @@ -0,0 +1,82 @@ +From 14d94b6a19134657c5f891021d89dadd7c6a20c5 Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Fri, 16 Jun 2023 14:41:17 +0200 +Subject: [PATCH 1318/1323] arch/x86/smp: don't send INIT-SIPI-SIPI if AP is + already running + +This is another requirement for parallel AP bringup. + +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/smpboot.c | 37 +++++++++++++++++++++---------------- + 1 file changed, 21 insertions(+), 16 deletions(-) + +diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c +index 85c5d14414..23c87032eb 100644 +--- a/xen/arch/x86/smpboot.c ++++ b/xen/arch/x86/smpboot.c +@@ -605,7 +605,6 @@ int alloc_cpu_id(void) + static int do_boot_cpu(int apicid, int cpu) + { + int timeout, boot_error = 0, rc = 0; +- unsigned long start_eip; + + /* + * Save current MTRR state in case it was changed since early boot +@@ -613,21 +612,31 @@ static int do_boot_cpu(int apicid, int cpu) + */ + mtrr_save_state(); + +- start_eip = bootsym_phys(trampoline_realmode_entry); ++ /* Check if AP is already up. */ ++ if ( cpu_data[cpu].cpu_state != CPU_STATE_INIT ) ++ { ++ /* This grunge runs the startup process for the targeted processor. */ ++ unsigned long start_eip; ++ start_eip = bootsym_phys(trampoline_realmode_entry); + +- /* start_eip needs be page aligned, and below the 1M boundary. */ +- if ( start_eip & ~0xff000 ) +- panic("AP trampoline %#lx not suitably positioned\n", start_eip); ++ /* start_eip needs be page aligned, and below the 1M boundary. 
*/
++        if ( start_eip & ~0xff000 )
++            panic("AP trampoline %#lx not suitably positioned\n", start_eip);
+
+-    /* So we see what's up */
+-    if ( opt_cpu_info )
+-        printk("Booting processor %d/%d eip %lx\n",
+-               cpu, apicid, start_eip);
++        /* So we see what's up */
++        if ( opt_cpu_info )
++            printk("AP trampoline at %lx\n", start_eip);
+
+-    /* This grunge runs the startup process for the targeted processor. */
++        /* mark "stuck" area as not stuck */
++        bootsym(trampoline_cpu_started) = 0;
++        smp_mb();
+
+-    /* Starting actual IPI sequence... */
+-    boot_error = wakeup_secondary_cpu(apicid, start_eip);
++        /* Starting actual IPI sequence... */
++        boot_error = wakeup_secondary_cpu(apicid, start_eip);
++    }
++
++    if ( opt_cpu_info )
++        printk("Booting processor %d/%d\n", cpu, apicid);
+
+     if ( !boot_error )
+     {
+@@ -686,10 +695,6 @@ static int do_boot_cpu(int apicid, int cpu)
+         rc = -EIO;
+     }
+
+-    /* mark "stuck" area as not stuck */
+-    bootsym(trampoline_cpu_started) = 0;
+-    smp_mb();
+-
+     return rc;
+ }
+
+--
+2.44.0
+
diff --git a/1319-arch-x86-smp-start-APs-in-parallel-during-boot.patch b/1319-arch-x86-smp-start-APs-in-parallel-during-boot.patch
new file mode 100644
index 00000000..d13e3307
--- /dev/null
+++ b/1319-arch-x86-smp-start-APs-in-parallel-during-boot.patch
@@ -0,0 +1,128 @@
+From 604c2b10d60c4d8cf902309e0434a70256049d6d Mon Sep 17 00:00:00 2001
+From: Krystian Hebel
+Date: Fri, 16 Jun 2023 15:45:32 +0200
+Subject: [PATCH 1319/1323] arch/x86/smp: start APs in parallel during boot
+
+Multiple delays are required when sending IPIs and waiting for
+responses. During boot, 4 such IPIs were sent for each AP. With this
+change, only one set of broadcast IPIs is sent, which reduces boot
+time, especially on platforms with a large number of cores.
+
+Single-CPU initialization is still possible; it is used for hotplug.
+
+During wakeup from S3, APs are started one by one. It should be
+possible to enable parallel execution there as well, but I don't have
+a way of testing it as of now.
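+
+(Illustration only, not part of this patch: with a destination
+shorthand, the whole INIT-SIPI-SIPI sequence takes one ICR write per
+IPI instead of one write per AP. A rough sketch reusing the ICR helper
+and flags touched below; the name wake_all_aps_sketch() and the exact
+delays are made up for the example.)
+
+    /* Sketch: broadcast INIT-SIPI-SIPI to every CPU except self. */
+    static void wake_all_aps_sketch(unsigned long start_eip)
+    {
+        /* Assert INIT for all-but-self; destination field is ignored. */
+        apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT |
+                       APIC_DEST_ALLBUT, 0);
+        mdelay(10);
+
+        /* Two STARTUP IPIs carrying the page number of the trampoline. */
+        apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12) |
+                       APIC_DEST_ALLBUT, 0);
+        udelay(300);
+        apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12) |
+                       APIC_DEST_ALLBUT, 0);
+    }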
+ +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/include/asm/smp.h | 1 + + xen/arch/x86/setup.c | 2 ++ + xen/arch/x86/smpboot.c | 31 +++++++++++++++++++++++++++---- + 3 files changed, 30 insertions(+), 4 deletions(-) + +diff --git a/xen/arch/x86/include/asm/smp.h b/xen/arch/x86/include/asm/smp.h +index 4432a8ea1b..68c8a349fc 100644 +--- a/xen/arch/x86/include/asm/smp.h ++++ b/xen/arch/x86/include/asm/smp.h +@@ -31,6 +31,7 @@ DECLARE_PER_CPU(cpumask_var_t, send_ipi_cpumask); + extern bool park_offline_cpus; + + void smp_send_nmi_allbutself(void); ++void smp_send_init_sipi_sipi_allbutself(void); + + void send_IPI_mask(const cpumask_t *, int vector); + void send_IPI_self(int vector); +diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c +index 4ce475db08..6bfbe2872d 100644 +--- a/xen/arch/x86/setup.c ++++ b/xen/arch/x86/setup.c +@@ -1919,6 +1919,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) + cpu_data[i].stack_base = cpu_alloc_stack(i); + } + ++ smp_send_init_sipi_sipi_allbutself(); ++ + for_each_present_cpu ( i ) + { + if ( (park_offline_cpus || num_online_cpus() < max_cpus) && +diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c +index 23c87032eb..f8d1e4c76d 100644 +--- a/xen/arch/x86/smpboot.c ++++ b/xen/arch/x86/smpboot.c +@@ -462,7 +462,7 @@ static int slaunch_wake_aps(unsigned long trampoline_rm) + + static int wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) + { +- unsigned long send_status = 0, accept_status = 0; ++ unsigned long send_status = 0, accept_status = 0, sh = 0; + int maxlvt, timeout, i; + + /* +@@ -485,6 +485,12 @@ static int wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) + if ( slaunch_active ) + return slaunch_wake_aps(start_eip); + ++ /* ++ * Use destination shorthand for broadcasting IPIs during boot. ++ */ ++ if ( phys_apicid == BAD_APICID ) ++ sh = APIC_DEST_ALLBUT; ++ + /* + * Be paranoid about clearing APIC errors. + */ +@@ -498,7 +504,7 @@ static int wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) + /* + * Turn INIT on target chip via IPI + */ +- apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT, ++ apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT | sh, + phys_apicid); + + if ( !x2apic_enabled ) +@@ -515,7 +521,7 @@ static int wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) + + Dprintk("Deasserting INIT.\n"); + +- apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid); ++ apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT | sh, phys_apicid); + + Dprintk("Waiting for send to finish...\n"); + timeout = 0; +@@ -552,7 +558,7 @@ static int wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) + * STARTUP IPI + * Boot on the stack + */ +- apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12), phys_apicid); ++ apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12) | sh, phys_apicid); + + if ( !x2apic_enabled ) + { +@@ -1200,6 +1206,23 @@ static struct notifier_block cpu_smpboot_nfb = { + .notifier_call = cpu_smpboot_callback + }; + ++void smp_send_init_sipi_sipi_allbutself(void) ++{ ++ unsigned long start_eip; ++ start_eip = bootsym_phys(trampoline_realmode_entry); ++ ++ /* start_eip needs be page aligned, and below the 1M boundary. */ ++ if ( start_eip & ~0xff000 ) ++ panic("AP trampoline %#lx not suitably positioned\n", start_eip); ++ ++ /* So we see what's up */ ++ if ( opt_cpu_info ) ++ printk("Booting APs in parallel, eip %lx\n", start_eip); ++ ++ /* Starting actual broadcast IPI sequence... 
*/ ++ wakeup_secondary_cpu(BAD_APICID, start_eip); ++} ++ + void __init smp_prepare_cpus(void) + { + register_cpu_notifier(&cpu_smpboot_nfb); +-- +2.44.0 + diff --git a/1320-x86-sha256.c-add-file.patch b/1320-x86-sha256.c-add-file.patch new file mode 100644 index 00000000..09696f75 --- /dev/null +++ b/1320-x86-sha256.c-add-file.patch @@ -0,0 +1,281 @@ +From 32613c64c5e4f292908275429413b350b13a8a9b Mon Sep 17 00:00:00 2001 +From: Sergii Dmytruk +Date: Mon, 26 Jun 2023 00:17:15 +0300 +Subject: [PATCH 1320/1323] x86/sha256.c: add file + +Signed-off-by: Sergii Dmytruk +--- + xen/lib/Makefile | 1 + + xen/lib/sha256.c | 248 +++++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 249 insertions(+) + create mode 100644 xen/lib/sha256.c + +diff --git a/xen/lib/Makefile b/xen/lib/Makefile +index 3820e0bfbb..40f7960ef1 100644 +--- a/xen/lib/Makefile ++++ b/xen/lib/Makefile +@@ -29,6 +29,7 @@ lib-y += strsep.o + lib-y += strspn.o + lib-y += strstr.o + lib-$(CONFIG_X86) += sha1.o ++lib-$(CONFIG_X86) += sha256.o + lib-$(CONFIG_X86) += xxhash32.o + lib-$(CONFIG_X86) += xxhash64.o + +diff --git a/xen/lib/sha256.c b/xen/lib/sha256.c +new file mode 100644 +index 0000000000..27e0571a71 +--- /dev/null ++++ b/xen/lib/sha256.c +@@ -0,0 +1,248 @@ ++/* ++ * SHA-256 hash in C ++ * ++ * Copyright (c) 2023 Project Nayuki. (MIT License) ++ * https://www.nayuki.io/page/fast-sha2-hashes-in-x86-assembly ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy of ++ * this software and associated documentation files (the "Software"), to deal in ++ * the Software without restriction, including without limitation the rights to ++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of ++ * the Software, and to permit persons to whom the Software is furnished to do so, ++ * subject to the following conditions: ++ * - The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * - The Software is provided "as is", without warranty of any kind, express or ++ * implied, including but not limited to the warranties of merchantability, ++ * fitness for a particular purpose and noninfringement. In no event shall the ++ * authors or copyright holders be liable for any claim, damages or other ++ * liability, whether in an action of contract, tort or otherwise, arising from, ++ * out of or in connection with the Software or the use or other dealings in the ++ * Software. 
++ */ ++ ++#include ++#include ++ ++#define UINT32_C(v) v ## U ++ ++#define BLOCK_LEN 64 // In bytes ++#define STATE_LEN 8 // In words ++ ++static void sha256_compress(const uint8_t block[static 64], uint32_t state[static 8]) { ++ #define ROTR32(x, n) (((0U + (x)) << (32 - (n))) | ((x) >> (n))) // Assumes that x is uint32_t and 0 < n < 32 ++ ++ #define LOADSCHEDULE(i) \ ++ schedule[i] = (uint32_t)block[i * 4 + 0] << 24 \ ++ | (uint32_t)block[i * 4 + 1] << 16 \ ++ | (uint32_t)block[i * 4 + 2] << 8 \ ++ | (uint32_t)block[i * 4 + 3] << 0; ++ ++ #define SCHEDULE(i) \ ++ schedule[i] = 0U + schedule[i - 16] + schedule[i - 7] \ ++ + (ROTR32(schedule[i - 15], 7) ^ ROTR32(schedule[i - 15], 18) ^ (schedule[i - 15] >> 3)) \ ++ + (ROTR32(schedule[i - 2], 17) ^ ROTR32(schedule[i - 2], 19) ^ (schedule[i - 2] >> 10)); ++ ++ #define ROUND(a, b, c, d, e, f, g, h, i, k) \ ++ h = 0U + h + (ROTR32(e, 6) ^ ROTR32(e, 11) ^ ROTR32(e, 25)) + (g ^ (e & (f ^ g))) + UINT32_C(k) + schedule[i]; \ ++ d = 0U + d + h; \ ++ h = 0U + h + (ROTR32(a, 2) ^ ROTR32(a, 13) ^ ROTR32(a, 22)) + ((a & (b | c)) | (b & c)); ++ ++ uint32_t schedule[64]; ++ ++ uint32_t a = state[0]; ++ uint32_t b = state[1]; ++ uint32_t c = state[2]; ++ uint32_t d = state[3]; ++ uint32_t e = state[4]; ++ uint32_t f = state[5]; ++ uint32_t g = state[6]; ++ uint32_t h = state[7]; ++ ++ LOADSCHEDULE( 0) ++ LOADSCHEDULE( 1) ++ LOADSCHEDULE( 2) ++ LOADSCHEDULE( 3) ++ LOADSCHEDULE( 4) ++ LOADSCHEDULE( 5) ++ LOADSCHEDULE( 6) ++ LOADSCHEDULE( 7) ++ LOADSCHEDULE( 8) ++ LOADSCHEDULE( 9) ++ LOADSCHEDULE(10) ++ LOADSCHEDULE(11) ++ LOADSCHEDULE(12) ++ LOADSCHEDULE(13) ++ LOADSCHEDULE(14) ++ LOADSCHEDULE(15) ++ SCHEDULE(16) ++ SCHEDULE(17) ++ SCHEDULE(18) ++ SCHEDULE(19) ++ SCHEDULE(20) ++ SCHEDULE(21) ++ SCHEDULE(22) ++ SCHEDULE(23) ++ SCHEDULE(24) ++ SCHEDULE(25) ++ SCHEDULE(26) ++ SCHEDULE(27) ++ SCHEDULE(28) ++ SCHEDULE(29) ++ SCHEDULE(30) ++ SCHEDULE(31) ++ SCHEDULE(32) ++ SCHEDULE(33) ++ SCHEDULE(34) ++ SCHEDULE(35) ++ SCHEDULE(36) ++ SCHEDULE(37) ++ SCHEDULE(38) ++ SCHEDULE(39) ++ SCHEDULE(40) ++ SCHEDULE(41) ++ SCHEDULE(42) ++ SCHEDULE(43) ++ SCHEDULE(44) ++ SCHEDULE(45) ++ SCHEDULE(46) ++ SCHEDULE(47) ++ SCHEDULE(48) ++ SCHEDULE(49) ++ SCHEDULE(50) ++ SCHEDULE(51) ++ SCHEDULE(52) ++ SCHEDULE(53) ++ SCHEDULE(54) ++ SCHEDULE(55) ++ SCHEDULE(56) ++ SCHEDULE(57) ++ SCHEDULE(58) ++ SCHEDULE(59) ++ SCHEDULE(60) ++ SCHEDULE(61) ++ SCHEDULE(62) ++ SCHEDULE(63) ++ ++ ROUND(a, b, c, d, e, f, g, h, 0, 0x428A2F98) ++ ROUND(h, a, b, c, d, e, f, g, 1, 0x71374491) ++ ROUND(g, h, a, b, c, d, e, f, 2, 0xB5C0FBCF) ++ ROUND(f, g, h, a, b, c, d, e, 3, 0xE9B5DBA5) ++ ROUND(e, f, g, h, a, b, c, d, 4, 0x3956C25B) ++ ROUND(d, e, f, g, h, a, b, c, 5, 0x59F111F1) ++ ROUND(c, d, e, f, g, h, a, b, 6, 0x923F82A4) ++ ROUND(b, c, d, e, f, g, h, a, 7, 0xAB1C5ED5) ++ ROUND(a, b, c, d, e, f, g, h, 8, 0xD807AA98) ++ ROUND(h, a, b, c, d, e, f, g, 9, 0x12835B01) ++ ROUND(g, h, a, b, c, d, e, f, 10, 0x243185BE) ++ ROUND(f, g, h, a, b, c, d, e, 11, 0x550C7DC3) ++ ROUND(e, f, g, h, a, b, c, d, 12, 0x72BE5D74) ++ ROUND(d, e, f, g, h, a, b, c, 13, 0x80DEB1FE) ++ ROUND(c, d, e, f, g, h, a, b, 14, 0x9BDC06A7) ++ ROUND(b, c, d, e, f, g, h, a, 15, 0xC19BF174) ++ ROUND(a, b, c, d, e, f, g, h, 16, 0xE49B69C1) ++ ROUND(h, a, b, c, d, e, f, g, 17, 0xEFBE4786) ++ ROUND(g, h, a, b, c, d, e, f, 18, 0x0FC19DC6) ++ ROUND(f, g, h, a, b, c, d, e, 19, 0x240CA1CC) ++ ROUND(e, f, g, h, a, b, c, d, 20, 0x2DE92C6F) ++ ROUND(d, e, f, g, h, a, b, c, 21, 0x4A7484AA) ++ ROUND(c, d, e, f, g, h, a, b, 22, 0x5CB0A9DC) ++ 
ROUND(b, c, d, e, f, g, h, a, 23, 0x76F988DA) ++ ROUND(a, b, c, d, e, f, g, h, 24, 0x983E5152) ++ ROUND(h, a, b, c, d, e, f, g, 25, 0xA831C66D) ++ ROUND(g, h, a, b, c, d, e, f, 26, 0xB00327C8) ++ ROUND(f, g, h, a, b, c, d, e, 27, 0xBF597FC7) ++ ROUND(e, f, g, h, a, b, c, d, 28, 0xC6E00BF3) ++ ROUND(d, e, f, g, h, a, b, c, 29, 0xD5A79147) ++ ROUND(c, d, e, f, g, h, a, b, 30, 0x06CA6351) ++ ROUND(b, c, d, e, f, g, h, a, 31, 0x14292967) ++ ROUND(a, b, c, d, e, f, g, h, 32, 0x27B70A85) ++ ROUND(h, a, b, c, d, e, f, g, 33, 0x2E1B2138) ++ ROUND(g, h, a, b, c, d, e, f, 34, 0x4D2C6DFC) ++ ROUND(f, g, h, a, b, c, d, e, 35, 0x53380D13) ++ ROUND(e, f, g, h, a, b, c, d, 36, 0x650A7354) ++ ROUND(d, e, f, g, h, a, b, c, 37, 0x766A0ABB) ++ ROUND(c, d, e, f, g, h, a, b, 38, 0x81C2C92E) ++ ROUND(b, c, d, e, f, g, h, a, 39, 0x92722C85) ++ ROUND(a, b, c, d, e, f, g, h, 40, 0xA2BFE8A1) ++ ROUND(h, a, b, c, d, e, f, g, 41, 0xA81A664B) ++ ROUND(g, h, a, b, c, d, e, f, 42, 0xC24B8B70) ++ ROUND(f, g, h, a, b, c, d, e, 43, 0xC76C51A3) ++ ROUND(e, f, g, h, a, b, c, d, 44, 0xD192E819) ++ ROUND(d, e, f, g, h, a, b, c, 45, 0xD6990624) ++ ROUND(c, d, e, f, g, h, a, b, 46, 0xF40E3585) ++ ROUND(b, c, d, e, f, g, h, a, 47, 0x106AA070) ++ ROUND(a, b, c, d, e, f, g, h, 48, 0x19A4C116) ++ ROUND(h, a, b, c, d, e, f, g, 49, 0x1E376C08) ++ ROUND(g, h, a, b, c, d, e, f, 50, 0x2748774C) ++ ROUND(f, g, h, a, b, c, d, e, 51, 0x34B0BCB5) ++ ROUND(e, f, g, h, a, b, c, d, 52, 0x391C0CB3) ++ ROUND(d, e, f, g, h, a, b, c, 53, 0x4ED8AA4A) ++ ROUND(c, d, e, f, g, h, a, b, 54, 0x5B9CCA4F) ++ ROUND(b, c, d, e, f, g, h, a, 55, 0x682E6FF3) ++ ROUND(a, b, c, d, e, f, g, h, 56, 0x748F82EE) ++ ROUND(h, a, b, c, d, e, f, g, 57, 0x78A5636F) ++ ROUND(g, h, a, b, c, d, e, f, 58, 0x84C87814) ++ ROUND(f, g, h, a, b, c, d, e, 59, 0x8CC70208) ++ ROUND(e, f, g, h, a, b, c, d, 60, 0x90BEFFFA) ++ ROUND(d, e, f, g, h, a, b, c, 61, 0xA4506CEB) ++ ROUND(c, d, e, f, g, h, a, b, 62, 0xBEF9A3F7) ++ ROUND(b, c, d, e, f, g, h, a, 63, 0xC67178F2) ++ ++ state[0] = 0U + state[0] + a; ++ state[1] = 0U + state[1] + b; ++ state[2] = 0U + state[2] + c; ++ state[3] = 0U + state[3] + d; ++ state[4] = 0U + state[4] + e; ++ state[5] = 0U + state[5] + f; ++ state[6] = 0U + state[6] + g; ++ state[7] = 0U + state[7] + h; ++} ++ ++/* Full message hasher */ ++ ++void sha256_hash(const uint8_t message[], size_t len, uint32_t hash[static STATE_LEN]) { ++ uint8_t block[BLOCK_LEN] = {0}; ++ size_t off; ++ size_t rem; ++ ++ hash[0] = UINT32_C(0x6A09E667); ++ hash[1] = UINT32_C(0xBB67AE85); ++ hash[2] = UINT32_C(0x3C6EF372); ++ hash[3] = UINT32_C(0xA54FF53A); ++ hash[4] = UINT32_C(0x510E527F); ++ hash[5] = UINT32_C(0x9B05688C); ++ hash[6] = UINT32_C(0x1F83D9AB); ++ hash[7] = UINT32_C(0x5BE0CD19); ++ ++ #define LENGTH_SIZE 8 // In bytes ++ ++ for (off = 0; len - off >= BLOCK_LEN; off += BLOCK_LEN) ++ sha256_compress(&message[off], hash); ++ ++ rem = len - off; ++ if (rem > 0) ++ memcpy(block, &message[off], rem); ++ ++ block[rem] = 0x80; ++ rem++; ++ if (BLOCK_LEN - rem < LENGTH_SIZE) { ++ sha256_compress(block, hash); ++ memset(block, 0, sizeof(block)); ++ } ++ ++ block[BLOCK_LEN - 1] = (uint8_t)((len & 0x1FU) << 3); ++ len >>= 5; ++ for (int i = 1; i < LENGTH_SIZE; i++, len >>= 8) ++ block[BLOCK_LEN - 1 - i] = (uint8_t)(len & 0xFFU); ++ sha256_compress(block, hash); ++ ++ hash[0] = __builtin_bswap32(hash[0]); ++ hash[1] = __builtin_bswap32(hash[1]); ++ hash[2] = __builtin_bswap32(hash[2]); ++ hash[3] = __builtin_bswap32(hash[3]); ++ hash[4] = __builtin_bswap32(hash[4]); ++ hash[5] = 
__builtin_bswap32(hash[5]);
++    hash[6] = __builtin_bswap32(hash[6]);
++    hash[7] = __builtin_bswap32(hash[7]);
++}
+--
+2.44.0
+
diff --git a/1321-x86-tpm.c-support-extending-PCRs-of-TPM2.0.patch b/1321-x86-tpm.c-support-extending-PCRs-of-TPM2.0.patch
new file mode 100644
index 00000000..b63d1ead
--- /dev/null
+++ b/1321-x86-tpm.c-support-extending-PCRs-of-TPM2.0.patch
@@ -0,0 +1,637 @@
+From aa848d3818740dd03ebf20b0c224f51fa9947bff Mon Sep 17 00:00:00 2001
+From: Sergii Dmytruk
+Date: Wed, 28 Jun 2023 20:23:24 +0300
+Subject: [PATCH 1321/1323] x86/tpm.c: support extending PCRs of TPM2.0
+
+SHA1 and SHA256 are hardcoded here, but their support by the TPM is
+checked for. Addition of an event log for TPM2.0 will generalize the
+code further.
+
+Signed-off-by: Sergii Dmytruk
+---
+ xen/arch/x86/tpm.c       | 446 +++++++++++++++++++++++++++++++++++++--
+ xen/include/xen/sha256.h |  11 +
+ xen/lib/sha256.c         |  49 +++--
+ 3 files changed, 472 insertions(+), 34 deletions(-)
+ create mode 100644 xen/include/xen/sha256.h
+
+diff --git a/xen/arch/x86/tpm.c b/xen/arch/x86/tpm.c
+index 8281796d20..8e923985a2 100644
+--- a/xen/arch/x86/tpm.c
++++ b/xen/arch/x86/tpm.c
+@@ -38,6 +38,15 @@ asm (
+  * The implementation is necessary if compiler chooses to not use an inline
+  * builtin.
+  */
++void *memset(void *dest, int c, size_t n)
++{
++    uint8_t *d = dest;
++
++    while ( n-- )
++        *d++ = c;
++
++    return dest;
++}
+ void *memcpy(void *dest, const void *src, size_t n)
+ {
+     const uint8_t *s = src;
+@@ -59,6 +68,7 @@ void *memcpy(void *dest, const void *src, size_t n)
+ #endif /* __EARLY_TPM__ */
+
+ #include <xen/sha1.h>
++#include <xen/sha256.h>
+
+ #define TPM_TIS_BASE 0xFED40000
+ #define TPM_LOC_REG(loc, reg) (0x1000 * (loc) + (reg))
+@@ -78,6 +88,7 @@ void *memcpy(void *dest, const void *src, size_t n)
+
+ #define swap16(x) __builtin_bswap16(x)
+ #define swap32(x) __builtin_bswap32(x)
++#define memset(s, c, n) __builtin_memset(s, c, n)
+ #define memcpy(d, s, n) __builtin_memcpy(d, s, n)
+
+ static inline volatile uint32_t tis_read32(unsigned reg)
+@@ -158,14 +169,15 @@ static inline bool is_tpm12(void)
+             (tis_read32(TPM_STS_(0)) & TPM_FAMILY_MASK) == 0);
+ }
+
+-/****************************** TPM1.2 specific *******************************/
+-#define TPM_ORD_Extend 0x00000014
+-#define TPM_ORD_SHA1Start 0x000000A0
+-#define TPM_ORD_SHA1Update 0x000000A1
+-#define TPM_ORD_SHA1CompleteExtend 0x000000A3
++/****************************** TPM1.2 & TPM2.0 *******************************/
+
+-#define TPM_TAG_RQU_COMMAND 0x00C1
+-#define TPM_TAG_RSP_COMMAND 0x00C4
++/*
++ * TPM1.2 is required to support commands of up to 1101 bytes, vendors rarely
++ * go above that. Limit maximum size of block of data to be hashed to 1024.
++ *
++ * TPM2.0 should support hashing of at least 1024 bytes.
++ */
++#define MAX_HASH_BLOCK 1024
+
+ /* All fields of following structs are big endian. */
+ struct tpm_cmd_hdr {
+     uint16_t tag;
+     uint32_t paramSize;
+     uint32_t ordinal;
+ } __packed;
+
+@@ -180,6 +192,17 @@ struct tpm_rsp_hdr {
+     uint32_t returnCode;
+ } __packed;
+
++/****************************** TPM1.2 specific *******************************/
++
++#define TPM_ORD_Extend 0x00000014
++#define TPM_ORD_SHA1Start 0x000000A0
++#define TPM_ORD_SHA1Update 0x000000A1
++#define TPM_ORD_SHA1CompleteExtend 0x000000A3
++
++#define TPM_TAG_RQU_COMMAND 0x00C1
++#define TPM_TAG_RSP_COMMAND 0x00C4
++
++/* All fields of following structs are big endian.
*/ + struct extend_cmd { + struct tpm_cmd_hdr h; + uint32_t pcrNum; +@@ -245,11 +268,6 @@ struct txt_ev_log_container_12 { + }; + + #ifdef __EARLY_TPM__ +-/* +- * TPM1.2 is required to support commands of up to 1101 bytes, vendors rarely +- * go above that. Limit maximum size of block of data to be hashed to 1024. +- */ +-#define MAX_HASH_BLOCK 1024 + #define CMD_RSP_BUF_SIZE (sizeof(struct sha1_update_cmd) + MAX_HASH_BLOCK) + + union cmd_rsp { +@@ -395,6 +413,382 @@ static void *create_log_event12(struct txt_ev_log_container_12 *evt_log, + + /************************** end of TPM1.2 specific ****************************/ + ++/****************************** TPM2.0 specific *******************************/ ++ ++/* ++ * These constants are for TPM2.0 but don't have a distinct prefix to match ++ * names in the specification. ++ */ ++ ++#define TPM_HT_PCR 0x00 ++ ++#define TPM_RH_NULL 0x40000007 ++#define TPM_RS_PW 0x40000009 ++ ++#define HR_SHIFT 24 ++#define HR_PCR (TPM_HT_PCR << HR_SHIFT) ++ ++#define TPM_ST_NO_SESSIONS 0x8001 ++#define TPM_ST_SESSIONS 0x8002 ++ ++#define TPM_ALG_SHA1 0x0004 ++#define TPM_ALG_SHA256 0x000b ++#define TPM_ALG_NULL 0x0010 ++ ++#define TPM2_PCR_Extend 0x00000182 ++#define TPM2_PCR_HashSequenceStart 0x00000186 ++#define TPM2_PCR_SequenceUpdate 0x0000015C ++#define TPM2_PCR_EventSequenceComplete 0x00000185 ++ ++#define PUT_BYTES(p, bytes, size) do { \ ++ memcpy((p), (bytes), (size)); \ ++ (p) += (size); \ ++ } while ( 0 ) ++ ++#define PUT_16BIT(p, data) do { \ ++ *(uint16_t *)(p) = swap16(data); \ ++ (p) += 2; \ ++ } while ( 0 ) ++ ++/* All fields of following structs are big endian. */ ++struct tpm2_session_header { ++ uint32_t handle; ++ uint16_t nonceSize; ++ uint8_t nonce[0]; ++ uint8_t attrs; ++ uint16_t hmacSize; ++ uint8_t hmac[0]; ++} __packed; ++ ++struct tpm2_extend_cmd { ++ struct tpm_cmd_hdr h; ++ uint32_t pcrHandle; ++ uint32_t sessionHdrSize; ++ struct tpm2_session_header pcrSession; ++ uint32_t hashCount; ++ uint8_t hashes[0]; ++} __packed; ++ ++struct tpm2_extend_rsp { ++ struct tpm_rsp_hdr h; ++} __packed; ++ ++struct tpm2_sequence_start_cmd { ++ struct tpm_cmd_hdr h; ++ uint16_t hmacSize; ++ uint8_t hmac[0]; ++ uint16_t hashAlg; ++} __packed; ++ ++struct tpm2_sequence_start_rsp { ++ struct tpm_rsp_hdr h; ++ uint32_t sequenceHandle; ++} __packed; ++ ++struct tpm2_sequence_update_cmd { ++ struct tpm_cmd_hdr h; ++ uint32_t sequenceHandle; ++ uint32_t sessionHdrSize; ++ struct tpm2_session_header session; ++ uint16_t dataSize; ++ uint8_t data[0]; ++} __packed; ++ ++struct tpm2_sequence_update_rsp { ++ struct tpm_rsp_hdr h; ++} __packed; ++ ++struct tpm2_sequence_complete_cmd { ++ struct tpm_cmd_hdr h; ++ uint32_t pcrHandle; ++ uint32_t sequenceHandle; ++ uint32_t sessionHdrSize; ++ struct tpm2_session_header pcrSession; ++ struct tpm2_session_header sequenceSession; ++ uint16_t dataSize; ++ uint8_t data[0]; ++} __packed; ++ ++struct tpm2_sequence_complete_rsp { ++ struct tpm_rsp_hdr h; ++ uint32_t paramSize; ++ uint32_t hashCount; ++ uint8_t hashes[0]; ++ /* ++ * Each hash is represented as: ++ * struct { ++ * uint16_t hashAlg; ++ * uint8_t hash[size of hashAlg]; ++ * }; ++ */ ++} __packed; ++ ++/* ++ * These two structure are for convenience, they don't correspond to anything in ++ * any spec. ++ */ ++struct tpm2_log_hash { ++ uint16_t alg; /* TPM_ALG_* */ ++ uint16_t size; ++ uint8_t *data; /* Non-owning reference to a buffer inside log entry. */ ++}; ++/* Should be more than enough for now and awhile in the future. 
*/ ++#define MAX_HASH_COUNT 8 ++struct tpm2_log_hashes { ++ uint32_t count; ++ struct tpm2_log_hash hashes[MAX_HASH_COUNT]; ++}; ++ ++#ifdef __EARLY_TPM__ ++ ++union tpm2_cmd_rsp { ++ uint8_t b[sizeof(struct tpm2_sequence_update_cmd) + MAX_HASH_BLOCK]; ++ struct tpm_cmd_hdr c; ++ struct tpm_rsp_hdr r; ++ struct tpm2_sequence_start_cmd start_c; ++ struct tpm2_sequence_start_rsp start_r; ++ struct tpm2_sequence_update_cmd update_c; ++ struct tpm2_sequence_update_rsp update_r; ++ struct tpm2_sequence_complete_cmd finish_c; ++ struct tpm2_sequence_complete_rsp finish_r; ++}; ++ ++static uint32_t tpm2_hash_extend(unsigned loc, uint8_t *buf, unsigned size, ++ unsigned pcr, ++ struct tpm2_log_hashes *log_hashes) ++{ ++ uint32_t seq_handle; ++ unsigned max_bytes = MAX_HASH_BLOCK; ++ ++ union tpm2_cmd_rsp cmd_rsp; ++ unsigned o_size; ++ unsigned i; ++ uint8_t *p; ++ uint32_t rc; ++ ++ cmd_rsp.start_c = (struct tpm2_sequence_start_cmd) { ++ .h.tag = swap16(TPM_ST_NO_SESSIONS), ++ .h.paramSize = swap32(sizeof(cmd_rsp.start_c)), ++ .h.ordinal = swap32(TPM2_PCR_HashSequenceStart), ++ .hashAlg = swap16(TPM_ALG_NULL), /* Compute all supported hashes. */ ++ }; ++ ++ request_locality(loc); ++ ++ o_size = sizeof(cmd_rsp); ++ send_cmd(loc, cmd_rsp.b, swap32(cmd_rsp.c.paramSize), &o_size); ++ ++ if ( cmd_rsp.r.tag == swap16(TPM_ST_NO_SESSIONS) && ++ cmd_rsp.r.paramSize == swap32(10) ) { ++ rc = swap32(cmd_rsp.r.returnCode); ++ if ( rc != 0 ) ++ goto error; ++ } ++ ++ seq_handle = swap32(cmd_rsp.start_r.sequenceHandle); ++ ++ while ( size > 64 ) { ++ if ( size < max_bytes ) ++ max_bytes = size & ~(64 - 1); ++ ++ cmd_rsp.update_c = (struct tpm2_sequence_update_cmd) { ++ .h.tag = swap16(TPM_ST_SESSIONS), ++ .h.paramSize = swap32(sizeof(cmd_rsp.update_c) + max_bytes), ++ .h.ordinal = swap32(TPM2_PCR_SequenceUpdate), ++ .sequenceHandle = swap32(seq_handle), ++ .sessionHdrSize = swap32(sizeof(struct tpm2_session_header)), ++ .session.handle = swap32(TPM_RS_PW), ++ .dataSize = swap16(max_bytes), ++ }; ++ ++ memcpy(cmd_rsp.update_c.data, buf, max_bytes); ++ ++ o_size = sizeof(cmd_rsp); ++ send_cmd(loc, cmd_rsp.b, swap32(cmd_rsp.c.paramSize), &o_size); ++ ++ if ( cmd_rsp.r.tag == swap16(TPM_ST_NO_SESSIONS) && ++ cmd_rsp.r.paramSize == swap32(10) ) { ++ rc = swap32(cmd_rsp.r.returnCode); ++ if ( rc != 0 ) ++ goto error; ++ } ++ ++ size -= max_bytes; ++ buf += max_bytes; ++ } ++ ++ cmd_rsp.finish_c = (struct tpm2_sequence_complete_cmd) { ++ .h.tag = swap16(TPM_ST_SESSIONS), ++ .h.paramSize = swap32(sizeof(cmd_rsp.finish_c) + size), ++ .h.ordinal = swap32(TPM2_PCR_EventSequenceComplete), ++ .pcrHandle = swap32(HR_PCR + pcr), ++ .sequenceHandle = swap32(seq_handle), ++ .sessionHdrSize = swap32(sizeof(struct tpm2_session_header)*2), ++ .pcrSession.handle = swap32(TPM_RS_PW), ++ .sequenceSession.handle = swap32(TPM_RS_PW), ++ .dataSize = swap16(size), ++ }; ++ ++ memcpy(cmd_rsp.finish_c.data, buf, size); ++ ++ o_size = sizeof(cmd_rsp); ++ send_cmd(loc, cmd_rsp.b, swap32(cmd_rsp.c.paramSize), &o_size); ++ ++ if ( cmd_rsp.r.tag == swap16(TPM_ST_NO_SESSIONS) && ++ cmd_rsp.r.paramSize == swap32(10) ) { ++ rc = swap32(cmd_rsp.r.returnCode); ++ if ( rc != 0 ) ++ goto error; ++ } ++ ++ p = cmd_rsp.finish_r.hashes; ++ for ( i = 0; i < swap32(cmd_rsp.finish_r.hashCount); ++i ) { ++ unsigned j; ++ uint16_t hash_type; ++ ++ hash_type = swap16(*(uint16_t *)p); ++ p += sizeof(uint16_t); ++ ++ for ( j = 0; j < log_hashes->count; ++j ) { ++ struct tpm2_log_hash *hash = &log_hashes->hashes[j]; ++ if ( hash->alg == hash_type ) { ++ 
memcpy(hash->data, p, hash->size); ++ p += hash->size; ++ break; ++ } ++ } ++ ++ if ( j == log_hashes->count ) { ++ /* Can't continue parsing without knowing hash size. */ ++ break; ++ } ++ } ++ ++ rc = 0; ++ ++error: ++ relinquish_locality(loc); ++ return rc; ++} ++ ++#else ++ ++union tpm2_cmd_rsp { ++ /* Enough space for multiple hashes. */ ++ uint8_t b[sizeof(struct tpm2_extend_cmd) + 1024]; ++ struct tpm_cmd_hdr c; ++ struct tpm_rsp_hdr r; ++ struct tpm2_extend_cmd extend_c; ++ struct tpm2_extend_rsp extend_r; ++}; ++ ++static uint32_t tpm20_pcr_extend(unsigned loc, uint32_t pcr_handle, ++ const struct tpm2_log_hashes *log_hashes) ++{ ++ union tpm2_cmd_rsp cmd_rsp; ++ unsigned o_size; ++ unsigned i; ++ uint8_t *p; ++ ++ cmd_rsp.extend_c = (struct tpm2_extend_cmd) { ++ .h.tag = swap16(TPM_ST_SESSIONS), ++ .h.ordinal = swap32(TPM2_PCR_Extend), ++ .pcrHandle = swap32(pcr_handle), ++ .sessionHdrSize = swap32(sizeof(struct tpm2_session_header)), ++ .pcrSession.handle = swap32(TPM_RS_PW), ++ .hashCount = swap32(log_hashes->count), ++ }; ++ ++ p = cmd_rsp.extend_c.hashes; ++ for ( i = 0; i < log_hashes->count; ++i ) { ++ const struct tpm2_log_hash *hash = &log_hashes->hashes[i]; ++ ++ if ( p + sizeof(uint16_t) + hash->size > &cmd_rsp.b[sizeof(cmd_rsp)] ) { ++ printk(XENLOG_ERR "Hit TPM message size implementation limit: %ld\n", ++ sizeof(cmd_rsp)); ++ return -1; ++ } ++ ++ *(uint16_t *)p = swap16(hash->alg); ++ p += sizeof(uint16_t); ++ ++ memcpy(p, hash->data, hash->size); ++ p += hash->size; ++ } ++ ++ /* Fill in command size (size of the whole buffer). */ ++ cmd_rsp.extend_c.h.paramSize = swap32(sizeof(cmd_rsp.extend_c) + ++ (p - cmd_rsp.extend_c.hashes)), ++ ++ o_size = sizeof(cmd_rsp); ++ send_cmd(loc, cmd_rsp.b, swap32(cmd_rsp.c.paramSize), &o_size); ++ ++ return swap32(cmd_rsp.r.returnCode); ++} ++ ++static bool tpm_supports_hash(unsigned loc, const struct tpm2_log_hash *hash) ++{ ++ uint32_t rc; ++ struct tpm2_log_hashes hashes = { ++ .count = 1, ++ .hashes[0] = *hash, ++ }; ++ ++ /* This is a valid way of checking hash support, using it to not implement ++ * TPM2_GetCapability(). */ ++ rc = tpm20_pcr_extend(loc, /*pcr_handle=*/TPM_RH_NULL, &hashes); ++ ++ return rc == 0; ++} ++ ++static uint32_t tpm2_hash_extend(unsigned loc, uint8_t *buf, unsigned size, ++ unsigned pcr, ++ const struct tpm2_log_hashes *log_hashes) ++{ ++ uint32_t rc; ++ unsigned i; ++ struct tpm2_log_hashes supported_hashes = {0}; ++ ++ request_locality(loc); ++ ++ for ( i = 0; i < log_hashes->count; ++i ) { ++ const struct tpm2_log_hash *hash = &log_hashes->hashes[i]; ++ if ( !tpm_supports_hash(loc, hash) ) { ++ printk(XENLOG_WARNING "Skipped hash unsupported by TPM: %d\n", ++ hash->alg); ++ continue; ++ } ++ ++ if ( hash->alg == TPM_ALG_SHA1 ) { ++ sha1_hash(buf, size, hash->data); ++ } else if ( hash->alg == TPM_ALG_SHA256 ) { ++ sha256_hash(buf, size, hash->data); ++ } else { ++ /* This is called "OneDigest" in TXT Software Development Guide. 
*/ ++ memset(hash->data, 0, size); ++ hash->data[0] = 1; ++ } ++ ++ if ( supported_hashes.count == MAX_HASH_COUNT ) { ++ printk(XENLOG_ERR "Hit hash count implementation limit: %d\n", ++ MAX_HASH_COUNT); ++ return -1; ++ } ++ ++ supported_hashes.hashes[supported_hashes.count] = *hash; ++ ++supported_hashes.count; ++ } ++ ++ rc = tpm20_pcr_extend(loc, HR_PCR + pcr, &supported_hashes); ++ relinquish_locality(loc); ++ ++ return rc; ++} ++ ++#endif /* __EARLY_TPM__ */ ++ ++/************************** end of TPM2.0 specific ****************************/ ++ + void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size, + uint32_t type, uint8_t *log_data, unsigned log_data_size) + { +@@ -416,6 +810,34 @@ void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size, + entry_digest = sha1_digest; + + tpm12_hash_extend(loc, buf, size, pcr, entry_digest); ++ } else { ++ uint8_t sha1_digest[SHA1_DIGEST_SIZE]; ++ uint8_t sha256_digest[SHA256_DIGEST_SIZE]; ++ uint32_t rc; ++ ++ struct tpm2_log_hashes log_hashes = { ++ .count = 2, ++ .hashes = { ++ { ++ .alg = TPM_ALG_SHA1, ++ .size = SHA1_DIGEST_SIZE, ++ .data = sha1_digest, ++ }, ++ { ++ .alg = TPM_ALG_SHA256, ++ .size = SHA256_DIGEST_SIZE, ++ .data = sha256_digest, ++ }, ++ }, ++ }; ++ ++ rc = tpm2_hash_extend(loc, buf, size, pcr, &log_hashes); ++ if ( rc != 0 ) { ++#ifndef __EARLY_TPM__ ++ printk(XENLOG_ERR "Extending PCR%u failed with TPM error: 0x%08x\n", ++ pcr, rc); ++#endif ++ } + } + } + +diff --git a/xen/include/xen/sha256.h b/xen/include/xen/sha256.h +new file mode 100644 +index 0000000000..b1fdd3985a +--- /dev/null ++++ b/xen/include/xen/sha256.h +@@ -0,0 +1,11 @@ ++#ifndef __XEN_SHA256_H ++#define __XEN_SHA256_H ++ ++#include ++ ++#define SHA256_DIGEST_SIZE 32 ++ ++void sha256_hash(const uint8_t message[], size_t len, ++ uint8_t hash[static SHA256_DIGEST_SIZE]); ++ ++#endif /* !__XEN_SHA256_H */ +diff --git a/xen/lib/sha256.c b/xen/lib/sha256.c +index 27e0571a71..652932ad7c 100644 +--- a/xen/lib/sha256.c ++++ b/xen/lib/sha256.c +@@ -21,6 +21,7 @@ + * Software. 
+ */ + ++#include + #include + #include + +@@ -29,7 +30,7 @@ + #define BLOCK_LEN 64 // In bytes + #define STATE_LEN 8 // In words + +-static void sha256_compress(const uint8_t block[static 64], uint32_t state[static 8]) { ++static void sha256_compress(const uint8_t block[static BLOCK_LEN], uint32_t state[static STATE_LEN]) { + #define ROTR32(x, n) (((0U + (x)) << (32 - (n))) | ((x) >> (n))) // Assumes that x is uint32_t and 0 < n < 32 + + #define LOADSCHEDULE(i) \ +@@ -201,24 +202,26 @@ static void sha256_compress(const uint8_t block[static 64], uint32_t state[stati + + /* Full message hasher */ + +-void sha256_hash(const uint8_t message[], size_t len, uint32_t hash[static STATE_LEN]) { ++void sha256_hash(const uint8_t message[], size_t len, uint8_t hash[static SHA256_DIGEST_SIZE]) { + uint8_t block[BLOCK_LEN] = {0}; + size_t off; + size_t rem; + +- hash[0] = UINT32_C(0x6A09E667); +- hash[1] = UINT32_C(0xBB67AE85); +- hash[2] = UINT32_C(0x3C6EF372); +- hash[3] = UINT32_C(0xA54FF53A); +- hash[4] = UINT32_C(0x510E527F); +- hash[5] = UINT32_C(0x9B05688C); +- hash[6] = UINT32_C(0x1F83D9AB); +- hash[7] = UINT32_C(0x5BE0CD19); ++ uint32_t state[STATE_LEN] = { ++ UINT32_C(0x6A09E667), ++ UINT32_C(0xBB67AE85), ++ UINT32_C(0x3C6EF372), ++ UINT32_C(0xA54FF53A), ++ UINT32_C(0x510E527F), ++ UINT32_C(0x9B05688C), ++ UINT32_C(0x1F83D9AB), ++ UINT32_C(0x5BE0CD19), ++ }; + + #define LENGTH_SIZE 8 // In bytes + + for (off = 0; len - off >= BLOCK_LEN; off += BLOCK_LEN) +- sha256_compress(&message[off], hash); ++ sha256_compress(&message[off], state); + + rem = len - off; + if (rem > 0) +@@ -227,7 +230,7 @@ void sha256_hash(const uint8_t message[], size_t len, uint32_t hash[static STATE + block[rem] = 0x80; + rem++; + if (BLOCK_LEN - rem < LENGTH_SIZE) { +- sha256_compress(block, hash); ++ sha256_compress(block, state); + memset(block, 0, sizeof(block)); + } + +@@ -235,14 +238,16 @@ void sha256_hash(const uint8_t message[], size_t len, uint32_t hash[static STATE + len >>= 5; + for (int i = 1; i < LENGTH_SIZE; i++, len >>= 8) + block[BLOCK_LEN - 1 - i] = (uint8_t)(len & 0xFFU); +- sha256_compress(block, hash); +- +- hash[0] = __builtin_bswap32(hash[0]); +- hash[1] = __builtin_bswap32(hash[1]); +- hash[2] = __builtin_bswap32(hash[2]); +- hash[3] = __builtin_bswap32(hash[3]); +- hash[4] = __builtin_bswap32(hash[4]); +- hash[5] = __builtin_bswap32(hash[5]); +- hash[6] = __builtin_bswap32(hash[6]); +- hash[7] = __builtin_bswap32(hash[7]); ++ sha256_compress(block, state); ++ ++ state[0] = __builtin_bswap32(state[0]); ++ state[1] = __builtin_bswap32(state[1]); ++ state[2] = __builtin_bswap32(state[2]); ++ state[3] = __builtin_bswap32(state[3]); ++ state[4] = __builtin_bswap32(state[4]); ++ state[5] = __builtin_bswap32(state[5]); ++ state[6] = __builtin_bswap32(state[6]); ++ state[7] = __builtin_bswap32(state[7]); ++ ++ memcpy(hash, state, sizeof(state)); + } +-- +2.44.0 + diff --git a/1322-x86-tpm.c-implement-event-log-for-TPM2.0.patch b/1322-x86-tpm.c-implement-event-log-for-TPM2.0.patch new file mode 100644 index 00000000..c19a8a3a --- /dev/null +++ b/1322-x86-tpm.c-implement-event-log-for-TPM2.0.patch @@ -0,0 +1,257 @@ +From 38a0f2d812504c0bd2cb55158ad9765c26f4edf4 Mon Sep 17 00:00:00 2001 +From: Sergii Dmytruk +Date: Sat, 1 Jul 2023 00:41:35 +0300 +Subject: [PATCH 1322/1323] x86/tpm.c: implement event log for TPM2.0 + +Signed-off-by: Sergii Dmytruk +--- + xen/arch/x86/include/asm/intel_txt.h | 33 ++++++ + xen/arch/x86/tpm.c | 163 +++++++++++++++++++++++---- + 2 files changed, 172 insertions(+), 24 deletions(-) 
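+
+(Illustrative note, not part of the patch: TPM2.0 event log entries are
+variable-sized, carrying one digest per algorithm listed in the log's
+Spec ID event. The sketch below mirrors the size computation performed
+by create_log_event20() further down, using this patch's structures;
+log_entry_size() itself is a made-up helper for the example.)
+
+    /* Sketch: bytes needed for one crypto-agile event log entry. */
+    static uint32_t log_entry_size(const struct tpm2_spec_id_event *log,
+                                   uint32_t data_size)
+    {
+        uint32_t i, size = sizeof(struct tpm2_pcr_event_header);
+
+        for ( i = 0; i < log->digestCount; i++ )
+            size += sizeof(uint16_t) /* hashAlg */ +
+                    log->digestSizes[i].digestSize;
+
+        return size + sizeof(uint32_t) /* eventSize */ + data_size;
+    }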
+ +diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h +index 61d950b3ba..aab896aca3 100644 +--- a/xen/arch/x86/include/asm/intel_txt.h ++++ b/xen/arch/x86/include/asm/intel_txt.h +@@ -200,6 +200,39 @@ struct txt_sinit_mle_data { + /* Ext Data Elements */ + } __packed; + ++/* Types of extended data. */ ++#define TXT_HEAP_EXTDATA_TYPE_END 0 ++#define TXT_HEAP_EXTDATA_TYPE_BIOS_SPEC_VER 1 ++#define TXT_HEAP_EXTDATA_TYPE_ACM 2 ++#define TXT_HEAP_EXTDATA_TYPE_STM 3 ++#define TXT_HEAP_EXTDATA_TYPE_CUSTOM 4 ++#define TXT_HEAP_EXTDATA_TYPE_MADT 6 ++#define TXT_HEAP_EXTDATA_TYPE_EVENT_LOG_POINTER2_1 8 ++#define TXT_HEAP_EXTDATA_TYPE_MCFG 9 ++#define TXT_HEAP_EXTDATA_TYPE_TPR_REQ 13 ++#define TXT_HEAP_EXTDATA_TYPE_DTPR 14 ++#define TXT_HEAP_EXTDATA_TYPE_CEDT 15 ++ ++/* ++ * Self-describing data structure that is used for extensions to TXT heap ++ * tables. ++ */ ++struct txt_ext_data_element { ++ uint32_t type; /* One of TXT_HEAP_EXTDATA_TYPE_*. */ ++ uint32_t size; ++ uint8_t data[0]; /* size bytes. */ ++} __packed; ++ ++/* ++ * Extended data describing TPM 2.0 log. ++ */ ++struct heap_event_log_pointer_element2_1 { ++ uint64_t physical_address; ++ uint32_t allocated_event_container_size; ++ uint32_t first_record_offset; ++ uint32_t next_record_offset; ++} __packed; ++ + /* + * Functions to extract data from the Intel TXT Heap Memory. The layout + * of the heap is as follows: +diff --git a/xen/arch/x86/tpm.c b/xen/arch/x86/tpm.c +index 8e923985a2..6dc349d3d5 100644 +--- a/xen/arch/x86/tpm.c ++++ b/xen/arch/x86/tpm.c +@@ -539,6 +539,44 @@ struct tpm2_log_hashes { + struct tpm2_log_hash hashes[MAX_HASH_COUNT]; + }; + ++struct tpm2_pcr_event_header { ++ uint32_t pcrIndex; ++ uint32_t eventType; ++ uint32_t digestCount; ++ uint8_t digests[0]; ++ /* ++ * Each hash is represented as: ++ * struct { ++ * uint16_t hashAlg; ++ * uint8_t hash[size of hashAlg]; ++ * }; ++ */ ++ /* uint32_t eventSize; */ ++ /* uint8_t event[0]; */ ++} __packed; ++ ++struct tpm2_digest_sizes { ++ uint16_t algId; ++ uint16_t digestSize; ++} __packed; ++ ++struct tpm2_spec_id_event { ++ uint32_t pcrIndex; ++ uint32_t eventType; ++ uint8_t digest[20]; ++ uint32_t eventSize; ++ uint8_t signature[16]; ++ uint32_t platformClass; ++ uint8_t specVersionMinor; ++ uint8_t specVersionMajor; ++ uint8_t specErrata; ++ uint8_t uintnSize; ++ uint32_t digestCount; ++ struct tpm2_digest_sizes digestSizes[0]; /* variable number of members */ ++ /* uint8_t vendorInfoSize; */ ++ /* uint8_t vendorInfo[vendorInfoSize]; */ ++} __packed; ++ + #ifdef __EARLY_TPM__ + + union tpm2_cmd_rsp { +@@ -759,15 +797,12 @@ static uint32_t tpm2_hash_extend(unsigned loc, uint8_t *buf, unsigned size, + continue; + } + +- if ( hash->alg == TPM_ALG_SHA1 ) { ++ if ( hash->alg == TPM_ALG_SHA1 ) + sha1_hash(buf, size, hash->data); +- } else if ( hash->alg == TPM_ALG_SHA256 ) { ++ else if ( hash->alg == TPM_ALG_SHA256 ) + sha256_hash(buf, size, hash->data); +- } else { +- /* This is called "OneDigest" in TXT Software Development Guide. */ +- memset(hash->data, 0, size); +- hash->data[0] = 1; +- } ++ else ++ /* create_log_event20() took care of initializing the digest. 
*/; + + if ( supported_hashes.count == MAX_HASH_COUNT ) { + printk(XENLOG_ERR "Hit hash count implementation limit: %d\n", +@@ -787,6 +822,99 @@ static uint32_t tpm2_hash_extend(unsigned loc, uint8_t *buf, unsigned size, + + #endif /* __EARLY_TPM__ */ + ++static struct heap_event_log_pointer_element2_1 *find_evt_log_ext_data(void) ++{ ++ struct txt_os_sinit_data *os_sinit; ++ struct txt_ext_data_element *ext_data; ++ ++ os_sinit = txt_os_sinit_data_start(__va(read_txt_reg(TXTCR_HEAP_BASE))); ++ ext_data = (void *)((uint8_t *)os_sinit + sizeof(*os_sinit)); ++ ++ /* ++ * Find TXT_HEAP_EXTDATA_TYPE_EVENT_LOG_POINTER2_1 which is necessary to ++ * know where to put the next entry. ++ */ ++ while ( ext_data->type != TXT_HEAP_EXTDATA_TYPE_END ) { ++ if ( ext_data->type == TXT_HEAP_EXTDATA_TYPE_EVENT_LOG_POINTER2_1 ) ++ break; ++ ext_data = (void *)&ext_data->data[ext_data->size]; ++ } ++ ++ if ( ext_data->type == TXT_HEAP_EXTDATA_TYPE_END ) ++ return NULL; ++ ++ return (void *)&ext_data->data[0]; ++} ++ ++static struct tpm2_log_hashes ++create_log_event20(struct tpm2_spec_id_event *evt_log, uint32_t evt_log_size, ++ uint32_t pcr, uint32_t type, uint8_t *data, ++ unsigned data_size) ++{ ++ struct tpm2_log_hashes log_hashes = {0}; ++ ++ struct heap_event_log_pointer_element2_1 *log_ext_data; ++ struct tpm2_pcr_event_header *new_entry; ++ uint32_t entry_size; ++ unsigned i; ++ uint8_t *p; ++ ++ log_ext_data = find_evt_log_ext_data(); ++ if ( log_ext_data == NULL ) ++ return log_hashes; ++ ++ entry_size = sizeof(*new_entry); ++ for ( i = 0; i < evt_log->digestCount; ++i ) { ++ entry_size += sizeof(uint16_t); /* hash type */ ++ entry_size += evt_log->digestSizes[i].digestSize; ++ } ++ entry_size += sizeof(uint32_t); /* data size field */ ++ entry_size += data_size; ++ ++ /* ++ * Check if there is enough space left for new entry. ++ * Note: it is possible to introduce a gap in event log if entry with big ++ * data_size is followed by another entry with smaller data. Maybe we should ++ * cap the event log size in such case? ++ */ ++ if ( log_ext_data->next_record_offset + entry_size > evt_log_size ) ++ return log_hashes; ++ ++ new_entry = (void *)((uint8_t *)evt_log + log_ext_data->next_record_offset); ++ log_ext_data->next_record_offset += entry_size; ++ ++ new_entry->pcrIndex = pcr; ++ new_entry->eventType = type; ++ new_entry->digestCount = evt_log->digestCount; ++ ++ p = &new_entry->digests[0]; ++ for ( i = 0; i < evt_log->digestCount; ++i ) { ++ uint16_t alg = evt_log->digestSizes[i].algId; ++ uint16_t size = evt_log->digestSizes[i].digestSize; ++ ++ *(uint16_t *)p = alg; ++ p += sizeof(uint16_t); ++ ++ log_hashes.hashes[i].alg = alg; ++ log_hashes.hashes[i].size = size; ++ log_hashes.hashes[i].data = p; ++ p += size; ++ ++ /* This is called "OneDigest" in TXT Software Development Guide. 
*/ ++ memset(log_hashes.hashes[i].data, 0, size); ++ log_hashes.hashes[i].data[0] = 1; ++ } ++ log_hashes.count = evt_log->digestCount; ++ ++ *(uint32_t *)p = data_size; ++ p += sizeof(uint32_t); ++ ++ if ( data && data_size > 0 ) ++ memcpy(p, data, data_size); ++ ++ return log_hashes; ++} ++ + /************************** end of TPM2.0 specific ****************************/ + + void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size, +@@ -811,25 +939,12 @@ void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size, + + tpm12_hash_extend(loc, buf, size, pcr, entry_digest); + } else { +- uint8_t sha1_digest[SHA1_DIGEST_SIZE]; +- uint8_t sha256_digest[SHA256_DIGEST_SIZE]; + uint32_t rc; + +- struct tpm2_log_hashes log_hashes = { +- .count = 2, +- .hashes = { +- { +- .alg = TPM_ALG_SHA1, +- .size = SHA1_DIGEST_SIZE, +- .data = sha1_digest, +- }, +- { +- .alg = TPM_ALG_SHA256, +- .size = SHA256_DIGEST_SIZE, +- .data = sha256_digest, +- }, +- }, +- }; ++ struct tpm2_spec_id_event *evt_log = evt_log_addr; ++ struct tpm2_log_hashes log_hashes = ++ create_log_event20(evt_log, evt_log_size, pcr, type, log_data, ++ log_data_size); + + rc = tpm2_hash_extend(loc, buf, size, pcr, &log_hashes); + if ( rc != 0 ) { +-- +2.44.0 + diff --git a/1323-arch-x86-process-DRTM-policy.patch b/1323-arch-x86-process-DRTM-policy.patch new file mode 100644 index 00000000..b0076a0e --- /dev/null +++ b/1323-arch-x86-process-DRTM-policy.patch @@ -0,0 +1,272 @@ +From d6111adaacf523b2c74b519373f46a23ea44ab90 Mon Sep 17 00:00:00 2001 +From: Sergii Dmytruk +Date: Sun, 29 Oct 2023 00:42:04 +0300 +Subject: [PATCH 1323/1323] arch/x86: process DRTM policy + +Signed-off-by: Sergii Dmytruk +--- + xen/arch/x86/include/asm/intel_txt.h | 12 ++ + xen/arch/x86/setup.c | 9 ++ + xen/arch/x86/tpm.c | 196 +++++++++++++++++++++++++++ + 3 files changed, 217 insertions(+) + +diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h +index aab896aca3..b939d00ba7 100644 +--- a/xen/arch/x86/include/asm/intel_txt.h ++++ b/xen/arch/x86/include/asm/intel_txt.h +@@ -1,3 +1,5 @@ ++#include ++ + /* + * TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE) + */ +@@ -380,4 +382,14 @@ extern void txt_restore_mtrrs(bool e820_verbose); + void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size, + uint32_t type, uint8_t *log_data, unsigned log_data_size); + ++/* Measures essential parts of SLR table before making use of them. */ ++void tpm_measure_slrt(void); ++ ++/* Takes measurements of DRTM policy entries except for MBI and SLRT which ++ * should have been measured by the time this is called. Also performs sanity ++ * checks of the policy and panics on failure. In particular, the function ++ * verifies that DRTM is consistent with MultibootInfo (MBI) (the MBI address ++ * is assumed to be virtual). */ ++void tpm_process_drtm_policy(const multiboot_info_t *mbi); ++ + #endif /* __ASSEMBLY__ */ +diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c +index 6bfbe2872d..808a15c41d 100644 +--- a/xen/arch/x86/setup.c ++++ b/xen/arch/x86/setup.c +@@ -1172,6 +1172,9 @@ void __init noreturn __start_xen(unsigned long mbi_p) + { + /* Prepare for TXT-related code. */ + map_txt_mem_regions(); ++ /* Measure SLRT here because it gets used by init_e820(), the rest is ++ * measured below by tpm_process_drtm_policy(). */ ++ tpm_measure_slrt(); + /* Reserve TXT heap and SINIT. 
*/ + protect_txt_mem_regions(); + } +@@ -1194,6 +1197,12 @@ void __init noreturn __start_xen(unsigned long mbi_p) + /* Create a temporary copy of the E820 map. */ + memcpy(&boot_e820, &e820, sizeof(e820)); + ++ /* Process all yet unmeasured DRTM entries after E820 initialization to not ++ * do this while memory is uncached (too slow). This must also happen before ++ * fields of Multiboot modules change their format below. */ ++ if ( slaunch_active ) ++ tpm_process_drtm_policy(mbi); ++ + /* Early kexec reservation (explicit static start address). */ + nr_pages = 0; + for ( i = 0; i < e820.nr_map; i++ ) +diff --git a/xen/arch/x86/tpm.c b/xen/arch/x86/tpm.c +index 6dc349d3d5..1d07ca8d00 100644 +--- a/xen/arch/x86/tpm.c ++++ b/xen/arch/x86/tpm.c +@@ -963,4 +963,200 @@ void __stdcall tpm_extend_mbi(uint32_t *mbi) + tpm_hash_extend(DRTM_LOC, DRTM_DATA_PCR, (uint8_t *)mbi, *mbi, + TXT_EVTYPE_SLAUNCH, NULL, 0); + } ++#else ++static struct slr_table *slr_get_table(void) ++{ ++ struct txt_os_mle_data *os_mle; ++ struct slr_table *slrt; ++ ++ os_mle = txt_os_mle_data_start(__va(read_txt_reg(TXTCR_HEAP_BASE))); ++ ++ map_l2(os_mle->slrt, PAGE_SIZE); ++ slrt = __va(os_mle->slrt); ++ ++ if ( slrt->magic != SLR_TABLE_MAGIC ) ++ panic("SLRT has invalid magic value: %#08x!\n", slrt->magic); ++ /* XXX: are newer revisions allowed? */ ++ if ( slrt->revision != SLR_TABLE_REVISION ) ++ panic("SLRT is of unsupported revision: %#04x!\n", slrt->revision); ++ if ( slrt->architecture != SLR_INTEL_TXT ) ++ panic("SLRT is for unexpected architecture: %#04x!\n", ++ slrt->architecture); ++ if ( slrt->size > slrt->max_size ) ++ panic("SLRT is larger than its max size: %#08x > %#08x!\n", ++ slrt->size, slrt->max_size); ++ ++ if ( slrt->size > PAGE_SIZE ) ++ map_l2(os_mle->slrt, slrt->size); ++ ++ return slrt; ++} ++ ++void tpm_measure_slrt(void) ++{ ++ struct slr_table *slrt = slr_get_table(); ++ ++ if ( slrt->revision == 1 ) { ++ /* In revision one of the SLRT, only Intel info table is measured. */ ++ struct slr_entry_intel_info *intel_info = ++ (void *)slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO); ++ if ( intel_info == NULL ) ++ panic("SLRT is missing Intel-specific information!\n"); ++ ++ tpm_hash_extend(DRTM_LOC, DRTM_DATA_PCR, (uint8_t *)intel_info, ++ sizeof(*intel_info), TXT_EVTYPE_SLAUNCH, NULL, 0); ++ } else { ++ /* ++ * slr_get_table() checks that the revision is valid, so we must not ++ * get here unless the code is wrong. ++ */ ++ panic("Unhandled SLRT revision: %d!\n", slrt->revision); ++ } ++} ++ ++static struct slr_entry_policy *slr_get_policy(struct slr_table *slrt) ++{ ++ struct slr_entry_policy *policy; ++ ++ policy = (struct slr_entry_policy *) ++ slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_DRTM_POLICY); ++ if (policy == NULL) ++ panic("SLRT is missing DRTM policy!\n"); ++ ++ /* XXX: are newer revisions allowed? */ ++ if ( policy->revision != SLR_POLICY_REVISION ) ++ panic("DRTM policy in SLRT is of unsupported revision: %#04x!\n", ++ slrt->revision); ++ ++ return policy; ++} ++ ++static void check_drtm_policy(struct slr_table *slrt, ++ struct slr_entry_policy *policy, ++ struct slr_policy_entry *policy_entry, ++ const multiboot_info_t *mbi) ++{ ++ uint32_t i; ++ module_t *mods; ++ uint32_t num_mod_entries; ++ ++ if ( policy->nr_entries < 2 ) ++ panic("DRTM policy in SLRT contains less than 2 entries (%d)!\n", ++ policy->nr_entries); ++ ++ /* MBI policy entry must be the first one, so that measuring order matches ++ * policy order. 
++    if ( policy_entry[0].entity_type != SLR_ET_MULTIBOOT2_INFO )
++        panic("First entry of DRTM policy in SLRT is not MBI: %#04x!\n",
++              policy_entry[0].entity_type);
++    if ( policy_entry[0].pcr != DRTM_DATA_PCR )
++        panic("MBI was measured into PCR %d instead of PCR %d!\n",
++              DRTM_DATA_PCR, policy_entry[0].pcr);
++
++    /* SLRT policy entry must be the second one. */
++    if ( policy_entry[1].entity_type != SLR_ET_SLRT )
++        panic("Second entry of DRTM policy in SLRT is not SLRT: %#04x!\n",
++              policy_entry[1].entity_type);
++    if ( policy_entry[1].pcr != DRTM_DATA_PCR )
++        panic("SLRT was measured into PCR %d instead of PCR %d!\n",
++              DRTM_DATA_PCR, policy_entry[1].pcr);
++    if ( policy_entry[1].entity != (uint64_t)__pa(slrt) )
++        panic("SLRT address (%#08lx) differs from its DRTM entry (%#08lx)\n",
++              __pa(slrt), policy_entry[1].entity);
++
++    mods = __va(mbi->mods_addr);
++    for ( i = 0; i < mbi->mods_count; i++ ) {
++        uint16_t j;
++        uint64_t start = mods[i].mod_start;
++        uint64_t size = mods[i].mod_end - mods[i].mod_start;
++
++        for ( j = 0; j < policy->nr_entries; j++ ) {
++            if ( policy_entry[j].entity_type != SLR_ET_MULTIBOOT2_MODULE )
++                continue;
++
++            if ( policy_entry[j].entity == start &&
++                 policy_entry[j].size == size )
++                break;
++        }
++
++        if ( j >= policy->nr_entries ) {
++            panic("Couldn't find Multiboot module \"%s\" (at %d) in DRTM policy of Secure Launch\n",
++                  (const char *)__va(mods[i].string), i);
++        }
++    }
++
++    num_mod_entries = 0;
++    for ( i = 0; i < policy->nr_entries; i++ ) {
++        if ( policy_entry[i].entity_type == SLR_ET_MULTIBOOT2_MODULE )
++            num_mod_entries++;
++    }
++
++    if ( mbi->mods_count != num_mod_entries ) {
++        panic("Unexpected number of Multiboot modules: %d instead of %d\n",
++              (int)mbi->mods_count, (int)num_mod_entries);
++    }
++}
++
++void tpm_process_drtm_policy(const multiboot_info_t *mbi)
++{
++    struct slr_table *slrt;
++    struct slr_entry_policy *policy;
++    struct slr_policy_entry *policy_entry;
++    uint16_t i;
++
++    slrt = slr_get_table();
++
++    policy = slr_get_policy(slrt);
++    policy_entry = (struct slr_policy_entry *)
++        ((uint8_t *)policy + sizeof(*policy));
++
++    check_drtm_policy(slrt, policy, policy_entry, mbi);
++    /* MBI was measured in tpm_extend_mbi(). */
++    policy_entry[0].flags |= SLR_POLICY_FLAG_MEASURED;
++    /* SLRT was measured in tpm_measure_slrt(). */
++    policy_entry[1].flags |= SLR_POLICY_FLAG_MEASURED;
++
++    for ( i = 2; i < policy->nr_entries; i++ ) {
++        uint64_t start = policy_entry[i].entity;
++        uint64_t size = policy_entry[i].size;
++
++        /* No already-measured entries are expected here. */
++        if ( policy_entry[i].flags & SLR_POLICY_FLAG_MEASURED )
++            panic("DRTM entry at %d was measured out of order!\n", i);
++
++        switch ( policy_entry[i].entity_type ) {
++        case SLR_ET_MULTIBOOT2_INFO:
++            panic("Duplicated MBI entry in DRTM policy of Secure Launch at %d\n", i);
++        case SLR_ET_SLRT:
++            panic("Duplicated SLRT entry in DRTM policy of Secure Launch at %d\n", i);
++
++        case SLR_ET_UNSPECIFIED:
++        case SLR_ET_BOOT_PARAMS:
++        case SLR_ET_SETUP_DATA:
++        case SLR_ET_CMDLINE:
++        case SLR_ET_UEFI_MEMMAP:
++        case SLR_ET_RAMDISK:
++        case SLR_ET_MULTIBOOT2_MODULE:
++        case SLR_ET_TXT_OS2MLE:
++            /* Measure this entry below. */
++            break;
++
++        case SLR_ET_UNUSED:
++            /* Skip this entry. */
++            continue;
++        }
++
++        if ( policy_entry[i].flags & SLR_POLICY_IMPLICIT_SIZE )
++            panic("Unexpected implicitly-sized DRTM policy entry of Secure Launch at %d\n",
++                  i);
++
++        map_l2(start, size);
++        tpm_hash_extend(DRTM_LOC, policy_entry[i].pcr, __va(start), size,
++                        TXT_EVTYPE_SLAUNCH, (uint8_t *)policy_entry[i].evt_info,
++                        strnlen(policy_entry[i].evt_info,
++                                TPM_EVENT_INFO_LENGTH));
++
++        policy_entry[i].flags |= SLR_POLICY_FLAG_MEASURED;
++    }
++}
+ #endif
+-- 
+2.44.0
+
diff --git a/xen.spec.in b/xen.spec.in
index 080828b1..5e2ff4cd 100644
--- a/xen.spec.in
+++ b/xen.spec.in
@@ -202,6 +202,32 @@ Patch1200: 1200-hypercall-XENMEM_get_mfn_from_pfn.patch
 Patch1201: 1201-patch-gvt-hvmloader.patch.patch
 Patch1202: 1202-libxl-Add-partially-Intel-GVT-g-support-xengt-device.patch
 
+# Intel TXT support patches
+Patch1300: 1300-x86-include-asm-intel_txt.h-constants-and-accessors-.patch
+Patch1301: 1301-x86-boot-add-MLE-header-and-new-entry-point.patch
+Patch1302: 1302-x86-boot-txt_early-add-early-TXT-tests-and-restore-M.patch
+Patch1303: 1303-include-xen-slr_table.h-Secure-Launch-Resource-Table.patch
+Patch1304: 1304-xen-arch-x86-reserve-TXT-memory.patch
+Patch1305: 1305-x86-intel_txt.c-restore-boot-MTRRs.patch
+Patch1306: 1306-x86-sha1.c-add-file.patch
+Patch1307: 1307-x86-tpm.c-code-for-early-hashing-and-extending-PCRs-.patch
+Patch1308: 1308-x86-boot-choose-AP-stack-based-on-APIC-ID.patch
+Patch1309: 1309-x86-smpboot.c-TXT-AP-bringup.patch
+Patch1310: 1310-arch-x86-don-t-access-x86_cpu_to_apicid-directly-use.patch
+Patch1311: 1311-arch-x86-smp-drop-x86_cpu_to_apicid-use-cpu_data-cpu.patch
+Patch1312: 1312-arch-x86-smp-move-stack_base-to-cpu_data.patch
+Patch1313: 1313-arch-x86-smp-call-x2apic_ap_setup-earlier.patch
+Patch1314: 1314-arch-x86-shutdown-protect-against-recurrent-machine_.patch
+Patch1315: 1315-arch-x86-smp-drop-booting_cpu-variable.patch
+Patch1316: 1316-arch-x86-smp-make-cpu_state-per-CPU.patch
+Patch1317: 1317-arch-x86-smp-remove-MONITOR-MWAIT-loop-for-TXT-AP-br.patch
+Patch1318: 1318-arch-x86-smp-don-t-send-INIT-SIPI-SIPI-if-AP-is-alre.patch
+Patch1319: 1319-arch-x86-smp-start-APs-in-parallel-during-boot.patch
+Patch1320: 1320-x86-sha256.c-add-file.patch
+Patch1321: 1321-x86-tpm.c-support-extending-PCRs-of-TPM2.0.patch
+Patch1322: 1322-x86-tpm.c-implement-event-log-for-TPM2.0.patch
+Patch1323: 1323-arch-x86-process-DRTM-policy.patch
+
 %if %build_qemutrad
 BuildRequires: libidn-devel zlib-devel SDL-devel curl-devel
 BuildRequires: libX11-devel gtk2-devel libaio-devel